drm/xe/vm: Remove VM_BIND_OP macro
This macro was necessary when bind operations were shifted, but this is no
longer the case, so remove it to simplify the code.

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Francois Dugast authored and Rodrigo Vivi committed Dec 21, 2023
1 parent ea0640f commit 78ddc87
Showing 1 changed file with 19 additions and 23 deletions.
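For readers unfamiliar with the old encoding, here is a minimal, self-contained C sketch of the pattern the macro served. The constant values, FLAG_ASYNC, and struct bind_op are assumed stand-ins for illustration, not the real xe uAPI definitions: when flag bits shared the same 32-bit value as the operation, VM_BIND_OP() had to mask them off before any comparison; once the operation lives in its own field, the value can be compared directly, which is what makes the macro removable.

/*
 * Illustrative sketch only -- the constants and 'struct bind_op' below are
 * assumed stand-ins, not the actual xe uAPI layout.
 */
#include <stdint.h>
#include <stdio.h>

#define XE_VM_BIND_OP_UNMAP   0x1u            /* illustrative value */
#define FLAG_ASYNC            (0x1u << 16)    /* hypothetical flag in the high bits */

/* Old pattern: flags shared the same u32 as the op, so comparisons needed a mask. */
#define VM_BIND_OP(op)        ((op) & 0xffff)

/* New pattern: op and flags live in separate fields, no masking required. */
struct bind_op {
        uint32_t op;
        uint32_t flags;
};

int main(void)
{
        uint32_t packed = XE_VM_BIND_OP_UNMAP | FLAG_ASYNC;
        struct bind_op split = { .op = XE_VM_BIND_OP_UNMAP, .flags = 0x1u };

        /* Old encoding: a raw comparison fails, so the masking macro was mandatory. */
        printf("packed raw:    %d\n", packed == XE_VM_BIND_OP_UNMAP);             /* 0 */
        printf("packed masked: %d\n", VM_BIND_OP(packed) == XE_VM_BIND_OP_UNMAP); /* 1 */

        /* New encoding: the field holds only the op, compare it directly. */
        printf("split raw:     %d\n", split.op == XE_VM_BIND_OP_UNMAP);           /* 1 */
        return 0;
}

With the operation no longer carrying extra bits, the masking in the switch statements and ioctl argument checks below becomes dead weight, and the patch drops it.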
42 changes: 19 additions & 23 deletions drivers/gpu/drm/xe/xe_vm.c
@@ -2273,8 +2273,6 @@ static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma,
         }
 }
 
-#define VM_BIND_OP(op) (op & 0xffff)
-
 static void vm_set_async_error(struct xe_vm *vm, int err)
 {
         lockdep_assert_held(&vm->lock);
@@ -2290,7 +2288,7 @@ static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
 
         lockdep_assert_held(&vm->lock);
 
-        switch (VM_BIND_OP(op)) {
+        switch (op) {
         case XE_VM_BIND_OP_MAP:
         case XE_VM_BIND_OP_MAP_USERPTR:
                 vma = xe_vm_find_overlapping_vma(vm, addr, range);
@@ -2400,10 +2398,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 
         vm_dbg(&vm->xe->drm,
                "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx",
-               VM_BIND_OP(operation), (ULL)addr, (ULL)range,
+               operation, (ULL)addr, (ULL)range,
                (ULL)bo_offset_or_userptr);
 
-        switch (VM_BIND_OP(operation)) {
+        switch (operation) {
         case XE_VM_BIND_OP_MAP:
         case XE_VM_BIND_OP_MAP_USERPTR:
                 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range,
@@ -3246,60 +3244,58 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
                 *async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
         } else if (XE_IOCTL_DBG(xe, !*async) ||
                    XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
-                   XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
-                                XE_VM_BIND_OP_RESTART)) {
+                   XE_IOCTL_DBG(xe, op == XE_VM_BIND_OP_RESTART)) {
                 err = -EINVAL;
                 goto free_bind_ops;
         }
 
         if (XE_IOCTL_DBG(xe, !*async &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL)) {
+                         op == XE_VM_BIND_OP_UNMAP_ALL)) {
                 err = -EINVAL;
                 goto free_bind_ops;
         }
 
         if (XE_IOCTL_DBG(xe, !*async &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH)) {
+                         op == XE_VM_BIND_OP_PREFETCH)) {
                 err = -EINVAL;
                 goto free_bind_ops;
         }
 
-        if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
-                         XE_VM_BIND_OP_PREFETCH) ||
+        if (XE_IOCTL_DBG(xe, op > XE_VM_BIND_OP_PREFETCH) ||
             XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
             XE_IOCTL_DBG(xe, obj && is_null) ||
             XE_IOCTL_DBG(xe, obj_offset && is_null) ||
-            XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
+            XE_IOCTL_DBG(xe, op != XE_VM_BIND_OP_MAP &&
                          is_null) ||
             XE_IOCTL_DBG(xe, !obj &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_MAP &&
+                         op == XE_VM_BIND_OP_MAP &&
                          !is_null) ||
             XE_IOCTL_DBG(xe, !obj &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+                         op == XE_VM_BIND_OP_UNMAP_ALL) ||
             XE_IOCTL_DBG(xe, addr &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+                         op == XE_VM_BIND_OP_UNMAP_ALL) ||
             XE_IOCTL_DBG(xe, range &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP_ALL) ||
+                         op == XE_VM_BIND_OP_UNMAP_ALL) ||
             XE_IOCTL_DBG(xe, obj &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_MAP_USERPTR) ||
+                         op == XE_VM_BIND_OP_MAP_USERPTR) ||
             XE_IOCTL_DBG(xe, obj &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_PREFETCH) ||
+                         op == XE_VM_BIND_OP_PREFETCH) ||
             XE_IOCTL_DBG(xe, region &&
-                         VM_BIND_OP(op) != XE_VM_BIND_OP_PREFETCH) ||
+                         op != XE_VM_BIND_OP_PREFETCH) ||
             XE_IOCTL_DBG(xe, !(BIT(region) &
                                xe->info.mem_region_mask)) ||
             XE_IOCTL_DBG(xe, obj &&
-                         VM_BIND_OP(op) == XE_VM_BIND_OP_UNMAP)) {
+                         op == XE_VM_BIND_OP_UNMAP)) {
                 err = -EINVAL;
                 goto free_bind_ops;
         }
 
         if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) ||
             XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) ||
             XE_IOCTL_DBG(xe, range & ~PAGE_MASK) ||
-            XE_IOCTL_DBG(xe, !range && VM_BIND_OP(op) !=
+            XE_IOCTL_DBG(xe, !range && op !=
                          XE_VM_BIND_OP_RESTART &&
-                         VM_BIND_OP(op) != XE_VM_BIND_OP_UNMAP_ALL)) {
+                         op != XE_VM_BIND_OP_UNMAP_ALL)) {
                 err = -EINVAL;
                 goto free_bind_ops;
         }
@@ -3363,7 +3359,7 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                 goto release_vm_lock;
         }
 
-        if (VM_BIND_OP(bind_ops[0].op) == XE_VM_BIND_OP_RESTART) {
+        if (bind_ops[0].op == XE_VM_BIND_OP_RESTART) {
                 if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
                         err = -EOPNOTSUPP;
                 if (XE_IOCTL_DBG(xe, !err && args->num_syncs))