drm/xe/uapi: Separate VM_BIND's operation and flag
Use separate members in drm_xe_vm_bind_op for the operation and for the
flags, as is done in other structures.

The type is kept as u32 to leave enough room for future operations and
flags.

v2: Remove the XE_VM_BIND_* flags shift (Rodrigo Vivi)

Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/303
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Francois Dugast authored and Rodrigo Vivi committed Dec 21, 2023
1 parent 7793d00 commit ea0640f
Showing 2 changed files with 24 additions and 19 deletions.
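In short, the bind operation and its modifier flags no longer share a single __u32. A condensed before/after sketch of the layout (the struct names here are hypothetical, used only for contrast; the real struct is drm_xe_vm_bind_op in include/uapi/drm/xe_drm.h, shown in the diff below):

	/* Before: one member carried both, with flags packed from bit 16 up. */
	struct bind_op_before {			/* hypothetical name */
		__u32 op;	/* XE_VM_BIND_OP_* in the low bits,
				 * XE_VM_BIND_FLAG_* at bit 16 and above */
	};

	/* After: separate members, each with the full 32 bits to grow into,
	 * and the flags renumbered starting from bit 0. */
	struct bind_op_after {			/* hypothetical name */
		__u32 op;	/* XE_VM_BIND_OP_MAP, XE_VM_BIND_OP_UNMAP, ... */
		__u32 flags;	/* XE_VM_BIND_FLAG_READONLY, _ASYNC, ... */
	};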
drivers/gpu/drm/xe/xe_vm.c (16 additions, 13 deletions)
@@ -2282,11 +2282,11 @@ static void vm_set_async_error(struct xe_vm *vm, int err)
 }
 
 static int vm_bind_ioctl_lookup_vma(struct xe_vm *vm, struct xe_bo *bo,
-				    u64 addr, u64 range, u32 op)
+				    u64 addr, u64 range, u32 op, u32 flags)
 {
 	struct xe_device *xe = vm->xe;
 	struct xe_vma *vma;
-	bool async = !!(op & XE_VM_BIND_FLAG_ASYNC);
+	bool async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
 
 	lockdep_assert_held(&vm->lock);
 
@@ -2387,7 +2387,7 @@ static void print_op(struct xe_device *xe, struct drm_gpuva_op *op)
 static struct drm_gpuva_ops *
 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 			 u64 bo_offset_or_userptr, u64 addr, u64 range,
-			 u32 operation, u8 tile_mask, u32 region)
+			 u32 operation, u32 flags, u8 tile_mask, u32 region)
 {
 	struct drm_gem_object *obj = bo ? &bo->ttm.base : NULL;
 	struct drm_gpuva_ops *ops;
@@ -2416,10 +2416,10 @@ vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo,
 
 			op->tile_mask = tile_mask;
 			op->map.immediate =
-				operation & XE_VM_BIND_FLAG_IMMEDIATE;
+				flags & XE_VM_BIND_FLAG_IMMEDIATE;
 			op->map.read_only =
-				operation & XE_VM_BIND_FLAG_READONLY;
-			op->map.is_null = operation & XE_VM_BIND_FLAG_NULL;
+				flags & XE_VM_BIND_FLAG_READONLY;
+			op->map.is_null = flags & XE_VM_BIND_FLAG_NULL;
 		}
 		break;
 	case XE_VM_BIND_OP_UNMAP:
@@ -3236,15 +3236,16 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 		u64 range = (*bind_ops)[i].range;
 		u64 addr = (*bind_ops)[i].addr;
 		u32 op = (*bind_ops)[i].op;
+		u32 flags = (*bind_ops)[i].flags;
 		u32 obj = (*bind_ops)[i].obj;
 		u64 obj_offset = (*bind_ops)[i].obj_offset;
 		u32 region = (*bind_ops)[i].region;
-		bool is_null = op & XE_VM_BIND_FLAG_NULL;
+		bool is_null = flags & XE_VM_BIND_FLAG_NULL;
 
 		if (i == 0) {
-			*async = !!(op & XE_VM_BIND_FLAG_ASYNC);
+			*async = !!(flags & XE_VM_BIND_FLAG_ASYNC);
 		} else if (XE_IOCTL_DBG(xe, !*async) ||
-			   XE_IOCTL_DBG(xe, !(op & XE_VM_BIND_FLAG_ASYNC)) ||
+			   XE_IOCTL_DBG(xe, !(flags & XE_VM_BIND_FLAG_ASYNC)) ||
			   XE_IOCTL_DBG(xe, VM_BIND_OP(op) ==
					XE_VM_BIND_OP_RESTART)) {
			err = -EINVAL;
@@ -3265,7 +3266,7 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
 
 		if (XE_IOCTL_DBG(xe, VM_BIND_OP(op) >
 				 XE_VM_BIND_OP_PREFETCH) ||
-		    XE_IOCTL_DBG(xe, op & ~SUPPORTED_FLAGS) ||
+		    XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) ||
 		    XE_IOCTL_DBG(xe, obj && is_null) ||
 		    XE_IOCTL_DBG(xe, obj_offset && is_null) ||
 		    XE_IOCTL_DBG(xe, VM_BIND_OP(op) != XE_VM_BIND_OP_MAP &&
@@ -3480,8 +3481,9 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		u64 range = bind_ops[i].range;
 		u64 addr = bind_ops[i].addr;
 		u32 op = bind_ops[i].op;
+		u32 flags = bind_ops[i].flags;
 
-		err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op);
+		err = vm_bind_ioctl_lookup_vma(vm, bos[i], addr, range, op, flags);
 		if (err)
 			goto free_syncs;
 	}
@@ -3490,13 +3492,14 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		u64 range = bind_ops[i].range;
 		u64 addr = bind_ops[i].addr;
 		u32 op = bind_ops[i].op;
+		u32 flags = bind_ops[i].flags;
 		u64 obj_offset = bind_ops[i].obj_offset;
 		u8 tile_mask = bind_ops[i].tile_mask;
 		u32 region = bind_ops[i].region;
 
 		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
-						  addr, range, op, tile_mask,
-						  region);
+						  addr, range, op, flags,
+						  tile_mask, region);
 		if (IS_ERR(ops[i])) {
 			err = PTR_ERR(ops[i]);
 			ops[i] = NULL;
include/uapi/drm/xe_drm.h (8 additions, 6 deletions)
@@ -660,8 +660,10 @@ struct drm_xe_vm_bind_op {
 #define XE_VM_BIND_OP_RESTART		0x3
 #define XE_VM_BIND_OP_UNMAP_ALL	0x4
 #define XE_VM_BIND_OP_PREFETCH		0x5
+	/** @op: Bind operation to perform */
+	__u32 op;
 
-#define XE_VM_BIND_FLAG_READONLY	(0x1 << 16)
+#define XE_VM_BIND_FLAG_READONLY	(0x1 << 0)
 	/*
 	 * A bind ops completions are always async, hence the support for out
 	 * sync. This flag indicates the allocation of the memory for new page
@@ -686,22 +688,22 @@ struct drm_xe_vm_bind_op {
 	 * configured in the VM and must be set if the VM is configured with
 	 * DRM_XE_VM_CREATE_ASYNC_BIND_OPS and not in an error state.
 	 */
-#define XE_VM_BIND_FLAG_ASYNC		(0x1 << 17)
+#define XE_VM_BIND_FLAG_ASYNC		(0x1 << 1)
 	/*
 	 * Valid on a faulting VM only, do the MAP operation immediately rather
 	 * than deferring the MAP to the page fault handler.
 	 */
-#define XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 18)
+#define XE_VM_BIND_FLAG_IMMEDIATE	(0x1 << 2)
 	/*
 	 * When the NULL flag is set, the page tables are setup with a special
 	 * bit which indicates writes are dropped and all reads return zero. In
 	 * the future, the NULL flags will only be valid for XE_VM_BIND_OP_MAP
 	 * operations, the BO handle MBZ, and the BO offset MBZ. This flag is
 	 * intended to implement VK sparse bindings.
 	 */
-#define XE_VM_BIND_FLAG_NULL		(0x1 << 19)
-	/** @op: Operation to perform (lower 16 bits) and flags (upper 16 bits) */
-	__u32 op;
+#define XE_VM_BIND_FLAG_NULL		(0x1 << 3)
+	/** @flags: Bind flags */
+	__u32 flags;
 
 	/** @mem_region: Memory region to prefetch VMA to, instance not a mask */
 	__u32 region;
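For a sense of how a caller fills the structure after this change, a minimal userspace sketch (bo_handle and the address/range values are made up for illustration, and the surrounding ioctl plumbing is omitted):

	struct drm_xe_vm_bind_op bind = {
		.obj = bo_handle,			/* assumed GEM handle */
		.obj_offset = 0,
		.addr = 0x1a0000,			/* GPU virtual address */
		.range = 0x10000,			/* size of the mapping */
		.op = XE_VM_BIND_OP_MAP,		/* operation only, no flags */
		.flags = XE_VM_BIND_FLAG_ASYNC |	/* flags now live here, */
			 XE_VM_BIND_FLAG_READONLY,	/* numbered from bit 0 */
		.tile_mask = 0,				/* 0: driver default tile
							 * selection (assumption) */
		.region = 0,				/* only used by PREFETCH */
	};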
