accel/ivpu: Refactor memory ranges logic
Add a new DMA range and change the naming convention for the virtual
address memory ranges managed by the KMD.

The newly available ranges are named as follows:
 * global range - global context accessible by FW
 * aliased range - user context accessible by FW
 * dma range - user context accessible by DMA
 * shave range - user context accessible by shaves
 * global shave range - global context accessible by shave nn

Signed-off-by: Karol Wachowski <karol.wachowski@linux.intel.com>
Reviewed-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Signed-off-by: Stanislaw Gruszka <stanislaw.gruszka@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20230731161258.2987564-6-stanislaw.gruszka@linux.intel.com
Karol Wachowski authored and Stanislaw Gruszka committed Aug 9, 2023
1 parent aa5f04d commit 162f17b
Showing 7 changed files with 32 additions and 31 deletions.
4 changes: 2 additions & 2 deletions drivers/accel/ivpu/ivpu_drv.c
@@ -122,7 +122,7 @@ static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param
args->value = 0;
break;
case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
- args->value = 0;
+ args->value = 1;
break;
default:
return -EINVAL;
@@ -160,7 +160,7 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
args->value = ivpu_get_context_count(vdev);
break;
case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
- args->value = vdev->hw->ranges.user_low.start;
+ args->value = vdev->hw->ranges.user.start;
break;
case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
args->value = file_priv->priority;
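With the capability above now reporting 1, userspace can probe for the new DMA range before relying on it. A minimal sketch (not part of this commit), assuming the uapi names from include/uapi/drm/ivpu_accel.h (DRM_IOCTL_IVPU_GET_PARAM, DRM_IVPU_PARAM_CAPABILITIES, DRM_IVPU_CAP_DMA_MEMORY_RANGE) and a typical /dev/accel/accel0 node:

/* Hedged example: query the DMA memory range capability via GET_PARAM. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/ivpu_accel.h>

int main(void)
{
	struct drm_ivpu_param param;
	int fd = open("/dev/accel/accel0", O_RDWR);  /* assumed device node */

	if (fd < 0)
		return 1;

	memset(&param, 0, sizeof(param));
	param.param = DRM_IVPU_PARAM_CAPABILITIES;
	param.index = DRM_IVPU_CAP_DMA_MEMORY_RANGE;

	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &param) == 0)
		printf("DMA memory range supported: %llu\n",
		       (unsigned long long)param.value);

	close(fd);
	return 0;
}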
18 changes: 8 additions & 10 deletions drivers/accel/ivpu/ivpu_fw.c
@@ -204,7 +204,7 @@ static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
return -EINVAL;
}

- ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
+ ivpu_hw_init_range(&vdev->hw->ranges.global, start, size);
return 0;
}

@@ -245,7 +245,7 @@ static int ivpu_fw_mem_init(struct ivpu_device *vdev)
}

if (fw->shave_nn_size) {
- fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
+ fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.shave.start,
fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
if (!fw->mem_shave_nn) {
ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
@@ -443,28 +443,26 @@ void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params
* Uncached region of VPU address space, covers IPC buffers, job queues
* and log buffers, programmable to L2$ Uncached by VPU MTRR
*/
- boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
- boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
-                                   vdev->hw->ranges.global_low.start;
+ boot_params->shared_region_base = vdev->hw->ranges.global.start;
+ boot_params->shared_region_size = vdev->hw->ranges.global.end -
+                                   vdev->hw->ranges.global.start;

boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;

boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;

- boot_params->global_aliased_pio_base =
-         vdev->hw->ranges.global_aliased_pio.start;
- boot_params->global_aliased_pio_size =
-         ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);
+ boot_params->global_aliased_pio_base = vdev->hw->ranges.user.start;
+ boot_params->global_aliased_pio_size = ivpu_hw_range_size(&vdev->hw->ranges.user);

/* Allow configuration for L2C_PAGE_TABLE with boot param value */
boot_params->autoconfig = 1;

/* Enable L2 cache for first 2GB of high memory */
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
- ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);
+ ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.shave.start);

if (vdev->fw->mem_shave_nn)
boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;
10 changes: 6 additions & 4 deletions drivers/accel/ivpu/ivpu_gem.c
@@ -279,10 +279,12 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
int ret;

if (!range) {
- if (bo->flags & DRM_IVPU_BO_HIGH_MEM)
- range = &vdev->hw->ranges.user_high;
+ if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
+ range = &vdev->hw->ranges.shave;
+ else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
+ range = &vdev->hw->ranges.dma;
else
- range = &vdev->hw->ranges.user_low;
+ range = &vdev->hw->ranges.user;
}

mutex_lock(&ctx->lock);
@@ -570,7 +572,7 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
fixed_range.end = vpu_addr + size;
range = &fixed_range;
} else {
- range = &vdev->hw->ranges.global_low;
+ range = &vdev->hw->ranges.global;
}

bo = ivpu_bo_alloc(vdev, &vdev->gctx, size, flags, &internal_ops, range, 0);
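For illustration only (not from this patch), a userspace allocation that exercises the new flag-to-range mapping above might look as follows; struct drm_ivpu_bo_create's field names and DRM_IOCTL_IVPU_BO_CREATE are assumed from the ivpu uapi header:

/* Hedged sketch: request a buffer in the DMA range by passing
 * DRM_IVPU_BO_DMA_MEM, so ivpu_bo_alloc_vpu_addr() picks ranges.dma. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drm/ivpu_accel.h>

int main(void)
{
	struct drm_ivpu_bo_create args;
	int fd = open("/dev/accel/accel0", O_RDWR);  /* assumed device node */

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.size = 1 << 20;                 /* 1 MiB */
	args.flags = DRM_IVPU_BO_DMA_MEM;    /* new flag added by this commit */

	if (ioctl(fd, DRM_IOCTL_IVPU_BO_CREATE, &args) == 0)
		printf("handle %u at VPU address 0x%llx\n",
		       args.handle, (unsigned long long)args.vpu_addr);

	close(fd);
	return 0;
}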
9 changes: 4 additions & 5 deletions drivers/accel/ivpu/ivpu_hw.h
@@ -38,11 +38,10 @@ struct ivpu_addr_range {
struct ivpu_hw_info {
const struct ivpu_hw_ops *ops;
struct {
- struct ivpu_addr_range global_low;
- struct ivpu_addr_range global_high;
- struct ivpu_addr_range user_low;
- struct ivpu_addr_range user_high;
- struct ivpu_addr_range global_aliased_pio;
+ struct ivpu_addr_range global;
+ struct ivpu_addr_range user;
+ struct ivpu_addr_range shave;
+ struct ivpu_addr_range dma;
} ranges;
struct {
u8 min_ratio;
9 changes: 4 additions & 5 deletions drivers/accel/ivpu/ivpu_hw_37xx.c
@@ -620,11 +620,10 @@ static int ivpu_hw_37xx_info_init(struct ivpu_device *vdev)

ivpu_pll_init_frequency_ratios(vdev);

- ivpu_hw_init_range(&hw->ranges.global_low, 0x80000000, SZ_512M);
- ivpu_hw_init_range(&hw->ranges.global_high, 0x180000000, SZ_2M);
- ivpu_hw_init_range(&hw->ranges.user_low, 0xc0000000, 255 * SZ_1M);
- ivpu_hw_init_range(&hw->ranges.user_high, 0x180000000, SZ_2G);
- hw->ranges.global_aliased_pio = hw->ranges.user_low;
+ ivpu_hw_init_range(&hw->ranges.global, 0x80000000, SZ_512M);
+ ivpu_hw_init_range(&hw->ranges.user, 0xc0000000, 255 * SZ_1M);
+ ivpu_hw_init_range(&hw->ranges.shave, 0x180000000, SZ_2G);
+ ivpu_hw_init_range(&hw->ranges.dma, 0x200000000, SZ_8G);

return 0;
}
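For reference, the 37xx layout set up above can be summarized as a small table; the entries below are copied by hand from the init calls in this hunk and are illustrative only, not driver code:

/* Summary of the renamed 37xx VPU address ranges (values from above). */
#include <stdio.h>
#include <stdint.h>

struct range_desc {
	const char *name;
	uint64_t start;
	uint64_t size;
};

static const struct range_desc ivpu_37xx_ranges[] = {
	{ "global (global ctx, FW)",  0x080000000ULL, 512ULL << 20 }, /* SZ_512M */
	{ "user (user ctx, FW)",      0x0c0000000ULL, 255ULL << 20 }, /* 255 * SZ_1M */
	{ "shave (user ctx, shaves)", 0x180000000ULL,   2ULL << 30 }, /* SZ_2G */
	{ "dma (user ctx, DMA)",      0x200000000ULL,   8ULL << 30 }, /* SZ_8G */
};

int main(void)
{
	unsigned i;

	for (i = 0; i < sizeof(ivpu_37xx_ranges) / sizeof(ivpu_37xx_ranges[0]); i++)
		printf("%-28s start 0x%09llx size 0x%09llx\n",
		       ivpu_37xx_ranges[i].name,
		       (unsigned long long)ivpu_37xx_ranges[i].start,
		       (unsigned long long)ivpu_37xx_ranges[i].size);
	return 0;
}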
8 changes: 4 additions & 4 deletions drivers/accel/ivpu/ivpu_mmu_context.c
@@ -431,11 +431,11 @@ ivpu_mmu_context_init(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx, u3
return ret;

if (!context_id) {
- start = vdev->hw->ranges.global_low.start;
- end = vdev->hw->ranges.global_high.end;
+ start = vdev->hw->ranges.global.start;
+ end = vdev->hw->ranges.shave.end;
} else {
- start = vdev->hw->ranges.user_low.start;
- end = vdev->hw->ranges.user_high.end;
+ start = vdev->hw->ranges.user.start;
+ end = vdev->hw->ranges.dma.end;
}

drm_mm_init(&ctx->mm, start, end - start);
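As a quick sanity check (an editor's note, not part of the patch), the spans that ivpu_mmu_context_init() now hands to drm_mm_init() on 37xx work out as follows, using the range values from the previous file:

/* Hedged arithmetic check of the per-context spans; constants copied from
 * the 37xx range setup above, not taken from driver headers. */
#include <assert.h>

int main(void)
{
	unsigned long long global_start = 0x080000000ULL;                /* ranges.global.start */
	unsigned long long shave_end    = 0x180000000ULL + (2ULL << 30); /* ranges.shave.end */
	unsigned long long user_start   = 0x0c0000000ULL;                /* ranges.user.start */
	unsigned long long dma_end      = 0x200000000ULL + (8ULL << 30); /* ranges.dma.end */

	/* global context (context_id == 0): global.start .. shave.end, 6 GiB */
	assert(shave_end - global_start == 0x180000000ULL);
	/* user contexts: user.start .. dma.end, 13 GiB */
	assert(dma_end - user_start == 0x340000000ULL);
	return 0;
}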
5 changes: 4 additions & 1 deletion include/uapi/drm/ivpu_accel.h
@@ -133,8 +133,10 @@ struct drm_ivpu_param {
__u64 value;
};

- #define DRM_IVPU_BO_HIGH_MEM 0x00000001
+ #define DRM_IVPU_BO_SHAVE_MEM 0x00000001
+ #define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE 0x00000002
+ #define DRM_IVPU_BO_DMA_MEM 0x00000004

#define DRM_IVPU_BO_CACHED 0x00000000
#define DRM_IVPU_BO_UNCACHED 0x00010000
@@ -144,6 +146,7 @@ struct drm_ivpu_param
#define DRM_IVPU_BO_FLAGS \
(DRM_IVPU_BO_HIGH_MEM | \
DRM_IVPU_BO_MAPPABLE | \
+ DRM_IVPU_BO_DMA_MEM | \
DRM_IVPU_BO_CACHE_MASK)

/**
