Merge tag 'amd-drm-next-5.16-2021-09-27' of https://gitlab.freedesktop.org/agd5f/linux into drm-next

amd-drm-next-5.16-2021-09-27:

amdgpu:
- RAS improvements
- BACO fixes
- Yellow Carp updates
- Misc code cleanups
- Initial DP 2.0 support
- VCN priority handling
- Cyan Skillfish updates
- Rework IB handling for multimedia engine tests
- Backlight fixes
- DCN 3.1 power saving improvements
- Runtime PM fixes
- Modifier support for DCC image stores for gfx 10.3
- Hotplug fixes
- Clean up stack related warnings in display code
- DP alt mode fixes
- Display rework for better handling of FP code
- Debugfs fixes

amdkfd:
- SVM fixes
- DMA map fixes

radeon:
- AGP fix

From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210927212653.4575-1-alexander.deucher@amd.com
Signed-off-by: Dave Airlie <airlied@redhat.com>
Dave Airlie committed Sep 28, 2021
2 parents f602a96 + 2485e27 commit 1e39445
Showing 192 changed files with 10,246 additions and 2,596 deletions.
4 changes: 2 additions & 2 deletions MAINTAINERS
@@ -977,12 +977,12 @@ L: platform-driver-x86@vger.kernel.org
 S:	Maintained
 F:	drivers/platform/x86/amd-pmc.*
 
-AMD POWERPLAY
+AMD POWERPLAY AND SWSMU
 M:	Evan Quan <evan.quan@amd.com>
 L:	amd-gfx@lists.freedesktop.org
 S:	Supported
 T:	git https://gitlab.freedesktop.org/agd5f/linux.git
-F:	drivers/gpu/drm/amd/pm/powerplay/
+F:	drivers/gpu/drm/amd/pm/
 
 AMD PTDMA DRIVER
 M:	Sanjay R Mehta <sanju.mehta@amd.com>
143 changes: 109 additions & 34 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -43,14 +43,61 @@ const unsigned int amdgpu_ctx_num_entities[AMDGPU_HW_IP_NUM] = {
 	[AMDGPU_HW_IP_VCN_JPEG]	=	1,
 };
 
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio)
+{
+	switch (ctx_prio) {
+	case AMDGPU_CTX_PRIORITY_UNSET:
+	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+	case AMDGPU_CTX_PRIORITY_LOW:
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+	case AMDGPU_CTX_PRIORITY_HIGH:
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static enum drm_sched_priority
+amdgpu_ctx_to_drm_sched_prio(int32_t ctx_prio)
+{
+	switch (ctx_prio) {
+	case AMDGPU_CTX_PRIORITY_UNSET:
+		return DRM_SCHED_PRIORITY_UNSET;
+
+	case AMDGPU_CTX_PRIORITY_VERY_LOW:
+		return DRM_SCHED_PRIORITY_MIN;
+
+	case AMDGPU_CTX_PRIORITY_LOW:
+		return DRM_SCHED_PRIORITY_MIN;
+
+	case AMDGPU_CTX_PRIORITY_NORMAL:
+		return DRM_SCHED_PRIORITY_NORMAL;
+
+	case AMDGPU_CTX_PRIORITY_HIGH:
+		return DRM_SCHED_PRIORITY_HIGH;
+
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return DRM_SCHED_PRIORITY_HIGH;
+
+	/* This should not happen as we sanitized userspace provided priority
+	 * already, WARN if this happens.
+	 */
+	default:
+		WARN(1, "Invalid context priority %d\n", ctx_prio);
+		return DRM_SCHED_PRIORITY_NORMAL;
+	}
+
+}
+
 static int amdgpu_ctx_priority_permit(struct drm_file *filp,
-				      enum drm_sched_priority priority)
+				      int32_t priority)
 {
-	if (priority < 0 || priority >= DRM_SCHED_PRIORITY_COUNT)
+	if (!amdgpu_ctx_priority_is_valid(priority))
 		return -EINVAL;
 
 	/* NORMAL and below are accessible by everyone */
-	if (priority <= DRM_SCHED_PRIORITY_NORMAL)
+	if (priority <= AMDGPU_CTX_PRIORITY_NORMAL)
 		return 0;
 
 	if (capable(CAP_SYS_NICE))
@@ -62,53 +109,81 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
 	return -EACCES;
 }
 
-static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+static enum amdgpu_gfx_pipe_priority amdgpu_ctx_prio_to_compute_prio(int32_t prio)
 {
 	switch (prio) {
-	case DRM_SCHED_PRIORITY_HIGH:
-	case DRM_SCHED_PRIORITY_KERNEL:
+	case AMDGPU_CTX_PRIORITY_HIGH:
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
 		return AMDGPU_GFX_PIPE_PRIO_HIGH;
 	default:
 		return AMDGPU_GFX_PIPE_PRIO_NORMAL;
 	}
 }
 
-static unsigned int amdgpu_ctx_prio_sched_to_hw(struct amdgpu_device *adev,
-						enum drm_sched_priority prio,
-						u32 hw_ip)
+static enum amdgpu_ring_priority_level amdgpu_ctx_sched_prio_to_ring_prio(int32_t prio)
+{
+	switch (prio) {
+	case AMDGPU_CTX_PRIORITY_HIGH:
+		return AMDGPU_RING_PRIO_1;
+	case AMDGPU_CTX_PRIORITY_VERY_HIGH:
+		return AMDGPU_RING_PRIO_2;
+	default:
+		return AMDGPU_RING_PRIO_0;
+	}
+}
+
+static unsigned int amdgpu_ctx_get_hw_prio(struct amdgpu_ctx *ctx, u32 hw_ip)
 {
+	struct amdgpu_device *adev = ctx->adev;
+	int32_t ctx_prio;
 	unsigned int hw_prio;
 
-	hw_prio = (hw_ip == AMDGPU_HW_IP_COMPUTE) ?
-			amdgpu_ctx_sched_prio_to_compute_prio(prio) :
-			AMDGPU_RING_PRIO_DEFAULT;
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
+
+	switch (hw_ip) {
+	case AMDGPU_HW_IP_COMPUTE:
+		hw_prio = amdgpu_ctx_prio_to_compute_prio(ctx_prio);
+		break;
+	case AMDGPU_HW_IP_VCE:
+	case AMDGPU_HW_IP_VCN_ENC:
+		hw_prio = amdgpu_ctx_sched_prio_to_ring_prio(ctx_prio);
+		break;
+	default:
+		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
+		break;
+	}
+
 	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
 	if (adev->gpu_sched[hw_ip][hw_prio].num_scheds == 0)
 		hw_prio = AMDGPU_RING_PRIO_DEFAULT;
 
 	return hw_prio;
 }
 
+
 static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
-				   const u32 ring)
+				  const u32 ring)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	struct amdgpu_ctx_entity *entity;
 	struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
 	unsigned num_scheds = 0;
+	int32_t ctx_prio;
 	unsigned int hw_prio;
-	enum drm_sched_priority priority;
+	enum drm_sched_priority drm_prio;
 	int r;
 
 	entity = kzalloc(struct_size(entity, fences, amdgpu_sched_jobs),
			 GFP_KERNEL);
 	if (!entity)
 		return -ENOMEM;
 
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
 	entity->sequence = 1;
-	priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
-				ctx->init_priority : ctx->override_priority;
-	hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority, hw_ip);
+	hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
+	drm_prio = amdgpu_ctx_to_drm_sched_prio(ctx_prio);
 
 	hw_ip = array_index_nospec(hw_ip, AMDGPU_HW_IP_NUM);
 	scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
@@ -124,7 +199,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 		num_scheds = 1;
 	}
 
-	r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
+	r = drm_sched_entity_init(&entity->entity, drm_prio, scheds, num_scheds,
				  &ctx->guilty);
 	if (r)
 		goto error_free_entity;
@@ -139,7 +214,7 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, u32 hw_ip,
 }
 
 static int amdgpu_ctx_init(struct amdgpu_device *adev,
-			   enum drm_sched_priority priority,
+			   int32_t priority,
			   struct drm_file *filp,
			   struct amdgpu_ctx *ctx)
 {
@@ -161,7 +236,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 	ctx->reset_counter_query = ctx->reset_counter;
 	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
 	ctx->init_priority = priority;
-	ctx->override_priority = DRM_SCHED_PRIORITY_UNSET;
+	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
 
 	return 0;
 }
@@ -234,7 +309,7 @@ int amdgpu_ctx_get_entity(struct amdgpu_ctx *ctx, u32 hw_ip, u32 instance,
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
			    struct amdgpu_fpriv *fpriv,
			    struct drm_file *filp,
-			    enum drm_sched_priority priority,
+			    int32_t priority,
			    uint32_t *id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -397,19 +472,19 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 {
 	int r;
 	uint32_t id;
-	enum drm_sched_priority priority;
+	int32_t priority;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 
 	id = args->in.ctx_id;
-	r = amdgpu_to_sched_priority(args->in.priority, &priority);
+	priority = args->in.priority;
 
 	/* For backwards compatibility reasons, we need to accept
	 * ioctls with garbage in the priority field */
-	if (r == -EINVAL)
-		priority = DRM_SCHED_PRIORITY_NORMAL;
+	if (!amdgpu_ctx_priority_is_valid(priority))
+		priority = AMDGPU_CTX_PRIORITY_NORMAL;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
@@ -515,22 +590,22 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 }
 
 static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
-					   struct amdgpu_ctx_entity *aentity,
-					   int hw_ip,
-					   enum drm_sched_priority priority)
+					    struct amdgpu_ctx_entity *aentity,
+					    int hw_ip,
+					    int32_t priority)
 {
 	struct amdgpu_device *adev = ctx->adev;
 	unsigned int hw_prio;
 	struct drm_gpu_scheduler **scheds = NULL;
 	unsigned num_scheds;
 
 	/* set sw priority */
-	drm_sched_entity_set_priority(&aentity->entity, priority);
+	drm_sched_entity_set_priority(&aentity->entity,
+				      amdgpu_ctx_to_drm_sched_prio(priority));
 
 	/* set hw priority */
 	if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
-		hw_prio = amdgpu_ctx_prio_sched_to_hw(adev, priority,
-						      AMDGPU_HW_IP_COMPUTE);
+		hw_prio = amdgpu_ctx_get_hw_prio(ctx, hw_ip);
 		hw_prio = array_index_nospec(hw_prio, AMDGPU_RING_PRIO_MAX);
 		scheds = adev->gpu_sched[hw_ip][hw_prio].sched;
 		num_scheds = adev->gpu_sched[hw_ip][hw_prio].num_scheds;
@@ -540,14 +615,14 @@ static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
 }
 
 void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
-				  enum drm_sched_priority priority)
+				  int32_t priority)
 {
-	enum drm_sched_priority ctx_prio;
+	int32_t ctx_prio;
 	unsigned i, j;
 
 	ctx->override_priority = priority;
 
-	ctx_prio = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
+	ctx_prio = (ctx->override_priority == AMDGPU_CTX_PRIORITY_UNSET) ?
			ctx->init_priority : ctx->override_priority;
 	for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
 		for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
8 changes: 4 additions & 4 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -47,8 +47,8 @@ struct amdgpu_ctx {
 	spinlock_t			ring_lock;
 	struct amdgpu_ctx_entity	*entities[AMDGPU_HW_IP_NUM][AMDGPU_MAX_ENTITY_NUM];
 	bool				preamble_presented;
-	enum drm_sched_priority		init_priority;
-	enum drm_sched_priority		override_priority;
+	int32_t				init_priority;
+	int32_t				override_priority;
 	struct mutex			lock;
 	atomic_t			guilty;
 	unsigned long			ras_counter_ce;
@@ -75,8 +75,8 @@ void amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx,
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
				       struct drm_sched_entity *entity,
				       uint64_t seq);
-void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
-				  enum drm_sched_priority priority);
+bool amdgpu_ctx_priority_is_valid(int32_t ctx_prio);
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx, int32_t ctx_prio);
 
 int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp);
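
Context for the amdgpu_ctx changes above: amdgpu_ctx_ioctl() now takes the AMDGPU_CTX_PRIORITY_* value straight from userspace and validates it with amdgpu_ctx_priority_is_valid(), falling back to AMDGPU_CTX_PRIORITY_NORMAL for garbage values and still requiring CAP_SYS_NICE for anything above NORMAL. The following standalone program is a rough, non-authoritative sketch of how that uapi is exercised from userspace; the render-node path and header location are assumptions for illustration and are not part of this commit.

/*
 * Hypothetical userspace sketch (not from this commit): allocate an amdgpu
 * GPU context with an explicit priority through DRM_IOCTL_AMDGPU_CTX.
 * args.in.priority carries one of the AMDGPU_CTX_PRIORITY_* values that
 * amdgpu_ctx_priority_is_valid() checks in the kernel; invalid values fall
 * back to AMDGPU_CTX_PRIORITY_NORMAL, and priorities above NORMAL need
 * CAP_SYS_NICE (see amdgpu_ctx_priority_permit()).
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <libdrm/amdgpu_drm.h>	/* union drm_amdgpu_ctx, AMDGPU_CTX_*; path may vary per system */

int main(void)
{
	union drm_amdgpu_ctx args;
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed render node */

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;	/* requires CAP_SYS_NICE */

	if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args) == 0)
		printf("allocated context id %u\n", args.out.alloc.ctx_id);

	close(fd);
	return 0;
}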