Skip to content

Commit

Permalink
Merge branch 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux into drm-next

Browse files Browse the repository at this point in the history

Some more radeon and amdgpu stuff for drm-next.  Mostly just bug fixes
for new features and cleanups.

* 'drm-next-4.6' of git://people.freedesktop.org/~agd5f/linux:
  drm/amdgpu: fix rb bitmap & cu bitmap calculation
  drm/amdgpu: trace the pd_addr in vm_grab_id as well
  drm/amdgpu: fix VM faults caused by vm_grab_id() v4
  drm/amdgpu: update radeon acpi header
  drm/radeon: update radeon acpi header
  drm/amd: cleanup get_mfd_cell_dev()
  drm/amdgpu: fix error handling in amdgpu_bo_list_set
  drm/amd/powerplay: fix code style warning.
  drm/amd: Do not make DRM_AMD_ACP default to y
  drm/amdgpu/gfx: fix off by one in rb rework (v2)
  • Loading branch information
Dave Airlie committed Mar 8, 2016
2 parents 984fee6 + 6157bd7 commit 550e3b2
Show file tree
Hide file tree
Showing 18 changed files with 131 additions and 121 deletions.
1 change: 0 additions & 1 deletion drivers/gpu/drm/amd/acp/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@ menu "ACP Configuration"

config DRM_AMD_ACP
bool "Enable ACP IP support"
default y
select MFD_CORE
select PM_GENERIC_DOMAINS if PM
help
Expand Down
16 changes: 9 additions & 7 deletions drivers/gpu/drm/amd/amdgpu/amdgpu.h
Original file line number Diff line number Diff line change
Expand Up @@ -769,8 +769,9 @@ struct amdgpu_ib {
uint32_t *ptr;
struct amdgpu_fence *fence;
struct amdgpu_user_fence *user;
bool grabbed_vmid;
struct amdgpu_vm *vm;
unsigned vm_id;
uint64_t vm_pd_addr;
struct amdgpu_ctx *ctx;
uint32_t gds_base, gds_size;
uint32_t gws_base, gws_size;
Expand Down Expand Up @@ -877,10 +878,10 @@ struct amdgpu_vm_pt {
};

struct amdgpu_vm_id {
unsigned id;
uint64_t pd_gpu_addr;
struct amdgpu_vm_manager_id *mgr_id;
uint64_t pd_gpu_addr;
/* last flushed PD/PT update */
struct fence *flushed_updates;
struct fence *flushed_updates;
};

struct amdgpu_vm {
Expand Down Expand Up @@ -954,10 +955,11 @@ void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence);
struct amdgpu_sync *sync, struct fence *fence,
unsigned *vm_id, uint64_t *vm_pd_addr);
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
struct fence *updates);
unsigned vmid,
uint64_t pd_addr);
uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
Expand Down
6 changes: 2 additions & 4 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
Original file line number Diff line number Diff line change
Expand Up @@ -240,12 +240,10 @@ static int acp_poweron(struct generic_pm_domain *genpd)
static struct device *get_mfd_cell_dev(const char *device_name, int r)
{
char auto_dev_name[25];
char buf[8];
struct device *dev;

sprintf(buf, ".%d.auto", r);
strcpy(auto_dev_name, device_name);
strcat(auto_dev_name, buf);
snprintf(auto_dev_name, sizeof(auto_dev_name),
"%s.%d.auto", device_name, r);
dev = bus_find_device_by_name(&platform_bus_type, NULL, auto_dev_name);
dev_info(dev, "device %s added to pm domain\n", auto_dev_name);

Expand Down
3 changes: 3 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);
if (usermm) {
if (usermm != current->mm) {
amdgpu_bo_unref(&entry->robj);
r = -EPERM;
goto error_free;
}
Expand Down Expand Up @@ -151,6 +152,8 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
return 0;

error_free:
while (i--)
amdgpu_bo_unref(&array[i].robj);
drm_free_large(array);
return r;
}
Expand Down
7 changes: 4 additions & 3 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
}

ib->vm = vm;
ib->vm_id = 0;

return 0;
}
Expand Down Expand Up @@ -139,7 +140,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return -EINVAL;
}

if (vm && !ibs->grabbed_vmid) {
if (vm && !ibs->vm_id) {
dev_err(adev->dev, "VM IB without ID\n");
return -EINVAL;
}
Expand All @@ -152,10 +153,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,

if (vm) {
/* do context switch */
amdgpu_vm_flush(ring, vm, last_vm_update);
amdgpu_vm_flush(ring, ib->vm_id, ib->vm_pd_addr);

if (ring->funcs->emit_gds_switch)
amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
amdgpu_ring_emit_gds_switch(ring, ib->vm_id,
ib->gds_base, ib->gds_size,
ib->gws_base, ib->gws_size,
ib->oa_base, ib->oa_size);
Expand Down
15 changes: 11 additions & 4 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
Original file line number Diff line number Diff line change
Expand Up @@ -105,16 +105,23 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)

struct fence *fence = amdgpu_sync_get_fence(&job->sync);

if (fence == NULL && vm && !job->ibs->grabbed_vmid) {
if (fence == NULL && vm && !job->ibs->vm_id) {
struct amdgpu_ring *ring = job->ring;
unsigned i, vm_id;
uint64_t vm_pd_addr;
int r;

r = amdgpu_vm_grab_id(vm, ring, &job->sync,
&job->base.s_fence->base);
&job->base.s_fence->base,
&vm_id, &vm_pd_addr);
if (r)
DRM_ERROR("Error getting VM ID (%d)\n", r);
else
job->ibs->grabbed_vmid = true;
else {
for (i = 0; i < job->num_ibs; ++i) {
job->ibs[i].vm_id = vm_id;
job->ibs[i].vm_pd_addr = vm_pd_addr;
}
}

fence = amdgpu_sync_get_fence(&job->sync);
}
Expand Down
19 changes: 11 additions & 8 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
Original file line number Diff line number Diff line change
Expand Up @@ -100,21 +100,24 @@ TRACE_EVENT(amdgpu_sched_run_job,


TRACE_EVENT(amdgpu_vm_grab_id,
TP_PROTO(struct amdgpu_vm *vm, unsigned vmid, int ring),
TP_ARGS(vm, vmid, ring),
TP_PROTO(struct amdgpu_vm *vm, int ring, unsigned vmid,
uint64_t pd_addr),
TP_ARGS(vm, ring, vmid, pd_addr),
TP_STRUCT__entry(
__field(struct amdgpu_vm *, vm)
__field(u32, vmid)
__field(u32, ring)
__field(u32, vmid)
__field(u64, pd_addr)
),

TP_fast_assign(
__entry->vm = vm;
__entry->vmid = vmid;
__entry->ring = ring;
__entry->vmid = vmid;
__entry->pd_addr = pd_addr;
),
TP_printk("vm=%p, id=%u, ring=%u", __entry->vm, __entry->vmid,
__entry->ring)
TP_printk("vm=%p, ring=%u, id=%u, pd_addr=%010Lx", __entry->vm,
__entry->ring, __entry->vmid, __entry->pd_addr)
);

TRACE_EVENT(amdgpu_vm_bo_map,
Expand Down Expand Up @@ -231,8 +234,8 @@ TRACE_EVENT(amdgpu_vm_flush,
__entry->ring = ring;
__entry->id = id;
),
TP_printk("pd_addr=%010Lx, ring=%u, id=%u",
__entry->pd_addr, __entry->ring, __entry->id)
TP_printk("ring=%u, id=%u, pd_addr=%010Lx",
__entry->ring, __entry->id, __entry->pd_addr)
);

TRACE_EVENT(amdgpu_bo_list_set,
Expand Down
116 changes: 61 additions & 55 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,9 @@
* SI supports 16.
*/

/* Special value that no flush is necessary */
#define AMDGPU_VM_NO_FLUSH (~0ll)

/**
* amdgpu_vm_num_pde - return the number of page directory entries
*
Expand Down Expand Up @@ -157,50 +160,70 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
* Allocate an id for the vm, adding fences to the sync obj as necessary.
*/
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
struct amdgpu_sync *sync, struct fence *fence)
struct amdgpu_sync *sync, struct fence *fence,
unsigned *vm_id, uint64_t *vm_pd_addr)
{
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_device *adev = ring->adev;
struct amdgpu_vm_manager_id *id;
struct amdgpu_vm_id *id = &vm->ids[ring->idx];
struct fence *updates = sync->last_vm_update;
int r;

mutex_lock(&adev->vm_manager.lock);

/* check if the id is still valid */
if (vm_id->id) {
if (id->mgr_id) {
struct fence *flushed = id->flushed_updates;
bool is_later;
long owner;

id = &adev->vm_manager.ids[vm_id->id];
owner = atomic_long_read(&id->owner);
if (owner == (long)vm) {
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
if (!flushed)
is_later = true;
else if (!updates)
is_later = false;
else
is_later = fence_is_later(updates, flushed);

owner = atomic_long_read(&id->mgr_id->owner);
if (!is_later && owner == (long)id &&
pd_addr == id->pd_gpu_addr) {

fence_put(id->mgr_id->active);
id->mgr_id->active = fence_get(fence);

list_move_tail(&id->mgr_id->list,
&adev->vm_manager.ids_lru);

fence_put(id->active);
id->active = fence_get(fence);
*vm_id = id->mgr_id - adev->vm_manager.ids;
*vm_pd_addr = AMDGPU_VM_NO_FLUSH;
trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id,
*vm_pd_addr);

mutex_unlock(&adev->vm_manager.lock);
return 0;
}
}

	/* we definitely need to flush */
vm_id->pd_gpu_addr = ~0ll;
id->mgr_id = list_first_entry(&adev->vm_manager.ids_lru,
struct amdgpu_vm_manager_id,
list);

id = list_first_entry(&adev->vm_manager.ids_lru,
struct amdgpu_vm_manager_id,
list);
list_move_tail(&id->list, &adev->vm_manager.ids_lru);
atomic_long_set(&id->owner, (long)vm);
r = amdgpu_sync_fence(ring->adev, sync, id->mgr_id->active);
if (!r) {
fence_put(id->mgr_id->active);
id->mgr_id->active = fence_get(fence);

vm_id->id = id - adev->vm_manager.ids;
trace_amdgpu_vm_grab_id(vm, vm_id->id, ring->idx);
fence_put(id->flushed_updates);
id->flushed_updates = fence_get(updates);

r = amdgpu_sync_fence(ring->adev, sync, id->active);
id->pd_gpu_addr = pd_addr;

if (!r) {
fence_put(id->active);
id->active = fence_get(fence);
list_move_tail(&id->mgr_id->list, &adev->vm_manager.ids_lru);
atomic_long_set(&id->mgr_id->owner, (long)id);

*vm_id = id->mgr_id - adev->vm_manager.ids;
*vm_pd_addr = pd_addr;
trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr);
}

mutex_unlock(&adev->vm_manager.lock);
Expand All @@ -211,35 +234,18 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
* @vm: vm we want to flush
* @updates: last vm update that we waited for
* @vmid: vmid number to use
* @pd_addr: address of the page directory
*
* Flush the vm.
* Emit a VM flush when it is necessary.
*/
void amdgpu_vm_flush(struct amdgpu_ring *ring,
struct amdgpu_vm *vm,
struct fence *updates)
unsigned vmid,
uint64_t pd_addr)
{
uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
struct fence *flushed_updates = vm_id->flushed_updates;
bool is_later;

if (!flushed_updates)
is_later = true;
else if (!updates)
is_later = false;
else
is_later = fence_is_later(updates, flushed_updates);

if (pd_addr != vm_id->pd_gpu_addr || is_later) {
trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
if (is_later) {
vm_id->flushed_updates = fence_get(updates);
fence_put(flushed_updates);
}
vm_id->pd_gpu_addr = pd_addr;
amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
if (pd_addr != AMDGPU_VM_NO_FLUSH) {
trace_amdgpu_vm_flush(pd_addr, ring->idx, vmid);
amdgpu_ring_emit_vm_flush(ring, vmid, pd_addr);
}
}

Expand Down Expand Up @@ -1284,7 +1290,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
int i, r;

for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
vm->ids[i].id = 0;
vm->ids[i].mgr_id = NULL;
vm->ids[i].flushed_updates = NULL;
}
vm->va = RB_ROOT;
Expand Down Expand Up @@ -1381,13 +1387,13 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
amdgpu_bo_unref(&vm->page_directory);
fence_put(vm->page_directory_fence);
for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
unsigned id = vm->ids[i].id;
struct amdgpu_vm_id *id = &vm->ids[i];

atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
(long)vm, 0);
fence_put(vm->ids[i].flushed_updates);
if (id->mgr_id)
atomic_long_cmpxchg(&id->mgr_id->owner,
(long)id, 0);
fence_put(id->flushed_updates);
}

}

/**
Expand Down
2 changes: 1 addition & 1 deletion drivers/gpu/drm/amd/amdgpu/cik_sdma.c
Original file line number Diff line number Diff line change
Expand Up @@ -212,7 +212,7 @@ static void cik_sdma_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring,
struct amdgpu_ib *ib)
{
u32 extra_bits = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
u32 extra_bits = ib->vm_id & 0xf;
u32 next_rptr = ring->wptr + 5;

while ((next_rptr & 7) != 4)
Expand Down
3 changes: 0 additions & 3 deletions drivers/gpu/drm/amd/amdgpu/cikd.h
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,6 @@
#define BONAIRE_GB_ADDR_CONFIG_GOLDEN 0x12010001
#define HAWAII_GB_ADDR_CONFIG_GOLDEN 0x12011003

#define CIK_RB_BITMAP_WIDTH_PER_SH 2
#define HAWAII_RB_BITMAP_WIDTH_PER_SH 4

#define AMDGPU_NUM_OF_VMIDS 8

#define PIPEID(x) ((x) << 0)
Expand Down
Loading

0 comments on commit 550e3b2

Please sign in to comment.