Merge branch 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux into drm-next

- DP fixes for radeon and amdgpu
- IH ring fix for tonga and fiji
- Lots of GPU scheduler fixes
- Misc additional fixes

* 'drm-next-4.3' of git://people.freedesktop.org/~agd5f/linux: (42 commits)
  drm/amdgpu: fix wait queue handling in the scheduler
  drm/amdgpu: remove extra parameters from scheduler callbacks
  drm/amdgpu: wake up scheduler only when neccessary
  drm/amdgpu: remove entity idle timeout v2
  drm/amdgpu: fix postclose order
  drm/amdgpu: use IB for copy buffer of eviction
  drm/amdgpu: adjust the judgement of removing fence callback
  drm/amdgpu: fix no sync_wait in copy_buffer
  drm/amdgpu: fix last_vm_update fence is not effetive for sched fence
  drm/amdgpu: add priv data to sched
  drm/amdgpu: add owner for sched fence
  drm/amdgpu: remove entity reference from sched fence
  drm/amdgpu: fix and cleanup amd_sched_entity_push_job
  drm/amdgpu: remove amdgpu_bo_list_clone
  drm/amdgpu: remove the context from amdgpu_job
  drm/amdgpu: remove unused parameters to amd_sched_create
  drm/amdgpu: remove sched_lock
  drm/amdgpu: remove prepare_job callback
  drm/amdgpu: cleanup a scheduler function name
  drm/amdgpu: reorder scheduler functions
  ...
Dave Airlie committed Aug 27, 2015
2 parents db56176 + c2b6bd7 commit 40b2dff
Showing 30 changed files with 671 additions and 727 deletions.
54 changes: 30 additions & 24 deletions drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -183,6 +183,7 @@ struct amdgpu_vm;
 struct amdgpu_ring;
 struct amdgpu_semaphore;
 struct amdgpu_cs_parser;
+struct amdgpu_job;
 struct amdgpu_irq_src;
 struct amdgpu_fpriv;
 
@@ -246,7 +247,7 @@ struct amdgpu_buffer_funcs {
         unsigned copy_num_dw;
 
         /* used for buffer migration */
-        void (*emit_copy_buffer)(struct amdgpu_ring *ring,
+        void (*emit_copy_buffer)(struct amdgpu_ib *ib,
                 /* src addr in bytes */
                 uint64_t src_offset,
                 /* dst addr in bytes */
@@ -439,9 +440,12 @@ int amdgpu_fence_wait_next(struct amdgpu_ring *ring);
 int amdgpu_fence_wait_empty(struct amdgpu_ring *ring);
 unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring);
 
-signed long amdgpu_fence_wait_any(struct amdgpu_device *adev,
-                                  struct amdgpu_fence **fences,
-                                  bool intr, long t);
+signed long amdgpu_fence_wait_multiple(struct amdgpu_device *adev,
+                                       struct fence **array,
+                                       uint32_t count,
+                                       bool wait_all,
+                                       bool intr,
+                                       signed long t);
 struct amdgpu_fence *amdgpu_fence_ref(struct amdgpu_fence *fence);
 void amdgpu_fence_unref(struct amdgpu_fence **fence);
 
@@ -514,7 +518,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
                        uint64_t dst_offset,
                        uint32_t byte_count,
                        struct reservation_object *resv,
-                       struct amdgpu_fence **fence);
+                       struct fence **fence);
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
 
 struct amdgpu_bo_list_entry {
@@ -650,7 +654,7 @@ struct amdgpu_sa_bo {
         struct amdgpu_sa_manager *manager;
         unsigned soffset;
         unsigned eoffset;
-        struct amdgpu_fence *fence;
+        struct fence *fence;
 };
 
 /*
@@ -692,15 +696,16 @@ bool amdgpu_semaphore_emit_wait(struct amdgpu_ring *ring,
                                 struct amdgpu_semaphore *semaphore);
 void amdgpu_semaphore_free(struct amdgpu_device *adev,
                            struct amdgpu_semaphore **semaphore,
-                           struct amdgpu_fence *fence);
+                           struct fence *fence);
 
 /*
  * Synchronization
  */
 struct amdgpu_sync {
         struct amdgpu_semaphore *semaphores[AMDGPU_NUM_SYNCS];
         struct amdgpu_fence *sync_to[AMDGPU_MAX_RINGS];
-        struct amdgpu_fence *last_vm_update;
+        DECLARE_HASHTABLE(fences, 4);
+        struct fence *last_vm_update;
 };
 
 void amdgpu_sync_create(struct amdgpu_sync *sync);
@@ -712,8 +717,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
                      void *owner);
 int amdgpu_sync_rings(struct amdgpu_sync *sync,
                       struct amdgpu_ring *ring);
+int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_device *adev, struct amdgpu_sync *sync,
-                      struct amdgpu_fence *fence);
+                      struct fence *fence);
 
 /*
  * GART structures, functions & helpers
@@ -871,7 +877,7 @@ int amdgpu_sched_ib_submit_kernel_helper(struct amdgpu_device *adev,
                                          struct amdgpu_ring *ring,
                                          struct amdgpu_ib *ibs,
                                          unsigned num_ibs,
-                                         int (*free_job)(struct amdgpu_cs_parser *),
+                                         int (*free_job)(struct amdgpu_job *),
                                          void *owner,
                                          struct fence **fence);
 
@@ -957,7 +963,7 @@ struct amdgpu_vm_id {
         unsigned id;
         uint64_t pd_gpu_addr;
         /* last flushed PD/PT update */
-        struct amdgpu_fence *flushed_updates;
+        struct fence *flushed_updates;
         /* last use of vmid */
         struct amdgpu_fence *last_id_use;
 };
@@ -1042,7 +1048,7 @@ struct amdgpu_ctx *amdgpu_ctx_get(struct amdgpu_fpriv *fpriv, uint32_t id);
 int amdgpu_ctx_put(struct amdgpu_ctx *ctx);
 
 uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-                              struct fence *fence, uint64_t queued_seq);
+                              struct fence *fence);
 struct fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
                                    struct amdgpu_ring *ring, uint64_t seq);
 
@@ -1077,8 +1083,6 @@ struct amdgpu_bo_list {
         struct amdgpu_bo_list_entry *array;
 };
 
-struct amdgpu_bo_list *
-amdgpu_bo_list_clone(struct amdgpu_bo_list *list);
 struct amdgpu_bo_list *
 amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
 void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
@@ -1255,14 +1259,16 @@ struct amdgpu_cs_parser {
 
         /* user fence */
         struct amdgpu_user_fence uf;
+};
 
-        struct amdgpu_ring *ring;
-        struct mutex job_lock;
-        struct work_struct job_work;
-        int (*prepare_job)(struct amdgpu_cs_parser *sched_job);
-        int (*run_job)(struct amdgpu_cs_parser *sched_job);
-        int (*free_job)(struct amdgpu_cs_parser *sched_job);
-        struct amd_sched_fence *s_fence;
+struct amdgpu_job {
+        struct amd_sched_job base;
+        struct amdgpu_device *adev;
+        struct amdgpu_ib *ibs;
+        uint32_t num_ibs;
+        struct mutex job_lock;
+        struct amdgpu_user_fence uf;
+        int (*free_job)(struct amdgpu_job *sched_job);
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
@@ -2241,7 +2247,7 @@ static inline void amdgpu_ring_write(struct amdgpu_ring *ring, uint32_t v)
 #define amdgpu_display_add_connector(adev, ci, sd, ct, ib, coi, h, r) (adev)->mode_info.funcs->add_connector((adev), (ci), (sd), (ct), (ib), (coi), (h), (r))
 #define amdgpu_display_stop_mc_access(adev, s) (adev)->mode_info.funcs->stop_mc_access((adev), (s))
 #define amdgpu_display_resume_mc_access(adev, s) (adev)->mode_info.funcs->resume_mc_access((adev), (s))
-#define amdgpu_emit_copy_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((r), (s), (d), (b))
+#define amdgpu_emit_copy_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b))
 #define amdgpu_emit_fill_buffer(adev, r, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((r), (s), (d), (b))
 #define amdgpu_dpm_get_temperature(adev) (adev)->pm.funcs->get_temperature((adev))
 #define amdgpu_dpm_pre_set_power_state(adev) (adev)->pm.funcs->pre_set_power_state((adev))
@@ -2343,7 +2349,7 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
                       struct amdgpu_sync *sync);
 void amdgpu_vm_flush(struct amdgpu_ring *ring,
                      struct amdgpu_vm *vm,
-                     struct amdgpu_fence *updates);
+                     struct fence *updates);
 void amdgpu_vm_fence(struct amdgpu_device *adev,
                      struct amdgpu_vm *vm,
                      struct amdgpu_fence *fence);
@@ -2373,7 +2379,7 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
                        uint64_t addr);
 void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
                       struct amdgpu_bo_va *bo_va);
-
+int amdgpu_vm_free_job(struct amdgpu_job *job);
 /*
  * functions used by amdgpu_encoder.c
  */
8 changes: 4 additions & 4 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c
@@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
 {
         unsigned long start_jiffies;
         unsigned long end_jiffies;
-        struct amdgpu_fence *fence = NULL;
+        struct fence *fence = NULL;
         int i, r;
 
         start_jiffies = jiffies;
@@ -42,17 +42,17 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size,
                 r = amdgpu_copy_buffer(ring, saddr, daddr, size, NULL, &fence);
                 if (r)
                         goto exit_do_move;
-                r = fence_wait(&fence->base, false);
+                r = fence_wait(fence, false);
                 if (r)
                         goto exit_do_move;
-                amdgpu_fence_unref(&fence);
+                fence_put(fence);
         }
         end_jiffies = jiffies;
         r = jiffies_to_msecs(end_jiffies - start_jiffies);
 
 exit_do_move:
         if (fence)
-                amdgpu_fence_unref(&fence);
+                fence_put(fence);
         return r;
 }
 
33 changes: 0 additions & 33 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -62,39 +62,6 @@ static int amdgpu_bo_list_create(struct amdgpu_fpriv *fpriv,
         return 0;
 }
 
-struct amdgpu_bo_list *
-amdgpu_bo_list_clone(struct amdgpu_bo_list *list)
-{
-        struct amdgpu_bo_list *result;
-        unsigned i;
-
-        result = kmalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
-        if (!result)
-                return NULL;
-
-        result->array = drm_calloc_large(list->num_entries,
-                                         sizeof(struct amdgpu_bo_list_entry));
-        if (!result->array) {
-                kfree(result);
-                return NULL;
-        }
-
-        mutex_init(&result->lock);
-        result->gds_obj = list->gds_obj;
-        result->gws_obj = list->gws_obj;
-        result->oa_obj = list->oa_obj;
-        result->has_userptr = list->has_userptr;
-        result->num_entries = list->num_entries;
-
-        memcpy(result->array, list->array, list->num_entries *
-               sizeof(struct amdgpu_bo_list_entry));
-
-        for (i = 0; i < result->num_entries; ++i)
-                amdgpu_bo_ref(result->array[i].robj);
-
-        return result;
-}
-
 static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
 {
         struct amdgpu_bo_list *list;
5 changes: 5 additions & 0 deletions drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -75,6 +75,11 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
         if (!amdgpu_display_hpd_sense(adev, amdgpu_connector->hpd.hpd)) {
                 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
         } else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
+                /* Don't try to start link training before we
+                 * have the dpcd */
+                if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+                        return;
+
                 /* set it to OFF so that drm_helper_connector_dpms()
                  * won't return immediately since the current state
                  * is ON at this point.