drm/i915/gvt: Give new born vGPU higher scheduling chance
Try to give a newly created vGPU a better chance to be scheduled: not
only add it to the head of the sched list, but also give it higher
priority in workload scheduling for 2 seconds after it starts to be
scheduled. This allows fast GPU execution during VM boot and ensures
the guest driver can set up the required context state in time.
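
To make the mechanism concrete, here is a condensed sketch (not the
literal patch, which follows below) of the idea: each vGPU records a
priority deadline when it first enters scheduling, and the busy-vGPU
search prefers it until that deadline passes. The names
vgpu_sched_data_sketch and vgpu_has_boot_priority are hypothetical;
only pri_sched, pri_time and GVT_SCHED_VGPU_PRI_TIME appear in the
real patch.

#include <linux/ktime.h>
#include <linux/types.h>

/* 2 seconds of boot-time priority, as in the patch below */
#define GVT_SCHED_VGPU_PRI_TIME	2

/* hypothetical, condensed form of the per-vGPU bookkeeping */
struct vgpu_sched_data_sketch {
	ktime_t pri_time;	/* ktime_get() + 2s, stamped when scheduling starts */
	bool pri_sched;		/* true while the vGPU keeps boot-time priority */
};

/* hypothetical helper mirroring the check added to find_busy_vgpu() */
static bool vgpu_has_boot_priority(struct vgpu_sched_data_sketch *d)
{
	if (d->pri_sched && ktime_before(ktime_get(), d->pri_time))
		return true;		/* still inside the 2s window: pick it first */
	d->pri_sched = false;		/* window closed: fall back to normal LRU */
	return false;
}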

This fixes a recent failure seen on one VM when multiple Linux VMs run
on a kernel with commit 2621cef ("drm/i915: Provide a timeout to
i915_gem_wait_for_idle() on setup"), whose shorter setup timeout caused
context state initialization to fail.

v2: change the higher-priority scheduling period to 2s

Cc: Yuan Hang <hang.yuan@intel.com>
Reviewed-by: Hang Yuan <hang.yuan@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Zhenyu Wang committed Sep 3, 2018
1 parent b244ffa commit 54ff01f
 drivers/gpu/drm/i915/gvt/sched_policy.c | 34 +++++++++++++++++++++++++++-------
 1 file changed, 27 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -47,11 +47,15 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
 	return false;
 }
 
+/* We give 2 seconds higher prio for vGPU during start */
+#define GVT_SCHED_VGPU_PRI_TIME 2
+
 struct vgpu_sched_data {
 	struct list_head lru_list;
 	struct intel_vgpu *vgpu;
 	bool active;
-
+	bool pri_sched;
+	ktime_t pri_time;
 	ktime_t sched_in_time;
 	ktime_t sched_time;
 	ktime_t left_ts;
@@ -183,6 +187,14 @@ static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
 		if (!vgpu_has_pending_workload(vgpu_data->vgpu))
 			continue;
 
+		if (vgpu_data->pri_sched) {
+			if (ktime_before(ktime_get(), vgpu_data->pri_time)) {
+				vgpu = vgpu_data->vgpu;
+				break;
+			} else
+				vgpu_data->pri_sched = false;
+		}
+
 		/* Return the vGPU only if it has time slice left */
 		if (vgpu_data->left_ts > 0) {
 			vgpu = vgpu_data->vgpu;
@@ -202,19 +214,21 @@ static void tbs_sched_func(struct gvt_sched_data *sched_data)
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
 	struct vgpu_sched_data *vgpu_data;
 	struct intel_vgpu *vgpu = NULL;
+
 	/* no active vgpu or has already had a target */
 	if (list_empty(&sched_data->lru_runq_head) || scheduler->next_vgpu)
 		goto out;
 
 	vgpu = find_busy_vgpu(sched_data);
 	if (vgpu) {
 		scheduler->next_vgpu = vgpu;
-
-		/* Move the last used vGPU to the tail of lru_list */
 		vgpu_data = vgpu->sched_data;
-		list_del_init(&vgpu_data->lru_list);
-		list_add_tail(&vgpu_data->lru_list,
-			      &sched_data->lru_runq_head);
+		if (!vgpu_data->pri_sched) {
+			/* Move the last used vGPU to the tail of lru_list */
+			list_del_init(&vgpu_data->lru_list);
+			list_add_tail(&vgpu_data->lru_list,
+				      &sched_data->lru_runq_head);
+		}
 	} else {
 		scheduler->next_vgpu = gvt->idle_vgpu;
 	}
@@ -328,11 +342,17 @@ static void tbs_sched_start_schedule(struct intel_vgpu *vgpu)
 {
 	struct gvt_sched_data *sched_data = vgpu->gvt->scheduler.sched_data;
 	struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
+	ktime_t now;
 
 	if (!list_empty(&vgpu_data->lru_list))
 		return;
 
-	list_add_tail(&vgpu_data->lru_list, &sched_data->lru_runq_head);
+	now = ktime_get();
+	vgpu_data->pri_time = ktime_add(now,
+				ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
+	vgpu_data->pri_sched = true;
+
+	list_add(&vgpu_data->lru_list, &sched_data->lru_runq_head);
 
 	if (!hrtimer_active(&sched_data->timer))
 		hrtimer_start(&sched_data->timer, ktime_add_ns(ktime_get(),
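
For completeness, a minimal sketch of the arming side shown in the last
hunk, reusing the hypothetical vgpu_sched_data_sketch fields from the
sketch above; arm_boot_priority is likewise a hypothetical name. The
real patch does this inline in tbs_sched_start_schedule(), and also
switches list_add_tail() to list_add() so the new vGPU starts at the
head of the run queue.

#include <linux/ktime.h>
#include <linux/list.h>

/* hypothetical helper: stamp the 2s deadline and queue the vGPU at the head */
static void arm_boot_priority(struct vgpu_sched_data_sketch *d,
			      struct list_head *lru_entry,
			      struct list_head *runq_head)
{
	d->pri_time = ktime_add(ktime_get(),
				ktime_set(GVT_SCHED_VGPU_PRI_TIME, 0));
	d->pri_sched = true;
	list_add(lru_entry, runq_head);	/* head, not tail: runs before older vGPUs */
}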
