diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 8bfac4ed24e62..c4f752eeadcc8 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1968,6 +1968,39 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+		void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ret = 0;
+
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
+
+		mutex_lock(&gvt->lock);
+
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
+			if (unlikely(vgpu->failsafe)) {
+				/* remove write protection to prevent future traps */
+				intel_vgpu_clean_page_track(vgpu, t);
+			} else {
+				ret = t->handler(t, pa, p_data, bytes);
+				if (ret) {
+					gvt_err("guest page write error %d, "
+						"gfn 0x%lx, pa 0x%llx, "
+						"var 0x%x, len %d\n",
+						ret, t->gfn, pa,
+						*(u32 *)p_data, bytes);
+				}
+			}
+		}
+		mutex_unlock(&gvt->lock);
+	}
+	return ret;
+}
+
+
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		intel_gvt_gtt_type_t type)
 {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index f98c1c19b4cb4..4cc13b5934f1f 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -308,4 +308,7 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+	void *p_data, unsigned int bytes);
+
 #endif /* _GVT_GTT_H_ */
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 643bb961d40df..fac54f32d33fc 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -183,6 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.get_gvt_attrs = intel_get_gvt_attrs,
 	.vgpu_query_plane = intel_vgpu_query_plane,
 	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
+	.write_protect_handler = intel_vgpu_write_protect_handler,
 };
 
 /**
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0822d0fd45dab..7dc7a80213a8a 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -546,6 +546,8 @@ struct intel_gvt_ops {
 		struct attribute_group ***intel_vgpu_type_groups);
 	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
+	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
+				     unsigned int);
 };
 
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index f86983d6655be..45bab5a6290be 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -1360,8 +1360,8 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			struct kvmgt_guest_info, track_node);
 
 	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
-		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
-					(void *)val, len);
+		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+					(void *)val, len);
 }
 
 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index b18a8bed6c182..562b5ad857a4e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -117,25 +117,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			memcpy(pt, p_data, bytes);
 
-	} else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		/* Since we enter the failsafe mode early during guest boot,
-		 * guest may not have chance to set up its ppgtt table, so
-		 * there should not be any wp pages for guest. Keep the wp
-		 * related code here in case we need to handle it in furture.
-		 */
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			/* remove write protection to prevent furture traps */
-			intel_vgpu_clean_page_track(vgpu, t);
-			if (read)
-				intel_gvt_hypervisor_read_gpa(vgpu, pa,
-						p_data, bytes);
-			else
-				intel_gvt_hypervisor_write_gpa(vgpu, pa,
-						p_data, bytes);
-		}
 	}
 	mutex_unlock(&gvt->lock);
 }
@@ -168,23 +149,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		goto out;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
-					p_data, bytes);
-			if (ret) {
-				gvt_vgpu_err("guest page read error %d, "
-					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-					ret, t->gfn, pa, *(u32 *)p_data,
-					bytes);
-			}
-			goto out;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -263,23 +227,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		goto out;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			ret = t->handler(t, pa, p_data, bytes);
-			if (ret) {
-				gvt_err("guest page write error %d, "
-					"gfn 0x%lx, pa 0x%llx, "
-					"var 0x%x, len %d\n",
-					ret, t->gfn, pa,
-					*(u32 *)p_data, bytes);
-			}
-			goto out;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))