Merge tag 'drm-intel-next-2017-03-20' of git://anongit.freedesktop.org/git/drm-intel into drm-next

More in i915 for 4.12:

- designware i2c fixes from Hans de Goede, in a topic branch shared
  with other subsystems (maybe, they didn't confirm, but requested the
  pull)
- drop drm_panel usage from the intel dsi vbt panel (Jani)
- vblank evasion improvements and tracing (Maarten and Ville)
- clarify spinlock irq semantics again a bit (Tvrtko)
- new ->pwrite backend hook (right now just for shmem pagecache writes),
  from Chris
- more planar/ccs work from Ville
- hotplug safe connector iterators everywhere (see the iteration sketch after this list)
- userptr fixes (Chris)
- selftests for cache coloring eviction (Matthew Auld)
- extend debugfs drop_caches interface for shrinker testing (Chris)
- baytrail "the rps kills the machine" fix (Chris)
- use new atomic state iterators, a lot (Maarten)
- refactor guc/huc code some (Arkadiusz Hiler)
- tighten breadcrumbs rbtree a bit (Chris)
- improve wrap-around and time handling in rps residency counters
  (Mika)
- split reset-in-progress in two flags, backoff and handoff (Chris)
- other misc reset improvements from a few people
- bunch of vgpu interaction fixes with recent code changes
- misc stuff all over, as usual
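
A quick illustration of the "hotplug safe connector iterators" item above: the conversions in this pull replace bare connector-list walks with the drm_connector_list_iter helpers, so connectors cannot disappear mid-iteration during a hotplug. The sketch below is purely illustrative and assumes the begin/end spelling of the iterator API as it lands in 4.12 (earlier revisions of the helpers used _get/_put); the counting helper itself is invented.

#include <drm/drmP.h>
#include <drm/drm_connector.h>

/* Illustrative only: count connected connectors without holding
 * mode_config.mutex.  The iterator takes its own references, so a
 * concurrent hotplug cannot free a connector underneath us. */
static int count_connected_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;
	int count = 0;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		if (connector->status == connector_status_connected)
			count++;
	}
	drm_connector_list_iter_end(&conn_iter);

	return count;
}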

* tag 'drm-intel-next-2017-03-20' of git://anongit.freedesktop.org/git/drm-intel: (144 commits)
  drm/i915: Update DRIVER_DATE to 20170320
  drm/i915: Initialise i915_gem_object_create_from_data() directly
  drm/i915: Correct error handling for i915_gem_object_create_from_data()
  drm/i915: i915_gem_object_create_from_data() doesn't require struct_mutex
  drm/i915: Retire an active batch pool object rather than allocate new
  drm/i915: Add i810/i815 pci-ids for completeness
  drm/i915: Skip execlists_dequeue() early if the list is empty
  drm/i915: Stop using obj->obj_exec_link outside of execbuf
  drm/i915: Squelch WARN for VLV_COUNTER_CONTROL
  drm/i915/glk: Enable pooled EUs for Geminilake
  drm/i915: Remove superfluous i915_add_request_no_flush() helper
  drm/i915/vgpu: Neuter forcewakes for VGPU more thoroughly
  drm/i915: Fix vGPU balloon for ggtt guard page
  drm/i915: Avoid use-after-free of ctx in request tracepoints
  drm/i915: Assert that the context pin_counts do not overflow
  drm/i915: Wait for reset to complete before returning from debugfs/i915_wedged
  drm/i915: Restore engine->submit_request before unwedging
  drm/i915: Move engine->submit_request selection to a vfunc
  drm/i915: Split I915_RESET_IN_PROGRESS into two flags
  drm/i915: make context status notifier head be per engine
  ...
Dave Airlie committed Mar 22, 2017
2 parents 33d5f51 + c5bd2e1 commit be5df20
Showing 72 changed files with 2,661 additions and 1,842 deletions.
87 changes: 87 additions & 0 deletions arch/x86/include/asm/iosf_mbi.h
@@ -5,6 +5,8 @@
#ifndef IOSF_MBI_SYMS_H
#define IOSF_MBI_SYMS_H

#include <linux/notifier.h>

#define MBI_MCR_OFFSET 0xD0
#define MBI_MDR_OFFSET 0xD4
#define MBI_MCRX_OFFSET 0xD8
@@ -47,6 +49,10 @@
#define QRK_MBI_UNIT_MM 0x05
#define QRK_MBI_UNIT_SOC 0x31

/* Action values for the pmic_bus_access_notifier functions */
#define MBI_PMIC_BUS_ACCESS_BEGIN 1
#define MBI_PMIC_BUS_ACCESS_END 2

#if IS_ENABLED(CONFIG_IOSF_MBI)

bool iosf_mbi_available(void);
@@ -88,6 +94,65 @@ int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr);
*/
int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask);

/**
* iosf_mbi_punit_acquire() - Acquire access to the P-Unit
*
* On some systems the P-Unit accesses the PMIC to change various voltages
* through the same bus as other kernel drivers use for e.g. battery monitoring.
*
* If a driver sends requests to the P-Unit which require the P-Unit to access
* the PMIC bus while another driver is also accessing the PMIC bus, various bad
* things happen.
*
* To avoid these problems this function must be called before accessing the
* P-Unit or the PMIC, be it through iosf_mbi* functions or through other means.
*
* Note that on these systems the i2c-bus driver will request a semaphore from
* the P-Unit for exclusive access to the PMIC bus while i2c drivers are
* accessing it, but this does not appear to be sufficient: we still need to
* avoid making certain P-Unit requests during the access window to avoid
* problems.
*
* This function locks a mutex, as such it may sleep.
*/
void iosf_mbi_punit_acquire(void);

/**
* iosf_mbi_punit_release() - Release access to the P-Unit
*/
void iosf_mbi_punit_release(void);

/**
* iosf_mbi_register_pmic_bus_access_notifier - Register PMIC bus notifier
*
* This function can be used by drivers which may need to acquire P-Unit
* managed resources from interrupt context, where iosf_mbi_punit_acquire()
* can not be used.
*
* This function allows a driver to register a notifier to get notified (in a
* process context) before other drivers start accessing the PMIC bus.
*
* This allows the driver to acquire, beforehand, any resources which it may
* need during the window in which the other driver is accessing the PMIC.
*
* @nb: notifier_block to register
*/
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb);

/**
* iosf_mbi_unregister_pmic_bus_access_notifier - Unregister PMIC bus notifier
*
* @nb: notifier_block to unregister
*/
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb);

/**
* iosf_mbi_call_pmic_bus_access_notifier_chain - Call PMIC bus notifier chain
*
* @val: action to pass into listener's notifier_call function
* @v: data pointer to pass into listener's notifier_call function
*/
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v);

#else /* CONFIG_IOSF_MBI is not enabled */
static inline
bool iosf_mbi_available(void)
@@ -115,6 +180,28 @@ int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
WARN(1, "IOSF_MBI driver not available");
return -EPERM;
}

static inline void iosf_mbi_punit_acquire(void) {}
static inline void iosf_mbi_punit_release(void) {}

static inline
int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
{
return 0;
}

static inline
int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
{
return 0;
}

static inline
int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
{
return 0;
}

#endif /* CONFIG_IOSF_MBI */

#endif /* IOSF_MBI_SYMS_H */
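
A minimal consumer-side sketch of the interface declared above. Only the iosf_mbi_*() calls, the MBI_PMIC_BUS_ACCESS_* actions and the iosf_mbi_write()/iosf_mbi_available() signatures come from this header; the port/opcode/offset constants, the driver and all helper names are hypothetical placeholders.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/types.h>
#include <asm/iosf_mbi.h>

#define MY_PORT   0x04	/* placeholder IOSF port */
#define MY_OPCODE 0x11	/* placeholder write opcode */
#define MY_OFFSET 0x60	/* placeholder register offset */

/* Serialise a P-Unit request that may touch the PMIC bus against other
 * PMIC bus users (battery monitoring drivers etc.).  May sleep. */
static int my_punit_request(u32 val)
{
	int ret;

	iosf_mbi_punit_acquire();
	ret = iosf_mbi_write(MY_PORT, MY_OPCODE, MY_OFFSET, val);
	iosf_mbi_punit_release();

	return ret;
}

/* For work that cannot take the mutex (e.g. it runs from IRQ context),
 * prepare in process context whenever someone is about to use the bus. */
static int my_pmic_bus_notify(struct notifier_block *nb,
			      unsigned long action, void *data)
{
	switch (action) {
	case MBI_PMIC_BUS_ACCESS_BEGIN:
		/* grab whatever we would otherwise need from IRQ context */
		break;
	case MBI_PMIC_BUS_ACCESS_END:
		/* ... and drop it again */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_pmic_nb = {
	.notifier_call = my_pmic_bus_notify,
};

static int __init my_driver_init(void)
{
	if (!iosf_mbi_available())
		return -ENODEV;

	return iosf_mbi_register_pmic_bus_access_notifier(&my_pmic_nb);
}
module_init(my_driver_init);

Presumably this notifier mechanism is also why DRM_I915 now selects IOSF_MBI (see the Kconfig hunk below): it lets i915 prepare, in process context, before another driver starts a PMIC bus transfer on Baytrail/Cherrytrail systems.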
49 changes: 49 additions & 0 deletions arch/x86/platform/intel/iosf_mbi.c
@@ -34,6 +34,8 @@

static struct pci_dev *mbi_pdev;
static DEFINE_SPINLOCK(iosf_mbi_lock);
static DEFINE_MUTEX(iosf_mbi_punit_mutex);
static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);

static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
{
@@ -190,6 +192,53 @@ bool iosf_mbi_available(void)
}
EXPORT_SYMBOL(iosf_mbi_available);

void iosf_mbi_punit_acquire(void)
{
mutex_lock(&iosf_mbi_punit_mutex);
}
EXPORT_SYMBOL(iosf_mbi_punit_acquire);

void iosf_mbi_punit_release(void)
{
mutex_unlock(&iosf_mbi_punit_mutex);
}
EXPORT_SYMBOL(iosf_mbi_punit_release);

int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
{
int ret;

/* Wait for the bus to go inactive before registering */
mutex_lock(&iosf_mbi_punit_mutex);
ret = blocking_notifier_chain_register(
&iosf_mbi_pmic_bus_access_notifier, nb);
mutex_unlock(&iosf_mbi_punit_mutex);

return ret;
}
EXPORT_SYMBOL(iosf_mbi_register_pmic_bus_access_notifier);

int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
{
int ret;

/* Wait for the bus to go inactive before unregistering */
mutex_lock(&iosf_mbi_punit_mutex);
ret = blocking_notifier_chain_unregister(
&iosf_mbi_pmic_bus_access_notifier, nb);
mutex_unlock(&iosf_mbi_punit_mutex);

return ret;
}
EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);

int iosf_mbi_call_pmic_bus_access_notifier_chain(unsigned long val, void *v)
{
return blocking_notifier_call_chain(
&iosf_mbi_pmic_bus_access_notifier, val, v);
}
EXPORT_SYMBOL(iosf_mbi_call_pmic_bus_access_notifier_chain);

#ifdef CONFIG_IOSF_MBI_DEBUG
static u32 dbg_mdr;
static u32 dbg_mcr;
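
On the producer side, the bus driver that actually performs PMIC transfers is expected to take the mutex and run the notifier chain around each transfer. A rough sketch under that assumption — everything except the iosf_mbi_*() calls and the MBI_PMIC_BUS_ACCESS_* actions from the header above is invented:

#include <asm/iosf_mbi.h>

/* Stand-in for the real I2C transfer to the PMIC. */
static int do_pmic_transfer(void)
{
	return 0;
}

static int pmic_bus_xfer(void)
{
	int ret;

	/* Block new P-Unit requests and give registered listeners a
	 * chance to prepare; both steps run in process context. */
	iosf_mbi_punit_acquire();
	iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_BEGIN,
						     NULL);

	ret = do_pmic_transfer();

	iosf_mbi_call_pmic_bus_access_notifier_chain(MBI_PMIC_BUS_ACCESS_END,
						     NULL);
	iosf_mbi_punit_release();

	return ret;
}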
1 change: 1 addition & 0 deletions drivers/gpu/drm/i915/Kconfig
@@ -20,6 +20,7 @@ config DRM_I915
select ACPI_VIDEO if ACPI
select ACPI_BUTTON if ACPI
select SYNC_FILE
select IOSF_MBI
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/Makefile
@@ -105,8 +105,8 @@ i915-y += dvo_ch7017.o \
intel_dp.o \
intel_dsi.o \
intel_dsi_dcs_backlight.o \
intel_dsi_panel_vbt.o \
intel_dsi_pll.o \
intel_dsi_vbt.o \
intel_dvo.o \
intel_hdmi.o \
intel_i2c.o \
2 changes: 1 addition & 1 deletion drivers/gpu/drm/i915/gvt/gvt.h
@@ -160,7 +160,6 @@ struct intel_vgpu {
atomic_t running_workload_num;
DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
struct i915_gem_context *shadow_ctx;
struct notifier_block shadow_ctx_notifier_block;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
struct {
@@ -231,6 +230,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct intel_vgpu_type *types;
unsigned int num_types;
47 changes: 20 additions & 27 deletions drivers/gpu/drm/i915/gvt/scheduler.c
@@ -130,12 +130,10 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
struct intel_vgpu *vgpu = container_of(nb,
struct intel_vgpu, shadow_ctx_notifier_block);
struct drm_i915_gem_request *req =
(struct drm_i915_gem_request *)data;
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
shadow_ctx_notifier_block[req->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload =
scheduler->current_workload[req->engine->id];

@@ -214,7 +212,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
workload->status = ret;

if (!IS_ERR_OR_NULL(rq))
i915_add_request_no_flush(rq);
i915_add_request(rq);
mutex_unlock(&dev_priv->drm.struct_mutex);
return ret;
}
@@ -493,34 +491,32 @@ void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
int i;
struct intel_engine_cs *engine;
enum intel_engine_id i;

gvt_dbg_core("clean workload scheduler\n");

for (i = 0; i < I915_NUM_ENGINES; i++) {
if (scheduler->thread[i]) {
kthread_stop(scheduler->thread[i]);
scheduler->thread[i] = NULL;
}
for_each_engine(engine, gvt->dev_priv, i) {
atomic_notifier_chain_unregister(
&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
kthread_stop(scheduler->thread[i]);
}
}

int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct workload_thread_param *param = NULL;
struct intel_engine_cs *engine;
enum intel_engine_id i;
int ret;
int i;

gvt_dbg_core("init workload scheduler\n");

init_waitqueue_head(&scheduler->workload_complete_wq);

for (i = 0; i < I915_NUM_ENGINES; i++) {
/* check ring mask at init time */
if (!HAS_ENGINE(gvt->dev_priv, i))
continue;

for_each_engine(engine, gvt->dev_priv, i) {
init_waitqueue_head(&scheduler->waitq[i]);

param = kzalloc(sizeof(*param), GFP_KERNEL);
@@ -539,6 +535,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
ret = PTR_ERR(scheduler->thread[i]);
goto err;
}

gvt->shadow_ctx_notifier_block[i].notifier_call =
shadow_context_status_change;
atomic_notifier_chain_register(&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
}
return 0;
err:
@@ -550,9 +551,6 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)

void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu)
{
atomic_notifier_chain_unregister(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);

i915_gem_context_put_unlocked(vgpu->shadow_ctx);
}

@@ -567,10 +565,5 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu)

vgpu->shadow_ctx->engine[RCS].initialised = true;

vgpu->shadow_ctx_notifier_block.notifier_call =
shadow_context_status_change;

atomic_notifier_chain_register(&vgpu->shadow_ctx->status_notifier,
&vgpu->shadow_ctx_notifier_block);
return 0;
}
25 changes: 14 additions & 11 deletions drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -1279,11 +1279,17 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
* space. Parsing should be faster in some cases this way.
*/
batch_end = cmd + (batch_len / sizeof(*batch_end));
while (cmd < batch_end) {
do {
u32 length;

if (*cmd == MI_BATCH_BUFFER_END)
if (*cmd == MI_BATCH_BUFFER_END) {
if (needs_clflush_after) {
void *ptr = ptr_mask_bits(shadow_batch_obj->mm.mapping);
drm_clflush_virt_range(ptr,
(void *)(cmd + 1) - ptr);
}
break;
}

desc = find_cmd(engine, *cmd, desc, &default_desc);
if (!desc) {
@@ -1323,17 +1329,14 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
}

cmd += length;
}

if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
}
if (cmd >= batch_end) {
DRM_DEBUG_DRIVER("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL;
break;
}
} while (1);

if (ret == 0 && needs_clflush_after)
drm_clflush_virt_range(shadow_batch_obj->mm.mapping, batch_len);
i915_gem_object_unpin_map(shadow_batch_obj);

return ret;
}

(diffs for the remaining changed files not shown)
