Skip to content

Commit

Permalink
drm/msm/dpu: hw_intr: always call dpu_hw_intr_clear_intr_status_nolock
Browse files Browse the repository at this point in the history
Always call dpu_hw_intr_clear_intr_status_nolock() from
dpu_hw_intr_dispatch_irqs(). This simplifies the callback functions
(which would clear the interrupts anyway) and guarantees that the HW
interrupt status is cleared.

Signed-off-by: Dmitry Baryshkov <dmitry.baryshkov@linaro.org>
Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Reviewed-by: Abhinav Kumar <abhinavk@codeaurora.org>
Link: https://lore.kernel.org/r/20210516202910.2141079-3-dmitry.baryshkov@linaro.org
Signed-off-by: Rob Clark <robdclark@chromium.org>
  • Loading branch information
Dmitry Baryshkov authored and Rob Clark committed Jun 23, 2021
1 parent 09e3a2b commit 98fbe6b
Show file tree
Hide file tree
Showing 3 changed files with 18 additions and 39 deletions.
9 changes: 0 additions & 9 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
Original file line number Diff line number Diff line change
Expand Up @@ -41,15 +41,6 @@ static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
if (cb->func)
cb->func(cb->arg, irq_idx);
spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);

/*
* Clear pending interrupt status in HW.
* NOTE: dpu_core_irq_callback_handler is protected by top-level
* spinlock, so it is safe to clear any interrupt status here.
*/
dpu_kms->hw_intr->ops.clear_intr_status_nolock(
dpu_kms->hw_intr,
irq_idx);
}

int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
Expand Down
39 changes: 18 additions & 21 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
Original file line number Diff line number Diff line change
Expand Up @@ -1362,6 +1362,22 @@ static int dpu_hw_intr_irqidx_lookup(struct dpu_hw_intr *intr,
return -EINVAL;
}

/*
 * dpu_hw_intr_clear_intr_status_nolock - clear the pending HW status bit
 * for a single interrupt, without taking any lock.
 * @intr: DPU HW interrupt context; NULL is tolerated and treated as a no-op
 * @irq_idx: lookup index into dpu_irq_map; assumed already validated by the
 *           caller — TODO confirm against dpu_hw_intr_irqidx_lookup() users
 *
 * NOTE(review): "_nolock" suggests the caller is expected to hold the
 * relevant irq lock (or run in irq-dispatch context) — confirm at call sites.
 */
static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
int irq_idx)
{
int reg_idx;

if (!intr)
return;

/* Map the logical irq index to its interrupt register bank, then write
 * the irq's mask bit to that bank's clear-offset register.
 */
reg_idx = dpu_irq_map[irq_idx].reg_idx;
DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
dpu_irq_map[irq_idx].irq_mask);

/* ensure register writes go through */
wmb();
}

static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
void (*cbfunc)(void *, int),
void *arg)
Expand Down Expand Up @@ -1430,9 +1446,8 @@ static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
*/
if (cbfunc)
cbfunc(arg, irq_idx);
else
intr->ops.clear_intr_status_nolock(
intr, irq_idx);

dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);

/*
* When callback finish, clear the irq_status
Expand Down Expand Up @@ -1597,23 +1612,6 @@ static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
return 0;
}


/*
 * dpu_hw_intr_clear_intr_status_nolock - clear the pending HW status bit
 * for a single interrupt, without taking any lock.
 * @intr: DPU HW interrupt context; NULL is tolerated and treated as a no-op
 * @irq_idx: lookup index into dpu_irq_map; assumed already validated by the
 *           caller — TODO confirm
 *
 * (This is the pre-move copy being deleted by the diff; the function body is
 * identical to the copy added earlier in the file.)
 */
static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
int irq_idx)
{
int reg_idx;

if (!intr)
return;

/* Map the logical irq index to its register bank and write the irq's
 * mask bit to the bank's clear register.
 */
reg_idx = dpu_irq_map[irq_idx].reg_idx;
DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
dpu_irq_map[irq_idx].irq_mask);

/* ensure register writes go through */
wmb();
}

static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
int irq_idx, bool clear)
{
Expand Down Expand Up @@ -1655,7 +1653,6 @@ static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
}

Expand Down
9 changes: 0 additions & 9 deletions drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
Original file line number Diff line number Diff line change
Expand Up @@ -142,15 +142,6 @@ struct dpu_hw_intr_ops {
void (*cbfunc)(void *arg, int irq_idx),
void *arg);

/**
* clear_intr_status_nolock() - clears the HW interrupts without lock
* @intr: HW interrupt handle
* @irq_idx: Lookup irq index return from irq_idx_lookup
*/
void (*clear_intr_status_nolock)(
struct dpu_hw_intr *intr,
int irq_idx);

/**
* get_interrupt_status - Gets HW interrupt status, and clear if set,
* based on given lookup IRQ index.
Expand Down

0 comments on commit 98fbe6b

Please sign in to comment.