
Commit

---
yaml
---
r: 311754
b: refs/heads/master
c: be2cf20
h: refs/heads/master
v: v3
Benjamin Herrenschmidt committed Jul 10, 2012
1 parent f5e3e68 commit e26c40e
Showing 6 changed files with 91 additions and 17 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: bc51b0c22cebf5c311a6f1895fcca9f78efd0478
+refs/heads/master: be2cf20a5ad31ebb13562c1c866ecc626fbd721e
2 changes: 2 additions & 0 deletions trunk/arch/powerpc/include/asm/hw_irq.h
@@ -125,6 +125,8 @@ static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
 	return !regs->softe;
 }
 
+extern bool prep_irq_for_idle(void);
+
 #else /* CONFIG_PPC64 */
 
 #define SET_MSR_EE(x)	mtmsr(x)
46 changes: 46 additions & 0 deletions trunk/arch/powerpc/kernel/irq.c
@@ -286,6 +286,52 @@ void notrace restore_interrupts(void)
 	__hard_irq_enable();
 }
 
+/*
+ * This is a helper to use when about to go into idle low-power
+ * when the latter has the side effect of re-enabling interrupts
+ * (such as calling H_CEDE under pHyp).
+ *
+ * You call this function with interrupts soft-disabled (this is
+ * already the case when ppc_md.power_save is called). The function
+ * will return whether to enter power save or just return.
+ *
+ * In the former case, it will have notified lockdep of interrupts
+ * being re-enabled and generally sanitized the lazy irq state,
+ * and in the latter case it will leave with interrupts hard
+ * disabled and marked as such, so the local_irq_enable() call
+ * in cpu_idle() will properly re-enable everything.
+ */
+bool prep_irq_for_idle(void)
+{
+	/*
+	 * First we need to hard disable to ensure no interrupt
+	 * occurs before we effectively enter the low power state
+	 */
+	hard_irq_disable();
+
+	/*
+	 * If anything happened while we were soft-disabled,
+	 * we return now and do not enter the low power state.
+	 */
+	if (lazy_irq_pending())
+		return false;
+
+	/* Tell lockdep we are about to re-enable */
+	trace_hardirqs_on();
+
+	/*
+	 * Mark interrupts as soft-enabled and clear the
+	 * PACA_IRQ_HARD_DIS from the pending mask since we
+	 * are about to hard enable as well as a side effect
+	 * of entering the low power state.
+	 */
+	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
+	local_paca->soft_enabled = 1;
+
+	/* Tell the caller to enter the low power state */
+	return true;
+}
+
 #endif /* CONFIG_PPC64 */
 
 int arch_show_interrupts(struct seq_file *p, int prec)
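For context, here is a minimal sketch of how a ppc_md.power_save hook is expected to use the new helper. This is a hypothetical illustration, not part of the commit; enter_low_power_state() is an assumed placeholder for a low-power entry that re-enables interrupts as a side effect (such as H_CEDE under pHyp).

/*
 * Hypothetical usage sketch, not from this commit. Entered with
 * interrupts soft-disabled, as ppc_md.power_save always is.
 */
static void example_power_save(void)
{
	/*
	 * If an interrupt arrived while we were soft-disabled,
	 * prep_irq_for_idle() returns false and leaves interrupts
	 * hard-disabled; the local_irq_enable() in cpu_idle() will
	 * then replay whatever is pending.
	 */
	if (!prep_irq_for_idle())
		return;

	/*
	 * Assumed placeholder: enters the low power state and
	 * hard-enables interrupts as a side effect.
	 */
	enter_low_power_state();
}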
11 changes: 6 additions & 5 deletions trunk/arch/powerpc/platforms/cell/pervasive.c
@@ -42,11 +42,9 @@ static void cbe_power_save(void)
 {
 	unsigned long ctrl, thread_switch_control;
 
-	/*
-	 * We need to hard disable interrupts, the local_irq_enable() done by
-	 * our caller upon return will hard re-enable.
-	 */
-	hard_irq_disable();
+	/* Ensure our interrupt state is properly tracked */
+	if (!prep_irq_for_idle())
+		return;
 
 	ctrl = mfspr(SPRN_CTRLF);
 
@@ -81,6 +79,9 @@ static void cbe_power_save(void)
 	 */
 	ctrl &= ~(CTRL_RUNLATCH | CTRL_TE);
 	mtspr(SPRN_CTRLT, ctrl);
+
+	/* Re-enable interrupts in MSR */
+	__hard_irq_enable();
 }
 
 static int cbe_system_reset_exception(struct pt_regs *regs)
17 changes: 10 additions & 7 deletions trunk/arch/powerpc/platforms/pseries/processor_idle.c
@@ -99,15 +99,18 @@ static int snooze_loop(struct cpuidle_device *dev,
 static void check_and_cede_processor(void)
 {
 	/*
-	 * Interrupts are soft-disabled at this point,
-	 * but not hard disabled. So an interrupt might have
-	 * occurred before entering NAP, and would be potentially
-	 * lost (edge events, decrementer events, etc...) unless
-	 * we first hard disable then check.
+	 * Ensure our interrupt state is properly tracked,
+	 * also checks if no interrupt has occurred while we
+	 * were soft-disabled
 	 */
-	hard_irq_disable();
-	if (!lazy_irq_pending())
+	if (prep_irq_for_idle()) {
 		cede_processor();
+#ifdef CONFIG_TRACE_IRQFLAGS
+		/* Ensure that H_CEDE returns with IRQs on */
+		if (WARN_ON(!(mfmsr() & MSR_EE)))
+			__hard_irq_enable();
+#endif
+	}
 }
 
 static int dedicated_cede_loop(struct cpuidle_device *dev,
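To spell out what the CONFIG_TRACE_IRQFLAGS hunk is guarding: once prep_irq_for_idle() returns true, lockdep already believes interrupts are on while MSR_EE is still clear, and H_CEDE is trusted to hard-enable. A restatement of that contract follows (illustrative comment only, not code added by this commit):

/*
 * Illustrative restatement of the IRQ-state contract around
 * cede_processor(); not code from this commit.
 *
 * After prep_irq_for_idle() returns true:
 *   lockdep state:       IRQs on (trace_hardirqs_on() was called)
 *   paca->soft_enabled:  1
 *   PACA_IRQ_HARD_DIS:   cleared from irq_happened
 *   MSR_EE (hardware):   still 0
 *
 * After cede_processor() (H_CEDE):
 *   MSR_EE (hardware):   expected 1, set by the hypervisor as a
 *                        side effect of the cede
 *
 * If the hypervisor breaks that expectation, the WARN_ON path
 * hard-enables by hand so the tracked state and the real MSR
 * agree again.
 */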
30 changes: 26 additions & 4 deletions trunk/drivers/of/base.c
@@ -511,6 +511,22 @@ struct device_node *of_find_node_with_property(struct device_node *from,
 }
 EXPORT_SYMBOL(of_find_node_with_property);
 
+static const struct of_device_id *of_match_compat(const struct of_device_id *matches,
+						  const char *compat)
+{
+	while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
+		const char *cp = matches->compatible;
+		int len = strlen(cp);
+
+		if (len > 0 && of_compat_cmp(compat, cp, len) == 0)
+			return matches;
+
+		matches++;
+	}
+
+	return NULL;
+}
+
 /**
  * of_match_node - Tell if an device_node has a matching of_match structure
  * @matches:	array of of device match structures to search in
@@ -521,9 +537,18 @@ EXPORT_SYMBOL(of_find_node_with_property);
 const struct of_device_id *of_match_node(const struct of_device_id *matches,
 					 const struct device_node *node)
 {
+	struct property *prop;
+	const char *cp;
+
 	if (!matches)
 		return NULL;
 
+	of_property_for_each_string(node, "compatible", prop, cp) {
+		const struct of_device_id *match = of_match_compat(matches, cp);
+		if (match)
+			return match;
+	}
+
 	while (matches->name[0] || matches->type[0] || matches->compatible[0]) {
 		int match = 1;
 		if (matches->name[0])
@@ -532,10 +557,7 @@ const struct of_device_id *of_match_node(const struct of_device_id *matches,
 		if (matches->type[0])
 			match &= node->type
 				&& !strcmp(matches->type, node->type);
-		if (matches->compatible[0])
-			match &= of_device_is_compatible(node,
-							 matches->compatible);
-		if (match)
+		if (match && !matches->compatible[0])
 			return matches;
 		matches++;
 	}
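The practical effect of the of/base.c change: of_match_node() now iterates over the node's "compatible" strings in order, trying each one against the whole match table, rather than scanning the table and taking the first entry that matches anywhere in the node's list. A hypothetical example (names invented for illustration):

/*
 * Hypothetical example, not from this commit. Assume a node with:
 *
 *     compatible = "acme,widget-v2", "acme,widget";
 *
 * and this match table:
 */
static const struct of_device_id widget_matches[] = {
	{ .compatible = "acme,widget" },	/* generic fallback */
	{ .compatible = "acme,widget-v2" },	/* specific variant */
	{ /* sentinel */ }
};

/*
 * Previously, of_match_node(widget_matches, node) scanned the table
 * in order and returned the generic "acme,widget" entry. With this
 * change, the node's first (most specific) compatible string is
 * tried against the whole table first, so the "acme,widget-v2"
 * entry wins regardless of its position in the table.
 */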
