
Commit 890468c

---
yaml
---
r: 87484
b: refs/heads/master
c: f79abb6
h: refs/heads/master
v: v3
Zhang Wei authored and Dan Williams committed Mar 19, 2008
1 parent c8915a7 commit 890468c
Showing 12 changed files with 132 additions and 171 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: d7a0e1f56472db0825e13f9dd39f0ad79b8c8b3e
refs/heads/master: f79abb627f033c85a6088231f20c85bc4a9bd757
11 changes: 11 additions & 0 deletions trunk/drivers/acpi/ec.c
@@ -129,6 +129,7 @@ static struct acpi_ec {
struct mutex lock;
wait_queue_head_t wait;
struct list_head list;
atomic_t irq_count;
u8 handlers_installed;
} *boot_ec, *first_ec;

@@ -181,6 +182,8 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
{
int ret = 0;

atomic_set(&ec->irq_count, 0);

if (unlikely(event == ACPI_EC_EVENT_OBF_1 &&
test_bit(EC_FLAGS_NO_OBF1_GPE, &ec->flags)))
force_poll = 1;
@@ -227,6 +230,7 @@ static int acpi_ec_wait(struct acpi_ec *ec, enum ec_event event, int force_poll)
while (time_before(jiffies, delay)) {
if (acpi_ec_check_status(ec, event))
goto end;
msleep(5);
}
}
pr_err(PREFIX "acpi_ec_wait timeout,"
@@ -529,6 +533,13 @@ static u32 acpi_ec_gpe_handler(void *data)
struct acpi_ec *ec = data;

pr_debug(PREFIX "~~~> interrupt\n");
atomic_inc(&ec->irq_count);
if (atomic_read(&ec->irq_count) > 5) {
pr_err(PREFIX "GPE storm detected, disabling EC GPE\n");
acpi_disable_gpe(NULL, ec->gpe, ACPI_ISR);
clear_bit(EC_FLAGS_GPE_MODE, &ec->flags);
return ACPI_INTERRUPT_HANDLED;
}
clear_bit(EC_FLAGS_WAIT_GPE, &ec->flags);
if (test_bit(EC_FLAGS_GPE_MODE, &ec->flags))
wake_up(&ec->wait);
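
Note on the drivers/acpi/ec.c hunks above: they guard against GPE interrupt storms by counting interrupts per transaction. acpi_ec_wait() zeroes ec->irq_count before waiting, the GPE handler increments it, and once more than 5 interrupts arrive for a single event the handler disables the EC GPE and the driver falls back to polling (the new msleep(5) in the poll loop keeps that path from busy-waiting). The sketch below is a minimal userspace model of that counter-and-threshold pattern; the struct, helper names and main() driver are illustrative assumptions, not kernel code.

/*
 * Simplified userspace model of the storm-detection logic above: a
 * per-transaction counter is reset before each wait, incremented on every
 * interrupt, and once it exceeds the threshold the "GPE" is disabled and
 * the driver falls back to polling.
 */
#include <stdbool.h>
#include <stdio.h>

#define IRQ_STORM_THRESHOLD 5           /* same limit as the patch */

struct fake_ec {
	int irq_count;                  /* models ec->irq_count */
	bool gpe_enabled;               /* models the EC GPE / EC_FLAGS_GPE_MODE */
};

static void begin_transaction(struct fake_ec *ec)
{
	ec->irq_count = 0;              /* corresponds to atomic_set(&ec->irq_count, 0) */
}

/* Returns true if the interrupt was handled normally, false if the storm
 * guard tripped and interrupt mode was switched off. */
static bool handle_irq(struct fake_ec *ec)
{
	if (++ec->irq_count > IRQ_STORM_THRESHOLD) {
		ec->gpe_enabled = false;  /* stands in for acpi_disable_gpe() */
		return false;
	}
	return true;                    /* normal path: wake up the waiter */
}

int main(void)
{
	struct fake_ec ec = { .irq_count = 0, .gpe_enabled = true };
	int i;

	begin_transaction(&ec);
	for (i = 0; i < 8; i++) {
		if (!handle_irq(&ec)) {
			printf("storm after %d interrupts, polling from now on\n",
			       ec.irq_count);
			break;
		}
	}
	return 0;
}

The threshold of 5 matches the patch; a real driver also has to keep servicing requests in poll mode afterwards, which this model leaves out.
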
30 changes: 30 additions & 0 deletions trunk/drivers/dma/fsldma.c
@@ -123,6 +123,11 @@ static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
u32 sr = get_sr(fsl_chan);
@@ -426,6 +431,9 @@ fsl_dma_prep_interrupt(struct dma_chan *chan)
new->async_tx.cookie = -EBUSY;
new->async_tx.ack = 0;

/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &new->async_tx.tx_list);

/* Set End-of-link to the last link descriptor of new list*/
set_ld_eol(fsl_chan, new);

@@ -701,6 +709,23 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
if (stat & FSL_DMA_SR_TE)
dev_err(fsl_chan->dev, "Transfer Error!\n");

/* Programming Error
* The DMA_INTERRUPT async_tx is a NULL transfer, which will
* trigger a PE interrupt.
*/
if (stat & FSL_DMA_SR_PE) {
dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
if (get_bcr(fsl_chan) == 0) {
/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
* Now, update the completed cookie, and continue the
* next uncompleted transfer.
*/
fsl_dma_update_completed_cookie(fsl_chan);
fsl_chan_xfer_ld_queue(fsl_chan);
}
stat &= ~FSL_DMA_SR_PE;
}

/* If the link descriptor segment transfer finishes,
* we will recycle the used descriptor.
*/
@@ -841,6 +866,11 @@ static int fsl_dma_self_test(struct fsl_dma_chan *fsl_chan)
tx3 = fsl_dma_prep_memcpy(chan, dma_dest, dma_src, test_size / 4, 0);
async_tx_ack(tx3);

/* Interrupt tx test */
tx1 = fsl_dma_prep_interrupt(chan);
async_tx_ack(tx1);
cookie = fsl_dma_tx_submit(tx1);

/* Test exchanging the prepared tx sort */
cookie = fsl_dma_tx_submit(tx3);
cookie = fsl_dma_tx_submit(tx2);
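
Note on the drivers/dma/fsldma.c hunks above: a DMA_INTERRUPT descriptor prepared by fsl_dma_prep_interrupt() is a NULL transfer, so its completion shows up as a Programming Error (PE) interrupt rather than an end-of-segment event. The new handler code therefore reads the byte count register via get_bcr(): a value of zero means the PE was the expected completion of such a NULL descriptor, so the completed cookie is updated and the next link descriptor is started; the PE bit is cleared either way. A minimal sketch of that dispatch decision follows; the register access and the two callbacks are stand-ins, not the driver's real API.

/*
 * Plain-C model of the PE-interrupt dispatch added above; names and
 * structure here are illustrative assumptions only.
 */
#include <stdint.h>
#include <stdio.h>

#define SR_PE 0x00000010u               /* Programming Error, as in fsldma.h */

struct fake_chan {
	uint32_t bcr;                   /* byte count register: 0 for a NULL transfer */
};

static void update_completed_cookie(struct fake_chan *c) { (void)c; puts("cookie updated"); }
static void start_next_transfer(struct fake_chan *c)     { (void)c; puts("next LD started"); }

static uint32_t handle_pe(struct fake_chan *c, uint32_t stat)
{
	if (stat & SR_PE) {
		if (c->bcr == 0) {
			/* BCR == 0: the PE came from a deliberate DMA_INTERRUPT
			 * (NULL) descriptor, not a real programming error, so
			 * just advance the descriptor queue. */
			update_completed_cookie(c);
			start_next_transfer(c);
		}
		stat &= ~SR_PE;         /* clear the bit so later checks ignore it */
	}
	return stat;
}

int main(void)
{
	struct fake_chan chan = { .bcr = 0 };
	uint32_t stat = handle_pe(&chan, SR_PE);

	printf("remaining status bits: 0x%08x\n", (unsigned int)stat);
	return 0;
}

The self-test addition exercises exactly this path by submitting an interrupt descriptor (tx1) alongside the memcpy descriptors.
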
1 change: 1 addition & 0 deletions trunk/drivers/dma/fsldma.h
@@ -40,6 +40,7 @@
#define FSL_DMA_MR_EOTIE 0x00000080

#define FSL_DMA_SR_CH 0x00000020
#define FSL_DMA_SR_PE 0x00000010
#define FSL_DMA_SR_CB 0x00000004
#define FSL_DMA_SR_TE 0x00000080
#define FSL_DMA_SR_EOSI 0x00000002
36 changes: 9 additions & 27 deletions trunk/drivers/ide/ide-taskfile.c
@@ -422,25 +422,6 @@ void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
ide_end_request(drive, 1, rq->nr_sectors);
}

/*
* We got an interrupt on a task_in case, but no errors and no DRQ.
*
* It might be a spurious irq (shared irq), but it might be a
* command that had no output.
*/
static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
{
/* Command all done? */
if (OK_STAT(stat, READY_STAT, BUSY_STAT)) {
task_end_request(drive, rq, stat);
return ide_stopped;
}

/* Assume it was a spurious irq */
ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
return ide_started;
}

/*
* Handler for command with PIO data-in phase (Read/Read Multiple).
*/
@@ -450,17 +431,18 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
struct request *rq = HWGROUP(drive)->rq;
u8 stat = ide_read_status(drive);

/* Error? */
if (stat & ERR_STAT)
return task_error(drive, rq, __FUNCTION__, stat);

/* Didn't want any data? Odd. */
if (!(stat & DRQ_STAT))
return task_in_unexpected(drive, rq, stat);
/* new way for dealing with premature shared PCI interrupts */
if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
if (stat & (ERR_STAT | DRQ_STAT))
return task_error(drive, rq, __FUNCTION__, stat);
/* No data yet, so wait for another IRQ. */
ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
return ide_started;
}

ide_pio_datablock(drive, rq, 0);

/* Are we done? Check status and finish transfer. */
/* If it was the last datablock check status and finish transfer. */
if (!hwif->nleft) {
stat = wait_drive_not_busy(drive);
if (!OK_STAT(stat, 0, BAD_STAT))
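
Note on the drivers/ide/ide-taskfile.c hunks above: the separate task_in_unexpected() helper is removed and its logic folded into task_in_intr(), which now makes a single OK_STAT(stat, DRQ_STAT, BAD_R_STAT) check. If the drive is not ready for data, a status carrying ERR or DRQ is reported through task_error(), while a clean status is treated as a premature shared-PCI interrupt and the handler is simply re-armed with ide_set_handler(). The standalone sketch below reproduces that decision tree; the bit values and the OK_STAT macro follow the IDE definitions, but the enum and the demo loop are illustrative only.

/*
 * Model of the new status decision: one "ready for data?" test, with errors
 * and premature (shared-IRQ) interrupts separated inside the failure branch.
 */
#include <stdint.h>
#include <stdio.h>

#define ERR_STAT   0x01
#define DRQ_STAT   0x08
#define BUSY_STAT  0x80
#define BAD_R_STAT (BUSY_STAT | ERR_STAT)

/* OK_STAT(stat, good, bad): every "good" bit set and no "bad" bit set. */
#define OK_STAT(stat, good, bad) (((stat) & ((good) | (bad))) == (good))

enum action { READ_DATA, REPORT_ERROR, REARM_HANDLER };

static enum action classify(uint8_t stat)
{
	if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
		if (stat & (ERR_STAT | DRQ_STAT))
			return REPORT_ERROR;  /* real error, or DRQ with BUSY/ERR set */
		return REARM_HANDLER;         /* premature shared-PCI interrupt */
	}
	return READ_DATA;                     /* DRQ set, no error: move a datablock */
}

int main(void)
{
	const uint8_t samples[] = { DRQ_STAT, ERR_STAT, 0x00, DRQ_STAT | BUSY_STAT };
	size_t i;

	for (i = 0; i < sizeof(samples); i++)
		printf("stat=0x%02x -> action %d\n", samples[i], (int)classify(samples[i]));
	return 0;
}
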
3 changes: 0 additions & 3 deletions trunk/include/linux/sched.h
@@ -929,9 +929,6 @@ struct sched_entity {
u64 vruntime;
u64 prev_sum_exec_runtime;

u64 last_wakeup;
u64 avg_overlap;

#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
u64 wait_max;
1 change: 1 addition & 0 deletions trunk/include/linux/topology.h
@@ -138,6 +138,7 @@
| SD_BALANCE_FORK \
| SD_BALANCE_EXEC \
| SD_WAKE_AFFINE \
| SD_WAKE_IDLE \
| SD_SHARE_PKG_RESOURCES\
| BALANCE_FOR_MC_POWER, \
.last_balance = jiffies, \
11 changes: 1 addition & 10 deletions trunk/kernel/sched.c
@@ -1396,12 +1396,6 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
{
s64 delta;

/*
* Buddy candidates are cache hot:
*/
if (&p->se == cfs_rq_of(&p->se)->next)
return 1;

if (p->sched_class != &fair_sched_class)
return 0;

@@ -1861,11 +1855,10 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
schedstat_inc(p, se.nr_wakeups_remote);
update_rq_clock(rq);
activate_task(rq, p, 1);
check_preempt_curr(rq, p);
success = 1;

out_running:
check_preempt_curr(rq, p);

p->state = TASK_RUNNING;
#ifdef CONFIG_SMP
if (p->sched_class->task_wake_up)
@@ -1899,8 +1892,6 @@ static void __sched_fork(struct task_struct *p)
p->se.exec_start = 0;
p->se.sum_exec_runtime = 0;
p->se.prev_sum_exec_runtime = 0;
p->se.last_wakeup = 0;
p->se.avg_overlap = 0;

#ifdef CONFIG_SCHEDSTATS
p->se.wait_start = 0;
1 change: 0 additions & 1 deletion trunk/kernel/sched_debug.c
@@ -288,7 +288,6 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
PN(se.exec_start);
PN(se.vruntime);
PN(se.sum_exec_runtime);
PN(se.avg_overlap);

nr_switches = p->nvcsw + p->nivcsw;

(Diffs for the remaining 3 changed files did not load.)
