diff --git a/[refs] b/[refs]
index 3944e8cb3572..2b7d6e309126 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 6c3c3158a81d6a92d335dd27ad9eb43f6b4c664b
+refs/heads/master: 098fb9db2c74cfd6ffdbf61eb026a0c21abc5f75
diff --git a/trunk/drivers/ide/ide-taskfile.c b/trunk/drivers/ide/ide-taskfile.c
index 4c86a8d84b4c..0518a2e948cf 100644
--- a/trunk/drivers/ide/ide-taskfile.c
+++ b/trunk/drivers/ide/ide-taskfile.c
@@ -422,25 +422,6 @@ void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
 	ide_end_request(drive, 1, rq->nr_sectors);
 }
 
-/*
- * We got an interrupt on a task_in case, but no errors and no DRQ.
- *
- * It might be a spurious irq (shared irq), but it might be a
- * command that had no output.
- */
-static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
-{
-	/* Command all done? */
-	if (OK_STAT(stat, READY_STAT, BUSY_STAT)) {
-		task_end_request(drive, rq, stat);
-		return ide_stopped;
-	}
-
-	/* Assume it was a spurious irq */
-	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
-	return ide_started;
-}
-
 /*
  * Handler for command with PIO data-in phase (Read/Read Multiple).
  */
@@ -450,17 +431,18 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
 	struct request *rq = HWGROUP(drive)->rq;
 	u8 stat = ide_read_status(drive);
 
-	/* Error? */
-	if (stat & ERR_STAT)
-		return task_error(drive, rq, __FUNCTION__, stat);
-
-	/* Didn't want any data? Odd. */
-	if (!(stat & DRQ_STAT))
-		return task_in_unexpected(drive, rq, stat);
+	/* new way for dealing with premature shared PCI interrupts */
+	if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
+		if (stat & (ERR_STAT | DRQ_STAT))
+			return task_error(drive, rq, __FUNCTION__, stat);
+		/* No data yet, so wait for another IRQ. */
+		ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
+		return ide_started;
+	}
 
 	ide_pio_datablock(drive, rq, 0);
 
-	/* Are we done? Check status and finish transfer. */
+	/* If it was the last datablock check status and finish transfer. */
 	if (!hwif->nleft) {
 		stat = wait_drive_not_busy(drive);
 		if (!OK_STAT(stat, 0, BAD_STAT))
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index f2cc59080efa..70679b266693 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -980,12 +980,59 @@ static inline int wake_idle(int cpu, struct task_struct *p)
 #endif
 
 #ifdef CONFIG_SMP
+
+static int
+wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
+	    int cpu, int this_cpu, int sync, int idx,
+	    unsigned long load, unsigned long this_load,
+	    unsigned int imbalance)
+{
+	unsigned long tl = this_load;
+	unsigned long tl_per_task;
+
+	if (!(this_sd->flags & SD_WAKE_AFFINE))
+		return 0;
+
+	/*
+	 * Attract cache-cold tasks on sync wakeups:
+	 */
+	if (sync && !task_hot(p, rq->clock, this_sd))
+		return 1;
+
+	schedstat_inc(p, se.nr_wakeups_affine_attempts);
+	tl_per_task = cpu_avg_load_per_task(this_cpu);
+
+	/*
+	 * If sync wakeup then subtract the (maximum possible)
+	 * effect of the currently running task from the load
+	 * of the current CPU:
+	 */
+	if (sync)
+		tl -= current->se.load.weight;
+
+	if ((tl <= load && tl + target_load(cpu, idx) <= tl_per_task) ||
+			100*(tl + p->se.load.weight) <= imbalance*load) {
+		/*
+		 * This domain has SD_WAKE_AFFINE and
+		 * p is cache cold in this domain, and
+		 * there is no bad imbalance.
+		 */
+		schedstat_inc(this_sd, ttwu_move_affine);
+		schedstat_inc(p, se.nr_wakeups_affine);
+
+		return 1;
+	}
+	return 0;
+}
+
 static int select_task_rq_fair(struct task_struct *p, int sync)
 {
-	int cpu, this_cpu;
-	struct rq *rq;
 	struct sched_domain *sd, *this_sd = NULL;
-	int new_cpu;
+	unsigned long load, this_load;
+	int cpu, this_cpu, new_cpu;
+	unsigned int imbalance;
+	struct rq *rq;
+	int idx;
 
 	cpu = task_cpu(p);
 	rq = task_rq(p);
@@ -1008,66 +1055,35 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	/*
 	 * Check for affine wakeup and passive balancing possibilities.
 	 */
-	if (this_sd) {
-		int idx = this_sd->wake_idx;
-		unsigned int imbalance;
-		unsigned long load, this_load;
-
-		imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
-
-		load = source_load(cpu, idx);
-		this_load = target_load(this_cpu, idx);
-
-		new_cpu = this_cpu; /* Wake to this CPU if we can */
-
-		if (this_sd->flags & SD_WAKE_AFFINE) {
-			unsigned long tl = this_load;
-			unsigned long tl_per_task;
-
-			/*
-			 * Attract cache-cold tasks on sync wakeups:
-			 */
-			if (sync && !task_hot(p, rq->clock, this_sd))
-				goto out_set_cpu;
-
-			schedstat_inc(p, se.nr_wakeups_affine_attempts);
-			tl_per_task = cpu_avg_load_per_task(this_cpu);
-
-			/*
-			 * If sync wakeup then subtract the (maximum possible)
-			 * effect of the currently running task from the load
-			 * of the current CPU:
-			 */
-			if (sync)
-				tl -= current->se.load.weight;
-
-			if ((tl <= load &&
-				tl + target_load(cpu, idx) <= tl_per_task) ||
-				100*(tl + p->se.load.weight) <= imbalance*load) {
-				/*
-				 * This domain has SD_WAKE_AFFINE and
-				 * p is cache cold in this domain, and
-				 * there is no bad imbalance.
-				 */
-				schedstat_inc(this_sd, ttwu_move_affine);
-				schedstat_inc(p, se.nr_wakeups_affine);
-				goto out_set_cpu;
-			}
-		}
+	if (!this_sd)
+		goto out_keep_cpu;
 
-		/*
-		 * Start passive balancing when half the imbalance_pct
-		 * limit is reached.
-		 */
-		if (this_sd->flags & SD_WAKE_BALANCE) {
-			if (imbalance*this_load <= 100*load) {
-				schedstat_inc(this_sd, ttwu_move_balance);
-				schedstat_inc(p, se.nr_wakeups_passive);
-				goto out_set_cpu;
-			}
+	idx = this_sd->wake_idx;
+
+	imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;
+
+	load = source_load(cpu, idx);
+	this_load = target_load(this_cpu, idx);
+
+	new_cpu = this_cpu; /* Wake to this CPU if we can */
+
+	if (wake_affine(rq, this_sd, p, cpu, this_cpu, sync, idx,
+				load, this_load, imbalance))
+		goto out_set_cpu;
+
+	/*
+	 * Start passive balancing when half the imbalance_pct
+	 * limit is reached.
+	 */
+	if (this_sd->flags & SD_WAKE_BALANCE) {
+		if (imbalance*this_load <= 100*load) {
+			schedstat_inc(this_sd, ttwu_move_balance);
+			schedstat_inc(p, se.nr_wakeups_passive);
+			goto out_set_cpu;
 		}
 	}
 
+out_keep_cpu:
 	new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
 out_set_cpu:
 	return wake_idle(new_cpu, p);
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index ca71d5b81e4a..96d63eb3ab17 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -1536,15 +1536,9 @@ static void *__slab_alloc(struct kmem_cache *s,
 	 * That is only possible if certain conditions are met that are being
 	 * checked when a slab is created.
 	 */
-	if (!(gfpflags & __GFP_NORETRY) &&
-			(s->flags & __PAGE_ALLOC_FALLBACK)) {
-		if (gfpflags & __GFP_WAIT)
-			local_irq_enable();
-		object = kmalloc_large(s->objsize, gfpflags);
-		if (gfpflags & __GFP_WAIT)
-			local_irq_disable();
-		return object;
-	}
+	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+		return kmalloc_large(s->objsize, gfpflags);
+
 	return NULL;
 debug:
 	if (!alloc_debug_processing(s, c->page, object, addr))
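
A note on the reworked task_in_intr() above: OK_STAT(stat, DRQ_STAT, BAD_R_STAT) accepts the interrupt only when DRQ is set and neither BUSY nor ERR is, which is what lets the handler tell a premature shared-PCI IRQ apart from a real error. Below is a minimal user-space model of that three-way decision, assuming the classic ATA status bit values and the OK_STAT() shape from <linux/ide.h>; it is an illustrative sketch, not kernel code.

/*
 * User-space model of the task_in_intr() status decision.
 * Bit values and the OK_STAT() macro mirror the traditional ATA
 * definitions as assumed here; treat them as illustrative.
 */
#include <stdio.h>

#define BUSY_STAT  0x80
#define DRQ_STAT   0x08
#define ERR_STAT   0x01
#define BAD_R_STAT (BUSY_STAT | ERR_STAT)

/* OK_STAT(stat, good, bad): all "good" bits set, no "bad" bits set. */
#define OK_STAT(stat, good, bad) (((stat) & ((good) | (bad))) == (good))

static const char *classify(unsigned char stat)
{
	if (OK_STAT(stat, DRQ_STAT, BAD_R_STAT))
		return "data ready: transfer a block";
	if (stat & (ERR_STAT | DRQ_STAT))
		return "error: fail the request";
	return "no data yet: rearm handler and wait for the next IRQ";
}

int main(void)
{
	unsigned char samples[] = { DRQ_STAT, ERR_STAT, 0x00,
				    BUSY_STAT | DRQ_STAT };
	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("stat=0x%02x -> %s\n", samples[i], classify(samples[i]));
	return 0;
}

Note how DRQ together with BUSY falls into the error branch, exactly as in the patched handler: OK_STAT() rejects it because a bad bit is set, and the DRQ bit then routes it to task_error().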
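For the sched_fair.c change, the pull-to-waking-CPU test that moved into wake_affine() is plain integer arithmetic. The sketch below replays it with invented load numbers so the two conditions can be inspected in isolation; task_hot() and the schedstat accounting are omitted, and the target_load(cpu, idx) term is approximated by the already-sampled load. All names and values here are assumptions for illustration, not the scheduler API.

/*
 * Stand-alone replay of the wake_affine() arithmetic from the patch.
 * Inputs are invented; the kernel derives them from source_load(),
 * target_load(), se.load.weight and the domain's imbalance_pct.
 */
#include <stdio.h>

static int wake_affine_decision(unsigned long load,      /* prev CPU load */
				unsigned long this_load, /* waking CPU load */
				unsigned long tl_per_task,
				unsigned long p_weight,  /* p->se.load.weight */
				unsigned long curr_weight,
				unsigned int imbalance_pct,
				int sync)
{
	/* Same derivation as the patch: half-way toward imbalance_pct. */
	unsigned int imbalance = 100 + (imbalance_pct - 100) / 2;
	unsigned long tl = this_load;

	/* Sync wakeup: the waker is about to sleep, discount it (as in
	 * the patch). */
	if (sync)
		tl -= curr_weight;

	/* "load" stands in for target_load(cpu, idx) in the first term. */
	return (tl <= load && tl + load <= tl_per_task) ||
		100 * (tl + p_weight) <= imbalance * load;
}

int main(void)
{
	/* Invented numbers: waking CPU lightly loaded, prev CPU busy. */
	int affine = wake_affine_decision(2048, 1024, 1536, 1024, 1024,
					  125, /* sync = */ 1);
	printf("pull task to waking CPU? %s\n", affine ? "yes" : "no");
	return 0;
}

With these sample numbers the first condition fails (the combined load exceeds tl_per_task) but the second, imbalance-scaled condition passes, so the task would be woken on this_cpu.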
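Finally, the mm/slub.c hunk trims the emergency path: when a higher-order slab allocation fails and the cache permits it, the allocator now returns kmalloc_large() directly instead of toggling interrupts around the call. The following user-space caricature shows only that fallback shape; slab_alloc_order(), page_alloc_single() and the flag value are invented stand-ins, not the real SLUB internals.

/*
 * Caricature of the SLUB fallback: prefer the normal higher-order
 * allocation, fall back to a single-"page" allocation when the object
 * fits and the caller allowed the extra effort. Names are invented.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE   4096
#define GFP_NORETRY 0x1	/* caller forbids the fallback attempt */

static void *slab_alloc_order(size_t size, int order)
{
	/* Pretend the order-N allocation failed under memory pressure. */
	(void)size; (void)order;
	return NULL;
}

static void *page_alloc_single(size_t size)
{
	return malloc(size);	/* stands in for kmalloc_large() */
}

static void *alloc_with_fallback(size_t size, int order, unsigned flags)
{
	void *obj = slab_alloc_order(size, order);

	if (obj)
		return obj;
	/* Fall back only if allowed and the object fits one page. */
	if (!(flags & GFP_NORETRY) && size <= PAGE_SIZE)
		return page_alloc_single(size);
	return NULL;
}

int main(void)
{
	void *p = alloc_with_fallback(512, 3, 0);
	printf("fallback %s\n", p ? "succeeded" : "failed");
	free(p);
	return 0;
}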