
Commit

---
yaml
---
r: 87474
b: refs/heads/master
c: 098fb9d
h: refs/heads/master
v: v3
Ingo Molnar committed Mar 19, 2008
1 parent 1a14e3a commit a3ca40c
Showing 4 changed files with 88 additions and 96 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 6c3c3158a81d6a92d335dd27ad9eb43f6b4c664b
refs/heads/master: 098fb9db2c74cfd6ffdbf61eb026a0c21abc5f75
36 changes: 9 additions & 27 deletions trunk/drivers/ide/ide-taskfile.c
@@ -422,25 +422,6 @@ void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
ide_end_request(drive, 1, rq->nr_sectors);
}

/*
* We got an interrupt on a task_in case, but no errors and no DRQ.
*
* It might be a spurious irq (shared irq), but it might be a
* command that had no output.
*/
static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
{
/* Command all done? */
if (OK_STAT(stat, READY_STAT, BUSY_STAT)) {
task_end_request(drive, rq, stat);
return ide_stopped;
}

/* Assume it was a spurious irq */
ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
return ide_started;
}

/*
* Handler for command with PIO data-in phase (Read/Read Multiple).
*/
@@ -450,17 +431,18 @@ static ide_startstop_t task_in_intr(ide_drive_t *drive)
struct request *rq = HWGROUP(drive)->rq;
u8 stat = ide_read_status(drive);

/* Error? */
if (stat & ERR_STAT)
return task_error(drive, rq, __FUNCTION__, stat);

/* Didn't want any data? Odd. */
if (!(stat & DRQ_STAT))
return task_in_unexpected(drive, rq, stat);
/* new way for dealing with premature shared PCI interrupts */
if (!OK_STAT(stat, DRQ_STAT, BAD_R_STAT)) {
if (stat & (ERR_STAT | DRQ_STAT))
return task_error(drive, rq, __FUNCTION__, stat);
/* No data yet, so wait for another IRQ. */
ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
return ide_started;
}

ide_pio_datablock(drive, rq, 0);

/* Are we done? Check status and finish transfer. */
/* If it was the last datablock check status and finish transfer. */
if (!hwif->nleft) {
stat = wait_drive_not_busy(drive);
if (!OK_STAT(stat, 0, BAD_STAT))
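As a reading aid for the hunk above: the patched task_in_intr() folds the old two-step check (an explicit ERR_STAT test followed by the task_in_unexpected() helper) into a single OK_STAT() test, so a premature or shared PCI interrupt that shows neither DRQ nor an error simply re-arms the handler. What follows is a minimal user-space sketch of that decision, not kernel code; it assumes the conventional ATA status bits (BUSY 0x80, DRDY 0x40, DRQ 0x08, ERR 0x01) and the OK_STAT()/BAD_R_STAT definitions the IDE layer used at the time.

#include <stdio.h>

/* Conventional ATA status bits; assumed to match the IDE headers of the era. */
#define BUSY_STAT  0x80
#define READY_STAT 0x40
#define DRQ_STAT   0x08
#define ERR_STAT   0x01
#define BAD_R_STAT (BUSY_STAT | ERR_STAT)

/* OK_STAT(stat, good, bad): the "good" bits are set and the "bad" bits are clear. */
#define OK_STAT(stat, good, bad) (((stat) & ((good) | (bad))) == (good))

enum action { READ_DATA, FAIL_REQUEST, WAIT_FOR_IRQ };

/* Models the decision the patched task_in_intr() makes on each interrupt. */
static enum action classify(unsigned char stat)
{
	if (OK_STAT(stat, DRQ_STAT, BAD_R_STAT))
		return READ_DATA;	/* data ready, no error: ide_pio_datablock() */
	if (stat & (ERR_STAT | DRQ_STAT))
		return FAIL_REQUEST;	/* error bit set, or DRQ while still busy: task_error() */
	return WAIT_FOR_IRQ;		/* premature/shared IRQ: ide_set_handler() again */
}

int main(void)
{
	static const char *names[] = { "READ_DATA", "FAIL_REQUEST", "WAIT_FOR_IRQ" };
	unsigned char samples[] = { 0x48, 0x51, 0x40, 0x88 };

	for (unsigned int i = 0; i < sizeof(samples); i++)
		printf("stat=0x%02x -> %s\n", samples[i], names[classify(samples[i])]);
	return 0;
}

The point of the rewrite is that all three outcomes (transfer a block, fail the request, wait for the next interrupt) now fall out of one status read instead of two separate code paths.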
134 changes: 75 additions & 59 deletions trunk/kernel/sched_fair.c
@@ -980,12 +980,59 @@ static inline int wake_idle(int cpu, struct task_struct *p)
#endif

#ifdef CONFIG_SMP

static int
wake_affine(struct rq *rq, struct sched_domain *this_sd, struct task_struct *p,
int cpu, int this_cpu, int sync, int idx,
unsigned long load, unsigned long this_load,
unsigned int imbalance)
{
unsigned long tl = this_load;
unsigned long tl_per_task;

if (!(this_sd->flags & SD_WAKE_AFFINE))
return 0;

/*
* Attract cache-cold tasks on sync wakeups:
*/
if (sync && !task_hot(p, rq->clock, this_sd))
return 1;

schedstat_inc(p, se.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);

/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
* of the current CPU:
*/
if (sync)
tl -= current->se.load.weight;

if ((tl <= load && tl + target_load(cpu, idx) <= tl_per_task) ||
100*(tl + p->se.load.weight) <= imbalance*load) {
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
* there is no bad imbalance.
*/
schedstat_inc(this_sd, ttwu_move_affine);
schedstat_inc(p, se.nr_wakeups_affine);

return 1;
}
return 0;
}

static int select_task_rq_fair(struct task_struct *p, int sync)
{
int cpu, this_cpu;
struct rq *rq;
struct sched_domain *sd, *this_sd = NULL;
int new_cpu;
unsigned long load, this_load;
int cpu, this_cpu, new_cpu;
unsigned int imbalance;
struct rq *rq;
int idx;

cpu = task_cpu(p);
rq = task_rq(p);
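The hunk above lifts the affine-wakeup heuristic out of select_task_rq_fair() into the new wake_affine() helper without changing its logic. As an illustration only, here is a stand-alone model of just the arithmetic in plain C; the parameter names are local to this sketch (remote_load stands in for target_load(cpu, idx)), and the example load-weight values are made up.

#include <stdio.h>

/*
 * Stand-alone model of the wake_affine() test. All quantities are in the
 * scheduler's load-weight units; imbalance is the caller's
 * 100 + (imbalance_pct - 100) / 2.
 */
static int wake_affine_ok(unsigned long load,        /* source_load() of the task's previous CPU */
			  unsigned long this_load,   /* target_load() of the waking CPU */
			  unsigned long remote_load, /* target_load() of the previous CPU */
			  unsigned long tl_per_task, /* cpu_avg_load_per_task() on the waking CPU */
			  unsigned long p_weight,    /* load weight of the woken task */
			  unsigned long curr_weight, /* load weight of the waker */
			  unsigned int imbalance,
			  int sync)
{
	unsigned long tl = this_load;

	/*
	 * Sync wakeup: the waker is about to sleep, so discount its weight.
	 * (The kernel subtracts unconditionally; the example values below are
	 * chosen so the subtraction cannot wrap.)
	 */
	if (sync)
		tl -= curr_weight;

	return (tl <= load && tl + remote_load <= tl_per_task) ||
	       100 * (tl + p_weight) <= imbalance * load;
}

int main(void)
{
	/* Busy previous CPU, lightly loaded waking CPU: pull the task (prints 1). */
	printf("%d\n", wake_affine_ok(3072, 2048, 3072, 2048, 1024, 1024, 112, 1));
	/* Lightly loaded previous CPU, busy waking CPU: leave it alone (prints 0). */
	printf("%d\n", wake_affine_ok(1024, 4096, 1024, 1024, 1024, 1024, 112, 0));
	return 0;
}

The real helper additionally short-circuits to 1 for a sync wakeup of a cache-cold task and bumps the nr_wakeups_affine* schedstats, which this model leaves out.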
@@ -1008,66 +1055,35 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
/*
* Check for affine wakeup and passive balancing possibilities.
*/
if (this_sd) {
int idx = this_sd->wake_idx;
unsigned int imbalance;
unsigned long load, this_load;

imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

load = source_load(cpu, idx);
this_load = target_load(this_cpu, idx);

new_cpu = this_cpu; /* Wake to this CPU if we can */

if (this_sd->flags & SD_WAKE_AFFINE) {
unsigned long tl = this_load;
unsigned long tl_per_task;

/*
* Attract cache-cold tasks on sync wakeups:
*/
if (sync && !task_hot(p, rq->clock, this_sd))
goto out_set_cpu;

schedstat_inc(p, se.nr_wakeups_affine_attempts);
tl_per_task = cpu_avg_load_per_task(this_cpu);

/*
* If sync wakeup then subtract the (maximum possible)
* effect of the currently running task from the load
* of the current CPU:
*/
if (sync)
tl -= current->se.load.weight;

if ((tl <= load &&
tl + target_load(cpu, idx) <= tl_per_task) ||
100*(tl + p->se.load.weight) <= imbalance*load) {
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
* there is no bad imbalance.
*/
schedstat_inc(this_sd, ttwu_move_affine);
schedstat_inc(p, se.nr_wakeups_affine);
goto out_set_cpu;
}
}
if (!this_sd)
goto out_keep_cpu;

/*
* Start passive balancing when half the imbalance_pct
* limit is reached.
*/
if (this_sd->flags & SD_WAKE_BALANCE) {
if (imbalance*this_load <= 100*load) {
schedstat_inc(this_sd, ttwu_move_balance);
schedstat_inc(p, se.nr_wakeups_passive);
goto out_set_cpu;
}
idx = this_sd->wake_idx;

imbalance = 100 + (this_sd->imbalance_pct - 100) / 2;

load = source_load(cpu, idx);
this_load = target_load(this_cpu, idx);

new_cpu = this_cpu; /* Wake to this CPU if we can */

if (wake_affine(rq, this_sd, p, cpu, this_cpu, sync, idx,
load, this_load, imbalance))
goto out_set_cpu;

/*
* Start passive balancing when half the imbalance_pct
* limit is reached.
*/
if (this_sd->flags & SD_WAKE_BALANCE) {
if (imbalance*this_load <= 100*load) {
schedstat_inc(this_sd, ttwu_move_balance);
schedstat_inc(p, se.nr_wakeups_passive);
goto out_set_cpu;
}
}

out_keep_cpu:
new_cpu = cpu; /* Could not wake to this_cpu. Wake to cpu instead */
out_set_cpu:
return wake_idle(new_cpu, p);
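One behavioural note on the second hunk: with wake_affine() factored out, select_task_rq_fair() now bails to the new out_keep_cpu label when no suitable domain was found, instead of wrapping everything in if (this_sd). The passive-balancing threshold itself is untouched; assuming, for illustration, an imbalance_pct of 125, imbalance works out to 100 + (125 - 100) / 2 = 112, so the wakeup is pulled to this_cpu once 112 * this_load <= 100 * load, i.e. once the waking CPU carries at most roughly 89% of the load seen on the task's previous CPU.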
12 changes: 3 additions & 9 deletions trunk/mm/slub.c
@@ -1536,15 +1536,9 @@ static void *__slab_alloc(struct kmem_cache *s,
* That is only possible if certain conditions are met that are being
* checked when a slab is created.
*/
if (!(gfpflags & __GFP_NORETRY) &&
(s->flags & __PAGE_ALLOC_FALLBACK)) {
if (gfpflags & __GFP_WAIT)
local_irq_enable();
object = kmalloc_large(s->objsize, gfpflags);
if (gfpflags & __GFP_WAIT)
local_irq_disable();
return object;
}
if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
return kmalloc_large(s->objsize, gfpflags);

return NULL;
debug:
if (!alloc_debug_processing(s, c->page, object, addr))
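The slub.c hunk simplifies the page-allocator fallback: instead of re-enabling local interrupts around kmalloc_large() when __GFP_WAIT is set, the patched __slab_alloc() returns kmalloc_large(s->objsize, gfpflags) directly whenever the cache allows the fallback and the caller did not pass __GFP_NORETRY. Below is a tiny sketch of that gate; the flag names and bit values are placeholders for this example, not the kernel's constants.

#include <stdbool.h>
#include <stdio.h>

/* Placeholder flag bits, for this sketch only. */
#define GFP_NORETRY_BIT         0x1u	/* caller prefers failure over hard retries */
#define PAGE_ALLOC_FALLBACK_BIT 0x2u	/* cache may fall back to the page allocator */

/* Mirrors the condition guarding the kmalloc_large() fallback in the hunk above. */
static bool use_page_alloc_fallback(unsigned int gfpflags, unsigned int cache_flags)
{
	return !(gfpflags & GFP_NORETRY_BIT) && (cache_flags & PAGE_ALLOC_FALLBACK_BIT);
}

int main(void)
{
	printf("%d\n", use_page_alloc_fallback(0, PAGE_ALLOC_FALLBACK_BIT));               /* 1 */
	printf("%d\n", use_page_alloc_fallback(GFP_NORETRY_BIT, PAGE_ALLOC_FALLBACK_BIT)); /* 0 */
	return 0;
}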
