Commit 0dc5050

---
r: 64419
b: refs/heads/master
c: 0b887d0
h: refs/heads/master
i:
  64417: 5036a2c
  64415: d31f1c7
v: v3
Linus Torvalds committed Aug 25, 2007
1 parent 0733e48 commit 0dc5050
Showing 10 changed files with 63 additions and 59 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 136c4bbfe69336cd1d0b076cfc0ef6b92d576a19
+refs/heads/master: 0b887d037bf4b76eec1c960e5feecd6a5a806971
24 changes: 12 additions & 12 deletions trunk/arch/i386/boot/boot.h
@@ -87,7 +87,7 @@ static inline void set_fs(u16 seg)
static inline u16 fs(void)
{
u16 seg;
-asm("movw %%fs,%0" : "=rm" (seg));
+asm volatile("movw %%fs,%0" : "=rm" (seg));
return seg;
}

@@ -98,7 +98,7 @@ static inline void set_gs(u16 seg)
static inline u16 gs(void)
{
u16 seg;
-asm("movw %%gs,%0" : "=rm" (seg));
+asm volatile("movw %%gs,%0" : "=rm" (seg));
return seg;
}

@@ -107,19 +107,19 @@ typedef unsigned int addr_t;
static inline u8 rdfs8(addr_t addr)
{
u8 v;
-asm("movb %%fs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
+asm volatile("movb %%fs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
return v;
}
static inline u16 rdfs16(addr_t addr)
{
u16 v;
-asm("movw %%fs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
+asm volatile("movw %%fs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
return v;
}
static inline u32 rdfs32(addr_t addr)
{
u32 v;
-asm("movl %%fs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
+asm volatile("movl %%fs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
return v;
}

@@ -139,19 +139,19 @@ static inline void wrfs32(u32 v, addr_t addr)
static inline u8 rdgs8(addr_t addr)
{
u8 v;
-asm("movb %%gs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
+asm volatile("movb %%gs:%1,%0" : "=r" (v) : "m" (*(u8 *)addr));
return v;
}
static inline u16 rdgs16(addr_t addr)
{
u16 v;
-asm("movw %%gs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
+asm volatile("movw %%gs:%1,%0" : "=r" (v) : "m" (*(u16 *)addr));
return v;
}
static inline u32 rdgs32(addr_t addr)
{
u32 v;
-asm("movl %%gs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
+asm volatile("movl %%gs:%1,%0" : "=r" (v) : "m" (*(u32 *)addr));
return v;
}

@@ -180,15 +180,15 @@ static inline int memcmp(const void *s1, const void *s2, size_t len)
static inline int memcmp_fs(const void *s1, addr_t s2, size_t len)
{
u8 diff;
-asm("fs; repe; cmpsb; setnz %0"
-: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+asm volatile("fs; repe; cmpsb; setnz %0"
+: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}
static inline int memcmp_gs(const void *s1, addr_t s2, size_t len)
{
u8 diff;
-asm("gs; repe; cmpsb; setnz %0"
-: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
+asm volatile("gs; repe; cmpsb; setnz %0"
+: "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
return diff;
}

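A note on the boot.h hunk above: the whole change is a conversion from plain asm to asm volatile. GCC assumes a non-volatile asm statement has no effects beyond its listed outputs, so it may delete the statement when those outputs go unused, fold identical copies into one, or move it relative to surrounding code; an asm volatile statement is never deleted or folded. A minimal, self-contained illustration of the difference (the helper names are invented for the example, they are not part of this commit):

    unsigned short read_fs_plain(void)
    {
        unsigned short seg;
        /* No volatile: if the caller ignores the result, GCC is allowed to
         * drop this instruction, and repeated reads may be folded together. */
        asm("movw %%fs,%0" : "=rm" (seg));
        return seg;
    }

    unsigned short read_fs_forced(void)
    {
        unsigned short seg;
        /* volatile: the segment-register read is always emitted where it is
         * written, even if the result is never used. */
        asm volatile("movw %%fs,%0" : "=rm" (seg));
        return seg;
    }

The same reasoning applies to the BIOS interrupt wrappers in cpucheck.c, edd.c, tty.c and video-vga.c below: the interrupts have side effects the compiler cannot see, so the statements must not be optimized away or reordered.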
3 changes: 2 additions & 1 deletion trunk/arch/i386/boot/cpucheck.c
@@ -96,7 +96,8 @@ static int has_fpu(void)
asm volatile("movl %0,%%cr0" : : "r" (cr0));
}

-asm("fninit ; fnstsw %0 ; fnstcw %1" : "+m" (fsw), "+m" (fcw));
+asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+: "+m" (fsw), "+m" (fcw));

return fsw == 0 && (fcw & 0x103f) == 0x003f;
}
6 changes: 3 additions & 3 deletions trunk/arch/i386/boot/edd.c
@@ -30,9 +30,9 @@ static int read_mbr(u8 devno, void *buf)
cx = 0x0001; /* Sector 0-0-1 */
dx = devno;
bx = (size_t)buf;
-asm("pushfl; stc; int $0x13; setc %%al; popfl"
-: "+a" (ax), "+c" (cx), "+d" (dx), "+b" (bx)
-: : "esi", "edi", "memory");
+asm volatile("pushfl; stc; int $0x13; setc %%al; popfl"
+: "+a" (ax), "+c" (cx), "+d" (dx), "+b" (bx)
+: : "esi", "edi", "memory");

return -(u8)ax; /* 0 or -1 */
}
14 changes: 7 additions & 7 deletions trunk/arch/i386/boot/tty.c
@@ -54,9 +54,9 @@ static u8 gettime(void)
u16 ax = 0x0200;
u16 cx, dx;

-asm("int $0x1a"
-: "+a" (ax), "=c" (cx), "=d" (dx)
-: : "ebx", "esi", "edi");
+asm volatile("int $0x1a"
+: "+a" (ax), "=c" (cx), "=d" (dx)
+: : "ebx", "esi", "edi");

return dx >> 8;
}
@@ -67,17 +67,17 @@ static u8 gettime(void)
int getchar(void)
{
u16 ax = 0;
-asm("int $0x16" : "+a" (ax));
+asm volatile("int $0x16" : "+a" (ax));

return ax & 0xff;
}

static int kbd_pending(void)
{
u8 pending;
-asm("int $0x16; setnz %0"
-: "=rm" (pending)
-: "a" (0x0100));
+asm volatile("int $0x16; setnz %0"
+: "=rm" (pending)
+: "a" (0x0100));
return pending;
}

17 changes: 9 additions & 8 deletions trunk/arch/i386/boot/video-vga.c
@@ -47,16 +47,16 @@ static u8 vga_set_basic_mode(void)

#ifdef CONFIG_VIDEO_400_HACK
if (adapter >= ADAPTER_VGA) {
-asm(INT10
-: : "a" (0x1202), "b" (0x0030)
-: "ecx", "edx", "esi", "edi");
+asm volatile(INT10
+: : "a" (0x1202), "b" (0x0030)
+: "ecx", "edx", "esi", "edi");
}
#endif

ax = 0x0f00;
-asm(INT10
-: "+a" (ax)
-: : "ebx", "ecx", "edx", "esi", "edi");
+asm volatile(INT10
+: "+a" (ax)
+: : "ebx", "ecx", "edx", "esi", "edi");

mode = (u8)ax;

@@ -73,9 +73,10 @@ static u8 vga_set_basic_mode(void)
mode = 3;

/* Set the mode */
+ax = mode;
asm volatile(INT10
-: : "a" (mode)
-: "ebx", "ecx", "edx", "esi", "edi");
+: "+a" (ax)
+: : "ebx", "ecx", "edx", "esi", "edi");
do_restore = 1;
return mode;
}
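A note on the last video-vga.c hunk: unlike the rest of this commit it is not just a volatile conversion. BIOS interrupts do not, in general, preserve AX, but the old input-only constraint "a" (mode) told GCC that the register was merely read, so the compiler was free to assume it still contained mode after the INT 10h call. Loading mode into a local ax and constraining it as "+a" (ax) marks the register as both read and written by the asm, so no stale value can be reused afterwards.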
2 changes: 1 addition & 1 deletion trunk/drivers/md/Kconfig
@@ -263,7 +263,7 @@ config DM_MULTIPATH_EMC

config DM_MULTIPATH_RDAC
tristate "LSI/Engenio RDAC multipath support (EXPERIMENTAL)"
-depends on DM_MULTIPATH && BLK_DEV_DM && EXPERIMENTAL
+depends on DM_MULTIPATH && BLK_DEV_DM && SCSI && EXPERIMENTAL
---help---
Multipath support for LSI/Engenio RDAC.

8 changes: 1 addition & 7 deletions trunk/kernel/sched.c
@@ -2180,12 +2180,6 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
if (task_running(rq, p))
return 0;

-/*
-* Aggressive migration if too many balance attempts have failed:
-*/
-if (sd->nr_balance_failed > sd->cache_nice_tries)
-return 1;
-
return 1;
}

@@ -4923,7 +4917,7 @@ static inline void sched_init_granularity(void)
if (sysctl_sched_granularity > gran_limit)
sysctl_sched_granularity = gran_limit;

-sysctl_sched_runtime_limit = sysctl_sched_granularity * 8;
+sysctl_sched_runtime_limit = sysctl_sched_granularity * 5;
sysctl_sched_wakeup_granularity = sysctl_sched_granularity / 2;
}

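Two notes on the kernel/sched.c hunks above. The deleted "aggressive migration" branch was redundant in the form shown here: the conditional return 1 is immediately followed by an unconditional return 1, so dropping the check does not change what this part of can_migrate_task() returns. The second hunk lowers the derived runtime limit from 8x to 5x the granularity; combined with the new 10 ms default base granularity in sched_fair.c below, and before any scaling for the number of CPUs, the defaults computed by sched_init_granularity() work out to roughly:

    sysctl_sched_granularity        = 10 ms
    sysctl_sched_runtime_limit      = 5 * 10 ms = 50 ms
    sysctl_sched_wakeup_granularity = 10 ms / 2 =  5 ms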
35 changes: 19 additions & 16 deletions trunk/kernel/sched_fair.c
@@ -19,7 +19,7 @@

/*
* Preemption granularity:
-* (default: 2 msec, units: nanoseconds)
+* (default: 10 msec, units: nanoseconds)
*
* NOTE: this granularity value is not the same as the concept of
* 'timeslice length' - timeslices in CFS will typically be somewhat
@@ -31,18 +31,17 @@
* number of CPUs. (i.e. factor 2x on 2-way systems, 3x on 4-way
* systems, 4x on 8-way systems, 5x on 16-way systems, etc.)
*/
-unsigned int sysctl_sched_granularity __read_mostly = 2000000000ULL/HZ;
+unsigned int sysctl_sched_granularity __read_mostly = 10000000UL;

/*
* SCHED_BATCH wake-up granularity.
-* (default: 10 msec, units: nanoseconds)
+* (default: 25 msec, units: nanoseconds)
*
* This option delays the preemption effects of decoupled workloads
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
-10000000000ULL/HZ;
+unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly = 25000000UL;

/*
* SCHED_OTHER wake-up granularity.
@@ -52,12 +51,12 @@ unsigned int sysctl_sched_batch_wakeup_granularity __read_mostly =
* and reduces their over-scheduling. Synchronous workloads will still
* have immediate wakeup/sleep latencies.
*/
-unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000000ULL/HZ;
+unsigned int sysctl_sched_wakeup_granularity __read_mostly = 1000000UL;

unsigned int sysctl_sched_stat_granularity __read_mostly;

/*
-* Initialized in sched_init_granularity():
+* Initialized in sched_init_granularity() [to 5 times the base granularity]:
*/
unsigned int sysctl_sched_runtime_limit __read_mostly;

@@ -304,9 +303,9 @@ __update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr)
delta_mine = calc_delta_mine(delta_exec, curr->load.weight, lw);

if (cfs_rq->sleeper_bonus > sysctl_sched_granularity) {
-delta = min(cfs_rq->sleeper_bonus, (u64)delta_exec);
-delta = calc_delta_mine(delta, curr->load.weight, lw);
-delta = min((u64)delta, cfs_rq->sleeper_bonus);
+delta = min((u64)delta_mine, cfs_rq->sleeper_bonus);
+delta = min(delta, (unsigned long)(
+(long)sysctl_sched_runtime_limit - curr->wait_runtime));
cfs_rq->sleeper_bonus -= delta;
delta_mine -= delta;
}
@@ -494,6 +493,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
unsigned long load = cfs_rq->load.weight, delta_fair;
long prev_runtime;

+/*
+ * Do not boost sleepers if there's too much bonus 'in flight'
+ * already:
+ */
+if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
+return;
+
if (sysctl_sched_features & SCHED_FEAT_SLEEPER_LOAD_AVG)
load = rq_of(cfs_rq)->cpu_load[2];

@@ -513,16 +519,13 @@ static void __enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)

prev_runtime = se->wait_runtime;
__add_wait_runtime(cfs_rq, se, delta_fair);
+schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
delta_fair = se->wait_runtime - prev_runtime;

/*
* Track the amount of bonus we've given to sleepers:
*/
cfs_rq->sleeper_bonus += delta_fair;
-if (unlikely(cfs_rq->sleeper_bonus > sysctl_sched_runtime_limit))
-cfs_rq->sleeper_bonus = sysctl_sched_runtime_limit;
-
-schedstat_add(cfs_rq, wait_runtime, se->wait_runtime);
}

static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1044,7 +1047,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
* -granularity/2, so initialize the task with that:
*/
if (sysctl_sched_features & SCHED_FEAT_START_DEBIT)
-p->se.wait_runtime = -(sysctl_sched_granularity / 2);
+p->se.wait_runtime = -((long)sysctl_sched_granularity / 2);

__enqueue_entity(cfs_rq, se);
}
@@ -1057,7 +1060,7 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
*/
static void set_curr_task_fair(struct rq *rq)
{
-struct sched_entity *se = &rq->curr.se;
+struct sched_entity *se = &rq->curr->se;

for_each_sched_entity(se)
set_next_entity(cfs_rq_of(se), se);
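Two of the sched_fair.c changes above deserve a comment. The __enqueue_sleeper() hunks change behaviour, not just style: instead of handing out a sleeper bonus and clamping cfs_rq->sleeper_bonus to sysctl_sched_runtime_limit afterwards, the function now bails out up front once that much bonus is already "in flight". And the small change in task_new_fair() fixes a sign bug: sysctl_sched_granularity is an unsigned int, so in -(sysctl_sched_granularity / 2) both the division and the negation happen in unsigned arithmetic, and the intended negative start debit wraps to a huge positive value before it reaches the signed wait_runtime field. Casting to long first keeps the arithmetic signed. A standalone illustration of the wrap-around (plain C, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int gran = 10000000;        /* 10 ms in ns, the new default   */
        long long bad  = -(gran / 2);        /* negated as unsigned int: wraps */
        long long good = -((long)gran / 2);  /* cast first, then negate        */

        printf("bad  = %lld\n", bad);        /* 4289967296 with 32-bit int     */
        printf("good = %lld\n", good);       /* -5000000, the intended debit   */
        return 0;
    }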
11 changes: 8 additions & 3 deletions trunk/kernel/sched_rt.c
@@ -207,10 +207,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p)
return;

p->time_slice = static_prio_timeslice(p->static_prio);
-set_tsk_need_resched(p);

-/* put it at the end of the queue: */
-requeue_task_rt(rq, p);
+/*
+ * Requeue to the end of queue if we are not the only element
+ * on the queue:
+ */
+if (p->run_list.prev != p->run_list.next) {
+requeue_task_rt(rq, p);
+set_tsk_need_resched(p);
+}
}

static struct sched_class rt_sched_class __read_mostly = {
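A note on the task_tick_rt() change above: when a round-robin task's timeslice expires it is now only requeued (and marked for rescheduling) if it is not the only task on its priority queue, since requeueing the sole element of a circular list is a no-op and the forced reschedule would be pure overhead. The test relies on the list_head layout: run_list sits on a circular doubly linked list headed by the priority array, and a non-head entry has prev == next exactly when it is the list's only element, because both pointers then refer to the head. A standalone sketch of that property, using a simplified list_head rather than the kernel's:

    #include <stdio.h>

    struct list_head { struct list_head *prev, *next; };

    /* For an entry (not the head) of a circular doubly linked list,
     * prev == next holds exactly when the entry is the only element. */
    static int is_sole_entry(const struct list_head *entry)
    {
        return entry->prev == entry->next;
    }

    int main(void)
    {
        struct list_head head, a, b;

        /* one entry:  head <-> a */
        head.next = head.prev = &a;
        a.next = a.prev = &head;
        printf("one entry:   %d\n", is_sole_entry(&a));  /* prints 1 */

        /* two entries: head <-> a <-> b */
        head.next = &a; head.prev = &b;
        a.prev = &head; a.next = &b;
        b.prev = &a;    b.next = &head;
        printf("two entries: %d\n", is_sole_entry(&a));  /* prints 0 */
        return 0;
    }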
