[PATCH] i386/x86_64 segment register access update
The new i386/x86_64 assemblers no longer accept instructions for moving
between a segment register and a 32-bit memory location, i.e.,

        movl (%eax),%ds
        movl %ds,(%eax)

To generate instructions for moving between a segment register and a
16-bit memory location without the 16-bit operand-size prefix, 0x66,

        mov (%eax),%ds
        mov %ds,(%eax)

should be used. This form works with both new and old assemblers.
Assemblers from 2.16.90.0.1 onward will also support

        movw (%eax),%ds
        movw %ds,(%eax)

without the 0x66 prefix. I am enclosing patches for the 2.4 and 2.6
kernels here. The resulting kernel binaries should be unchanged with
both old and new assemblers, provided gcc never generates a memory
access for

               unsigned gsindex;
               asm volatile("movl %%gs,%0" : "=g" (gsindex));

If gcc does generate a memory access for the code above, the upper
bits of gsindex are undefined, and the new assembler rejects the
instruction.
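
The two safe patterns are therefore a register destination, which both
old and new assemblers still accept with movl, and a 16-bit memory
destination with the suffix-less mov, which is the pattern these
patches adopt. A minimal sketch (the helper names are hypothetical,
not part of the patch):

        /* Illustrative only; neither helper appears in the patch. */
        static inline unsigned short read_gs_to_mem(void)
        {
                unsigned short sel;
                /* Memory destination: the store is 16 bits wide, so a
                 * 16-bit object with "=m" matches it exactly. */
                asm volatile("mov %%gs,%0" : "=m" (sel));
                return sel;
        }

        static inline unsigned int read_gs_to_reg(void)
        {
                unsigned int sel;
                /* Register destination: movl to a 32-bit register is
                 * still accepted; recent processors zero-extend the
                 * selector into the register. */
                asm volatile("movl %%gs,%0" : "=r" (sel));
                return sel;
        }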

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
H. J. Lu authored and Linus Torvalds committed May 1, 2005
1 parent d5b63d7 commit fd51f66
Showing 4 changed files with 13 additions and 13 deletions.
arch/i386/kernel/process.c: 2 additions & 2 deletions

@@ -611,8 +611,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
 	 * Save away %fs and %gs. No need to save %es and %ds, as
 	 * those are always kernel segments while inside the kernel.
 	 */
-	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs));
-	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs));
+	asm volatile("mov %%fs,%0":"=m" (prev->fs));
+	asm volatile("mov %%gs,%0":"=m" (prev->gs));
 
 	/*
 	 * Restore %fs and %gs if needed.
arch/i386/kernel/vm86.c: 2 additions & 2 deletions

@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
 	 */
 	info->regs32->eax = 0;
 	tsk->thread.saved_esp0 = tsk->thread.esp0;
-	asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs));
-	asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs));
+	asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
+	asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
 
 	tss = &per_cpu(init_tss, get_cpu());
 	tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
arch/x86_64/kernel/process.c: 6 additions & 6 deletions

@@ -402,10 +402,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
 	p->thread.fs = me->thread.fs;
 	p->thread.gs = me->thread.gs;
 
-	asm("movl %%gs,%0" : "=m" (p->thread.gsindex));
-	asm("movl %%fs,%0" : "=m" (p->thread.fsindex));
-	asm("movl %%es,%0" : "=m" (p->thread.es));
-	asm("movl %%ds,%0" : "=m" (p->thread.ds));
+	asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
+	asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
+	asm("mov %%es,%0" : "=m" (p->thread.es));
+	asm("mov %%ds,%0" : "=m" (p->thread.ds));
 
 	if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
 		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
@@ -468,11 +468,11 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct *
 	 * Switch DS and ES.
 	 * This won't pick up thread selector changes, but I guess that is ok.
 	 */
-	asm volatile("movl %%es,%0" : "=m" (prev->es));
+	asm volatile("mov %%es,%0" : "=m" (prev->es));
 	if (unlikely(next->es | prev->es))
 		loadsegment(es, next->es);
 
-	asm volatile ("movl %%ds,%0" : "=m" (prev->ds));
+	asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
 	if (unlikely(next->ds | prev->ds))
 		loadsegment(ds, next->ds);
 
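A note on the hunks above (an inference about declarations the diff
does not show): in the x86_64 thread_struct, es, ds, fsindex and
gsindex are 16-bit fields, so the 16-bit store performed by the
suffix-less mov fills them exactly and leaves no bits undefined.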
include/asm-i386/system.h: 3 additions & 3 deletions

@@ -81,7 +81,7 @@ static inline unsigned long _get_base(char * addr)
 #define loadsegment(seg,value) \
 	asm volatile("\n" \
 		"1:\t" \
-		"movl %0,%%" #seg "\n" \
+		"mov %0,%%" #seg "\n" \
 		"2:\n" \
 		".section .fixup,\"ax\"\n" \
 		"3:\t" \
@@ -93,13 +93,13 @@ static inline unsigned long _get_base(char * addr)
 		".align 4\n\t" \
 		".long 1b,3b\n" \
 		".previous" \
-		: :"m" (*(unsigned int *)&(value)))
+		: :"m" (value))
 
 /*
  * Save a segment register away
  */
 #define savesegment(seg, value) \
-	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
+	asm volatile("mov %%" #seg ",%0":"=m" (value))
 
 /*
  * Clear and set 'TS' bit respectively
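
For context, a sketch of how the two macros pair up at a call site
(the function and variables are hypothetical, not from this patch):

        /* Hypothetical caller: save the current %fs selector, then
         * install the next one. If the new selector faults, the
         * .fixup path in loadsegment above recovers. */
        static void switch_fs(unsigned short *saved, unsigned short next)
        {
                savesegment(fs, *saved);
                loadsegment(fs, next);
        }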
