[PATCH] i386: inline assembler: cleanup and encapsulate descriptor and task register management

i386 inline assembler cleanup.

This change encapsulates descriptor and task register management. It also
makes two improvements to assembler generation possible: savesegment may
store the value in a register instead of a memory location, which allows
GCC to optimize stack variables into registers, and MOV MEM, SEG is always
a 16-bit write to memory, which makes the casting in math-emu unnecessary.

Signed-off-by: Zachary Amsden <zach@vmware.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Zachary Amsden authored and Linus Torvalds committed Sep 5, 2005
1 parent 245067d commit 4d37e7e
Showing 11 changed files with 43 additions and 40 deletions.
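
As the commit message notes, the change is largely mechanical: every open-coded lgdt/lidt/sgdt/sidt/str and segment-register move becomes a call to a named helper. A minimal compile-only sketch of that before/after shape (the struct layout and the load_gdt helper below are stand-ins modeled on the patch rather than the real kernel headers, and lgdt is privileged, so this cannot actually run from user space):

/* Stand-in for the kernel's descriptor-table pointer type. */
struct gdt_ptr {
	unsigned short size;
	unsigned long address;
} __attribute__((packed));

/* Helper in the style the patch adds to include/asm-i386/desc.h. */
#define load_gdt(dtr) __asm__ __volatile__("lgdt %0" : : "m" (*(dtr)))

static struct gdt_ptr gdt_descr;

/* Before: every call site open-codes the instruction and its constraints. */
void point_gdt_old(void)
{
	__asm__ __volatile__("lgdt %0" : : "m" (gdt_descr));
}

/* After: the call site names the operation; the asm lives in one place. */
void point_gdt_new(void)
{
	load_gdt(&gdt_descr);
}
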
4 changes: 2 additions & 2 deletions arch/i386/kernel/cpu/common.c
@@ -613,8 +613,8 @@ void __devinit cpu_init(void)
memcpy(thread->tls_array, &per_cpu(cpu_gdt_table, cpu),
GDT_ENTRY_TLS_ENTRIES * 8);

- __asm__ __volatile__("lgdt %0" : : "m" (cpu_gdt_descr[cpu]));
- __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
+ load_gdt(&cpu_gdt_descr[cpu]);
+ load_idt(&idt_descr);

/*
* Delete NT
2 changes: 1 addition & 1 deletion arch/i386/kernel/doublefault.c
@@ -20,7 +20,7 @@ static void doublefault_fn(void)
struct Xgt_desc_struct gdt_desc = {0, 0};
unsigned long gdt, tss;

- __asm__ __volatile__("sgdt %0": "=m" (gdt_desc): :"memory");
+ store_gdt(&gdt_desc);
gdt = gdt_desc.address;

printk("double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
5 changes: 2 additions & 3 deletions arch/i386/kernel/efi.c
@@ -104,8 +104,7 @@ static void efi_call_phys_prelog(void)
local_flush_tlb();

cpu_gdt_descr[0].address = __pa(cpu_gdt_descr[0].address);
- __asm__ __volatile__("lgdt %0":"=m"
- (*(struct Xgt_desc_struct *) __pa(&cpu_gdt_descr[0])));
+ load_gdt((struct Xgt_desc_struct *) __pa(&cpu_gdt_descr[0]));
}

static void efi_call_phys_epilog(void)
@@ -114,7 +113,7 @@ static void efi_call_phys_epilog(void)

cpu_gdt_descr[0].address =
(unsigned long) __va(cpu_gdt_descr[0].address);
- __asm__ __volatile__("lgdt %0":"=m"(cpu_gdt_descr));
+ load_gdt(&cpu_gdt_descr[0]);
cr4 = read_cr4();

if (cr4 & X86_CR4_PSE) {
9 changes: 5 additions & 4 deletions arch/i386/kernel/reboot.c
@@ -13,6 +13,7 @@
#include <linux/dmi.h>
#include <asm/uaccess.h>
#include <asm/apic.h>
+ #include <asm/desc.h>
#include "mach_reboot.h"
#include <linux/reboot_fixups.h>

@@ -242,13 +243,13 @@ void machine_real_restart(unsigned char *code, int length)

/* Set up the IDT for real mode. */

- __asm__ __volatile__ ("lidt %0" : : "m" (real_mode_idt));
+ load_idt(&real_mode_idt);

/* Set up a GDT from which we can load segment descriptors for real
mode. The GDT is not used in real mode; it is just needed here to
prepare the descriptors. */

- __asm__ __volatile__ ("lgdt %0" : : "m" (real_mode_gdt));
+ load_gdt(&real_mode_gdt);

/* Load the data segment registers, and thus the descriptors ready for
real mode. The base address of each segment is 0x100, 16 times the
@@ -316,7 +317,7 @@ void machine_emergency_restart(void)
if (!reboot_thru_bios) {
if (efi_enabled) {
efi.reset_system(EFI_RESET_COLD, EFI_SUCCESS, 0, NULL);
- __asm__ __volatile__("lidt %0": :"m" (no_idt));
+ load_idt(&no_idt);
__asm__ __volatile__("int3");
}
/* rebooting needs to touch the page at absolute addr 0 */
@@ -325,7 +326,7 @@ void machine_emergency_restart(void)
mach_reboot_fixups(); /* for board specific fixups */
mach_reboot();
/* That didn't work - force a triple fault.. */
- __asm__ __volatile__("lidt %0": :"m" (no_idt));
+ load_idt(&no_idt);
__asm__ __volatile__("int3");
}
}
4 changes: 2 additions & 2 deletions arch/i386/kernel/signal.c
@@ -278,9 +278,9 @@ setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
int tmp, err = 0;

tmp = 0;
__asm__("movl %%gs,%0" : "=r"(tmp): "0"(tmp));
savesegment(gs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->gs);
__asm__("movl %%fs,%0" : "=r"(tmp): "0"(tmp));
savesegment(fs, tmp);
err |= __put_user(tmp, (unsigned int __user *)&sc->fs);

err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
2 changes: 1 addition & 1 deletion arch/i386/kernel/traps.c
@@ -1008,7 +1008,7 @@ void __init trap_init_f00f_bug(void)
* it uses the read-only mapped virtual address.
*/
idt_descr.address = fix_to_virt(FIX_F00F_IDT);
- __asm__ __volatile__("lidt %0" : : "m" (idt_descr));
+ load_idt(&idt_descr);
}
#endif

4 changes: 2 additions & 2 deletions arch/i386/kernel/vm86.c
@@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
*/
info->regs32->eax = 0;
tsk->thread.saved_esp0 = tsk->thread.esp0;
- asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs));
- asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs));
+ savesegment(fs, tsk->thread.saved_fs);
+ savesegment(gs, tsk->thread.saved_gs);

tss = &per_cpu(init_tss, get_cpu());
tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
13 changes: 3 additions & 10 deletions arch/i386/math-emu/get_address.c
@@ -155,7 +155,6 @@ static long pm_address(u_char FPU_modrm, u_char segment,
{
struct desc_struct descriptor;
unsigned long base_address, limit, address, seg_top;
- unsigned short selector;

segment--;

@@ -173,17 +172,11 @@ static long pm_address(u_char FPU_modrm, u_char segment,
/* fs and gs aren't used by the kernel, so they still have their
user-space values. */
case PREFIX_FS_-1:
- /* The cast is needed here to get gcc 2.8.0 to use a 16 bit register
-    in the assembler statement. */
-
- __asm__("mov %%fs,%0":"=r" (selector));
- addr->selector = selector;
+ /* N.B. - movl %seg, mem is a 2 byte write regardless of prefix */
+ savesegment(fs, addr->selector);
break;
case PREFIX_GS_-1:
- /* The cast is needed here to get gcc 2.8.0 to use a 16 bit register
-    in the assembler statement. */
- __asm__("mov %%gs,%0":"=r" (selector));
- addr->selector = selector;
+ savesegment(gs, addr->selector);
break;
default:
addr->selector = PM_REG_(segment);
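
The declaration and casts deleted here existed only to force a 16-bit operand; the replacement leans on the rule stated in the new comment: a MOV from a segment register to a memory operand stores exactly 16 bits, whatever the declared size of the destination. A small user-space check of that rule (illustrative only; reading %fs is unprivileged, and the variable names are made up):

#include <stdio.h>

int main(void)
{
	/* 32-bit destination pre-filled with a marker in the upper half. */
	unsigned int slot = 0xdeadbeef;

	/* Store %fs through a memory operand.  "+m" keeps the initializer
	 * alive; the MOV itself only ever writes the low 16 bits. */
	__asm__("mov %%fs, %0" : "+m" (slot));

	printf("slot = %#010x, upper half %s\n", slot,
	       (slot >> 16) == 0xdead ? "preserved" : "clobbered");
	return 0;
}
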
26 changes: 13 additions & 13 deletions arch/i386/power/cpu.c
@@ -42,17 +42,17 @@ void __save_processor_state(struct saved_context *ctxt)
/*
* descriptor tables
*/
- asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
- asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
- asm volatile ("str %0" : "=m" (ctxt->tr));
+ store_gdt(&ctxt->gdt_limit);
+ store_idt(&ctxt->idt_limit);
+ store_tr(ctxt->tr);

/*
* segment registers
*/
- asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
- asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
- asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
- asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));
+ savesegment(es, ctxt->es);
+ savesegment(fs, ctxt->fs);
+ savesegment(gs, ctxt->gs);
+ savesegment(ss, ctxt->ss);

/*
* control registers
@@ -118,16 +118,16 @@ void __restore_processor_state(struct saved_context *ctxt)
* now restore the descriptor tables to their proper values
* ltr is done i fix_processor_context().
*/
- asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
- asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
+ load_gdt(&ctxt->gdt_limit);
+ load_idt(&ctxt->idt_limit);

/*
* segment registers
*/
- asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
- asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
- asm volatile ("movw %0, %%gs" :: "r" (ctxt->gs));
- asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));
+ loadsegment(es, ctxt->es);
+ loadsegment(fs, ctxt->fs);
+ loadsegment(gs, ctxt->gs);
+ loadsegment(ss, ctxt->ss);

/*
* sysenter MSRs
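
The save/restore pairing above is the typical use of savesegment and loadsegment. From user space the round trip can only be tried safely on %es, which a process may reload with the selector it already holds; %fs and %gs are merely read here because the C runtime owns them for TLS. A sketch with simplified macro bodies (the kernel's real loadsegment also carries an exception-table fixup for invalid selectors):

#include <stdio.h>

/* Simplified mirrors of the kernel macros: "=rm"/"rm" let GCC pick either
 * a register or a memory slot for the selector value. */
#define savesegment(seg, value) \
	__asm__("mov %%" #seg ",%0" : "=rm" (value))
#define loadsegment(seg, value) \
	__asm__ __volatile__("mov %0,%%" #seg : : "rm" (value))

int main(void)
{
	unsigned short es, fs, gs;

	savesegment(es, es);
	savesegment(fs, fs);
	savesegment(gs, gs);

	/* Writing back the selector %es already holds is a harmless no-op. */
	loadsegment(es, es);

	printf("es=%#hx fs=%#hx gs=%#hx\n", es, fs, gs);
	return 0;
}
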
10 changes: 10 additions & 0 deletions include/asm-i386/desc.h
@@ -30,6 +30,16 @@ extern struct Xgt_desc_struct idt_descr, cpu_gdt_descr[NR_CPUS];
#define load_TR_desc() __asm__ __volatile__("ltr %%ax"::"a" (GDT_ENTRY_TSS*8))
#define load_LDT_desc() __asm__ __volatile__("lldt %%ax"::"a" (GDT_ENTRY_LDT*8))

+ #define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
+ #define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
+ #define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
+ #define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
+
+ #define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
+ #define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
+ #define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
+ #define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
+
/*
* This is the ldt that every process will get unless we need
* something other than this.
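
For reference, a sketch of how the store_* side of these new helpers is used, mirroring the pattern __save_processor_state and doublefault_fn follow above. The struct is a stand-in for Xgt_desc_struct, and on CPUs with UMIP the user-space SGDT/SIDT reads may fault or be emulated by the kernel, so treat this as illustrative:

#include <stdio.h>

/* Stand-in for the kernel's Xgt_desc_struct: 16-bit limit, then base. */
struct desc_ptr {
	unsigned short size;
	unsigned long address;
} __attribute__((packed));

/* Same shape as the store_gdt/store_idt macros added above. */
#define store_gdt(dtr) __asm__("sgdt %0" : "=m" (*(dtr)))
#define store_idt(dtr) __asm__("sidt %0" : "=m" (*(dtr)))

int main(void)
{
	struct desc_ptr gdt = { 0, 0 }, idt = { 0, 0 };

	store_gdt(&gdt);	/* snapshot the descriptor-table registers */
	store_idt(&idt);

	printf("gdt: base=%#lx size=%u bytes\n", gdt.address, gdt.size + 1u);
	printf("idt: base=%#lx size=%u bytes\n", idt.address, idt.size + 1u);
	return 0;
}
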
4 changes: 2 additions & 2 deletions include/asm-i386/system.h
@@ -93,13 +93,13 @@ static inline unsigned long _get_base(char * addr)
".align 4\n\t" \
".long 1b,3b\n" \
".previous" \
: :"m" (value))
: :"rm" (value))

/*
* Save a segment register away
*/
#define savesegment(seg, value) \
- asm volatile("mov %%" #seg ",%0":"=m" (value))
+ asm volatile("mov %%" #seg ",%0":"=rm" (value))

/*
* Clear and set 'TS' bit respectively
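
The only functional change in this file is the constraint: "m" forced the saved segment value through a memory slot, while "rm" lets GCC choose a register, which is what the commit message means by optimizing stack variables into registers. A small sketch for comparing the generated code (e.g. gcc -m32 -O2 -S); the macro and function names are illustrative, not kernel identifiers:

/* Two variants of savesegment that differ only in the output constraint. */
#define savesegment_m(seg, value) \
	__asm__("mov %%" #seg ",%0" : "=m" (value))	/* old: memory only */
#define savesegment_rm(seg, value) \
	__asm__("mov %%" #seg ",%0" : "=rm" (value))	/* new: reg or mem */

unsigned short save_fs_old(void)
{
	unsigned short sel;
	savesegment_m(fs, sel);		/* sel must live in a stack slot */
	return sel;
}

unsigned short save_fs_new(void)
{
	unsigned short sel;
	savesegment_rm(fs, sel);	/* sel can be produced straight in %ax */
	return sel;
}
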
