Commit

---
r: 6976
b: refs/heads/master
c: 4bb0d3e
h: refs/heads/master
v: v3
Zachary Amsden authored and Linus Torvalds committed Sep 5, 2005
1 parent db9f281 commit 7d31140
Showing 16 changed files with 85 additions and 84 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 2a0694d15d55d0deed928786a6393d5e45e37d76
+refs/heads/master: 4bb0d3ec3e5b1e9e2399cdc641b3b6521ac9cdaa
12 changes: 6 additions & 6 deletions trunk/arch/i386/kernel/cpu/common.c
@@ -642,12 +642,12 @@ void __devinit cpu_init(void)
asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");

/* Clear all 6 debug registers: */

-#define CD(register) set_debugreg(0, register)

-CD(0); CD(1); CD(2); CD(3); /* no db4 and db5 */; CD(6); CD(7);

-#undef CD
+set_debugreg(0, 0);
+set_debugreg(0, 1);
+set_debugreg(0, 2);
+set_debugreg(0, 3);
+set_debugreg(0, 6);
+set_debugreg(0, 7);

/*
* Force FPU initialization:
12 changes: 3 additions & 9 deletions trunk/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -64,8 +64,6 @@ static int dont_scale_voltage;
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "longhaul", msg)


-#define __hlt() __asm__ __volatile__("hlt": : :"memory")

/* Clock ratios multiplied by 10 */
static int clock_ratio[32];
static int eblcr_table[32];
@@ -168,11 +166,9 @@ static void do_powersaver(union msr_longhaul *longhaul,
outb(0xFE,0x21); /* TMR0 only */
outb(0xFF,0x80); /* delay */

-local_irq_enable();

-__hlt();
+safe_halt();
wrmsrl(MSR_VIA_LONGHAUL, longhaul->val);
-__hlt();
+halt();

local_irq_disable();

@@ -251,9 +247,7 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
bcr2.bits.CLOCKMUL = clock_ratio_index;
local_irq_disable();
wrmsrl (MSR_VIA_BCR2, bcr2.val);
-local_irq_enable();

-__hlt();
+safe_halt();

/* Disable software clock multiplier */
rdmsrl (MSR_VIA_BCR2, bcr2.val);
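The longhaul changes above drop the driver-private __hlt() macro and the separate local_irq_enable() in favour of the safe_halt() and halt() helpers from asm-i386/system.h (added later in this commit). A minimal sketch of the resulting idiom, not part of the commit; the function name and the wake-up comment are illustrative:

#include <asm/system.h>	/* local_irq_disable(), safe_halt(), halt() */

/*
 * Illustrative sketch only: the idiom the longhaul driver switches to.
 * safe_halt() is "sti; hlt", so interrupts are re-enabled and the CPU halts
 * with no window in between; halt() is a bare "hlt" for use when interrupts
 * are already enabled.
 */
static void example_powersave_wait(void)
{
	local_irq_disable();
	/* ... arm a timer or IPI that will wake the CPU ... */
	safe_halt();		/* enables interrupts and halts in one step */
	halt();			/* interrupts are on now; plain hlt */
	local_irq_disable();
}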
6 changes: 1 addition & 5 deletions trunk/arch/i386/kernel/cpu/cyrix.c
@@ -132,11 +132,7 @@ static void __init set_cx86_memwb(void)
setCx86(CX86_CCR2, getCx86(CX86_CCR2) & ~0x04);
/* set 'Not Write-through' */
cr0 = 0x20000000;
__asm__("movl %%cr0,%%eax\n\t"
"orl %0,%%eax\n\t"
"movl %%eax,%%cr0\n"
: : "r" (cr0)
:"ax");
write_cr0(read_cr0() | cr0);
/* CCR2 bit 2: lock NW bit and set WT1 */
setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14 );
}
4 changes: 2 additions & 2 deletions trunk/arch/i386/kernel/efi.c
@@ -79,7 +79,7 @@ static void efi_call_phys_prelog(void)
* directory. If I have PSE, I just need to duplicate one entry in
* page directory.
*/
__asm__ __volatile__("movl %%cr4, %0":"=r"(cr4));
cr4 = read_cr4();

if (cr4 & X86_CR4_PSE) {
efi_bak_pg_dir_pointer[0].pgd =
@@ -115,7 +115,7 @@ static void efi_call_phys_epilog(void)
cpu_gdt_descr[0].address =
(unsigned long) __va(cpu_gdt_descr[0].address);
__asm__ __volatile__("lgdt %0":"=m"(cpu_gdt_descr));
__asm__ __volatile__("movl %%cr4, %0":"=r"(cr4));
cr4 = read_cr4();

if (cr4 & X86_CR4_PSE) {
swapper_pg_dir[pgd_index(0)].pgd =
8 changes: 1 addition & 7 deletions trunk/arch/i386/kernel/machine_kexec.c
@@ -17,13 +17,7 @@
#include <asm/apic.h>
#include <asm/cpufeature.h>
#include <asm/desc.h>

-static inline unsigned long read_cr3(void)
-{
-unsigned long cr3;
-asm volatile("movl %%cr3,%0": "=r"(cr3));
-return cr3;
-}
+#include <asm/system.h>

#define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))

16 changes: 6 additions & 10 deletions trunk/arch/i386/kernel/process.c
@@ -313,16 +313,12 @@ void show_regs(struct pt_regs * regs)
printk(" DS: %04x ES: %04x\n",
0xffff & regs->xds,0xffff & regs->xes);

__asm__("movl %%cr0, %0": "=r" (cr0));
__asm__("movl %%cr2, %0": "=r" (cr2));
__asm__("movl %%cr3, %0": "=r" (cr3));
/* This could fault if %cr4 does not exist */
__asm__("1: movl %%cr4, %0 \n"
"2: \n"
".section __ex_table,\"a\" \n"
".long 1b,2b \n"
".previous \n"
: "=r" (cr4): "0" (0));
cr0 = read_cr0();
cr2 = read_cr2();
cr3 = read_cr3();
if (current_cpu_data.x86 > 4) {
cr4 = read_cr4();
}
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
show_trace(NULL, &regs->esp);
}
2 changes: 1 addition & 1 deletion trunk/arch/i386/kernel/smp.c
@@ -576,7 +576,7 @@ static void stop_this_cpu (void * dummy)
local_irq_disable();
disable_local_APIC();
if (cpu_data[smp_processor_id()].hlt_works_ok)
-for(;;) __asm__("hlt");
+for(;;) halt();
for (;;);
}

6 changes: 3 additions & 3 deletions trunk/arch/i386/mm/fault.c
@@ -233,7 +233,7 @@ fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
int write, si_code;

/* get the address */
__asm__("movl %%cr2,%0":"=r" (address));
address = read_cr2();

if (notify_die(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
SIGSEGV) == NOTIFY_STOP)
@@ -453,7 +453,7 @@ fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
printk(" at virtual address %08lx\n",address);
printk(KERN_ALERT " printing eip:\n");
printk("%08lx\n", regs->eip);
asm("movl %%cr3,%0":"=r" (page));
page = read_cr3();
page = ((unsigned long *) __va(page))[address >> 22];
printk(KERN_ALERT "*pde = %08lx\n", page);
/*
@@ -526,7 +526,7 @@ fastcall void do_page_fault(struct pt_regs *regs, unsigned long error_code)
pmd_t *pmd, *pmd_k;
pte_t *pte_k;

asm("movl %%cr3,%0":"=r" (pgd_paddr));
pgd_paddr = read_cr3();
pgd = index + (pgd_t *)__va(pgd_paddr);
pgd_k = init_mm.pgd + index;

2 changes: 1 addition & 1 deletion trunk/arch/i386/mm/pageattr.c
@@ -62,7 +62,7 @@ static void flush_kernel_map(void *dummy)
{
/* Could use CLFLUSH here if the CPU supports it (Hammer,P4) */
if (boot_cpu_data.x86_model >= 4)
asm volatile("wbinvd":::"memory");
wbinvd();
/* Flush all to work around Errata in early athlons regarding
* large page flushing.
*/
16 changes: 8 additions & 8 deletions trunk/arch/i386/power/cpu.c
@@ -57,10 +57,10 @@ void __save_processor_state(struct saved_context *ctxt)
/*
* control registers
*/
asm volatile ("movl %%cr0, %0" : "=r" (ctxt->cr0));
asm volatile ("movl %%cr2, %0" : "=r" (ctxt->cr2));
asm volatile ("movl %%cr3, %0" : "=r" (ctxt->cr3));
asm volatile ("movl %%cr4, %0" : "=r" (ctxt->cr4));
ctxt->cr0 = read_cr0();
ctxt->cr2 = read_cr2();
ctxt->cr3 = read_cr3();
ctxt->cr4 = read_cr4();
}

void save_processor_state(void)
@@ -109,10 +109,10 @@ void __restore_processor_state(struct saved_context *ctxt)
/*
* control registers
*/
asm volatile ("movl %0, %%cr4" :: "r" (ctxt->cr4));
asm volatile ("movl %0, %%cr3" :: "r" (ctxt->cr3));
asm volatile ("movl %0, %%cr2" :: "r" (ctxt->cr2));
asm volatile ("movl %0, %%cr0" :: "r" (ctxt->cr0));
write_cr4(ctxt->cr4);
write_cr3(ctxt->cr3);
write_cr2(ctxt->cr2);
write_cr2(ctxt->cr0);

/*
* now restore the descriptor tables to their proper values
2 changes: 1 addition & 1 deletion trunk/include/asm-i386/agp.h
@@ -19,7 +19,7 @@ int unmap_page_from_agp(struct page *page);
/* Could use CLFLUSH here if the cpu supports it. But then it would
need to be called for each cacheline of the whole page so it may not be
worth it. Would need a page for it. */
-#define flush_agp_cache() asm volatile("wbinvd":::"memory")
+#define flush_agp_cache() wbinvd()

/* Convert a physical address to an address suitable for the GART. */
#define phys_to_gart(x) (x)
5 changes: 4 additions & 1 deletion trunk/include/asm-i386/bugs.h
@@ -118,7 +118,10 @@ static void __init check_hlt(void)
printk("disabled\n");
return;
}
__asm__ __volatile__("hlt ; hlt ; hlt ; hlt");
halt();
halt();
halt();
halt();
printk("OK.\n");
}

22 changes: 9 additions & 13 deletions trunk/include/asm-i386/processor.h
@@ -203,9 +203,7 @@ static inline unsigned int cpuid_edx(unsigned int op)
return edx;
}

-#define load_cr3(pgdir) \
-asm volatile("movl %0,%%cr3": :"r" (__pa(pgdir)))

+#define load_cr3(pgdir) write_cr3(__pa(pgdir))

/*
* Intel CPU features in CR4
@@ -232,22 +230,20 @@ extern unsigned long mmu_cr4_features;

static inline void set_in_cr4 (unsigned long mask)
{
+unsigned cr4;
mmu_cr4_features |= mask;
__asm__("movl %%cr4,%%eax\n\t"
"orl %0,%%eax\n\t"
"movl %%eax,%%cr4\n"
: : "irg" (mask)
:"ax");
cr4 = read_cr4();
cr4 |= mask;
write_cr4(cr4);
}

static inline void clear_in_cr4 (unsigned long mask)
{
+unsigned cr4;
mmu_cr4_features &= ~mask;
__asm__("movl %%cr4,%%eax\n\t"
"andl %0,%%eax\n\t"
"movl %%eax,%%cr4\n"
: : "irg" (~mask)
:"ax");
cr4 = read_cr4();
cr4 &= ~mask;
write_cr4(cr4);
}

/*
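set_in_cr4() and clear_in_cr4() above become plain read-modify-write sequences over read_cr4()/write_cr4(), while still recording the bit in mmu_cr4_features. A hedged usage sketch, not part of the commit; the caller name is illustrative and X86_CR4_PGE is one of the CR4 feature bits already defined in this header:

#include <asm/processor.h>	/* set_in_cr4(), clear_in_cr4(), X86_CR4_* */

/*
 * Illustrative sketch only: after this patch the helpers expand to
 * read_cr4()/write_cr4() read-modify-write sequences, and the requested
 * bit is still remembered in mmu_cr4_features.
 */
static void example_toggle_global_pages(void)
{
	set_in_cr4(X86_CR4_PGE);	/* enable global pages */
	clear_in_cr4(X86_CR4_PGE);	/* and turn them off again */
}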
28 changes: 25 additions & 3 deletions trunk/include/asm-i386/system.h
@@ -107,13 +107,33 @@ static inline unsigned long _get_base(char * addr)
#define clts() __asm__ __volatile__ ("clts")
#define read_cr0() ({ \
unsigned int __dummy; \
-__asm__( \
+__asm__ __volatile__( \
"movl %%cr0,%0\n\t" \
:"=r" (__dummy)); \
__dummy; \
})
#define write_cr0(x) \
__asm__("movl %0,%%cr0": :"r" (x));
__asm__ __volatile__("movl %0,%%cr0": :"r" (x));

+#define read_cr2() ({ \
+unsigned int __dummy; \
+__asm__ __volatile__( \
+"movl %%cr2,%0\n\t" \
+:"=r" (__dummy)); \
+__dummy; \
+})
+#define write_cr2(x) \
+__asm__ __volatile__("movl %0,%%cr2": :"r" (x));

+#define read_cr3() ({ \
+unsigned int __dummy; \
+__asm__ ( \
+"movl %%cr3,%0\n\t" \
+:"=r" (__dummy)); \
+__dummy; \
+})
+#define write_cr3(x) \
+__asm__ __volatile__("movl %0,%%cr3": :"r" (x));

#define read_cr4() ({ \
unsigned int __dummy; \
@@ -123,7 +143,7 @@ static inline unsigned long _get_base(char * addr)
__dummy; \
})
#define write_cr4(x) \
__asm__("movl %0,%%cr4": :"r" (x));
__asm__ __volatile__("movl %0,%%cr4": :"r" (x));
#define stts() write_cr0(8 | read_cr0())

#endif /* __KERNEL__ */
@@ -447,6 +467,8 @@ struct alt_instr {
#define local_irq_enable() __asm__ __volatile__("sti": : :"memory")
/* used in the idle loop; sti takes one instruction cycle to complete */
#define safe_halt() __asm__ __volatile__("sti; hlt": : :"memory")
+/* used when interrupts are already enabled or to shutdown the processor */
+#define halt() __asm__ __volatile__("hlt": : :"memory")

#define irqs_disabled() \
({ \
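The read_crN()/write_crN() and halt() definitions above are the heart of this patch: every other hunk in the commit converts an open-coded "movl %%crN" or "hlt" into one of these accessors. A minimal sketch of the accessor style, not part of the commit; the function name is illustrative:

#include <asm/system.h>	/* clts(), stts(), read_crN()/write_crN(), halt() */

/*
 * Illustrative sketch only: no new semantics here -- the accessors are thin
 * wrappers around the single "movl %%crN" instruction they replace.
 */
static void example_use_cr_accessors(void)
{
	unsigned long cr0 = read_cr0();

	write_cr0(cr0 | 8);		/* set CR0.TS, equivalent to stts() */
	clts();				/* clear TS again */
	write_cr3(read_cr3());		/* reloading CR3 flushes non-global TLB entries */
}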
26 changes: 13 additions & 13 deletions trunk/include/asm-i386/xor.h
@@ -535,29 +535,29 @@ static struct xor_block_template xor_block_p5_mmx = {

#define XMMS_SAVE do { \
preempt_disable(); \
+cr0 = read_cr0(); \
+clts(); \
__asm__ __volatile__ ( \
"movl %%cr0,%0 ;\n\t" \
"clts ;\n\t" \
"movups %%xmm0,(%1) ;\n\t" \
"movups %%xmm1,0x10(%1) ;\n\t" \
"movups %%xmm2,0x20(%1) ;\n\t" \
"movups %%xmm3,0x30(%1) ;\n\t" \
: "=&r" (cr0) \
"movups %%xmm0,(%0) ;\n\t" \
"movups %%xmm1,0x10(%0) ;\n\t" \
"movups %%xmm2,0x20(%0) ;\n\t" \
"movups %%xmm3,0x30(%0) ;\n\t" \
: \
: "r" (xmm_save) \
: "memory"); \
} while(0)

#define XMMS_RESTORE do { \
__asm__ __volatile__ ( \
"sfence ;\n\t" \
"movups (%1),%%xmm0 ;\n\t" \
"movups 0x10(%1),%%xmm1 ;\n\t" \
"movups 0x20(%1),%%xmm2 ;\n\t" \
"movups 0x30(%1),%%xmm3 ;\n\t" \
"movl %0,%%cr0 ;\n\t" \
"movups (%0),%%xmm0 ;\n\t" \
"movups 0x10(%0),%%xmm1 ;\n\t" \
"movups 0x20(%0),%%xmm2 ;\n\t" \
"movups 0x30(%0),%%xmm3 ;\n\t" \
: \
: "r" (cr0), "r" (xmm_save) \
: "r" (xmm_save) \
: "memory"); \
+write_cr0(cr0); \
preempt_enable(); \
} while(0)

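XMMS_SAVE/XMMS_RESTORE above now save CR0 and execute clts in C via read_cr0()/clts(), and restore CR0 with write_cr0(), leaving only the XMM spill and reload in the asm. A rough sketch of how the xor_sse_* routines in this header bracket an SSE section with these macros, not part of the commit; the function body and local types are illustrative:

/*
 * Illustrative sketch only: the locals `cr0` and `xmm_save` are referenced
 * by name inside XMMS_SAVE/XMMS_RESTORE, as in the xor_sse_* routines.
 */
static void example_sse_section(void)
{
	unsigned long cr0;
	char xmm_save[16*4];

	XMMS_SAVE;	/* preempt off, CR0 saved via read_cr0(), clts(), xmm0-3 spilled */
	/* ... use %xmm0-%xmm3 for the xor loop here ... */
	XMMS_RESTORE;	/* xmm0-3 reloaded, CR0 (and its TS bit) restored, preempt on */
}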
