Commit 2b4d7b2

---
r: 61210
b: refs/heads/master
c: 485cf92
h: refs/heads/master
v: v3
Linus Torvalds committed Jul 18, 2007
1 parent e870ae9 commit 2b4d7b2
Showing 168 changed files with 17,022 additions and 3,229 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 3fd8f9e4b6c184d03d340bc86630f700de967fa8
+refs/heads/master: 485cf925d8b7a6b3c62fe5f1e167f2d0d4edf32a
5 changes: 5 additions & 0 deletions trunk/MAINTAINERS
@@ -4110,6 +4110,11 @@ W: http://www.polyware.nl/~middelin/En/hobbies.html
W: http://www.polyware.nl/~middelin/hobbies.html
S: Maintained

ZS DECSTATION Z85C30 SERIAL DRIVER
P: Maciej W. Rozycki
M: macro@linux-mips.org
S: Maintained

THE REST
P: Linus Torvalds
S: Buried alive in reporters
1 change: 1 addition & 0 deletions trunk/arch/frv/kernel/setup.c
@@ -29,6 +29,7 @@
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>

#include <asm/setup.h>
#include <asm/irq.h>
2 changes: 2 additions & 0 deletions trunk/arch/i386/Kconfig
@@ -222,6 +222,8 @@ config PARAVIRT
However, when run without a hypervisor the kernel is
theoretically slower. If in doubt, say N.

source "arch/i386/xen/Kconfig"

config VMI
bool "VMI Paravirt-ops support"
depends on PARAVIRT
3 changes: 3 additions & 0 deletions trunk/arch/i386/Makefile
@@ -93,6 +93,9 @@ mflags-$(CONFIG_X86_ES7000) := -Iinclude/asm-i386/mach-es7000
mcore-$(CONFIG_X86_ES7000) := mach-default
core-$(CONFIG_X86_ES7000) := arch/i386/mach-es7000/

# Xen paravirtualization support
core-$(CONFIG_XEN) += arch/i386/xen/

# default subarch .h files
mflags-y += -Iinclude/asm-i386/mach-default

2 changes: 2 additions & 0 deletions trunk/arch/i386/boot/compressed/relocs.c
@@ -31,6 +31,8 @@ static const char* safe_abs_relocs[] = {
"__kernel_rt_sigreturn",
"__kernel_sigreturn",
"SYSENTER_RETURN",
"xen_irq_disable_direct_reloc",
"xen_save_fl_direct_reloc",
};

static int is_safe_abs_reloc(const char* sym_name)
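The body of is_safe_abs_reloc() is elided above. As a minimal standalone sketch of the shape such a whitelist check typically takes (the loop below is an assumption based on the visible declaration, not the elided kernel code):

#include <string.h>

/* Assumed shape of the check: an absolute relocation is treated as safe
 * only if its symbol appears in the whitelist. The two xen_*_direct_reloc
 * names added above mark code that is patched at runtime, so absolute
 * relocations against them are expected. */
static const char *safe_abs_relocs[] = {
	"SYSENTER_RETURN",		/* illustrative subset of the list above */
	"xen_irq_disable_direct_reloc",
	"xen_save_fl_direct_reloc",
};

static int is_safe_abs_reloc(const char *sym_name)
{
	size_t i;

	for (i = 0; i < sizeof(safe_abs_relocs) / sizeof(safe_abs_relocs[0]); i++)
		if (!strcmp(sym_name, safe_abs_relocs[i]))
			return 1;
	return 0;
}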
9 changes: 9 additions & 0 deletions trunk/arch/i386/kernel/asm-offsets.c
@@ -17,6 +17,8 @@
#include <asm/thread_info.h>
#include <asm/elf.h>

#include <xen/interface/xen.h>

#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))

@@ -59,6 +61,7 @@ void foo(void)
OFFSET(TI_addr_limit, thread_info, addr_limit);
OFFSET(TI_restart_block, thread_info, restart_block);
OFFSET(TI_sysenter_return, thread_info, sysenter_return);
OFFSET(TI_cpu, thread_info, cpu);
BLANK();

OFFSET(GDS_size, Xgt_desc_struct, size);
@@ -115,4 +118,10 @@ void foo(void)
OFFSET(PARAVIRT_iret, paravirt_ops, iret);
OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
#endif

#ifdef CONFIG_XEN
BLANK();
OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
#endif
}
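The new OFFSET() entries feed the asm-offsets mechanism: each DEFINE() forces the compiler to print a marker line into its assembly output, and the build converts those markers into a generated header of #define constants for use from assembly. A minimal standalone sketch of the trick (the struct layout here is a stand-in assumption; the real one comes from xen/interface/xen.h):

#include <stddef.h>

struct vcpu_info {			/* stand-in layout, assumption only */
	unsigned char evtchn_upcall_pending;
	unsigned char evtchn_upcall_mask;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

void foo(void)
{
	/* "gcc -S" leaves lines like "->XEN_vcpu_info_mask $1 ..." in the
	 * generated .s file; the build scripts convert each one into
	 * "#define XEN_vcpu_info_mask 1" in a generated header. */
	OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
	OFFSET(XEN_vcpu_info_pending, vcpu_info, evtchn_upcall_pending);
}

This is why entry.S can test fields like XEN_vcpu_info_pending directly: the offsets are computed by the C compiler, never hand-maintained.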
85 changes: 85 additions & 0 deletions trunk/arch/i386/kernel/entry.S
@@ -1023,6 +1023,91 @@ ENTRY(kernel_thread_helper)
CFI_ENDPROC
ENDPROC(kernel_thread_helper)

#ifdef CONFIG_XEN
ENTRY(xen_hypervisor_callback)
CFI_STARTPROC
pushl $0
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
TRACE_IRQS_OFF

/* Check to see if we got the event in the critical
region in xen_iret_direct, after we've reenabled
events and checked for pending events. This simulates
iret instruction's behaviour where it delivers a
pending interrupt when enabling interrupts. */
movl PT_EIP(%esp),%eax
cmpl $xen_iret_start_crit,%eax
jb 1f
cmpl $xen_iret_end_crit,%eax
jae 1f

call xen_iret_crit_fixup

1: mov %esp, %eax
call xen_evtchn_do_upcall
jmp ret_from_intr
CFI_ENDPROC
ENDPROC(xen_hypervisor_callback)

# Hypervisor uses this for application faults while it executes.
# We get here for two reasons:
# 1. Fault while reloading DS, ES, FS or GS
# 2. Fault while executing IRET
# Category 1 we fix up by reattempting the load, and zeroing the segment
# register if the load fails.
# Category 2 we fix up by jumping to do_iret_error. We cannot use the
# normal Linux return path in this case because if we use the IRET hypercall
# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
# We distinguish between categories by maintaining a status value in EAX.
ENTRY(xen_failsafe_callback)
CFI_STARTPROC
pushl %eax
CFI_ADJUST_CFA_OFFSET 4
movl $1,%eax
1: mov 4(%esp),%ds
2: mov 8(%esp),%es
3: mov 12(%esp),%fs
4: mov 16(%esp),%gs
testl %eax,%eax
popl %eax
CFI_ADJUST_CFA_OFFSET -4
lea 16(%esp),%esp
CFI_ADJUST_CFA_OFFSET -16
jz 5f
addl $16,%esp
jmp iret_exc # EAX != 0 => Category 2 (Bad IRET)
5: pushl $0 # EAX == 0 => Category 1 (Bad segment)
CFI_ADJUST_CFA_OFFSET 4
SAVE_ALL
jmp ret_from_exception
CFI_ENDPROC

.section .fixup,"ax"
6: xorl %eax,%eax
movl %eax,4(%esp)
jmp 1b
7: xorl %eax,%eax
movl %eax,8(%esp)
jmp 2b
8: xorl %eax,%eax
movl %eax,12(%esp)
jmp 3b
9: xorl %eax,%eax
movl %eax,16(%esp)
jmp 4b
.previous
.section __ex_table,"a"
.align 4
.long 1b,6b
.long 2b,7b
.long 3b,8b
.long 4b,9b
.previous
ENDPROC(xen_failsafe_callback)

#endif /* CONFIG_XEN */

.section .rodata,"a"
#include "syscall_table.S"

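The category dispatch above is terse, so here is a plain-C rendering of the same control flow. This is purely an illustrative sketch (the userspace framing and names are hypothetical); the real mechanism is the numbered labels and __ex_table fixup pairs in the assembly:

#include <stdbool.h>
#include <stdio.h>

/* Simulate reloading one segment register: returns false if the load
 * faults. In the real code the __ex_table fixup then zeroes %eax and
 * the saved selector and retries the load with the null selector. */
static bool reload_segment(const char *seg, bool faults)
{
	(void)seg;
	return !faults;
}

static void failsafe_callback(bool segment_fault)
{
	int eax = 1;		/* assume category 2 (bad IRET) */
	const char *segs[] = { "ds", "es", "fs", "gs" };
	int i;

	for (i = 0; i < 4; i++)
		if (!reload_segment(segs[i], segment_fault))
			eax = 0;	/* a fixup ran: category 1 (bad segment) */

	if (eax)
		puts("category 2: bad IRET -> jmp iret_exc");
	else
		puts("category 1: bad segment -> ret_from_exception");
}

int main(void)
{
	failsafe_callback(false);	/* all loads succeed: IRET must have faulted */
	failsafe_callback(true);	/* a segment load faulted */
	return 0;
}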
5 changes: 4 additions & 1 deletion trunk/arch/i386/kernel/head.S
@@ -510,7 +510,8 @@ ENTRY(_stext)
/*
* BSS section
*/
.section ".bss.page_aligned","w"
.section ".bss.page_aligned","wa"
.align PAGE_SIZE_asm
ENTRY(swapper_pg_dir)
.fill 1024,4,0
ENTRY(swapper_pg_pmd)
@@ -538,6 +539,8 @@ fault_msg:
.ascii "Int %d: CR2 %p err %p EIP %p CS %p flags %p\n"
.asciz "Stack: %p %p %p %p %p %p %p %p\n"

#include "../xen/xen-head.S"

/*
* The IDT and GDT 'descriptors' are a strange 48-bit object
* only used by the lidt and lgdt instructions. They are not
37 changes: 36 additions & 1 deletion trunk/arch/i386/kernel/paravirt.c
@@ -228,6 +228,41 @@ static int __init print_banner(void)
}
core_initcall(print_banner);

static struct resource reserve_ioports = {
.start = 0,
.end = IO_SPACE_LIMIT,
.name = "paravirt-ioport",
.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};

static struct resource reserve_iomem = {
.start = 0,
.end = -1,
.name = "paravirt-iomem",
.flags = IORESOURCE_MEM | IORESOURCE_BUSY,
};

/*
* Reserve the whole legacy IO space to prevent any legacy drivers
* from wasting time probing for their hardware. This is a fairly
* brute-force approach to disabling all non-virtual drivers.
*
* Note that this must be called very early to have any effect.
*/
int paravirt_disable_iospace(void)
{
int ret;

ret = request_resource(&ioport_resource, &reserve_ioports);
if (ret == 0) {
ret = request_resource(&iomem_resource, &reserve_iomem);
if (ret)
release_resource(&reserve_ioports);
}

return ret;
}

struct paravirt_ops paravirt_ops = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -267,7 +302,7 @@ struct paravirt_ops paravirt_ops = {
.write_msr = native_write_msr_safe,
.read_tsc = native_read_tsc,
.read_pmc = native_read_pmc,
-.get_scheduled_cycles = native_read_tsc,
+.sched_clock = native_sched_clock,
.get_cpu_khz = native_calculate_cpu_khz,
.load_tr_desc = native_load_tr_desc,
.set_ldt = native_set_ldt,
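paravirt_disable_iospace() claims the whole port and memory ranges as busy, so a legacy driver's request_region()/request_mem_region() probe fails immediately instead of poking hardware that is not there. A hedged sketch of how an early guest setup path might call it; the helper name and privilege check are illustrative assumptions, not code from this commit:

/* Sketch only, kernel context assumed: a guest without direct hardware
 * access reserves all I/O space very early, before driver initcalls
 * can run their probes. Both example_* names are hypothetical. */
extern int example_guest_has_io_privilege(void);	/* hypothetical */

static void __init example_guest_setup(void)
{
	int ret;

	if (example_guest_has_io_privilege())
		return;		/* privileged guests keep real I/O access */

	ret = paravirt_disable_iospace();
	if (ret)
		printk(KERN_WARNING
		       "paravirt: could not reserve I/O space (%d)\n", ret);
}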
2 changes: 2 additions & 0 deletions trunk/arch/i386/kernel/setup.c
@@ -601,6 +601,8 @@ void __init setup_arch(char **cmdline_p)
* NOTE: at this point the bootmem allocator is fully available.
*/

paravirt_post_allocator_init();

dmi_scan_machine();

#ifdef CONFIG_X86_GENERICARCH
5 changes: 3 additions & 2 deletions trunk/arch/i386/kernel/smp.c
@@ -22,6 +22,7 @@

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <mach_apic.h>

/*
@@ -249,13 +250,13 @@ static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

/*
-* We cannot call mmdrop() because we are in interrupt context,
+* We cannot call mmdrop() because we are in interrupt context,
* instead update mm->cpu_vm_mask.
*
* We need to reload %cr3 since the page tables may be going
* away from under us..
*/
-static inline void leave_mm (unsigned long cpu)
+void leave_mm(unsigned long cpu)
{
if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
BUG();
8 changes: 3 additions & 5 deletions trunk/arch/i386/kernel/smpboot.c
@@ -148,7 +148,7 @@ void __init smp_alloc_memory(void)
* a given CPU
*/

-static void __cpuinit smp_store_cpu_info(int id)
+void __cpuinit smp_store_cpu_info(int id)
{
struct cpuinfo_x86 *c = cpu_data + id;

@@ -308,8 +307,7 @@ cpumask_t cpu_coregroup_map(int cpu)
/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

-static inline void
-set_cpu_sibling_map(int cpu)
+void set_cpu_sibling_map(int cpu)
{
int i;
struct cpuinfo_x86 *c = cpu_data;
@@ -1144,8 +1143,7 @@ void __init native_smp_prepare_boot_cpu(void)
}

#ifdef CONFIG_HOTPLUG_CPU
-static void
-remove_siblinginfo(int cpu)
+void remove_siblinginfo(int cpu)
{
int sibling;
struct cpuinfo_x86 *c = cpu_data;
23 changes: 15 additions & 8 deletions trunk/arch/i386/kernel/tsc.c
@@ -84,7 +84,7 @@ static inline int check_tsc_unstable(void)
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
*/
-static unsigned long cyc2ns_scale __read_mostly;
+unsigned long cyc2ns_scale __read_mostly;

#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */

@@ -93,15 +93,10 @@ static inline void set_cyc2ns_scale(unsigned long cpu_khz)
cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR)/cpu_khz;
}

-static inline unsigned long long cycles_2_ns(unsigned long long cyc)
-{
-return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
-}

/*
* Scheduler clock - returns current time in nanosec units.
*/
-unsigned long long sched_clock(void)
+unsigned long long native_sched_clock(void)
{
unsigned long long this_offset;

@@ -118,12 +113,24 @@ unsigned long long sched_clock(void)
return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

/* read the Time Stamp Counter: */
-get_scheduled_cycles(this_offset);
+rdtscll(this_offset);

/* return the value in ns */
return cycles_2_ns(this_offset);
}

/* We need to define a real function for sched_clock, to override the
weak default version */
#ifdef CONFIG_PARAVIRT
unsigned long long sched_clock(void)
{
return paravirt_sched_clock();
}
#else
unsigned long long sched_clock(void)
__attribute__((alias("native_sched_clock")));
#endif

unsigned long native_calculate_cpu_khz(void)
{
unsigned long long start, end;
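set_cyc2ns_scale() and the (removed, now shared) cycles_2_ns() implement a fixed-point conversion: ns = cycles * (10^6 / cpu_khz), with the ratio pre-scaled by 2^10 so integer division does not destroy precision. A standalone worked example, assuming a hypothetical 2 GHz CPU:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10	/* 2^10, matching tsc.c above */

int main(void)
{
	unsigned long cpu_khz = 2000000;	/* assumed 2 GHz CPU */
	unsigned long cyc2ns_scale =
		(1000000UL << CYC2NS_SCALE_FACTOR) / cpu_khz;	/* = 512 */
	unsigned long long cyc = 3000000000ULL;	/* 3e9 cycles */

	/* (3e9 * 512) >> 10 = 1.5e9 ns: 3 billion cycles at 2 GHz is
	 * indeed 1.5 seconds, so the fixed-point math checks out. */
	unsigned long long ns = (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;

	printf("%llu cycles -> %llu ns\n", cyc, ns);
	return 0;
}

The CONFIG_PARAVIRT split at the end of the hunk is the companion design choice: paravirt kernels route sched_clock() through paravirt_ops.sched_clock so VMI and Xen can supply their own nanosecond clock, while non-paravirt builds alias sched_clock() straight to native_sched_clock() and pay no indirection.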
4 changes: 2 additions & 2 deletions trunk/arch/i386/kernel/vmi.c
@@ -362,7 +362,7 @@ static void *vmi_kmap_atomic_pte(struct page *page, enum km_type type)
}
#endif

-static void vmi_allocate_pt(u32 pfn)
+static void vmi_allocate_pt(struct mm_struct *mm, u32 pfn)
{
vmi_set_page_type(pfn, VMI_PAGE_L1);
vmi_ops.allocate_page(pfn, VMI_PAGE_L1, 0, 0, 0);
@@ -891,7 +891,7 @@ static inline int __init activate_vmi(void)
paravirt_ops.setup_boot_clock = vmi_time_bsp_init;
paravirt_ops.setup_secondary_clock = vmi_time_ap_init;
#endif
-paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles;
+paravirt_ops.sched_clock = vmi_sched_clock;
paravirt_ops.get_cpu_khz = vmi_cpu_khz;

/* We have true wallclock functions; disable CMOS clock sync */
6 changes: 3 additions & 3 deletions trunk/arch/i386/kernel/vmiclock.c
@@ -64,10 +64,10 @@ int vmi_set_wallclock(unsigned long now)
return 0;
}

-/* paravirt_ops.get_scheduled_cycles = vmi_get_sched_cycles */
-unsigned long long vmi_get_sched_cycles(void)
+/* paravirt_ops.sched_clock = vmi_sched_clock */
+unsigned long long vmi_sched_clock(void)
{
-return vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE);
+return cycles_2_ns(vmi_timer_ops.get_cycle_counter(VMI_CYCLES_AVAILABLE));
}

/* paravirt_ops.get_cpu_khz = vmi_cpu_khz */
1 change: 1 addition & 0 deletions trunk/arch/i386/kernel/vmlinux.lds.S
@@ -88,6 +88,7 @@ SECTIONS

. = ALIGN(4096);
.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
*(.data.page_aligned)
*(.data.idt)
}
