Commit 8020576

---
r: 167489
b: refs/heads/master
c: e9a63a4
h: refs/heads/master
i:
  167487: 3c43425
v: v3
Roland McGrath committed Oct 14, 2009
1 parent 96c7be7 commit 8020576
Showing 42 changed files with 139 additions and 248 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 655bdb0e07384d04d1ecfd709c7aee6cc8bd8793
+refs/heads/master: e9a63a4e559fbdc522072281d05e6b13c1022f4b
30 changes: 0 additions & 30 deletions trunk/Documentation/feature-removal-schedule.txt
@@ -451,33 +451,3 @@ Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
        will also allow making ALSA OSS emulation independent of
        sound_core.  The dependency will be broken then too.
 Who:   Tejun Heo <tj@kernel.org>
-
-----------------------------
-
-What:  Support for VMware's guest paravirtuliazation technique [VMI] will be
-       dropped.
-When:  2.6.37 or earlier.
-Why:   With the recent innovations in CPU hardware acceleration technologies
-       from Intel and AMD, VMware ran a few experiments to compare these
-       techniques to guest paravirtualization technique on VMware's platform.
-       These hardware assisted virtualization techniques have outperformed the
-       performance benefits provided by VMI in most of the workloads. VMware
-       expects that these hardware features will be ubiquitous in a couple of
-       years, as a result, VMware has started a phased retirement of this
-       feature from the hypervisor. We will be removing this feature from the
-       Kernel too. Right now we are targeting 2.6.37 but can retire earlier if
-       technical reasons (read opportunity to remove major chunk of pvops)
-       arise.
-
-       Please note that VMI has always been an optimization and non-VMI kernels
-       still work fine on VMware's platform.
-       Latest versions of VMware's product which support VMI are,
-       Workstation 7.0 and VSphere 4.0 on ESX side, future maintainence
-       releases for these products will continue supporting VMI.
-
-       For more details about VMI retirement take a look at this,
-       http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
-
-Who:   Alok N Kataria <akataria@vmware.com>
-
-----------------------------
7 changes: 0 additions & 7 deletions trunk/MAINTAINERS
@@ -4076,13 +4076,6 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:     Paul Mackerras <paulus@samba.org>
 M:     Ingo Molnar <mingo@elte.hu>
 S:     Supported
-F:     kernel/perf_event.c
-F:     include/linux/perf_event.h
-F:     arch/*/*/kernel/perf_event.c
-F:     arch/*/include/asm/perf_event.h
-F:     arch/*/lib/perf_event.c
-F:     arch/*/kernel/perf_callchain.c
-F:     tools/perf/
 
 PERSONALITY HANDLING
 M:     Christoph Hellwig <hch@infradead.org>
2 changes: 1 addition & 1 deletion trunk/arch/s390/hypfs/hypfs_diag.c
@@ -438,7 +438,7 @@ static int diag204_probe(void)
        }
        if (diag204((unsigned long)SUBC_STIB6 |
                    (unsigned long)INFO_EXT, pages, buf) >= 0) {
-               diag204_store_sc = SUBC_STIB6;
+               diag204_store_sc = SUBC_STIB7;
                diag204_info_type = INFO_EXT;
                goto out;
        }
6 changes: 3 additions & 3 deletions trunk/arch/s390/kernel/processor.c
@@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void)
 
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
-       static const char *hwcap_str[10] = {
+       static const char *hwcap_str[9] = {
                "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
-               "edat", "etf3eh", "highgprs"
+               "edat", "etf3eh"
        };
        struct _lowcore *lc;
        unsigned long n = (unsigned long) v - 1;
@@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                   num_online_cpus(), loops_per_jiffy/(500000/HZ),
                   (loops_per_jiffy/(5000/HZ))%100);
        seq_puts(m, "features\t: ");
-       for (i = 0; i < 10; i++)
+       for (i = 0; i < 9; i++)
                if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                        seq_printf(m, "%s ", hwcap_str[i]);
        seq_puts(m, "\n");
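
The two hunks above shrink the feature table and its loop bound in lock step: bit i of elf_hwcap selects the name at index i of hwcap_str, and dropping "highgprs" shrinks both from 10 to 9. A minimal stand-alone sketch of that pattern (plain C; the elf_hwcap value here is a stub, not anything the kernel computes):

#include <stdio.h>

/* Same bit-to-name mapping as the reverted show_cpuinfo() loop. */
static const char *hwcap_str[9] = {
        "esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
        "edat", "etf3eh"
};

int main(void)
{
        unsigned long elf_hwcap = (1UL << 1) | (1UL << 6); /* stub: zarch + dfp */
        int i;

        /* Print the name of every feature whose bit is set. */
        for (i = 0; i < 9; i++)
                if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                        printf("%s ", hwcap_str[i]);
        printf("\n");
        return 0;
}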
2 changes: 1 addition & 1 deletion trunk/arch/sh/kernel/entry-common.S
@@ -121,7 +121,7 @@ noresched:
 ENTRY(resume_userspace)
        ! r8: current_thread_info
        cli
-       TRACE_IRQS_OFF
+       TRACE_IRQS_OfF
        mov.l   @(TI_FLAGS,r8), r0      ! current_thread_info->flags
        tst     #(_TIF_WORK_MASK & 0xff), r0
        bt/s    __restore_all
37 changes: 10 additions & 27 deletions trunk/arch/sh/kernel/ftrace.c
@@ -291,48 +291,31 @@ struct syscall_metadata *syscall_nr_to_meta(int nr)
        return syscalls_metadata[nr];
 }
 
-int syscall_name_to_nr(char *name)
-{
-       int i;
-
-       if (!syscalls_metadata)
-               return -1;
-       for (i = 0; i < NR_syscalls; i++)
-               if (syscalls_metadata[i])
-                       if (!strcmp(syscalls_metadata[i]->name, name))
-                               return i;
-       return -1;
-}
-
-void set_syscall_enter_id(int num, int id)
-{
-       syscalls_metadata[num]->enter_id = id;
-}
-
-void set_syscall_exit_id(int num, int id)
-{
-       syscalls_metadata[num]->exit_id = id;
-}
-
-static int __init arch_init_ftrace_syscalls(void)
+void arch_init_ftrace_syscalls(void)
 {
        int i;
        struct syscall_metadata *meta;
        unsigned long **psys_syscall_table = &sys_call_table;
+       static atomic_t refs;
 
+       if (atomic_inc_return(&refs) != 1)
+               goto end;
+
        syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
                                        FTRACE_SYSCALL_MAX, GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
-               return -ENOMEM;
+               return;
        }
 
        for (i = 0; i < FTRACE_SYSCALL_MAX; i++) {
                meta = find_syscall_meta(psys_syscall_table[i]);
                syscalls_metadata[i] = meta;
        }
+       return;
 
-       return 0;
+       /* Paranoid: avoid overflow */
+end:
+       atomic_dec(&refs);
 }
-arch_initcall(arch_init_ftrace_syscalls);
 #endif /* CONFIG_FTRACE_SYSCALLS */
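
The restored arch_init_ftrace_syscalls() guards itself with a static atomic counter instead of relying on arch_initcall() to run it exactly once. A stand-alone sketch of that run-once idiom (C11 atomics standing in for the kernel's atomic_t; init_once and do_init are illustrative names, not kernel API):

#include <stdatomic.h>

static atomic_int refs;

/* The first caller sees the pre-increment value 0 and performs the
 * initialization; every later or concurrent caller backs out its own
 * increment so the counter cannot creep toward overflow -- the same
 * "paranoid" dance as the goto end / atomic_dec() pair above. */
void init_once(void (*do_init)(void))
{
        if (atomic_fetch_add(&refs, 1) != 0) {
                atomic_fetch_sub(&refs, 1);
                return;
        }
        do_init();
}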
2 changes: 0 additions & 2 deletions trunk/arch/sh/kernel/setup.c
@@ -549,8 +549,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
        if (cpu == 0)
                seq_printf(m, "machine\t\t: %s\n", get_system_type());
-       else
-               seq_printf(m, "\n");
 
        seq_printf(m, "processor\t: %d\n", cpu);
        seq_printf(m, "cpu family\t: %s\n", init_utsname()->machine);
9 changes: 5 additions & 4 deletions trunk/arch/sh/kernel/signal_32.c
@@ -145,7 +145,7 @@ static inline int restore_sigcontext_fpu(struct sigcontext __user *sc)
 {
        struct task_struct *tsk = current;
 
-       if (!(boot_cpu_data.flags & CPU_HAS_FPU))
+       if (!(current_cpu_data.flags & CPU_HAS_FPU))
                return 0;
 
        set_used_math();
@@ -158,7 +158,7 @@ static inline int save_sigcontext_fpu(struct sigcontext __user *sc,
 {
        struct task_struct *tsk = current;
 
-       if (!(boot_cpu_data.flags & CPU_HAS_FPU))
+       if (!(current_cpu_data.flags & CPU_HAS_FPU))
                return 0;
 
        if (!used_math()) {
@@ -199,7 +199,7 @@ restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p
 #undef COPY
 
 #ifdef CONFIG_SH_FPU
-       if (boot_cpu_data.flags & CPU_HAS_FPU) {
+       if (current_cpu_data.flags & CPU_HAS_FPU) {
                int owned_fp;
                struct task_struct *tsk = current;
 
@@ -472,7 +472,6 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
                err |= __put_user(OR_R0_R0, &frame->retcode[6]);
                err |= __put_user((__NR_rt_sigreturn), &frame->retcode[7]);
                regs->pr = (unsigned long) frame->retcode;
-               flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
        }
 
        if (err)
@@ -498,6 +497,8 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
        pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
                 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
 
+       flush_icache_range(regs->pr, regs->pr + sizeof(frame->retcode));
+
        return 0;
 
 give_sigsegv:
2 changes: 0 additions & 2 deletions trunk/arch/sh/kernel/smp.c
@@ -35,8 +35,6 @@ static inline void __init smp_store_cpu_info(unsigned int cpu)
 {
        struct sh_cpuinfo *c = cpu_data + cpu;
 
-       memcpy(c, &boot_cpu_data, sizeof(struct sh_cpuinfo));
-
        c->loops_per_jiffy = loops_per_jiffy;
 }
 
7 changes: 3 additions & 4 deletions trunk/arch/sh/kernel/traps_32.c
@@ -25,7 +25,6 @@
 #include <linux/kexec.h>
 #include <linux/limits.h>
 #include <linux/proc_fs.h>
-#include <linux/sysfs.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/fpu.h>
@@ -160,12 +159,12 @@ void die(const char * str, struct pt_regs * regs, long err)
 
        oops_enter();
 
-       spin_lock_irq(&die_lock);
        console_verbose();
+       spin_lock_irq(&die_lock);
        bust_spinlocks(1);
 
        printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-       sysfs_printk_last_file();
+
        print_modules();
        show_regs(regs);
 
@@ -181,7 +180,6 @@ void die(const char * str, struct pt_regs * regs, long err)
        bust_spinlocks(0);
        add_taint(TAINT_DIE);
        spin_unlock_irq(&die_lock);
-       oops_exit();
 
        if (kexec_should_crash(current))
                crash_kexec(regs);
@@ -192,6 +190,7 @@ void die(const char * str, struct pt_regs * regs, long err)
        if (panic_on_oops)
                panic("Fatal exception");
 
+       oops_exit();
        do_exit(SIGSEGV);
 }
 
2 changes: 1 addition & 1 deletion trunk/arch/sh/mm/cache.c
@@ -128,7 +128,7 @@ void __update_cache(struct vm_area_struct *vma,
                return;
 
        page = pfn_to_page(pfn);
-       if (pfn_valid(pfn)) {
+       if (pfn_valid(pfn) && page_mapping(page)) {
                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
                if (dirty) {
                        unsigned long addr = (unsigned long)page_address(page);
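
This hunk, like the sparc64 flush_dcache() hunk further down, re-adds a page_mapping() test: page_mapping() is non-NULL only for pages owned by an address_space, so the deferred D-cache maintenance is restricted to page-cache pages and anonymous pages skip it. A compressed control-flow sketch under that assumption (the two writeback helpers are illustrative stubs, not kernel functions):

struct page;
struct address_space;

/* Declarations standing in for the kernel helpers used above. */
extern int pfn_valid(unsigned long pfn);
extern struct page *pfn_to_page(unsigned long pfn);
extern struct address_space *page_mapping(struct page *page);
extern int test_and_clear_dcache_dirty(struct page *page); /* illustrative */
extern void write_back_dcache(struct page *page);          /* illustrative */

void update_cache_sketch(unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);

        /* Anonymous pages have no mapping, so the added guard skips
         * them; only valid page-cache pages reach the writeback. */
        if (pfn_valid(pfn) && page_mapping(page) &&
            test_and_clear_dcache_dirty(page))
                write_back_dcache(page);
}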
4 changes: 2 additions & 2 deletions trunk/arch/sparc/kernel/ldc.c
@@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
        snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
 
        err = request_irq(lp->cfg.rx_irq, ldc_rx,
-                         IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
+                         IRQF_SAMPLE_RANDOM | IRQF_SHARED,
                          lp->rx_irq_name, lp);
        if (err)
                return err;
 
        err = request_irq(lp->cfg.tx_irq, ldc_tx,
-                         IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
+                         IRQF_SAMPLE_RANDOM | IRQF_SHARED,
                          lp->tx_irq_name, lp);
        if (err) {
                free_irq(lp->cfg.rx_irq, lp);
2 changes: 1 addition & 1 deletion trunk/arch/sparc/kernel/perf_event.c
@@ -437,7 +437,7 @@ static const struct sparc_pmu niagara2_pmu = {
        .lower_shift = 6,
        .event_mask = 0xfff,
        .hv_bit = 0x8,
-       .irq_bit = 0x30,
+       .irq_bit = 0x03,
        .upper_nop = 0x220,
        .lower_nop = 0x220,
 };
2 changes: 1 addition & 1 deletion trunk/arch/sparc/mm/init_64.c
@@ -265,7 +265,7 @@ static void flush_dcache(unsigned long pfn)
        struct page *page;
 
        page = pfn_to_page(pfn);
-       if (page) {
+       if (page && page_mapping(page)) {
                unsigned long pg_flags;
 
                pg_flags = page->flags;
11 changes: 1 addition & 10 deletions trunk/arch/x86/Kconfig
@@ -491,7 +491,7 @@ if PARAVIRT_GUEST
 source "arch/x86/xen/Kconfig"
 
 config VMI
-       bool "VMI Guest support (DEPRECATED)"
+       bool "VMI Guest support"
        select PARAVIRT
        depends on X86_32
        ---help---
@@ -500,15 +500,6 @@ config VMI
          at the moment), by linking the kernel to a GPL-ed ROM module
          provided by the hypervisor.
 
-         As of September 2009, VMware has started a phased retirement
-         of this feature from VMware's products. Please see
-         feature-removal-schedule.txt for details. If you are
-         planning to enable this option, please note that you cannot
-         live migrate a VMI enabled VM to a future VMware product,
-         which doesn't support VMI. So if you expect your kernel to
-         seamlessly migrate to newer VMware products, keep this
-         disabled.
-
 config KVM_CLOCK
        bool "KVM paravirtualized clock"
        select PARAVIRT
28 changes: 24 additions & 4 deletions trunk/arch/x86/include/asm/paravirt.h
@@ -840,22 +840,42 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-       return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
+       unsigned long f;
+
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
+                    : "=a"(f)
+                    : paravirt_type(pv_irq_ops.save_fl),
+                      paravirt_clobber(CLBR_EAX)
+                    : "memory", "cc");
+       return f;
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-       PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
+                    : "=a"(f)
+                    : PV_FLAGS_ARG(f),
+                      paravirt_type(pv_irq_ops.restore_fl),
+                      paravirt_clobber(CLBR_EAX)
+                    : "memory", "cc");
 }
 
 static inline void raw_local_irq_disable(void)
 {
-       PVOP_VCALLEE0(pv_irq_ops.irq_disable);
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
+                    :
+                    : paravirt_type(pv_irq_ops.irq_disable),
+                      paravirt_clobber(CLBR_EAX)
+                    : "memory", "eax", "cc");
 }
 
 static inline void raw_local_irq_enable(void)
 {
-       PVOP_VCALLEE0(pv_irq_ops.irq_enable);
+       asm volatile(paravirt_alt(PARAVIRT_CALL)
+                    :
+                    : paravirt_type(pv_irq_ops.irq_enable),
+                      paravirt_clobber(CLBR_EAX)
+                    : "memory", "eax", "cc");
 }
 
 static inline unsigned long __raw_local_irq_save(void)
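
Context for the hunk above: the PVOP_CALLEE0/PVOP_VCALLEE0 wrappers being removed encode the paravirt "callee-saved" convention, in which the call target promises to preserve every register except the return register, so the call site lists only %eax (plus flags and memory) as clobbered; the revert open-codes the same asm. A minimal user-space illustration of why that convention helps (gcc/clang x86 inline asm; pv_save_fl is a stand-in slot, not a kernel symbol):

/* Indirect call through a function-pointer slot that declares only
 * %eax, flags and memory as clobbered.  Because the callee preserves
 * everything else, the compiler may keep live values in the remaining
 * registers across the call -- the point of CLBR_EAX above. */
static unsigned long (*pv_save_fl)(void);

static inline unsigned long sketch_save_flags(void)
{
        unsigned long f;

        asm volatile("call *%1"
                     : "=a" (f)          /* return value arrives in %eax */
                     : "m" (pv_save_fl)  /* memory slot holding the target */
                     : "memory", "cc");
        return f;
}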
10 changes: 4 additions & 6 deletions trunk/arch/x86/include/asm/paravirt_types.h
@@ -494,11 +494,10 @@ int paravirt_disable_iospace(void);
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
 #else  /* CONFIG_X86_64 */
-/* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS                                        \
        unsigned long __edi = __edi, __esi = __esi,     \
-               __edx = __edx, __ecx = __ecx, __eax = __eax
-#define PVOP_CALL_ARGS         PVOP_VCALL_ARGS
+               __edx = __edx, __ecx = __ecx
+#define PVOP_CALL_ARGS         PVOP_VCALL_ARGS, __eax
 
 #define PVOP_CALL_ARG1(x)              "D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)              "S" ((unsigned long)(x))
@@ -510,7 +509,6 @@ int paravirt_disable_iospace(void);
                                "=c" (__ecx)
 #define PVOP_CALL_CLOBBERS     PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
-/* void functions are still allowed [re]ax for scratch */
 #define PVOP_VCALLEE_CLOBBERS  "=a" (__eax)
 #define PVOP_CALLEE_CLOBBERS   PVOP_VCALLEE_CLOBBERS
 
@@ -585,8 +583,8 @@ int paravirt_disable_iospace(void);
                      VEXTRA_CLOBBERS,                                  \
                      pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(op, pre, post, ...)                         \
-       ____PVOP_VCALL(op.func, CLBR_RET_REG,                           \
+#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)                        \
+       ____PVOP_CALL(rettype, op.func, CLBR_RET_REG,                   \
                      PVOP_VCALLEE_CLOBBERS, ,                          \
                      pre, post, ##__VA_ARGS__)
 
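
For reference on the x86-64 branch of the file patched above: the PVOP_CALL_ARGn macros pin call arguments to the System V argument registers through asm constraint letters, where "D" is %rdi, "S" is %rsi, "d" is %rdx and "c" is %rcx. A small hedged example of the same binding outside the kernel (the lea is there purely to make the register pinning visible; nothing kernel-specific is assumed):

/* "D" forces a into %rdi and "S" forces b into %rsi, exactly as
 * PVOP_CALL_ARG1/ARG2 do for paravirt call arguments; the output
 * register is left for the compiler to choose. */
static inline unsigned long bind_args_demo(unsigned long a, unsigned long b)
{
        unsigned long out;

        asm("lea (%1,%2), %0"   /* out = a + b via base+index addressing */
            : "=r" (out)
            : "D" (a), "S" (b));
        return out;
}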
(Diff truncated: the remaining 24 changed files are not shown.)
