Commit

---
r: 167474
b: refs/heads/master
c: d470c05
h: refs/heads/master
v: v3
Linus Torvalds committed Oct 14, 2009
1 parent 0680e26 commit 5ea3a82
Showing 71 changed files with 661 additions and 415 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 457b646189e47f9d48588809da3e806ec363f219
refs/heads/master: d470c05bedc27dbd2df9d0bb6fd82336e4ff43db
30 changes: 30 additions & 0 deletions trunk/Documentation/feature-removal-schedule.txt
@@ -451,3 +451,33 @@ Why: OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
will also allow making ALSA OSS emulation independent of
sound_core. The dependency will be broken then too.
Who: Tejun Heo <tj@kernel.org>

----------------------------

What: Support for VMware's guest paravirtualization technique [VMI] will be
dropped.
When: 2.6.37 or earlier.
Why: With the recent innovations in CPU hardware acceleration technologies
from Intel and AMD, VMware ran a few experiments to compare these
techniques to the guest paravirtualization technique on VMware's platform.
These hardware-assisted virtualization techniques have outperformed VMI
in most of the workloads. VMware expects these hardware features to be
ubiquitous in a couple of years; as a result, VMware has started a phased
retirement of this feature from the hypervisor. We will be removing this
feature from the kernel too. Right now we are targeting 2.6.37, but it can
be retired earlier if technical reasons (read: the opportunity to remove a
major chunk of pvops) arise.

Please note that VMI has always been an optimization and non-VMI kernels
still work fine on VMware's platform.
The latest versions of VMware's products which support VMI are
Workstation 7.0 and vSphere 4.0 on the ESX side; future maintenance
releases for these products will continue supporting VMI.

For more details about the VMI retirement, take a look at:
http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html

Who: Alok N Kataria <akataria@vmware.com>

----------------------------
16 changes: 12 additions & 4 deletions trunk/Documentation/filesystems/ext3.txt
@@ -123,10 +123,18 @@ resuid=n The user ID which may use the reserved blocks.

sb=n Use alternate superblock at this location.

quota
noquota
grpquota
usrquota
quota These options are ignored by the filesystem. They
noquota are used only by quota tools to recognize volumes
grpquota where quota should be turned on. See documentation
usrquota in the quota-tools package for more details
(http://sourceforge.net/projects/linuxquota).

jqfmt=<quota type> These options tell filesystem details about quota
usrjquota=<file> so that quota information can be properly updated
grpjquota=<file> during journal replay. They replace the above
quota options. See documentation in the quota-tools
package for more details
(http://sourceforge.net/projects/linuxquota).

bh (*) ext3 associates buffer heads to data pages to
nobh (a) cache disk block mapping information
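
A usage sketch for the journaled quota options documented above; the device, mount point, quota file names, and the vfsv0 format below are illustrative assumptions, not taken from this commit (quotacheck/quotaon still have to be run as usual):

    # journaled quota: quota state stays consistent through journal replay
    mount -t ext3 -o jqfmt=vfsv0,usrjquota=aquota.user,grpjquota=aquota.group /dev/sda1 /mnt/data

    # legacy style: usrquota/grpquota are only hints read by the quota tools
    mount -t ext3 -o usrquota,grpquota /dev/sda1 /mnt/data
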
1 change: 1 addition & 0 deletions trunk/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -359,6 +359,7 @@ STAC9227/9228/9229/927x
5stack-no-fp D965 5stack without front panel
dell-3stack Dell Dimension E520
dell-bios Fixes with Dell BIOS setup
volknob Fixes with volume-knob widget 0x24
auto BIOS setup (default)

STAC92HD71B*
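
Model strings like the new volknob entry are normally selected through the snd-hda-intel driver's model parameter; a hedged sketch of how that could be applied (the modprobe.d file name is an assumption):

    # /etc/modprobe.d/hda.conf -- request the volume-knob fixup on STAC9227/927x codecs
    options snd-hda-intel model=volknob
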
7 changes: 7 additions & 0 deletions trunk/MAINTAINERS
@@ -4076,6 +4076,13 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
S: Supported
F: kernel/perf_event.c
F: include/linux/perf_event.h
F: arch/*/*/kernel/perf_event.c
F: arch/*/include/asm/perf_event.h
F: arch/*/lib/perf_event.c
F: arch/*/kernel/perf_callchain.c
F: tools/perf/

PERSONALITY HANDLING
M: Christoph Hellwig <hch@infradead.org>
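
With these F: patterns in place, the usual MAINTAINERS tooling can map perf paths back to this entry; a hedged example invocation, run from the top of the kernel tree (output omitted):

    # list the maintainers and lists to CC for a perf core change
    ./scripts/get_maintainer.pl -f kernel/perf_event.c
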
46 changes: 2 additions & 44 deletions trunk/Makefile
@@ -179,46 +179,9 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
# Alternatively CROSS_COMPILE can be set in the environment.
# Default value for CROSS_COMPILE is not to prefix executables
# Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
#
# To force ARCH and CROSS_COMPILE settings include kernel.* files
# in the kernel tree - do not patch this file.
export KBUILD_BUILDHOST := $(SUBARCH)

# Kbuild save the ARCH and CROSS_COMPILE setting in kernel.* files.
# Restore these settings and check that user did not specify
# conflicting values.

saved_arch := $(shell cat include/generated/kernel.arch 2> /dev/null)
saved_cross := $(shell cat include/generated/kernel.cross 2> /dev/null)

ifneq ($(CROSS_COMPILE),)
ifneq ($(saved_cross),)
ifneq ($(CROSS_COMPILE),$(saved_cross))
$(error CROSS_COMPILE changed from \
"$(saved_cross)" to \
to "$(CROSS_COMPILE)". \
Use "make mrproper" to fix it up)
endif
endif
else
CROSS_COMPILE := $(saved_cross)
endif

ifneq ($(ARCH),)
ifneq ($(saved_arch),)
ifneq ($(saved_arch),$(ARCH))
$(error ARCH changed from \
"$(saved_arch)" to "$(ARCH)". \
Use "make mrproper" to fix it up)
endif
endif
else
ifneq ($(saved_arch),)
ARCH := $(saved_arch)
else
ARCH := $(SUBARCH)
endif
endif
ARCH ?= $(SUBARCH)
CROSS_COMPILE ?=

# Architecture as present in compile.h
UTS_MACHINE := $(ARCH)
@@ -483,11 +446,6 @@ ifeq ($(config-targets),1)
include $(srctree)/arch/$(SRCARCH)/Makefile
export KBUILD_DEFCONFIG KBUILD_KCONFIG

# save ARCH & CROSS_COMPILE settings
$(shell mkdir -p include/generated && \
echo $(ARCH) > include/generated/kernel.arch && \
echo $(CROSS_COMPILE) > include/generated/kernel.cross)

config: scripts_basic outputmakefile FORCE
$(Q)mkdir -p include/linux include/config
$(Q)$(MAKE) $(build)=scripts/kconfig $@
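
With the saved kernel.arch/kernel.cross mechanism removed, ARCH and CROSS_COMPILE simply default from the environment or the command line on every invocation; a hedged sketch of a cross build under the simplified scheme (the ARM toolchain prefix is only an illustration):

    # export the variables, or pass them on each make invocation
    make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- defconfig
    make ARCH=arm CROSS_COMPILE=arm-linux-gnueabi- zImage
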
11 changes: 10 additions & 1 deletion trunk/arch/x86/Kconfig
@@ -491,7 +491,7 @@ if PARAVIRT_GUEST
source "arch/x86/xen/Kconfig"

config VMI
bool "VMI Guest support"
bool "VMI Guest support (DEPRECATED)"
select PARAVIRT
depends on X86_32
---help---
@@ -500,6 +500,15 @@ config VMI
at the moment), by linking the kernel to a GPL-ed ROM module
provided by the hypervisor.

As of September 2009, VMware has started a phased retirement
of this feature from VMware's products. Please see
feature-removal-schedule.txt for details. If you are
planning to enable this option, please note that you cannot
live migrate a VMI-enabled VM to a future VMware product
that doesn't support VMI. So if you expect your kernel to
seamlessly migrate to newer VMware products, keep this
disabled.

config KVM_CLOCK
bool "KVM paravirtualized clock"
select PARAVIRT
28 changes: 4 additions & 24 deletions trunk/arch/x86/include/asm/paravirt.h
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)

static inline unsigned long __raw_local_save_flags(void)
{
unsigned long f;

asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: paravirt_type(pv_irq_ops.save_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc");
return f;
return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
}

static inline void raw_local_irq_restore(unsigned long f)
{
asm volatile(paravirt_alt(PARAVIRT_CALL)
: "=a"(f)
: PV_FLAGS_ARG(f),
paravirt_type(pv_irq_ops.restore_fl),
paravirt_clobber(CLBR_EAX)
: "memory", "cc");
PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
}

static inline void raw_local_irq_disable(void)
{
asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_disable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc");
PVOP_VCALLEE0(pv_irq_ops.irq_disable);
}

static inline void raw_local_irq_enable(void)
{
asm volatile(paravirt_alt(PARAVIRT_CALL)
:
: paravirt_type(pv_irq_ops.irq_enable),
paravirt_clobber(CLBR_EAX)
: "memory", "eax", "cc");
PVOP_VCALLEE0(pv_irq_ops.irq_enable);
}

static inline unsigned long __raw_local_irq_save(void)
10 changes: 6 additions & 4 deletions trunk/arch/x86/include/asm/paravirt_types.h
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
#define EXTRA_CLOBBERS
#define VEXTRA_CLOBBERS
#else /* CONFIG_X86_64 */
/* [re]ax isn't an arg, but the return val */
#define PVOP_VCALL_ARGS \
unsigned long __edi = __edi, __esi = __esi, \
__edx = __edx, __ecx = __ecx
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS, __eax
__edx = __edx, __ecx = __ecx, __eax = __eax
#define PVOP_CALL_ARGS PVOP_VCALL_ARGS

#define PVOP_CALL_ARG1(x) "D" ((unsigned long)(x))
#define PVOP_CALL_ARG2(x) "S" ((unsigned long)(x))
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
"=c" (__ecx)
#define PVOP_CALL_CLOBBERS PVOP_VCALL_CLOBBERS, "=a" (__eax)

/* void functions are still allowed [re]ax for scratch */
#define PVOP_VCALLEE_CLOBBERS "=a" (__eax)
#define PVOP_CALLEE_CLOBBERS PVOP_VCALLEE_CLOBBERS

@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
VEXTRA_CLOBBERS, \
pre, post, ##__VA_ARGS__)

#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...) \
____PVOP_CALL(rettype, op.func, CLBR_RET_REG, \
#define __PVOP_VCALLEESAVE(op, pre, post, ...) \
____PVOP_VCALL(op.func, CLBR_RET_REG, \
PVOP_VCALLEE_CLOBBERS, , \
pre, post, ##__VA_ARGS__)

2 changes: 0 additions & 2 deletions trunk/arch/x86/kernel/irq.c
@@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
__func__, smp_processor_id(), vector, irq);
}

run_local_timers();
irq_exit();

set_irq_regs(old_regs);
@@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs)
if (generic_interrupt_extension)
generic_interrupt_extension();

run_local_timers();
irq_exit();

set_irq_regs(old_regs);
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/pci-dma.c
@@ -311,7 +311,7 @@ void pci_iommu_shutdown(void)
amd_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
rootfs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */
1 change: 0 additions & 1 deletion trunk/arch/x86/kernel/smp.c
@@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
inc_irq_stat(irq_resched_count);
run_local_timers();
/*
* KVM uses this interrupt to force a cpu out of guest mode
*/
3 changes: 2 additions & 1 deletion trunk/arch/x86/kernel/time.c
@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
#ifdef CONFIG_FRAME_POINTER
return *(unsigned long *)(regs->bp + sizeof(long));
#else
unsigned long *sp = (unsigned long *)regs->sp;
unsigned long *sp =
(unsigned long *)kernel_stack_pointer(regs);
/*
* Return address is either directly at stack pointer
* or above a saved flags. Eflags has bits 22-31 zero,
12 changes: 10 additions & 2 deletions trunk/arch/x86/kernel/trampoline.c
@@ -3,8 +3,16 @@
#include <asm/trampoline.h>
#include <asm/e820.h>

#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
#define __trampinit
#define __trampinitdata
#else
#define __trampinit __cpuinit
#define __trampinitdata __cpuinitdata
#endif

/* ready for x86_64 and x86 */
unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);

void __init reserve_trampoline_memory(void)
{
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
* bootstrap into the page concerned. The caller
* has made sure it's suitably aligned.
*/
unsigned long __cpuinit setup_trampoline(void)
unsigned long __trampinit setup_trampoline(void)
{
memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
return virt_to_phys(trampoline_base);
4 changes: 4 additions & 0 deletions trunk/arch/x86/kernel/trampoline_64.S
@@ -32,8 +32,12 @@
#include <asm/segment.h>
#include <asm/processor-flags.h>

#ifdef CONFIG_ACPI_SLEEP
.section .rodata, "a", @progbits
#else
/* We can free up the trampoline after bootup if cpu hotplug is not supported. */
__CPUINITRODATA
#endif
.code16

ENTRY(trampoline_data)
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/vmi_32.c
@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)

pv_info.paravirt_enabled = 1;
pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
pv_info.name = "vmi";
pv_info.name = "vmi [deprecated]";

pv_init_ops.patch = vmi_patch;

16 changes: 4 additions & 12 deletions trunk/block/blk-core.c
@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
part_stat_inc(cpu, part, merges[rw]);
else {
part_round_stats(cpu, part);
part_inc_in_flight(part);
part_inc_in_flight(part, rw);
}

part_stat_unlock();
@@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
if (now == part->stamp)
return;

if (part->in_flight) {
if (part_in_flight(part)) {
__part_stat_add(cpu, part, time_in_queue,
part->in_flight * (now - part->stamp));
part_in_flight(part) * (now - part->stamp));
__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
}
part->stamp = now;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
part_stat_inc(cpu, part, ios[rw]);
part_stat_add(cpu, part, ticks[rw], duration);
part_round_stats(cpu, part);
part_dec_in_flight(part);
part_dec_in_flight(part, rw);

part_stat_unlock();
}
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);

int kblockd_schedule_delayed_work(struct request_queue *q,
struct delayed_work *work,
unsigned long delay)
{
return queue_delayed_work(kblockd_workqueue, work, delay);
}
EXPORT_SYMBOL(kblockd_schedule_delayed_work);

int __init blk_dev_init(void)
{
BUILD_BUG_ON(__REQ_NR_BITS > 8 *
2 changes: 1 addition & 1 deletion trunk/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));

part_round_stats(cpu, part);
part_dec_in_flight(part);
part_dec_in_flight(part, rq_data_dir(req));

part_stat_unlock();
}