Commit 979c2a2

---
r: 223441
b: refs/heads/master
c: 5167695
h: refs/heads/master
i:
  223439: 3b02b34
v: v3
Peter Zijlstra authored and Ingo Molnar committed Dec 8, 2010
1 parent 428f889 commit 979c2a2
Showing 14 changed files with 59 additions and 75 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 147dd5610c8d1bacb88a6c1dfdaceaf257946ed0
+refs/heads/master: 5167695753c63444a9e6cbbef136200a16c7a225
2 changes: 1 addition & 1 deletion trunk/arch/x86/boot/compressed/misc.c
@@ -355,7 +355,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
 	if (heap > 0x3fffffffffffUL)
 		error("Destination address too large");
 #else
-	if (heap > ((-__PAGE_OFFSET-(128<<20)-1) & 0x7fffffff))
+	if (heap > ((-__PAGE_OFFSET-(512<<20)-1) & 0x7fffffff))
 		error("Destination address too large");
 #endif
 #ifndef CONFIG_RELOCATABLE
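
Aside (not part of the commit): a minimal standalone sketch of what the new 32-bit bound evaluates to, assuming the default 3G/1G split where __PAGE_OFFSET is 0xC0000000. The names here are illustrative only.

#include <stdio.h>

/* Assumed value: CONFIG_PAGE_OFFSET for the default 3G/1G split. */
#define __PAGE_OFFSET 0xC0000000UL

int main(void)
{
	/*
	 * Same expression as the #else branch above: -__PAGE_OFFSET is
	 * the size of the lowmem window (1 GiB here), 512 MiB is the
	 * kernel image mapping budget, and the final mask keeps the
	 * bound a positive 31-bit value.
	 */
	unsigned long bound = (-__PAGE_OFFSET - (512 << 20) - 1) & 0x7fffffff;

	printf("heap must stay below %#lx (~%lu MiB)\n", bound, bound >> 20);
	/* prints 0x1fffffff, ~511 MiB */
	return 0;
}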
8 changes: 0 additions & 8 deletions trunk/arch/x86/kernel/apic/apic.c
@@ -1389,14 +1389,6 @@ void __cpuinit end_local_APIC_setup(void)
 
 	setup_apic_nmi_watchdog(NULL);
 	apic_pm_activate();
-
-	/*
-	 * Now that local APIC setup is completed for BP, configure the fault
-	 * handling for interrupt remapping.
-	 */
-	if (!smp_processor_id() && intr_remapping_enabled)
-		enable_drhd_fault_handling();
-
 }
 
 #ifdef CONFIG_X86_X2APIC
4 changes: 2 additions & 2 deletions trunk/arch/x86/kernel/apic/io_apic.c
@@ -2430,12 +2430,13 @@ static void ack_apic_level(struct irq_data *data)
 {
 	struct irq_cfg *cfg = data->chip_data;
 	int i, do_unmask_irq = 0, irq = data->irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long v;
 
 	irq_complete_move(cfg);
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
+	if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
 		do_unmask_irq = 1;
 		mask_ioapic(cfg);
 	}
@@ -3412,7 +3413,6 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	msg.data |= MSI_DATA_VECTOR(cfg->vector);
 	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
 	msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-	msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
 
 	dmar_msi_write(irq, &msg);
 
7 changes: 7 additions & 0 deletions trunk/arch/x86/kernel/apic/probe_64.c
@@ -79,6 +79,13 @@ void __init default_setup_apic_routing(void)
 		/* need to update phys_pkg_id */
 		apic->phys_pkg_id = apicid_phys_pkg_id;
 	}
+
+	/*
+	 * Now that apic routing model is selected, configure the
+	 * fault handling for intr remapping.
+	 */
+	if (intr_remapping_enabled)
+		enable_drhd_fault_handling();
 }
 
 /* Same for both flat and physical. */
12 changes: 5 additions & 7 deletions trunk/arch/x86/kernel/head_32.S
@@ -60,18 +60,16 @@
 #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
 #endif
 
-/* Number of possible pages in the lowmem region */
-LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
-
 /* Enough space to fit pagetables for the low memory linear map */
-MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
+MAPPING_BEYOND_END = \
+	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
 
 /*
- * Worst-case size of the kernel mapping we need to make:
- * a relocatable kernel can live anywhere in lowmem, so we need to be able
- * to map all of lowmem.
+ * Worst-case size of the kernel itself, plus the extra we need
+ * to map for the linear map.
 */
-KERNEL_PAGES = LOWMEM_PAGES
+KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
 
 INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
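
A back-of-the-envelope check (my illustration, not part of the commit) of what these assembler constants work out to, assuming the default 3G/1G split, 4 KiB pages, a non-PAE build (PTRS_PER_PGD = 1024) and the usual 512 MiB KERNEL_IMAGE_SIZE:

#include <stdio.h>

/* Assumed config values; the real ones come from the kernel headers. */
#define __PAGE_OFFSET     0xC0000000UL
#define PAGE_SHIFT        12
#define PTRS_PER_PGD      1024UL	/* non-PAE: one PGD entry maps 4 MiB */
#define KERNEL_IMAGE_SIZE (512UL << 20)

#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)

int main(void)
{
	/* Pagetable space needed to map all of lowmem (the linear map). */
	unsigned long beyond =
		PAGE_TABLE_SIZE(((1ULL << 32) - __PAGE_OFFSET) >> PAGE_SHIFT)
			<< PAGE_SHIFT;
	/* Worst case: the whole kernel image plus that extra mapping. */
	unsigned long kernel_pages = (KERNEL_IMAGE_SIZE + beyond) >> PAGE_SHIFT;

	printf("MAPPING_BEYOND_END = %#lx (%lu KiB)\n", beyond, beyond >> 10);
	printf("KERNEL_PAGES       = %lu\n", kernel_pages);	/* 131328 */
	return 0;
}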
26 changes: 10 additions & 16 deletions trunk/arch/x86/kernel/hpet.c
@@ -27,9 +27,6 @@
 #define HPET_DEV_FSB_CAP		0x1000
 #define HPET_DEV_PERI_CAP		0x2000
 
-#define HPET_MIN_CYCLES			128
-#define HPET_MIN_PROG_DELTA		(HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
-
 #define EVT_TO_HPET_DEV(evt) container_of(evt, struct hpet_dev, evt)
 
 /*
@@ -302,9 +299,8 @@ static void hpet_legacy_clockevent_register(void)
 	/* Calculate the min / max delta */
 	hpet_clockevent.max_delta_ns = clockevent_delta2ns(0x7FFFFFFF,
							   &hpet_clockevent);
-	/* Setup minimum reprogramming delta. */
-	hpet_clockevent.min_delta_ns = clockevent_delta2ns(HPET_MIN_PROG_DELTA,
-							   &hpet_clockevent);
+	/* 5 usec minimum reprogramming delta. */
+	hpet_clockevent.min_delta_ns = 5000;
 
 	/*
 	 * Start hpet with the boot cpu mask and make it
@@ -397,24 +393,22 @@ static int hpet_next_event(unsigned long delta,
 	 * the wraparound into account) nor a simple count down event
 	 * mode. Further the write to the comparator register is
 	 * delayed internally up to two HPET clock cycles in certain
-	 * chipsets (ATI, ICH9,10). Some newer AMD chipsets have even
-	 * longer delays. We worked around that by reading back the
-	 * compare register, but that required another workaround for
-	 * ICH9,10 chips where the first readout after write can
-	 * return the old stale value. We already had a minimum
-	 * programming delta of 5us enforced, but a NMI or SMI hitting
+	 * chipsets (ATI, ICH9,10). We worked around that by reading
+	 * back the compare register, but that required another
+	 * workaround for ICH9,10 chips where the first readout after
+	 * write can return the old stale value. We already have a
+	 * minimum delta of 5us enforced, but a NMI or SMI hitting
 	 * between the counter readout and the comparator write can
 	 * move us behind that point easily. Now instead of reading
 	 * the compare register back several times, we make the ETIME
 	 * decision based on the following: Return ETIME if the
-	 * counter value after the write is less than HPET_MIN_CYCLES
+	 * counter value after the write is less than 8 HPET cycles
 	 * away from the event or if the counter is already ahead of
-	 * the event. The minimum programming delta for the generic
-	 * clockevents code is set to 1.5 * HPET_MIN_CYCLES.
+	 * the event.
 	 */
 	res = (s32)(cnt - hpet_readl(HPET_COUNTER));
 
-	return res < HPET_MIN_CYCLES ? -ETIME : 0;
+	return res < 8 ? -ETIME : 0;
 }
 
 static void hpet_legacy_set_mode(enum clock_event_mode mode,
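
To make the wraparound-safe comparison concrete, here is a small standalone sketch (my illustration, not code from the commit) of the decision the restored return statement makes:

#include <stdio.h>
#include <stdint.h>

/* Toy model of the ETIME check in hpet_next_event(): 'cnt' is the
 * comparator value just programmed, 'counter' is the free-running HPET
 * counter read back afterwards. The signed cast makes the comparison
 * safe across the 32-bit counter wraparound. */
static int hpet_would_etime(uint32_t cnt, uint32_t counter)
{
	int32_t res = (int32_t)(cnt - counter);

	/* Negative: the counter already passed the comparator (event
	 * missed). 0..7: too close -- the internal write delay of up
	 * to two HPET cycles may make the match fire late or never. */
	return res < 8;
}

int main(void)
{
	/* Comfortable margin: no ETIME. */
	printf("%d\n", hpet_would_etime(1000, 900));	/* 0 */
	/* Counter overtook the comparator across the 2^32 wrap:
	 * the subtraction yields a negative s32, so ETIME. */
	printf("%d\n", hpet_would_etime(0xFFFFFFF0u, 0x00000010u));	/* 1 */
	return 0;
}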
3 changes: 1 addition & 2 deletions trunk/arch/x86/kernel/xsave.c
@@ -394,8 +394,7 @@ static void __init setup_xstate_init(void)
 	 * Setup init_xstate_buf to represent the init state of
 	 * all the features managed by the xsave
 	 */
-	init_xstate_buf = alloc_bootmem_align(xstate_size,
-					      __alignof__(struct xsave_struct));
+	init_xstate_buf = alloc_bootmem(xstate_size);
 	init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT;
 
 	clts();
4 changes: 2 additions & 2 deletions trunk/arch/x86/vdso/Makefile
@@ -25,7 +25,7 @@ targets += vdso.so vdso.so.dbg vdso.lds $(vobjs-y)
 
 export CPPFLAGS_vdso.lds += -P -C
 
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+VDSO_LDFLAGS_vdso.lds = -m elf_x86_64 -Wl,-soname=linux-vdso.so.1 \
 			-Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
 
 $(obj)/vdso.o: $(src)/vdso.S $(obj)/vdso.so
@@ -69,7 +69,7 @@ vdso32.so-$(VDSO32-y) += sysenter
 vdso32-images = $(vdso32.so-y:%=vdso32-%.so)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf_i386 -Wl,-soname=linux-gate.so.1
 
 # This makes sure the $(obj) subdirectory exists even though vdso32/
 # is not a kbuild sub-make subdirectory.
5 changes: 0 additions & 5 deletions trunk/drivers/pci/dmar.c
@@ -1417,11 +1417,6 @@ int __init enable_drhd_fault_handling(void)
 			       (unsigned long long)drhd->reg_base_addr, ret);
 			return -1;
 		}
-
-		/*
-		 * Clear any previous faults.
-		 */
-		dmar_fault(iommu->irq, iommu);
 	}
 
 	return 0;
23 changes: 0 additions & 23 deletions trunk/drivers/pci/quirks.c
@@ -2764,29 +2764,6 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_m
 DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
 #endif /*CONFIG_MMC_RICOH_MMC*/
 
-#if defined(CONFIG_DMAR) || defined(CONFIG_INTR_REMAP)
-#define VTUNCERRMSK_REG	0x1ac
-#define VTD_MSK_SPEC_ERRORS	(1 << 31)
-/*
- * This is a quirk for masking vt-d spec defined errors to platform error
- * handling logic. With out this, platforms using Intel 7500, 5500 chipsets
- * (and the derivative chipsets like X58 etc) seem to generate NMI/SMI (based
- * on the RAS config settings of the platform) when a vt-d fault happens.
- * The resulting SMI caused the system to hang.
- *
- * VT-d spec related errors are already handled by the VT-d OS code, so no
- * need to report the same error through other channels.
- */
-static void vtd_mask_spec_errors(struct pci_dev *dev)
-{
-	u32 word;
-
-	pci_read_config_dword(dev, VTUNCERRMSK_REG, &word);
-	pci_write_config_dword(dev, VTUNCERRMSK_REG, word | VTD_MSK_SPEC_ERRORS);
-}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x342e, vtd_mask_spec_errors);
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x3c28, vtd_mask_spec_errors);
-#endif
 
 static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
 		struct pci_fixup *end)
2 changes: 0 additions & 2 deletions trunk/include/linux/bootmem.h
@@ -105,8 +105,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_align(x, align) \
-	__alloc_bootmem(x, align, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages(x) \
1 change: 1 addition & 0 deletions trunk/include/linux/perf_event.h
@@ -887,6 +887,7 @@ struct perf_cpu_context {
 	int				exclusive;
 	struct list_head		rotation_list;
 	int				jiffies_interval;
+	struct pmu			*active_pmu;
 };
 
 struct perf_output_handle {
35 changes: 29 additions & 6 deletions trunk/kernel/perf_event.c
@@ -3824,6 +3824,8 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->active_pmu != pmu)
+			goto next;
 		perf_event_task_ctx(&cpuctx->ctx, task_event);
 
 		ctx = task_event->task_ctx;
@@ -3959,6 +3961,8 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->active_pmu != pmu)
+			goto next;
 		perf_event_comm_ctx(&cpuctx->ctx, comm_event);
 
 		ctxn = pmu->task_ctx_nr;
@@ -4144,6 +4148,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
 	rcu_read_lock();
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
 		cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
+		if (cpuctx->active_pmu != pmu)
+			goto next;
 		perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
 					vma->vm_flags & VM_EXEC);
 
@@ -5145,20 +5151,36 @@ static void *find_pmu_context(int ctxn)
 	return NULL;
 }
 
-static void free_pmu_context(void * __percpu cpu_context)
+static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
 {
-	struct pmu *pmu;
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct perf_cpu_context *cpuctx;
+
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+
+		if (cpuctx->active_pmu == old_pmu)
+			cpuctx->active_pmu = pmu;
+	}
+}
+
+static void free_pmu_context(struct pmu *pmu)
+{
+	struct pmu *i;
 
 	mutex_lock(&pmus_lock);
 	/*
 	 * Like a real lame refcount.
 	 */
-	list_for_each_entry(pmu, &pmus, entry) {
-		if (pmu->pmu_cpu_context == cpu_context)
+	list_for_each_entry(i, &pmus, entry) {
+		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
+			update_pmu_context(i, pmu);
 			goto out;
+		}
 	}
 
-	free_percpu(cpu_context);
+	free_percpu(pmu->pmu_cpu_context);
 out:
 	mutex_unlock(&pmus_lock);
 }
@@ -5190,6 +5212,7 @@ int perf_pmu_register(struct pmu *pmu)
 		cpuctx->ctx.pmu = pmu;
 		cpuctx->jiffies_interval = 1;
 		INIT_LIST_HEAD(&cpuctx->rotation_list);
+		cpuctx->active_pmu = pmu;
 	}
 
 got_cpu_context:
@@ -5241,7 +5264,7 @@ void perf_pmu_unregister(struct pmu *pmu)
 	synchronize_rcu();
 
 	free_percpu(pmu->pmu_disable_count);
-	free_pmu_context(pmu->pmu_cpu_context);
+	free_pmu_context(pmu);
}
 
 struct pmu *perf_init_event(struct perf_event *event)
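
The pattern behind these hunks: several pmus can share one per-cpu context, and without a designated owner each sharer would deliver the same side-band event. Below is a standalone sketch of that dedup idea using my own toy types (not the kernel's), to show why marking one active_pmu per shared context makes each context visited exactly once:

#include <stdio.h>

struct pmu;
struct cpu_context { struct pmu *active_pmu; };
struct pmu { const char *name; struct cpu_context *ctx; };

/* Walk all pmus, but let only the context's designated owner deliver. */
static void deliver_event(struct pmu *pmus[], int n)
{
	for (int i = 0; i < n; i++) {
		struct cpu_context *ctx = pmus[i]->ctx;

		if (ctx->active_pmu != pmus[i])
			continue;	/* shared context: someone else owns it */
		printf("event delivered via %s\n", pmus[i]->name);
	}
}

int main(void)
{
	struct cpu_context shared = { 0 };
	struct pmu sw = { "software", &shared };
	struct pmu tp = { "tracepoint", &shared };	/* shares sw's context */
	struct pmu *pmus[] = { &sw, &tp };

	shared.active_pmu = &sw;	/* first registered pmu owns it */
	deliver_event(pmus, 2);		/* prints once, not twice */
	return 0;
}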
