Commit ec50596

---
r: 210373
b: refs/heads/master
c: febc88c
h: refs/heads/master
i:
  210371: 0d3a6f3
v: v3
Thomas Gleixner committed Sep 8, 2010
1 parent db2d855 commit ec50596
Showing 41 changed files with 177 additions and 260 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 1faa6ec8ccbde8c4f0237be80473a4294ebf8289
refs/heads/master: febc88c5948f81114f64c3412011d695aecae233
6 changes: 0 additions & 6 deletions trunk/Documentation/DocBook/kernel-locking.tmpl
@@ -1961,12 +1961,6 @@ machines due to caching.
</sect1>
</chapter>

<chapter id="apiref">
<title>Mutex API reference</title>
!Iinclude/linux/mutex.h
!Ekernel/mutex.c
</chapter>

<chapter id="references">
<title>Further reading</title>

3 changes: 1 addition & 2 deletions trunk/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
mutex semantics are sufficient for your code, then there are a couple
of advantages of mutexes:

- 'struct mutex' is smaller on most architectures: E.g. on x86,
- 'struct mutex' is smaller on most architectures: .e.g on x86,
'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
A smaller structure size means less RAM footprint, and better
CPU-cache utilization.
@@ -136,4 +136,3 @@ the APIs of 'struct mutex' have been streamlined:
void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
int mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
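
The lockdep-nested variants listed above take an explicit subclass argument. Below is a minimal sketch of how they are typically used, assuming two instances of the same structure locked in an order guaranteed by the caller; the struct and function names are hypothetical, not part of this commit.

#include <linux/mutex.h>
#include <linux/lockdep.h>

/* Hypothetical example: two instances of the same object type, whose
 * mutexes share one lock class because they are initialized from the
 * same mutex_init() call site. */
struct item {
	struct mutex lock;
	int value;
};

/* Caller must guarantee a consistent src/dst ordering to avoid ABBA
 * deadlocks between concurrent transfers. */
static void transfer(struct item *src, struct item *dst)
{
	mutex_lock(&src->lock);
	/* The second lock belongs to the same class; tell lockdep this
	 * is a deliberate nested acquisition, not a self-deadlock. */
	mutex_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);

	dst->value += src->value;
	src->value = 0;

	mutex_unlock(&dst->lock);
	mutex_unlock(&src->lock);
}
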
2 changes: 0 additions & 2 deletions trunk/MAINTAINERS
@@ -4810,7 +4810,6 @@ RCUTORTURE MODULE
M: Josh Triplett <josh@freedesktop.org>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
S: Supported
T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcutorture.c

@@ -4835,7 +4834,6 @@ M: Dipankar Sarma <dipankar@in.ibm.com>
M: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
W: http://www.rdrop.com/users/paulmck/rclock/
S: Supported
T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/
F: include/linux/rcu*
F: include/linux/srcu*
4 changes: 2 additions & 2 deletions trunk/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

void __iomem *
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);

void
iounmap_atomic(void __iomem *kvaddr, enum km_type type);
iounmap_atomic(void *kvaddr, enum km_type type);

int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
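
For context, a minimal sketch of how these atomic iomap helpers are called; the helper below is hypothetical and only illustrates the calling convention. Note that with this change the mapping is returned as a plain void * rather than void __iomem *.

#include <linux/types.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <asm/iomap.h>

/* Hypothetical helper: temporarily map one page of device memory by
 * page frame number, write a 32-bit register, and drop the mapping.
 * KM_USER0 is an arbitrary atomic-kmap slot chosen for illustration. */
static void write_dev_reg(unsigned long pfn, u32 val)
{
	void *vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL);

	writel(val, vaddr);		/* still use MMIO accessors for the store */
	iounmap_atomic(vaddr, KM_USER0);
}
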
4 changes: 2 additions & 2 deletions trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
err = -ENOMEM;
goto out;
}
if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
kfree(b);
err = -ENOMEM;
goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
cpumask_setall(b->cpus);
#else
cpumask_set_cpu(cpu, b->cpus);
cpumask_copy(b->cpus, c->llc_shared_map);
#endif

per_cpu(threshold_banks, cpu)[bank] = b;
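
The hunk above trades zalloc_cpumask_var() for alloc_cpumask_var(): the former returns a zeroed mask, while the latter may hand back uninitialized storage when CONFIG_CPUMASK_OFFSTACK=y. A small illustrative sketch of the allocation pattern follows; the function is hypothetical, not part of this commit.

#include <linux/cpumask.h>
#include <linux/slab.h>

/* Hypothetical example: allocate a mask and mark a single CPU in it. */
static int track_one_cpu(unsigned int cpu, cpumask_var_t *mask)
{
	if (!zalloc_cpumask_var(mask, GFP_KERNEL))	/* zeroed on success */
		return -ENOMEM;

	/* With alloc_cpumask_var() an explicit cpumask_clear(*mask)
	 * would be required here before setting any bits. */
	cpumask_set_cpu(cpu, *mask);
	return 0;
}
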
9 changes: 4 additions & 5 deletions trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,11 +202,10 @@ static int therm_throt_process(bool new_event, int event, int level)

#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
unsigned int cpu)
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
{
int err;
struct cpuinfo_x86 *c = &cpu_data(cpu);
struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
if (err)
@@ -252,7 +251,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
mutex_lock(&therm_cpu_lock);
err = thermal_throttle_add_dev(sys_dev, cpu);
err = thermal_throttle_add_dev(sys_dev);
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
@@ -288,7 +287,7 @@ static __init int thermal_throttle_init_device(void)
#endif
/* connect live CPUs to sysfs */
for_each_online_cpu(cpu) {
err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
WARN_ON(err);
}
#ifdef CONFIG_HOTPLUG_CPU
59 changes: 13 additions & 46 deletions trunk/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
/*
* event overflow
*/
handled++;
handled = 1;
data.period = event->hw.last_period;

if (!x86_perf_event_set_period(event))
@@ -1200,20 +1200,12 @@ void perf_events_lapic_init(void)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}

struct pmu_nmi_state {
unsigned int marked;
int handled;
};

static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
unsigned int this_nmi;
int handled;
struct pt_regs *regs;

if (!atomic_read(&active_events))
return NOTIFY_DONE;
@@ -1222,47 +1214,22 @@ perf_event_nmi_handler(struct notifier_block *self,
case DIE_NMI:
case DIE_NMI_IPI:
break;
case DIE_NMIUNKNOWN:
this_nmi = percpu_read(irq_stat.__nmi_count);
if (this_nmi != __get_cpu_var(pmu_nmi).marked)
/* let the kernel handle the unknown nmi */
return NOTIFY_DONE;
/*
* This one is a PMU back-to-back nmi. Two events
* trigger 'simultaneously' raising two back-to-back
* NMIs. If the first NMI handles both, the latter
* will be empty and daze the CPU. So, we drop it to
* avoid false-positive 'unknown nmi' messages.
*/
return NOTIFY_STOP;

default:
return NOTIFY_DONE;
}

apic_write(APIC_LVTPC, APIC_DM_NMI);

handled = x86_pmu.handle_irq(args->regs);
if (!handled)
return NOTIFY_DONE;
regs = args->regs;

this_nmi = percpu_read(irq_stat.__nmi_count);
if ((handled > 1) ||
/* the next nmi could be a back-to-back nmi */
((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
(__get_cpu_var(pmu_nmi).handled > 1))) {
/*
* We could have two subsequent back-to-back nmis: The
* first handles more than one counter, the 2nd
* handles only one counter and the 3rd handles no
* counter.
*
* This is the 2nd nmi because the previous was
* handling more than one counter. We will mark the
* next (3rd) and then drop it if unhandled.
*/
__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
__get_cpu_var(pmu_nmi).handled = handled;
}
apic_write(APIC_LVTPC, APIC_DM_NMI);
/*
* Can't rely on the handled return value to say it was our NMI, two
* events could trigger 'simultaneously' raising two back-to-back NMIs.
*
* If the first NMI handles both, the latter will be empty and daze
* the CPU.
*/
x86_pmu.handle_irq(regs);

return NOTIFY_STOP;
}
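
For reference, a skeleton of the die-notifier convention relied on above (illustrative only, not taken from this commit): returning NOTIFY_DONE lets other handlers see the NMI, while NOTIFY_STOP claims it and ends the notifier chain.

#include <linux/notifier.h>
#include <linux/kdebug.h>

/* Hypothetical NMI die-notifier skeleton. */
static int example_nmi_handler(struct notifier_block *self,
			       unsigned long cmd, void *data)
{
	if (cmd != DIE_NMI && cmd != DIE_NMI_IPI)
		return NOTIFY_DONE;	/* not ours, let others look at it */

	/* A real handler would cast data to struct die_args * and
	 * service its hardware using args->regs here. */

	return NOTIFY_STOP;		/* this NMI has been consumed */
}

static struct notifier_block example_nmi_nb = {
	.notifier_call = example_nmi_handler,
};
/* registered during init with register_die_notifier(&example_nmi_nb) */
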
15 changes: 6 additions & 9 deletions trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,8 +712,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int bit, loops;
u64 status;
int handled = 0;
u64 ack, status;

perf_sample_data_init(&data, 0);

@@ -729,7 +728,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)

loops = 0;
again:
intel_pmu_ack_status(status);
if (++loops > 100) {
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
@@ -738,22 +736,19 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
}

inc_irq_stat(apic_perf_irqs);
ack = status;

intel_pmu_lbr_read();

/*
* PEBS overflow sets bit 62 in the global status register
*/
if (__test_and_clear_bit(62, (unsigned long *)&status)) {
handled++;
if (__test_and_clear_bit(62, (unsigned long *)&status))
x86_pmu.drain_pebs(regs);
}

for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];

handled++;

if (!test_bit(bit, cpuc->active_mask))
continue;

@@ -766,6 +761,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
x86_pmu_stop(event);
}

intel_pmu_ack_status(ack);

/*
* Repeat if there is more work to be done:
*/
@@ -775,7 +772,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)

done:
intel_pmu_enable_all(0);
return handled;
return 1;
}

static struct event_constraint *
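
As a side note, the loop above walks the overflow status word bit by bit. A minimal, self-contained sketch of that pattern (the function and message are illustrative only):

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical example: report every counter whose overflow bit is set
 * in a 64-bit status word. */
static void report_overflows(u64 status)
{
	int bit;

	for_each_set_bit(bit, (unsigned long *)&status, 64)
		printk(KERN_DEBUG "counter %d overflowed\n", bit);
}
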
2 changes: 1 addition & 1 deletion trunk/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
inc_irq_stat(apic_perf_irqs);
}

return handled;
return handled > 0;
}

/*
3 changes: 2 additions & 1 deletion trunk/arch/x86/kernel/trampoline.c
@@ -45,7 +45,8 @@ void __init setup_trampoline_page_table(void)
/* Copy kernel address range */
clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
KERNEL_PGD_PTRS);
min_t(unsigned long, KERNEL_PGD_PTRS,
KERNEL_PGD_BOUNDARY));

/* Initialize low mappings */
clone_pgd_range(trampoline_pg_dir,
6 changes: 3 additions & 3 deletions trunk/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
/*
* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
void __iomem *
void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
/*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS;

return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
return kmap_atomic_prot_pfn(pfn, type, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);

void
iounmap_atomic(void __iomem *kvaddr, enum km_type type)
iounmap_atomic(void *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
22 changes: 5 additions & 17 deletions trunk/arch/x86/oprofile/nmi_int.c
@@ -568,13 +568,8 @@ static int __init init_sysfs(void)
int error;

error = sysdev_class_register(&oprofile_sysclass);
if (error)
return error;

error = sysdev_register(&device_oprofile);
if (error)
sysdev_class_unregister(&oprofile_sysclass);

if (!error)
error = sysdev_register(&device_oprofile);
return error;
}

@@ -585,10 +580,8 @@ static void exit_sysfs(void)
}

#else

static inline int init_sysfs(void) { return 0; }
static inline void exit_sysfs(void) { }

#define init_sysfs() do { } while (0)
#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */

static int __init p4_init(char **cpu_type)
@@ -702,8 +695,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
char *cpu_type = NULL;
int ret = 0;

using_nmi = 0;

if (!cpu_has_apic)
return -ENODEV;

@@ -783,10 +774,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)

mux_init(ops);

ret = init_sysfs();
if (ret)
return ret;

init_sysfs();
using_nmi = 1;
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
return 0;
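
The hunk above swaps static inline stubs for empty do { } while (0) macros in the !CONFIG_PM case. For reference, a sketch of the two no-op fallback styles; the CONFIG symbol and function names below are hypothetical, not part of this commit.

/* Hypothetical config-dependent helpers: the inline-stub form keeps
 * type checking and a usable return value when the feature is compiled
 * out; the macro form provides neither. */
#ifdef CONFIG_EXAMPLE_FEATURE
int feature_init(void);
void feature_exit(void);
#else
static inline int feature_init(void) { return 0; }	/* type-checked no-op */
static inline void feature_exit(void) { }

/* alternative macro style:
 * #define feature_init() do { } while (0)
 * #define feature_exit() do { } while (0)
 */
#endif
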