diff --git a/[refs] b/[refs]
index d8653cdeb9fe..205c67002247 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: 1faa6ec8ccbde8c4f0237be80473a4294ebf8289
+refs/heads/master: febc88c5948f81114f64c3412011d695aecae233
diff --git a/trunk/Documentation/DocBook/kernel-locking.tmpl b/trunk/Documentation/DocBook/kernel-locking.tmpl
index a0d479d1e1dd..0b1a3f97f285 100644
--- a/trunk/Documentation/DocBook/kernel-locking.tmpl
+++ b/trunk/Documentation/DocBook/kernel-locking.tmpl
@@ -1961,12 +1961,6 @@ machines due to caching.
-
- Mutex API reference
-!Iinclude/linux/mutex.h
-!Ekernel/mutex.c
-
-
Further reading
diff --git a/trunk/Documentation/mutex-design.txt b/trunk/Documentation/mutex-design.txt
index 38c10fd7f411..c91ccc0720fa 100644
--- a/trunk/Documentation/mutex-design.txt
+++ b/trunk/Documentation/mutex-design.txt
@@ -9,7 +9,7 @@ firstly, there's nothing wrong with semaphores. But if the simpler
mutex semantics are sufficient for your code, then there are a couple
of advantages of mutexes:
- - 'struct mutex' is smaller on most architectures: E.g. on x86,
+ - 'struct mutex' is smaller on most architectures: .e.g on x86,
'struct semaphore' is 20 bytes, 'struct mutex' is 16 bytes.
A smaller structure size means less RAM footprint, and better
CPU-cache utilization.
@@ -136,4 +136,3 @@ the APIs of 'struct mutex' have been streamlined:
void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
int mutex_lock_interruptible_nested(struct mutex *lock,
unsigned int subclass);
- int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
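The atomic_dec_and_mutex_lock() prototype dropped from the API list above is the usual helper for "release the last reference under a lock" teardown. A minimal hedged sketch of that pattern follows; struct my_dev, refcnt and dev_list_lock are purely illustrative names, not anything from this patch:

```c
#include <linux/mutex.h>
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>

static DEFINE_MUTEX(dev_list_lock);	/* illustrative list lock */

struct my_dev {
	atomic_t refcnt;
	struct list_head node;
};

static void my_dev_put(struct my_dev *dev)
{
	/* Returns nonzero *with dev_list_lock held* only when the count
	 * dropped to zero; otherwise the lock is never taken. */
	if (atomic_dec_and_mutex_lock(&dev->refcnt, &dev_list_lock)) {
		list_del(&dev->node);
		mutex_unlock(&dev_list_lock);
		kfree(dev);
	}
}
```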
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 7e189b57c8b5..087912aa09bd 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -4810,7 +4810,6 @@ RCUTORTURE MODULE
M: Josh Triplett
M: "Paul E. McKenney"
S: Supported
-T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/torture.txt
F: kernel/rcutorture.c
@@ -4835,7 +4834,6 @@ M: Dipankar Sarma
M: "Paul E. McKenney"
W: http://www.rdrop.com/users/paulmck/rclock/
S: Supported
-T: git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu.git
F: Documentation/RCU/
F: include/linux/rcu*
F: include/linux/srcu*
diff --git a/trunk/arch/x86/include/asm/iomap.h b/trunk/arch/x86/include/asm/iomap.h
index c4191b3b7056..f35eb45d6576 100644
--- a/trunk/arch/x86/include/asm/iomap.h
+++ b/trunk/arch/x86/include/asm/iomap.h
@@ -26,11 +26,11 @@
#include
#include
-void __iomem *
+void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type);
+iounmap_atomic(void *kvaddr, enum km_type type);
int
iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 5e975298fa81..224392d8fe8c 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -530,7 +530,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
err = -ENOMEM;
goto out;
}
- if (!zalloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+ if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
kfree(b);
err = -ENOMEM;
goto out;
@@ -543,7 +543,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
#ifndef CONFIG_SMP
cpumask_setall(b->cpus);
#else
- cpumask_set_cpu(cpu, b->cpus);
+ cpumask_copy(b->cpus, c->llc_shared_map);
#endif
per_cpu(threshold_banks, cpu)[bank] = b;
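The hunk above swaps zalloc_cpumask_var() back to alloc_cpumask_var() and per-CPU bit setting back to copying the shared-cache mask. The detail that matters is that with CONFIG_CPUMASK_OFFSTACK the non-"z" variant returns an uninitialized mask. A hedged sketch, with names that are illustrative rather than taken from mce_amd.c:

```c
#include <linux/cpumask.h>
#include <linux/gfp.h>

static int example_threshold_init(unsigned int cpu)
{
	cpumask_var_t mask;

	/* zalloc_cpumask_var() zeroes the (possibly heap-allocated) mask;
	 * alloc_cpumask_var() does not, so stale bits could make a bank
	 * look shared with unrelated CPUs. */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, mask);	/* exactly one CPU tracked */
	/* ... use mask ... */
	free_cpumask_var(mask);
	return 0;
}
```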
diff --git a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
index d9368eeda309..c2a8b26d4fea 100644
--- a/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/trunk/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -202,11 +202,10 @@ static int therm_throt_process(bool new_event, int event, int level)
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
- unsigned int cpu)
+static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev)
{
int err;
- struct cpuinfo_x86 *c = &cpu_data(cpu);
+ struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());
err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
if (err)
@@ -252,7 +251,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
mutex_lock(&therm_cpu_lock);
- err = thermal_throttle_add_dev(sys_dev, cpu);
+ err = thermal_throttle_add_dev(sys_dev);
mutex_unlock(&therm_cpu_lock);
WARN_ON(err);
break;
@@ -288,7 +287,7 @@ static __init int thermal_throttle_init_device(void)
#endif
/* connect live CPUs to sysfs */
for_each_online_cpu(cpu) {
- err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
+ err = thermal_throttle_add_dev(get_cpu_sysdev(cpu));
WARN_ON(err);
}
#ifdef CONFIG_HOTPLUG_CPU
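For context on why the dropped cpu parameter mattered: a CPU hotplug notifier executes on an already-online CPU, so smp_processor_id() there names the CPU running the callback rather than the CPU the event is about. A hedged sketch of the usual pattern, with illustrative names rather than the therm_throt code:

```c
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/init.h>

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;	/* CPU the event is about */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* Use 'cpu' here, not smp_processor_id(): this callback
		 * runs on some other, already-online CPU. */
		pr_info("setting up per-cpu state for CPU %u\n", cpu);
		break;
	}
	return NOTIFY_OK;
}
```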
diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c
index 3efdf2870a35..f2da20fda02d 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event.c
@@ -1154,7 +1154,7 @@ static int x86_pmu_handle_irq(struct pt_regs *regs)
/*
* event overflow
*/
- handled++;
+ handled = 1;
data.period = event->hw.last_period;
if (!x86_perf_event_set_period(event))
@@ -1200,20 +1200,12 @@ void perf_events_lapic_init(void)
apic_write(APIC_LVTPC, APIC_DM_NMI);
}
-struct pmu_nmi_state {
- unsigned int marked;
- int handled;
-};
-
-static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
-
static int __kprobes
perf_event_nmi_handler(struct notifier_block *self,
unsigned long cmd, void *__args)
{
struct die_args *args = __args;
- unsigned int this_nmi;
- int handled;
+ struct pt_regs *regs;
if (!atomic_read(&active_events))
return NOTIFY_DONE;
@@ -1222,47 +1214,22 @@ perf_event_nmi_handler(struct notifier_block *self,
case DIE_NMI:
case DIE_NMI_IPI:
break;
- case DIE_NMIUNKNOWN:
- this_nmi = percpu_read(irq_stat.__nmi_count);
- if (this_nmi != __get_cpu_var(pmu_nmi).marked)
- /* let the kernel handle the unknown nmi */
- return NOTIFY_DONE;
- /*
- * This one is a PMU back-to-back nmi. Two events
- * trigger 'simultaneously' raising two back-to-back
- * NMIs. If the first NMI handles both, the latter
- * will be empty and daze the CPU. So, we drop it to
- * avoid false-positive 'unknown nmi' messages.
- */
- return NOTIFY_STOP;
+
default:
return NOTIFY_DONE;
}
- apic_write(APIC_LVTPC, APIC_DM_NMI);
-
- handled = x86_pmu.handle_irq(args->regs);
- if (!handled)
- return NOTIFY_DONE;
+ regs = args->regs;
- this_nmi = percpu_read(irq_stat.__nmi_count);
- if ((handled > 1) ||
- /* the next nmi could be a back-to-back nmi */
- ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
- (__get_cpu_var(pmu_nmi).handled > 1))) {
- /*
- * We could have two subsequent back-to-back nmis: The
- * first handles more than one counter, the 2nd
- * handles only one counter and the 3rd handles no
- * counter.
- *
- * This is the 2nd nmi because the previous was
- * handling more than one counter. We will mark the
- * next (3rd) and then drop it if unhandled.
- */
- __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
- __get_cpu_var(pmu_nmi).handled = handled;
- }
+ apic_write(APIC_LVTPC, APIC_DM_NMI);
+ /*
+ * Can't rely on the handled return value to say it was our NMI, two
+ * events could trigger 'simultaneously' raising two back-to-back NMIs.
+ *
+ * If the first NMI handles both, the latter will be empty and daze
+ * the CPU.
+ */
+ x86_pmu.handle_irq(regs);
return NOTIFY_STOP;
}
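The block removed above implements back-to-back NMI suppression. The idea, as a hedged and simplified sketch: if one PMI services more than one counter, the PMU may already have latched a second NMI that will find nothing to do, so that NMI should be swallowed instead of reported as unknown. Field names mirror the deleted struct pmu_nmi_state; the helper functions are illustrative only:

```c
#include <linux/percpu.h>

struct pmu_nmi_state {
	unsigned int marked;	/* NMI number expected to arrive "empty" */
	int	     handled;	/* events consumed by the previous NMI   */
};
static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);

/* After handling a PMI: more than one overflow means the next NMI may be
 * the second half of a back-to-back pair, so remember its number. */
static void pmu_mark_possible_backtoback(unsigned int this_nmi, int handled)
{
	if (handled > 1) {
		__get_cpu_var(pmu_nmi).marked = this_nmi + 1;
		__get_cpu_var(pmu_nmi).handled = handled;
	}
}

/* On DIE_NMIUNKNOWN: swallow the NMI only if it is the one we marked,
 * avoiding false "dazed and confused" messages for everything else. */
static bool pmu_swallow_unknown_nmi(unsigned int this_nmi)
{
	return __get_cpu_var(pmu_nmi).marked == this_nmi;
}
```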
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
index ee05c90012d2..d8d86d014008 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -712,8 +712,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
struct perf_sample_data data;
struct cpu_hw_events *cpuc;
int bit, loops;
- u64 status;
- int handled = 0;
+ u64 ack, status;
perf_sample_data_init(&data, 0);
@@ -729,7 +728,6 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
loops = 0;
again:
- intel_pmu_ack_status(status);
if (++loops > 100) {
WARN_ONCE(1, "perfevents: irq loop stuck!\n");
perf_event_print_debug();
@@ -738,22 +736,19 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
}
inc_irq_stat(apic_perf_irqs);
+ ack = status;
intel_pmu_lbr_read();
/*
* PEBS overflow sets bit 62 in the global status register
*/
- if (__test_and_clear_bit(62, (unsigned long *)&status)) {
- handled++;
+ if (__test_and_clear_bit(62, (unsigned long *)&status))
x86_pmu.drain_pebs(regs);
- }
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit];
- handled++;
-
if (!test_bit(bit, cpuc->active_mask))
continue;
@@ -766,6 +761,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
x86_pmu_stop(event);
}
+ intel_pmu_ack_status(ack);
+
/*
* Repeat if there is more work to be done:
*/
@@ -775,7 +772,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
done:
intel_pmu_enable_all(0);
- return handled;
+ return 1;
}
static struct event_constraint *
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_p4.c b/trunk/arch/x86/kernel/cpu/perf_event_p4.c
index b560db3305be..7e578e9cc58b 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_p4.c
@@ -692,7 +692,7 @@ static int p4_pmu_handle_irq(struct pt_regs *regs)
inc_irq_stat(apic_perf_irqs);
}
- return handled;
+ return handled > 0;
}
/*
diff --git a/trunk/arch/x86/kernel/trampoline.c b/trunk/arch/x86/kernel/trampoline.c
index e2a595257390..a874495b3673 100644
--- a/trunk/arch/x86/kernel/trampoline.c
+++ b/trunk/arch/x86/kernel/trampoline.c
@@ -45,7 +45,8 @@ void __init setup_trampoline_page_table(void)
/* Copy kernel address range */
clone_pgd_range(trampoline_pg_dir + KERNEL_PGD_BOUNDARY,
swapper_pg_dir + KERNEL_PGD_BOUNDARY,
- KERNEL_PGD_PTRS);
+ min_t(unsigned long, KERNEL_PGD_PTRS,
+ KERNEL_PGD_BOUNDARY));
/* Initialize low mappings */
clone_pgd_range(trampoline_pg_dir,
diff --git a/trunk/arch/x86/mm/iomap_32.c b/trunk/arch/x86/mm/iomap_32.c
index 72fc70cf6184..84e236ce76ba 100644
--- a/trunk/arch/x86/mm/iomap_32.c
+++ b/trunk/arch/x86/mm/iomap_32.c
@@ -74,7 +74,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
/*
* Map 'pfn' using fixed map 'type' and protections 'prot'
*/
-void __iomem *
+void *
iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
{
/*
@@ -86,12 +86,12 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
prot = PAGE_KERNEL_UC_MINUS;
- return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, type, prot);
+ return kmap_atomic_prot_pfn(pfn, type, prot);
}
EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
void
-iounmap_atomic(void __iomem *kvaddr, enum km_type type)
+iounmap_atomic(void *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
diff --git a/trunk/arch/x86/oprofile/nmi_int.c b/trunk/arch/x86/oprofile/nmi_int.c
index cfe4faabb0f6..f6b48f6c5951 100644
--- a/trunk/arch/x86/oprofile/nmi_int.c
+++ b/trunk/arch/x86/oprofile/nmi_int.c
@@ -568,13 +568,8 @@ static int __init init_sysfs(void)
int error;
error = sysdev_class_register(&oprofile_sysclass);
- if (error)
- return error;
-
- error = sysdev_register(&device_oprofile);
- if (error)
- sysdev_class_unregister(&oprofile_sysclass);
-
+ if (!error)
+ error = sysdev_register(&device_oprofile);
return error;
}
@@ -585,10 +580,8 @@ static void exit_sysfs(void)
}
#else
-
-static inline int init_sysfs(void) { return 0; }
-static inline void exit_sysfs(void) { }
-
+#define init_sysfs() do { } while (0)
+#define exit_sysfs() do { } while (0)
#endif /* CONFIG_PM */
static int __init p4_init(char **cpu_type)
@@ -702,8 +695,6 @@ int __init op_nmi_init(struct oprofile_operations *ops)
char *cpu_type = NULL;
int ret = 0;
- using_nmi = 0;
-
if (!cpu_has_apic)
return -ENODEV;
@@ -783,10 +774,7 @@ int __init op_nmi_init(struct oprofile_operations *ops)
mux_init(ops);
- ret = init_sysfs();
- if (ret)
- return ret;
-
+ init_sysfs();
using_nmi = 1;
printk(KERN_INFO "oprofile: using NMI interrupt.\n");
return 0;
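The init_sysfs() lines removed above follow the standard unwind-on-error shape; restated here as a hedged sketch so the intent of the deleted error handling is clear (identifiers are the ones visible in the hunk):

```c
static int __init example_init_sysfs(void)
{
	int error;

	error = sysdev_class_register(&oprofile_sysclass);
	if (error)
		return error;		/* nothing registered yet, nothing to undo */

	error = sysdev_register(&device_oprofile);
	if (error)
		sysdev_class_unregister(&oprofile_sysclass);	/* undo step 1 */

	return error;
}
```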
diff --git a/trunk/drivers/oprofile/buffer_sync.c b/trunk/drivers/oprofile/buffer_sync.c
index b7e755f4178a..a9352b2c7ac4 100644
--- a/trunk/drivers/oprofile/buffer_sync.c
+++ b/trunk/drivers/oprofile/buffer_sync.c
@@ -141,6 +141,16 @@ static struct notifier_block module_load_nb = {
.notifier_call = module_load_notify,
};
+
+static void end_sync(void)
+{
+ end_cpu_work();
+ /* make sure we don't leak task structs */
+ process_task_mortuary();
+ process_task_mortuary();
+}
+
+
int sync_start(void)
{
int err;
@@ -148,7 +158,7 @@ int sync_start(void)
if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
return -ENOMEM;
- mutex_lock(&buffer_mutex);
+ start_cpu_work();
err = task_handoff_register(&task_free_nb);
if (err)
@@ -163,10 +173,7 @@ int sync_start(void)
if (err)
goto out4;
- start_cpu_work();
-
out:
- mutex_unlock(&buffer_mutex);
return err;
out4:
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -175,6 +182,7 @@ int sync_start(void)
out2:
task_handoff_unregister(&task_free_nb);
out1:
+ end_sync();
free_cpumask_var(marked_cpus);
goto out;
}
@@ -182,20 +190,11 @@ int sync_start(void)
void sync_stop(void)
{
- /* flush buffers */
- mutex_lock(&buffer_mutex);
- end_cpu_work();
unregister_module_notifier(&module_load_nb);
profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
task_handoff_unregister(&task_free_nb);
- mutex_unlock(&buffer_mutex);
- flush_scheduled_work();
-
- /* make sure we don't leak task structs */
- process_task_mortuary();
- process_task_mortuary();
-
+ end_sync();
free_cpumask_var(marked_cpus);
}
diff --git a/trunk/drivers/oprofile/cpu_buffer.c b/trunk/drivers/oprofile/cpu_buffer.c
index f179ac2ea801..219f79e2210a 100644
--- a/trunk/drivers/oprofile/cpu_buffer.c
+++ b/trunk/drivers/oprofile/cpu_buffer.c
@@ -120,6 +120,8 @@ void end_cpu_work(void)
cancel_delayed_work(&b->work);
}
+
+ flush_scheduled_work();
}
/*
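The flush_scheduled_work() call added above matters because cancel_delayed_work() only removes work whose delay timer has not fired yet; an instance that is already executing keeps running. A hedged sketch of the shutdown ordering, with illustrative names:

```c
#include <linux/workqueue.h>

static void example_stop_cpu_work(struct delayed_work *works, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		cancel_delayed_work(&works[i]);	/* stop future invocations */

	/* Wait for any instance that was already running on the
	 * shared kernel workqueue. */
	flush_scheduled_work();
}
```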
diff --git a/trunk/fs/fuse/dev.c b/trunk/fs/fuse/dev.c
index d367af1514ef..69ad053ffd78 100644
--- a/trunk/fs/fuse/dev.c
+++ b/trunk/fs/fuse/dev.c
@@ -276,7 +276,7 @@ static void flush_bg_queue(struct fuse_conn *fc)
* Called with fc->lock, unlocks it
*/
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
-__releases(fc->lock)
+__releases(&fc->lock)
{
void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
req->end = NULL;
@@ -306,8 +306,8 @@ __releases(fc->lock)
static void wait_answer_interruptible(struct fuse_conn *fc,
struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(&fc->lock)
+__acquires(&fc->lock)
{
if (signal_pending(current))
return;
@@ -325,8 +325,8 @@ static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req)
}
static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(&fc->lock)
+__acquires(&fc->lock)
{
if (!fc->no_interrupt) {
/* Any signal may interrupt this */
@@ -905,8 +905,8 @@ static int request_pending(struct fuse_conn *fc)
/* Wait until a request is available on the pending list */
static void request_wait(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(&fc->lock)
+__acquires(&fc->lock)
{
DECLARE_WAITQUEUE(wait, current);
@@ -934,7 +934,7 @@ __acquires(fc->lock)
*/
static int fuse_read_interrupt(struct fuse_conn *fc, struct fuse_copy_state *cs,
size_t nbytes, struct fuse_req *req)
-__releases(fc->lock)
+__releases(&fc->lock)
{
struct fuse_in_header ih;
struct fuse_interrupt_in arg;
@@ -1720,8 +1720,8 @@ static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
* This function releases and reacquires fc->lock
*/
static void end_requests(struct fuse_conn *fc, struct list_head *head)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(&fc->lock)
+__acquires(&fc->lock)
{
while (!list_empty(head)) {
struct fuse_req *req;
@@ -1744,8 +1744,8 @@ __acquires(fc->lock)
* locked).
*/
static void end_io_requests(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(&fc->lock)
+__acquires(&fc->lock)
{
while (!list_empty(&fc->io)) {
struct fuse_req *req =
@@ -1769,16 +1769,6 @@ __acquires(fc->lock)
}
}
-static void end_queued_requests(struct fuse_conn *fc)
-__releases(fc->lock)
-__acquires(fc->lock)
-{
- fc->max_background = UINT_MAX;
- flush_bg_queue(fc);
- end_requests(fc, &fc->pending);
- end_requests(fc, &fc->processing);
-}
-
/*
* Abort all requests.
*
@@ -1805,7 +1795,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
fc->connected = 0;
fc->blocked = 0;
end_io_requests(fc);
- end_queued_requests(fc);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
wake_up_all(&fc->waitq);
wake_up_all(&fc->blocked_waitq);
kill_fasync(&fc->fasync, SIGIO, POLL_IN);
@@ -1820,9 +1811,8 @@ int fuse_dev_release(struct inode *inode, struct file *file)
if (fc) {
spin_lock(&fc->lock);
fc->connected = 0;
- fc->blocked = 0;
- end_queued_requests(fc);
- wake_up_all(&fc->blocked_waitq);
+ end_requests(fc, &fc->pending);
+ end_requests(fc, &fc->processing);
spin_unlock(&fc->lock);
fuse_conn_put(fc);
}
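The __releases(fc->lock) to __releases(&fc->lock) churn throughout this file is about sparse lock-context annotations: the argument is only an expression that sparse matches against the lock/unlock calls, so it should name the same thing spin_lock() is given. A minimal sketch with illustrative names; build with "make C=1" to have sparse check the pairing:

```c
#include <linux/spinlock.h>
#include <linux/compiler.h>

struct conn {
	spinlock_t lock;
};

/* Entered with conn->lock held, returns with it dropped. */
static void finish_request(struct conn *c)
__releases(&c->lock)
{
	/* ... complete the request ... */
	spin_unlock(&c->lock);
}

/* Takes the lock on behalf of the caller. */
static void lock_conn(struct conn *c)
__acquires(&c->lock)
{
	spin_lock(&c->lock);
}
```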
diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c
index c8224587123f..147c1f71bdb9 100644
--- a/trunk/fs/fuse/file.c
+++ b/trunk/fs/fuse/file.c
@@ -1144,8 +1144,8 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
/* Called under fc->lock, may release and reacquire it */
static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(&fc->lock)
+__acquires(&fc->lock)
{
struct fuse_inode *fi = get_fuse_inode(req->inode);
loff_t size = i_size_read(req->inode);
@@ -1183,8 +1183,8 @@ __acquires(fc->lock)
* Called with fc->lock
*/
void fuse_flush_writepages(struct inode *inode)
-__releases(fc->lock)
-__acquires(fc->lock)
+__releases(&fc->lock)
+__acquires(&fc->lock)
{
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/trunk/fs/nfsd/nfs4state.c b/trunk/fs/nfsd/nfs4state.c
index cf0d2ffb3c84..3dfef0623968 100644
--- a/trunk/fs/nfsd/nfs4state.c
+++ b/trunk/fs/nfsd/nfs4state.c
@@ -440,7 +440,7 @@ test_share(struct nfs4_stateid *stp, struct nfsd4_open *open) {
static int nfs4_access_to_omode(u32 access)
{
- switch (access & NFS4_SHARE_ACCESS_BOTH) {
+ switch (access) {
case NFS4_SHARE_ACCESS_READ:
return O_RDONLY;
case NFS4_SHARE_ACCESS_WRITE:
diff --git a/trunk/include/linux/io-mapping.h b/trunk/include/linux/io-mapping.h
index 7fb592793738..0a6b3d5c490c 100644
--- a/trunk/include/linux/io-mapping.h
+++ b/trunk/include/linux/io-mapping.h
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping)
}
/* Atomic map/unmap */
-static inline void __iomem *
+static inline void *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
}
static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void *vaddr, int slot)
{
iounmap_atomic(vaddr, slot);
}
-static inline void __iomem *
+static inline void *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
resource_size_t phys_addr;
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
}
static inline void
-io_mapping_unmap(void __iomem *vaddr)
+io_mapping_unmap(void *vaddr)
{
iounmap(vaddr);
}
@@ -125,38 +125,38 @@ struct io_mapping;
static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base, unsigned long size)
{
- return (struct io_mapping __force *) ioremap_wc(base, size);
+ return (struct io_mapping *) ioremap_wc(base, size);
}
static inline void
io_mapping_free(struct io_mapping *mapping)
{
- iounmap((void __force __iomem *) mapping);
+ iounmap(mapping);
}
/* Atomic map/unmap */
-static inline void __iomem *
+static inline void *
io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset,
int slot)
{
- return ((char __force __iomem *) mapping) + offset;
+ return ((char *) mapping) + offset;
}
static inline void
-io_mapping_unmap_atomic(void __iomem *vaddr, int slot)
+io_mapping_unmap_atomic(void *vaddr, int slot)
{
}
/* Non-atomic map/unmap */
-static inline void __iomem *
+static inline void *
io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
{
- return ((char __force __iomem *) mapping) + offset;
+ return ((char *) mapping) + offset;
}
static inline void
-io_mapping_unmap(void __iomem *vaddr)
+io_mapping_unmap(void *vaddr)
{
}
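The churn in this header removes the __iomem/__force sparse annotations. __iomem marks pointers into a separate address space that must be accessed through readl()/writel() rather than plain dereference, and __force marks a deliberate cast between spaces. A hedged sketch of how an annotated mapping is consumed, based on the annotated variant being reverted away (names illustrative):

```c
#include <linux/io-mapping.h>
#include <linux/io.h>

static u32 read_mapped_reg(struct io_mapping *mapping, unsigned long offset)
{
	void __iomem *vaddr = io_mapping_map_wc(mapping, offset);
	u32 val = readl(vaddr);	/* plain *vaddr would earn a sparse warning */

	io_mapping_unmap(vaddr);
	return val;
}
```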
diff --git a/trunk/include/linux/mutex.h b/trunk/include/linux/mutex.h
index f363bc8fdc74..878cab4f5fcc 100644
--- a/trunk/include/linux/mutex.h
+++ b/trunk/include/linux/mutex.h
@@ -78,14 +78,6 @@ struct mutex_waiter {
# include
#else
# define __DEBUG_MUTEX_INITIALIZER(lockname)
-/**
- * mutex_init - initialize the mutex
- * @mutex: the mutex to be initialized
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
# define mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
diff --git a/trunk/include/linux/semaphore.h b/trunk/include/linux/semaphore.h
index 7415839ac890..5310d27abd2a 100644
--- a/trunk/include/linux/semaphore.h
+++ b/trunk/include/linux/semaphore.h
@@ -26,6 +26,9 @@ struct semaphore {
.wait_list = LIST_HEAD_INIT((name).wait_list), \
}
+#define DEFINE_SEMAPHORE(name) \
+ struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
+
#define DECLARE_MUTEX(name) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
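DEFINE_SEMAPHORE(name), added above, creates a semaphore initialized to a count of one, i.e. the same binary semaphore the legacy DECLARE_MUTEX(name) produced; the new name avoids confusion with struct mutex. A small hedged usage sketch with an illustrative semaphore name:

```c
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(config_sem);	/* count starts at 1 */

static int do_configure(void)
{
	if (down_interruptible(&config_sem))
		return -EINTR;
	/* ... exclusive section ... */
	up(&config_sem);
	return 0;
}
```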
diff --git a/trunk/kernel/debug/kdb/kdb_bp.c b/trunk/kernel/debug/kdb/kdb_bp.c
index 20059ef4459a..75bd9b3ebbb7 100644
--- a/trunk/kernel/debug/kdb/kdb_bp.c
+++ b/trunk/kernel/debug/kdb/kdb_bp.c
@@ -274,6 +274,7 @@ static int kdb_bp(int argc, const char **argv)
int i, bpno;
kdb_bp_t *bp, *bp_check;
int diag;
+ int free;
char *symname = NULL;
long offset = 0ul;
int nextarg;
@@ -304,6 +305,7 @@ static int kdb_bp(int argc, const char **argv)
/*
* Find an empty bp structure to allocate
*/
+ free = KDB_MAXBPT;
for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
if (bp->bp_free)
break;
diff --git a/trunk/kernel/hrtimer.c b/trunk/kernel/hrtimer.c
index 1decafbb6b1a..ce669174f355 100644
--- a/trunk/kernel/hrtimer.c
+++ b/trunk/kernel/hrtimer.c
@@ -1091,10 +1091,11 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
*/
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
+ struct hrtimer_clock_base *base;
unsigned long flags;
ktime_t rem;
- lock_hrtimer_base(timer, &flags);
+ base = lock_hrtimer_base(timer, &flags);
rem = hrtimer_expires_remaining(timer);
unlock_hrtimer_base(timer, &flags);
diff --git a/trunk/kernel/mutex.c b/trunk/kernel/mutex.c
index 200407c1502f..4c0b7b3e6d2e 100644
--- a/trunk/kernel/mutex.c
+++ b/trunk/kernel/mutex.c
@@ -36,6 +36,15 @@
# include
#endif
+/***
+ * mutex_init - initialize the mutex
+ * @lock: the mutex to be initialized
+ * @key: the lock_class_key for the class; used by mutex lock debugging
+ *
+ * Initialize the mutex to unlocked state.
+ *
+ * It is not allowed to initialize an already locked mutex.
+ */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
@@ -59,7 +68,7 @@ EXPORT_SYMBOL(__mutex_init);
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);
-/**
+/***
* mutex_lock - acquire the mutex
* @lock: the mutex to be acquired
*
@@ -96,7 +105,7 @@ EXPORT_SYMBOL(mutex_lock);
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
-/**
+/***
* mutex_unlock - release the mutex
* @lock: the mutex to be released
*
@@ -355,8 +364,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
-/**
- * mutex_lock_interruptible - acquire the mutex, interruptible
+/***
+ * mutex_lock_interruptible - acquire the mutex, interruptable
* @lock: the mutex to be acquired
*
* Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -447,15 +456,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
return prev == 1;
}
-/**
- * mutex_trylock - try to acquire the mutex, without waiting
+/***
+ * mutex_trylock - try acquire the mutex, without waiting
* @lock: the mutex to be acquired
*
* Try to acquire the mutex atomically. Returns 1 if the mutex
* has been acquired successfully, and 0 on contention.
*
* NOTE: this function follows the spin_trylock() convention, so
- * it is negated from the down_trylock() return values! Be careful
+ * it is negated to the down_trylock() return values! Be careful
* about this when converting semaphore users to mutexes.
*
* This function must not be used in interrupt context. The
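The mutex_trylock() comment touched above warns about a real conversion pitfall: mutex_trylock() follows the spin_trylock() convention and returns 1 when the lock was taken, while the semaphore counterpart down_trylock() returns 0 on success. A hedged side-by-side sketch with illustrative lock names:

```c
#include <linux/mutex.h>
#include <linux/semaphore.h>

static DEFINE_MUTEX(m);
static DEFINE_SEMAPHORE(s);

static void trylock_conventions(void)
{
	if (mutex_trylock(&m)) {	/* nonzero == acquired */
		/* ... */
		mutex_unlock(&m);
	}

	if (down_trylock(&s) == 0) {	/* zero == acquired: opposite sense */
		/* ... */
		up(&s);
	}
}
```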
diff --git a/trunk/kernel/perf_event.c b/trunk/kernel/perf_event.c
index 657555a5f30f..403d1804b198 100644
--- a/trunk/kernel/perf_event.c
+++ b/trunk/kernel/perf_event.c
@@ -402,31 +402,11 @@ static void perf_group_detach(struct perf_event *event)
}
}
-static inline int
-event_filter_match(struct perf_event *event)
-{
- return event->cpu == -1 || event->cpu == smp_processor_id();
-}
-
static void
event_sched_out(struct perf_event *event,
struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx)
{
- u64 delta;
- /*
- * An event which could not be activated because of
- * filter mismatch still needs to have its timings
- * maintained, otherwise bogus information is return
- * via read() for time_enabled, time_running:
- */
- if (event->state == PERF_EVENT_STATE_INACTIVE
- && !event_filter_match(event)) {
- delta = ctx->time - event->tstamp_stopped;
- event->tstamp_running += delta;
- event->tstamp_stopped = ctx->time;
- }
-
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
@@ -452,7 +432,9 @@ group_sched_out(struct perf_event *group_event,
struct perf_event_context *ctx)
{
struct perf_event *event;
- int state = group_event->state;
+
+ if (group_event->state != PERF_EVENT_STATE_ACTIVE)
+ return;
event_sched_out(group_event, cpuctx, ctx);
@@ -462,7 +444,7 @@ group_sched_out(struct perf_event *group_event,
list_for_each_entry(event, &group_event->sibling_list, group_entry)
event_sched_out(event, cpuctx, ctx);
- if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
+ if (group_event->attr.exclusive)
cpuctx->exclusive = 0;
}
diff --git a/trunk/kernel/sched_fair.c b/trunk/kernel/sched_fair.c
index 134f7edb30c6..ab661ebc4895 100644
--- a/trunk/kernel/sched_fair.c
+++ b/trunk/kernel/sched_fair.c
@@ -1313,7 +1313,7 @@ static struct sched_group *
find_idlest_group(struct sched_domain *sd, struct task_struct *p,
int this_cpu, int load_idx)
{
- struct sched_group *idlest = NULL, *group = sd->groups;
+ struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
unsigned long min_load = ULONG_MAX, this_load = 0;
int imbalance = 100 + (sd->imbalance_pct-100)/2;
@@ -1348,6 +1348,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
if (local_group) {
this_load = avg_load;
+ this = group;
} else if (avg_load < min_load) {
min_load = avg_load;
idlest = group;
diff --git a/trunk/kernel/sys.c b/trunk/kernel/sys.c
index 7f5a0cd296a9..e9ad44489828 100644
--- a/trunk/kernel/sys.c
+++ b/trunk/kernel/sys.c
@@ -931,7 +931,6 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
pgid = pid;
if (pgid < 0)
return -EINVAL;
- rcu_read_lock();
/* From this point forward we keep holding onto the tasklist lock
* so that our parent does not change from under us. -DaveM
@@ -985,7 +984,6 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
out:
/* All paths lead to here, thus we are safe. -DaveM */
write_unlock_irq(&tasklist_lock);
- rcu_read_unlock();
return err;
}
diff --git a/trunk/kernel/sysctl.c b/trunk/kernel/sysctl.c
index f88552c6d227..ca38e8e3e907 100644
--- a/trunk/kernel/sysctl.c
+++ b/trunk/kernel/sysctl.c
@@ -1713,7 +1713,10 @@ static __init int sysctl_init(void)
{
sysctl_set_parent(NULL, root_table);
#ifdef CONFIG_SYSCTL_SYSCALL_CHECK
- sysctl_check_table(current->nsproxy, root_table);
+ {
+ int err;
+ err = sysctl_check_table(current->nsproxy, root_table);
+ }
#endif
return 0;
}
diff --git a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c
index 7cb1f45a1de1..0d88ce9b9fb8 100644
--- a/trunk/kernel/trace/ftrace.c
+++ b/trunk/kernel/trace/ftrace.c
@@ -381,19 +381,12 @@ static int function_stat_show(struct seq_file *m, void *v)
{
struct ftrace_profile *rec = v;
char str[KSYM_SYMBOL_LEN];
- int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ static DEFINE_MUTEX(mutex);
static struct trace_seq s;
unsigned long long avg;
unsigned long long stddev;
#endif
- mutex_lock(&ftrace_profile_lock);
-
- /* we raced with function_profile_reset() */
- if (unlikely(rec->counter == 0)) {
- ret = -EBUSY;
- goto out;
- }
kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
seq_printf(m, " %-30.30s %10lu", str, rec->counter);
@@ -415,6 +408,7 @@ static int function_stat_show(struct seq_file *m, void *v)
do_div(stddev, (rec->counter - 1) * 1000);
}
+ mutex_lock(&mutex);
trace_seq_init(&s);
trace_print_graph_duration(rec->time, &s);
trace_seq_puts(&s, " ");
@@ -422,12 +416,11 @@ static int function_stat_show(struct seq_file *m, void *v)
trace_seq_puts(&s, " ");
trace_print_graph_duration(stddev, &s);
trace_print_seq(m, &s);
+ mutex_unlock(&mutex);
#endif
seq_putc(m, '\n');
-out:
- mutex_unlock(&ftrace_profile_lock);
- return ret;
+ return 0;
}
static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c
index 492197e2f86c..19cccc3c3028 100644
--- a/trunk/kernel/trace/ring_buffer.c
+++ b/trunk/kernel/trace/ring_buffer.c
@@ -2985,11 +2985,13 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
+ struct ring_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_event *event;
unsigned length;
cpu_buffer = iter->cpu_buffer;
+ buffer = cpu_buffer->buffer;
/*
* Check if we are at the end of the buffer.
diff --git a/trunk/kernel/watchdog.c b/trunk/kernel/watchdog.c
index 7f9c3c52ecc1..0d53c8e853b1 100644
--- a/trunk/kernel/watchdog.c
+++ b/trunk/kernel/watchdog.c
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
void touch_softlockup_watchdog(void)
{
- __raw_get_cpu_var(watchdog_touch_ts) = 0;
+ __get_cpu_var(watchdog_touch_ts) = 0;
}
EXPORT_SYMBOL(touch_softlockup_watchdog);
@@ -142,14 +142,7 @@ void touch_all_softlockup_watchdogs(void)
#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
- if (watchdog_enabled) {
- unsigned cpu;
-
- for_each_present_cpu(cpu) {
- if (per_cpu(watchdog_nmi_touch, cpu) != true)
- per_cpu(watchdog_nmi_touch, cpu) = true;
- }
- }
+ __get_cpu_var(watchdog_nmi_touch) = true;
touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -440,9 +433,6 @@ static int watchdog_enable(int cpu)
wake_up_process(p);
}
- /* if any cpu succeeds, watchdog is considered enabled for the system */
- watchdog_enabled = 1;
-
return 0;
}
@@ -465,6 +455,9 @@ static void watchdog_disable(int cpu)
per_cpu(softlockup_watchdog, cpu) = NULL;
kthread_stop(p);
}
+
+ /* if any cpu succeeds, watchdog is considered enabled for the system */
+ watchdog_enabled = 1;
}
static void watchdog_enable_all_cpus(void)
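On the touch_softlockup_watchdog() hunk: __get_cpu_var() warns under CONFIG_DEBUG_PREEMPT when used with preemption enabled, whereas __raw_get_cpu_var() skips that check. That is why the original code used the raw accessor for a helper that may be called from preemptible context and only needs a best-effort "this CPU" touch. A hedged sketch with an illustrative per-CPU variable:

```c
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_touch_ts);

void example_touch_watchdog(void)
{
	/* Callable with preemption enabled: touching a slightly stale CPU
	 * is harmless here, so the raw accessor avoids a spurious
	 * DEBUG_PREEMPT warning. */
	__raw_get_cpu_var(example_touch_ts) = 0;
}
```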
diff --git a/trunk/security/apparmor/include/resource.h b/trunk/security/apparmor/include/resource.h
index 02baec732bb5..3c88be946494 100644
--- a/trunk/security/apparmor/include/resource.h
+++ b/trunk/security/apparmor/include/resource.h
@@ -33,8 +33,8 @@ struct aa_rlimit {
};
int aa_map_resource(int resource);
-int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
- unsigned int resource, struct rlimit *new_rlim);
+int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
+ struct rlimit *new_rlim);
void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
diff --git a/trunk/security/apparmor/lib.c b/trunk/security/apparmor/lib.c
index 506d2baf6147..6e85cdb4303f 100644
--- a/trunk/security/apparmor/lib.c
+++ b/trunk/security/apparmor/lib.c
@@ -40,7 +40,6 @@ char *aa_split_fqname(char *fqname, char **ns_name)
*ns_name = NULL;
if (name[0] == ':') {
char *split = strchr(&name[1], ':');
- *ns_name = skip_spaces(&name[1]);
if (split) {
/* overwrite ':' with \0 */
*split = 0;
@@ -48,6 +47,7 @@ char *aa_split_fqname(char *fqname, char **ns_name)
} else
/* a ns name without a following profile is allowed */
name = NULL;
+ *ns_name = &name[1];
}
if (name && *name == 0)
name = NULL;
diff --git a/trunk/security/apparmor/lsm.c b/trunk/security/apparmor/lsm.c
index cf1de4462ccd..f73e2c204218 100644
--- a/trunk/security/apparmor/lsm.c
+++ b/trunk/security/apparmor/lsm.c
@@ -614,7 +614,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
int error = 0;
if (!unconfined(profile))
- error = aa_task_setrlimit(profile, task, resource, new_rlim);
+ error = aa_task_setrlimit(profile, resource, new_rlim);
return error;
}
diff --git a/trunk/security/apparmor/path.c b/trunk/security/apparmor/path.c
index 82396050f186..19358dc14605 100644
--- a/trunk/security/apparmor/path.c
+++ b/trunk/security/apparmor/path.c
@@ -59,7 +59,8 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
{
struct path root, tmp;
char *res;
- int connected, error = 0;
+ int deleted, connected;
+ int error = 0;
/* Get the root we want to resolve too, released below */
if (flags & PATH_CHROOT_REL) {
@@ -73,8 +74,19 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
}
spin_lock(&dcache_lock);
- tmp = root;
- res = __d_path(path, &tmp, buf, buflen);
+ /* There is a race window between path lookup here and the
+ * need to strip the " (deleted) string that __d_path applies
+ * Detect the race and relookup the path
+ *
+ * The stripping of (deleted) is a hack that could be removed
+ * with an updated __d_path
+ */
+ do {
+ tmp = root;
+ deleted = d_unlinked(path->dentry);
+ res = __d_path(path, &tmp, buf, buflen);
+
+ } while (deleted != d_unlinked(path->dentry));
spin_unlock(&dcache_lock);
*name = res;
@@ -86,17 +98,21 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
*name = buf;
goto out;
}
+ if (deleted) {
+ /* On some filesystems, newly allocated dentries appear to the
+ * security_path hooks as a deleted dentry except without an
+ * inode allocated.
+ *
+ * Remove the appended deleted text and return as string for
+ * normal mediation, or auditing. The (deleted) string is
+ * guaranteed to be added in this case, so just strip it.
+ */
+ buf[buflen - 11] = 0; /* - (len(" (deleted)") +\0) */
- /* Handle two cases:
- * 1. A deleted dentry && profile is not allowing mediation of deleted
- * 2. On some filesystems, newly allocated dentries appear to the
- * security_path hooks as a deleted dentry except without an inode
- * allocated.
- */
- if (d_unlinked(path->dentry) && path->dentry->d_inode &&
- !(flags & PATH_MEDIATE_DELETED)) {
+ if (path->dentry->d_inode && !(flags & PATH_MEDIATE_DELETED)) {
error = -ENOENT;
goto out;
+ }
}
/* Determine if the path is connected to the expected root */
diff --git a/trunk/security/apparmor/policy.c b/trunk/security/apparmor/policy.c
index 52cc865f1464..3cdc1ad0787e 100644
--- a/trunk/security/apparmor/policy.c
+++ b/trunk/security/apparmor/policy.c
@@ -1151,14 +1151,12 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
/* released below */
ns = aa_get_namespace(root);
+ write_lock(&ns->lock);
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
- write_lock(&ns->parent->lock);
__remove_namespace(ns);
- write_unlock(&ns->parent->lock);
} else {
/* remove profile */
- write_lock(&ns->lock);
profile = aa_get_profile(__lookup_profile(&ns->base, name));
if (!profile) {
error = -ENOENT;
@@ -1167,8 +1165,8 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
}
name = profile->base.hname;
__remove_profile(profile);
- write_unlock(&ns->lock);
}
+ write_unlock(&ns->lock);
/* don't fail removal if audit fails */
(void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
diff --git a/trunk/security/apparmor/resource.c b/trunk/security/apparmor/resource.c
index a4136c10b1c6..4a368f1fd36d 100644
--- a/trunk/security/apparmor/resource.c
+++ b/trunk/security/apparmor/resource.c
@@ -72,7 +72,6 @@ int aa_map_resource(int resource)
/**
* aa_task_setrlimit - test permission to set an rlimit
* @profile - profile confining the task (NOT NULL)
- * @task - task the resource is being set on
* @resource - the resource being set
* @new_rlim - the new resource limit (NOT NULL)
*
@@ -80,21 +79,18 @@ int aa_map_resource(int resource)
*
* Returns: 0 or error code if setting resource failed
*/
-int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
- unsigned int resource, struct rlimit *new_rlim)
+int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
+ struct rlimit *new_rlim)
{
int error = 0;
- /* TODO: extend resource control to handle other (non current)
- * processes. AppArmor rules currently have the implicit assumption
- * that the task is setting the resource of the current process
- */
- if ((task != current->group_leader) ||
- (profile->rlimits.mask & (1 << resource) &&
- new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
- error = -EACCES;
+ if (profile->rlimits.mask & (1 << resource) &&
+ new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)
+
+ error = audit_resource(profile, resource, new_rlim->rlim_max,
+ -EACCES);
- return audit_resource(profile, resource, new_rlim->rlim_max, error);
+ return error;
}
/**
diff --git a/trunk/security/integrity/ima/ima.h b/trunk/security/integrity/ima/ima.h
index 3fbcd1dda0ef..16d100d3fc38 100644
--- a/trunk/security/integrity/ima/ima.h
+++ b/trunk/security/integrity/ima/ima.h
@@ -35,7 +35,6 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
/* set during initialization */
-extern int iint_initialized;
extern int ima_initialized;
extern int ima_used_chip;
extern char *ima_hash;
diff --git a/trunk/security/integrity/ima/ima_iint.c b/trunk/security/integrity/ima/ima_iint.c
index afba4aef812f..7625b85c2274 100644
--- a/trunk/security/integrity/ima/ima_iint.c
+++ b/trunk/security/integrity/ima/ima_iint.c
@@ -22,9 +22,8 @@
RADIX_TREE(ima_iint_store, GFP_ATOMIC);
DEFINE_SPINLOCK(ima_iint_lock);
-static struct kmem_cache *iint_cache __read_mostly;
-int iint_initialized = 0;
+static struct kmem_cache *iint_cache __read_mostly;
/* ima_iint_find_get - return the iint associated with an inode
*
@@ -142,7 +141,6 @@ static int __init ima_iintcache_init(void)
iint_cache =
kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
SLAB_PANIC, init_once);
- iint_initialized = 1;
return 0;
}
security_initcall(ima_iintcache_init);
diff --git a/trunk/security/integrity/ima/ima_main.c b/trunk/security/integrity/ima/ima_main.c
index e662b89d4079..f93641382e9f 100644
--- a/trunk/security/integrity/ima/ima_main.c
+++ b/trunk/security/integrity/ima/ima_main.c
@@ -148,14 +148,12 @@ void ima_counts_get(struct file *file)
struct ima_iint_cache *iint;
int rc;
- if (!iint_initialized || !S_ISREG(inode->i_mode))
+ if (!ima_initialized || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_get(inode);
if (!iint)
return;
mutex_lock(&iint->mutex);
- if (!ima_initialized)
- goto out;
rc = ima_must_measure(iint, inode, MAY_READ, FILE_CHECK);
if (rc < 0)
goto out;
@@ -215,7 +213,7 @@ void ima_file_free(struct file *file)
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
- if (!iint_initialized || !S_ISREG(inode->i_mode))
+ if (!ima_initialized || !S_ISREG(inode->i_mode))
return;
iint = ima_iint_find_get(inode);
if (!iint)
@@ -232,7 +230,7 @@ static int process_measurement(struct file *file, const unsigned char *filename,
{
struct inode *inode = file->f_dentry->d_inode;
struct ima_iint_cache *iint;
- int rc = 0;
+ int rc;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
diff --git a/trunk/tools/perf/util/callchain.h b/trunk/tools/perf/util/callchain.h
index 6de4313924fb..624a96c636fd 100644
--- a/trunk/tools/perf/util/callchain.h
+++ b/trunk/tools/perf/util/callchain.h
@@ -50,7 +50,6 @@ static inline void callchain_init(struct callchain_node *node)
INIT_LIST_HEAD(&node->children);
INIT_LIST_HEAD(&node->val);
- node->children_hit = 0;
node->parent = NULL;
node->hit = 0;
}