diff --git a/[refs] b/[refs]
index 1296493b87d7..442e10c0cf03 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0e11c8e8a60f8591556d142c2e1e53eaf86ab528
+refs/heads/master: 3aac7a8d574285be855450d103a421b8f8ec89d4
diff --git a/trunk/Documentation/atomic_ops.txt b/trunk/Documentation/atomic_ops.txt
index d9ca5be9b471..27f2b21a9d5c 100644
--- a/trunk/Documentation/atomic_ops.txt
+++ b/trunk/Documentation/atomic_ops.txt
@@ -253,8 +253,6 @@ This performs an atomic exchange operation on the atomic variable v, setting
 the given new value. It returns the old value that the atomic variable v had
 just before the operation.
 
-atomic_xchg requires explicit memory barriers around the operation.
-
 	int atomic_cmpxchg(atomic_t *v, int old, int new);
 
 This performs an atomic compare exchange operation on the atomic value v,
diff --git a/trunk/Documentation/memory-barriers.txt b/trunk/Documentation/memory-barriers.txt
index fa5d8a9ae205..3c4e1b3b80a1 100644
--- a/trunk/Documentation/memory-barriers.txt
+++ b/trunk/Documentation/memory-barriers.txt
@@ -1685,7 +1685,6 @@ explicit lock operations, described later). These include:
 
 	xchg();
 	cmpxchg();
-	atomic_xchg();
 	atomic_cmpxchg();
 	atomic_inc_return();
 	atomic_dec_return();
diff --git a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h
index 7f89cea596e1..275aa3f1062d 100644
--- a/trunk/include/linux/rcupdate.h
+++ b/trunk/include/linux/rcupdate.h
@@ -53,10 +53,7 @@ extern int rcutorture_runnable; /* for sysctl */
 extern void rcutorture_record_test_transition(void);
 extern void rcutorture_record_progress(unsigned long vernum);
 extern void do_trace_rcu_torture_read(char *rcutorturename,
-				      struct rcu_head *rhp,
-				      unsigned long secs,
-				      unsigned long c_old,
-				      unsigned long c);
+				      struct rcu_head *rhp);
 #else
 static inline void rcutorture_record_test_transition(void)
 {
@@ -66,13 +63,9 @@ static inline void rcutorture_record_progress(unsigned long vernum)
 }
 #ifdef CONFIG_RCU_TRACE
 extern void do_trace_rcu_torture_read(char *rcutorturename,
-				      struct rcu_head *rhp,
-				      unsigned long secs,
-				      unsigned long c_old,
-				      unsigned long c);
+				      struct rcu_head *rhp);
 #else
-#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
-	do { } while (0)
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
 #endif
 #endif
 
diff --git a/trunk/include/trace/events/rcu.h b/trunk/include/trace/events/rcu.h
index 09af021c8e96..f91949850fce 100644
--- a/trunk/include/trace/events/rcu.h
+++ b/trunk/include/trace/events/rcu.h
@@ -393,7 +393,7 @@ TRACE_EVENT(rcu_kfree_callback,
  */
 TRACE_EVENT(rcu_batch_start,
 
-	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, int blimit),
+	TP_PROTO(char *rcuname, long qlen_lazy, long qlen, long blimit),
 
 	TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
 
@@ -401,7 +401,7 @@ TRACE_EVENT(rcu_batch_start,
 		__field(char *, rcuname)
 		__field(long, qlen_lazy)
 		__field(long, qlen)
-		__field(int, blimit)
+		__field(long, blimit)
 	),
 
 	TP_fast_assign(
@@ -411,7 +411,7 @@ TRACE_EVENT(rcu_batch_start,
 		__entry->blimit = blimit;
 	),
 
-	TP_printk("%s CBs=%ld/%ld bl=%d",
+	TP_printk("%s CBs=%ld/%ld bl=%ld",
 		  __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
 		  __entry->blimit)
 );
@@ -523,30 +523,22 @@ TRACE_EVENT(rcu_batch_end,
  */
 TRACE_EVENT(rcu_torture_read,
 
-	TP_PROTO(char *rcutorturename, struct rcu_head *rhp,
-		 unsigned long secs, unsigned long c_old, unsigned long c),
+	TP_PROTO(char *rcutorturename, struct rcu_head *rhp),
 
-	TP_ARGS(rcutorturename, rhp, secs, c_old, c),
+	TP_ARGS(rcutorturename, rhp),
 
 	TP_STRUCT__entry(
 		__field(char *, rcutorturename)
 		__field(struct rcu_head *, rhp)
-		__field(unsigned long, secs)
-		__field(unsigned long, c_old)
-		__field(unsigned long, c)
 	),
 
 	TP_fast_assign(
 		__entry->rcutorturename = rcutorturename;
 		__entry->rhp = rhp;
-		__entry->secs = secs;
-		__entry->c_old = c_old;
-		__entry->c = c;
 	),
 
-	TP_printk("%s torture read %p %luus c: %lu %lu",
-		  __entry->rcutorturename, __entry->rhp,
-		  __entry->secs, __entry->c_old, __entry->c)
+	TP_printk("%s torture read %p",
+		  __entry->rcutorturename, __entry->rhp)
 );
 
 /*
@@ -616,8 +608,7 @@ TRACE_EVENT(rcu_barrier,
 #define trace_rcu_invoke_kfree_callback(rcuname, rhp, offset) do { } while (0)
 #define trace_rcu_batch_end(rcuname, callbacks_invoked, cb, nr, iit, risk) \
 	do { } while (0)
-#define trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
-	do { } while (0)
+#define trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
 #define trace_rcu_barrier(name, s, cpu, cnt, done) do { } while (0)
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
diff --git a/trunk/kernel/rcupdate.c b/trunk/kernel/rcupdate.c
index 303359d1ca88..a2cf76177b44 100644
--- a/trunk/kernel/rcupdate.c
+++ b/trunk/kernel/rcupdate.c
@@ -404,14 +404,11 @@ EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
 #endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
 
 #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) || defined(CONFIG_RCU_TRACE)
-void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp,
-			       unsigned long secs,
-			       unsigned long c_old, unsigned long c)
+void do_trace_rcu_torture_read(char *rcutorturename, struct rcu_head *rhp)
 {
-	trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
+	trace_rcu_torture_read(rcutorturename, rhp);
 }
 EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
 #else
-#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
-	do { } while (0)
+#define do_trace_rcu_torture_read(rcutorturename, rhp) do { } while (0)
 #endif
diff --git a/trunk/kernel/rcutorture.c b/trunk/kernel/rcutorture.c
index 3ebc8bfb5525..31dea01c85fd 100644
--- a/trunk/kernel/rcutorture.c
+++ b/trunk/kernel/rcutorture.c
@@ -46,7 +46,6 @@
 #include
 #include
 #include
-#include <linux/trace_clock.h>
 #include
 
 MODULE_LICENSE("GPL");
@@ -846,7 +845,7 @@ static int rcu_torture_boost(void *arg)
 		/* Wait for the next test interval. */
 		oldstarttime = boost_starttime;
 		while (ULONG_CMP_LT(jiffies, oldstarttime)) {
-			schedule_timeout_interruptible(oldstarttime - jiffies);
+			schedule_timeout_uninterruptible(1);
 			rcu_stutter_wait("rcu_torture_boost");
 			if (kthread_should_stop() ||
 			    fullstop != FULLSTOP_DONTSTOP)
@@ -1029,6 +1028,7 @@ void rcutorture_trace_dump(void)
 		return;
 	if (atomic_xchg(&beenhere, 1) != 0)
 		return;
+	do_trace_rcu_torture_read(cur_ops->name, (struct rcu_head *)~0UL);
 	ftrace_dump(DUMP_ALL);
 }
 
@@ -1042,16 +1042,13 @@ static void rcu_torture_timer(unsigned long unused)
 {
 	int idx;
 	int completed;
-	int completed_end;
 	static DEFINE_RCU_RANDOM(rand);
 	static DEFINE_SPINLOCK(rand_lock);
 	struct rcu_torture *p;
 	int pipe_count;
-	unsigned long long ts;
 
 	idx = cur_ops->readlock();
 	completed = cur_ops->completed();
-	ts = trace_clock_local();
 	p = rcu_dereference_check(rcu_torture_current,
 				  rcu_read_lock_bh_held() ||
 				  rcu_read_lock_sched_held() ||
@@ -1061,6 +1058,7 @@ static void rcu_torture_timer(unsigned long unused)
 		cur_ops->readunlock(idx);
 		return;
 	}
+	do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
 	if (p->rtort_mbtest == 0)
 		atomic_inc(&n_rcu_torture_mberror);
 	spin_lock(&rand_lock);
@@ -1073,16 +1071,10 @@ static void rcu_torture_timer(unsigned long unused)
 		/* Should not happen, but... */
 		pipe_count = RCU_TORTURE_PIPE_LEN;
 	}
-	completed_end = cur_ops->completed();
-	if (pipe_count > 1) {
-		unsigned long __maybe_unused ts_rem = do_div(ts, NSEC_PER_USEC);
-
-		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu, ts,
-					  completed, completed_end);
+	if (pipe_count > 1)
 		rcutorture_trace_dump();
-	}
 	__this_cpu_inc(rcu_torture_count[pipe_count]);
-	completed = completed_end - completed;
+	completed = cur_ops->completed() - completed;
 	if (completed > RCU_TORTURE_PIPE_LEN) {
 		/* Should not happen, but... */
 		completed = RCU_TORTURE_PIPE_LEN;
@@ -1102,13 +1094,11 @@ static int
 rcu_torture_reader(void *arg)
 {
 	int completed;
-	int completed_end;
 	int idx;
 	DEFINE_RCU_RANDOM(rand);
 	struct rcu_torture *p;
 	int pipe_count;
 	struct timer_list t;
-	unsigned long long ts;
 
 	VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
 	set_user_nice(current, 19);
@@ -1122,7 +1112,6 @@ rcu_torture_reader(void *arg)
 		}
 		idx = cur_ops->readlock();
 		completed = cur_ops->completed();
-		ts = trace_clock_local();
 		p = rcu_dereference_check(rcu_torture_current,
 					  rcu_read_lock_bh_held() ||
 					  rcu_read_lock_sched_held() ||
@@ -1133,6 +1122,7 @@ rcu_torture_reader(void *arg)
 			schedule_timeout_interruptible(HZ);
 			continue;
 		}
+		do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
 		if (p->rtort_mbtest == 0)
 			atomic_inc(&n_rcu_torture_mberror);
 		cur_ops->read_delay(&rand);
@@ -1142,17 +1132,10 @@ rcu_torture_reader(void *arg)
 			/* Should not happen, but... */
 			pipe_count = RCU_TORTURE_PIPE_LEN;
 		}
-		completed_end = cur_ops->completed();
-		if (pipe_count > 1) {
-			unsigned long __maybe_unused ts_rem =
-					do_div(ts, NSEC_PER_USEC);
-
-			do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
-						  ts, completed, completed_end);
+		if (pipe_count > 1)
 			rcutorture_trace_dump();
-		}
 		__this_cpu_inc(rcu_torture_count[pipe_count]);
-		completed = completed_end - completed;
+		completed = cur_ops->completed() - completed;
 		if (completed > RCU_TORTURE_PIPE_LEN) {
 			/* Should not happen, but... */
 			completed = RCU_TORTURE_PIPE_LEN;
@@ -1318,35 +1301,19 @@ static void rcu_torture_shuffle_tasks(void)
 				set_cpus_allowed_ptr(reader_tasks[i],
 						     shuffle_tmp_mask);
 	}
+
 	if (fakewriter_tasks) {
 		for (i = 0; i < nfakewriters; i++)
 			if (fakewriter_tasks[i])
 				set_cpus_allowed_ptr(fakewriter_tasks[i],
 						     shuffle_tmp_mask);
 	}
+
 	if (writer_task)
 		set_cpus_allowed_ptr(writer_task, shuffle_tmp_mask);
+
 	if (stats_task)
 		set_cpus_allowed_ptr(stats_task, shuffle_tmp_mask);
-	if (stutter_task)
-		set_cpus_allowed_ptr(stutter_task, shuffle_tmp_mask);
-	if (fqs_task)
-		set_cpus_allowed_ptr(fqs_task, shuffle_tmp_mask);
-	if (shutdown_task)
-		set_cpus_allowed_ptr(shutdown_task, shuffle_tmp_mask);
-#ifdef CONFIG_HOTPLUG_CPU
-	if (onoff_task)
-		set_cpus_allowed_ptr(onoff_task, shuffle_tmp_mask);
-#endif /* #ifdef CONFIG_HOTPLUG_CPU */
-	if (stall_task)
-		set_cpus_allowed_ptr(stall_task, shuffle_tmp_mask);
-	if (barrier_cbs_tasks)
-		for (i = 0; i < n_barrier_cbs; i++)
-			if (barrier_cbs_tasks[i])
-				set_cpus_allowed_ptr(barrier_cbs_tasks[i],
-						     shuffle_tmp_mask);
-	if (barrier_task)
-		set_cpus_allowed_ptr(barrier_task, shuffle_tmp_mask);
 
 	if (rcu_idle_cpu == -1)
 		rcu_idle_cpu = num_online_cpus() - 1;
diff --git a/trunk/kernel/trace/trace_clock.c b/trunk/kernel/trace/trace_clock.c
index 1bbb1b200cec..394783531cbb 100644
--- a/trunk/kernel/trace/trace_clock.c
+++ b/trunk/kernel/trace/trace_clock.c
@@ -44,7 +44,6 @@ u64 notrace trace_clock_local(void)
 
 	return clock;
 }
-EXPORT_SYMBOL_GPL(trace_clock_local);
 
 /*
  * trace_clock(): 'between' trace clock. Not completely serialized,
diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug
index 7d83f52fbade..3a353091a903 100644
--- a/trunk/lib/Kconfig.debug
+++ b/trunk/lib/Kconfig.debug
@@ -1008,7 +1008,6 @@ config RCU_CPU_STALL_INFO
 config RCU_TRACE
 	bool "Enable tracing for RCU"
 	depends on DEBUG_KERNEL
-	select TRACE_CLOCK
 	help
 	  This option provides tracing in RCU which presents stats in
 	  debugfs for debugging RCU implementation.