Skip to content

Commit

Permalink
workqueue: temporarily remove workqueue tracing
Browse files Browse the repository at this point in the history
Strip tracing code from workqueue and remove workqueue tracing.  This
is a temporary measure until the concurrency managed workqueue is complete.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
  • Loading branch information
Tejun Heo committed Jun 29, 2010
1 parent a62428c commit 6416669
Show file tree
Hide file tree
Showing 3 changed files with 3 additions and 114 deletions.
92 changes: 0 additions & 92 deletions include/trace/events/workqueue.h

This file was deleted.

11 changes: 0 additions & 11 deletions kernel/trace/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -391,17 +391,6 @@ config KMEMTRACE

If unsure, say N.

config WORKQUEUE_TRACER
bool "Trace workqueues"
select GENERIC_TRACER
help
The workqueue tracer provides some statistical information
about each cpu workqueue thread, such as the number of works
inserted and executed since its creation. It can help
to evaluate the amount of work each of them has to perform.
For example it can help a developer to decide whether he should
choose a per-cpu workqueue instead of a singlethreaded one.

config BLK_DEV_IO_TRACE
bool "Support for tracing block IO actions"
depends on SYSFS
Expand Down
14 changes: 3 additions & 11 deletions kernel/workqueue.c
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,6 @@
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>

/*
* Structure fields follow one of the following exclusion rules.
Expand Down Expand Up @@ -243,10 +241,10 @@ static inline void clear_wq_data(struct work_struct *work)
atomic_long_set(&work->data, work_static(work));
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
return (void *)(atomic_long_read(&work->data) &
WORK_STRUCT_WQ_DATA_MASK);
}

/**
Expand All @@ -265,8 +263,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
struct work_struct *work, struct list_head *head,
unsigned int extra_flags)
{
trace_workqueue_insertion(cwq->thread, work);

/* we own @work, set data and link */
set_wq_data(work, cwq, extra_flags);

Expand Down Expand Up @@ -431,7 +427,6 @@ static void process_one_work(struct cpu_workqueue_struct *cwq,
struct lockdep_map lockdep_map = work->lockdep_map;
#endif
/* claim and process */
trace_workqueue_execution(cwq->thread, work);
debug_work_deactivate(work);
cwq->current_work = work;
list_del_init(&work->entry);
Expand Down Expand Up @@ -1017,8 +1012,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
return PTR_ERR(p);
cwq->thread = p;

trace_workqueue_creation(cwq->thread, cpu);

return 0;
}

Expand Down Expand Up @@ -1123,7 +1116,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
* checks list_empty(), and a "normal" queue_work() can't use
* a dead CPU.
*/
trace_workqueue_destruction(cwq->thread);
kthread_stop(cwq->thread);
cwq->thread = NULL;
}
Expand Down

0 comments on commit 6416669

Please sign in to comment.