Commit d10c20b

---
r: 100518
b: refs/heads/master
c: a98a3c3
h: refs/heads/master
v: v3

Steven Rostedt authored and Thomas Gleixner committed May 23, 2008
1 parent 4372c87 commit d10c20b
Showing 2 changed files with 138 additions and 9 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 05bd68c514579e007b46e4fa0461b78416a3f4c2
+refs/heads/master: a98a3c3fde3ae7614f19758a043691b6f59dac53
145 changes: 137 additions & 8 deletions trunk/kernel/trace/trace.c
@@ -35,6 +35,15 @@
 unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
 unsigned long __read_mostly tracing_thresh;
 
+/* dummy trace to disable tracing */
+static struct tracer no_tracer __read_mostly =
+{
+        .name = "none",
+};
+
+static int trace_alloc_page(void);
+static int trace_free_page(void);
+
 static int tracing_disabled = 1;
 
 long
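
Note: no_tracer is defined here, ahead of its first use, because the new tracing_entries_write() below compares current_trace against it; the definition is correspondingly removed from its old spot further down. trace_alloc_page() and trace_free_page() get forward declarations for the same reason: both are defined near the bottom of the file but are now called from the middle of it.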
@@ -2364,6 +2373,70 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
         return read;
 }
 
+static ssize_t
+tracing_entries_read(struct file *filp, char __user *ubuf,
+                     size_t cnt, loff_t *ppos)
+{
+        struct trace_array *tr = filp->private_data;
+        char buf[64];
+        int r;
+
+        r = sprintf(buf, "%lu\n", tr->entries);
+        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+tracing_entries_write(struct file *filp, const char __user *ubuf,
+                      size_t cnt, loff_t *ppos)
+{
+        unsigned long val;
+        char buf[64];
+
+        if (cnt > 63)
+                cnt = 63;
+
+        if (copy_from_user(&buf, ubuf, cnt))
+                return -EFAULT;
+
+        buf[cnt] = 0;
+
+        val = simple_strtoul(buf, NULL, 10);
+
+        /* must have at least 1 entry */
+        if (!val)
+                return -EINVAL;
+
+        mutex_lock(&trace_types_lock);
+
+        if (current_trace != &no_tracer) {
+                cnt = -EBUSY;
+                pr_info("ftrace: set current_tracer to none"
+                        " before modifying buffer size\n");
+                goto out;
+        }
+
+        if (val > global_trace.entries) {
+                while (global_trace.entries < val) {
+                        if (trace_alloc_page()) {
+                                cnt = -ENOMEM;
+                                goto out;
+                        }
+                }
+        } else {
+                /* shrink a page at a time, but keep at least val entries */
+                while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
+                        trace_free_page();
+        }
+
+        filp->f_pos += cnt;
+
+ out:
+        max_tr.entries = global_trace.entries;
+        mutex_unlock(&trace_types_lock);
+
+        return cnt;
+}
+
 static struct file_operations tracing_max_lat_fops = {
         .open           = tracing_open_generic,
         .read           = tracing_max_lat_read,
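
The shrink branch frees buffer pages one at a time and stops as soon as dropping another page would leave fewer than val entries, so the final size lands in [val, val + ENTRIES_PER_PAGE). A minimal stand-alone sketch of that rounding rule, assuming an ENTRIES_PER_PAGE of 170 purely for illustration (in the kernel the value is derived from PAGE_SIZE and the trace entry size):

#include <stdio.h>

#define ENTRIES_PER_PAGE 170    /* assumed value; real one is PAGE_SIZE-dependent */

int main(void)
{
        unsigned long entries = 65536;  /* current buffer size, in entries */
        unsigned long val = 300;        /* requested size */

        /* shrink in whole pages, never dropping below val entries */
        while (entries > val + (ENTRIES_PER_PAGE - 1))
                entries -= ENTRIES_PER_PAGE;

        printf("%lu\n", entries);       /* prints 426: in [val, val + ENTRIES_PER_PAGE) */
        return 0;
}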
@@ -2389,6 +2462,12 @@ static struct file_operations tracing_pipe_fops = {
         .release        = tracing_release_pipe,
 };
 
+static struct file_operations tracing_entries_fops = {
+        .open           = tracing_open_generic,
+        .read           = tracing_entries_read,
+        .write          = tracing_entries_write,
+};
+
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 static ssize_t
@@ -2500,6 +2579,12 @@ static __init void tracer_init_debugfs(void)
                 pr_warning("Could not create debugfs "
                            "'tracing_thresh' entry\n");
 
+        entry = debugfs_create_file("trace_entries", 0644, d_tracer,
+                                    &global_trace, &tracing_entries_fops);
+        if (!entry)
+                pr_warning("Could not create debugfs "
+                           "'trace_entries' entry\n");
+
 #ifdef CONFIG_DYNAMIC_FTRACE
         entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
                                     &ftrace_update_tot_cnt,
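
With this file registered, the buffer size becomes tunable from user space. A hypothetical session, assuming debugfs is mounted at /debug (the mount point varies by system):

echo none > /debug/tracing/current_tracer   # resizing is refused (-EBUSY) while a tracer is active
cat /debug/tracing/trace_entries            # current buffer size, in entries per CPU
echo 65536 > /debug/tracing/trace_entries   # grow (or shrink) the buffers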
@@ -2510,12 +2595,6 @@ static __init void tracer_init_debugfs(void)
 #endif
 }
 
-/* dummy trace to disable tracing */
-static struct tracer no_tracer __read_mostly =
-{
-        .name = "none",
-};
-
 static int trace_alloc_page(void)
 {
         struct trace_array_cpu *data;
@@ -2552,15 +2631,13 @@ static int trace_alloc_page(void)
         /* Now that we successfully allocated a page per CPU, add them */
         for_each_possible_cpu(i) {
                 data = global_trace.data[i];
-                data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                 page = list_entry(pages.next, struct page, lru);
                 list_del_init(&page->lru);
                 list_add_tail(&page->lru, &data->trace_pages);
                 ClearPageLRU(page);
 
 #ifdef CONFIG_TRACER_MAX_TRACE
                 data = max_tr.data[i];
-                data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
                 page = list_entry(pages.next, struct page, lru);
                 list_del_init(&page->lru);
                 list_add_tail(&page->lru, &data->trace_pages);
@@ -2579,6 +2656,55 @@ static int trace_alloc_page(void)
         return -ENOMEM;
 }
 
+static int trace_free_page(void)
+{
+        struct trace_array_cpu *data;
+        struct page *page;
+        struct list_head *p;
+        int i;
+        int ret = 0;
+
+        /* free one page from each buffer */
+        for_each_possible_cpu(i) {
+                data = global_trace.data[i];
+                p = data->trace_pages.next;
+                if (p == &data->trace_pages) {
+                        /* should never happen */
+                        WARN_ON(1);
+                        tracing_disabled = 1;
+                        ret = -1;
+                        break;
+                }
+                page = list_entry(p, struct page, lru);
+                ClearPageLRU(page);
+                list_del(&page->lru);
+                __free_page(page);
+
+                tracing_reset(data);
+
+#ifdef CONFIG_TRACER_MAX_TRACE
+                data = max_tr.data[i];
+                p = data->trace_pages.next;
+                if (p == &data->trace_pages) {
+                        /* should never happen */
+                        WARN_ON(1);
+                        tracing_disabled = 1;
+                        ret = -1;
+                        break;
+                }
+                page = list_entry(p, struct page, lru);
+                ClearPageLRU(page);
+                list_del(&page->lru);
+                __free_page(page);
+
+                tracing_reset(data);
+#endif
+        }
+        global_trace.entries -= ENTRIES_PER_PAGE;
+
+        return ret;
+}
+
 __init static int tracer_alloc_buffers(void)
 {
         struct trace_array_cpu *data;
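
trace_free_page() mirrors trace_alloc_page(): each call removes exactly one page from every CPU's ring of trace pages (and from the shadow max_tr buffer when CONFIG_TRACER_MAX_TRACE is set), so global_trace.entries, which counts entries per CPU, is decremented by ENTRIES_PER_PAGE just once per call. The self-pointing-list check (p == &data->trace_pages) catches an empty page ring, which the !val guard in tracing_entries_write() should make unreachable; if it ever fires, tracing is disabled outright rather than risking a corrupted buffer.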
@@ -2609,6 +2735,9 @@ __init static int tracer_alloc_buffers(void)
                 /* use the LRU flag to differentiate the two buffers */
                 ClearPageLRU(page);
 
+                data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+                max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
 /* Only allocate if we are actually using the max trace */
 #ifdef CONFIG_TRACER_MAX_TRACE
                 array = (void *)__get_free_page(GFP_KERNEL);
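
Note: the per-CPU data->lock initialisation moves out of trace_alloc_page() and into this boot-time path. Presumably this is because trace_alloc_page() can now run at any time via writes to trace_entries, and re-initialising a live raw spinlock on every resize would discard its state; initialising the locks once in tracer_alloc_buffers() avoids that.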
