Commit fe259af
---
yaml
---
r: 100597
b: refs/heads/master
c: d61fc44
h: refs/heads/master
i:
  100595: 9c3f880
v: v3
Pekka Paalanen authored and Thomas Gleixner committed May 24, 2008
1 parent 104e534 commit fe259af
Showing 8 changed files with 333 additions and 202 deletions.
[refs]: 2 changes (1 addition, 1 deletion)

@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 0fd0e3da4557c479b820b9a4a7afa25b4637ddf2
+refs/heads/master: d61fc44853f46fb002228b18aa5f30db21fcd4ac
trunk/arch/x86/Kconfig.debug: 20 changes (8 additions, 12 deletions)

@@ -170,30 +170,26 @@ config IOMMU_LEAK
 
 config MMIOTRACE_HOOKS
 	bool
-	default n
 
 config MMIOTRACE
-	tristate "Memory mapped IO tracing"
+	bool "Memory mapped IO tracing"
 	depends on DEBUG_KERNEL && RELAY && DEBUG_FS
 	select MMIOTRACE_HOOKS
-	default n
+	default y
 	help
-	  This will build a kernel module called mmiotrace.
-	  Making this a built-in is heavily discouraged.
-
-	  Mmiotrace traces Memory Mapped I/O access and is meant for debugging
-	  and reverse engineering. The kernel module offers wrapped
-	  versions of the ioremap family of functions. The driver to be traced
-	  must be modified to call these wrappers. A user space program is
-	  required to collect the MMIO data.
+	  Mmiotrace traces Memory Mapped I/O access and is meant for
+	  debugging and reverse engineering. It is called from the ioremap
+	  implementation and works via page faults. A user space program is
+	  required to collect the MMIO data from debugfs files.
+	  Tracing is disabled by default and can be enabled from a debugfs
+	  file.
 
 	  See http://nouveau.freedesktop.org/wiki/MmioTrace
 	  If you are not helping to develop drivers, say N.
 
 config MMIOTRACE_TEST
 	tristate "Test module for mmiotrace"
 	depends on MMIOTRACE && m
-	default n
 	help
 	  This is a dumb module for testing mmiotrace. It is very dangerous
 	  as it will write garbage to IO memory starting at a given address.
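
Note: the option goes from tristate to bool because mmiotrace is now called from the ioremap implementation itself and can no longer be built as a module. The new help text says a user space program collects the trace from debugfs files. A minimal sketch of such a collector in plain C; the debugfs file path here is an assumption for illustration, not something this commit defines:

/*
 * Hypothetical mmiotrace collector: drain raw trace data from a
 * debugfs file to stdout. The path is illustrative only.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/sys/kernel/debug/mmiotrace/trace0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}
	/* Read until interrupted or the channel is torn down. */
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		if (write(STDOUT_FILENO, buf, n) != n)
			break;
	}
	close(fd);
	return EXIT_SUCCESS;
}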
trunk/arch/x86/kernel/mmiotrace/Makefile: 2 changes (1 addition, 1 deletion)

@@ -1,4 +1,4 @@
 obj-$(CONFIG_MMIOTRACE_HOOKS) += kmmio.o
 obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
-mmiotrace-objs := pf_in.o mmio-mod.o
+mmiotrace-y := pf_in.o mmio-mod.o
 obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
trunk/arch/x86/kernel/mmiotrace/kmmio.c: 72 changes (32 additions, 40 deletions)

@@ -19,6 +19,7 @@
 #include <linux/preempt.h>
 #include <linux/percpu.h>
 #include <linux/kdebug.h>
+#include <linux/mutex.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/errno.h>
@@ -59,7 +60,7 @@ struct kmmio_context {
 static int kmmio_die_notifier(struct notifier_block *nb, unsigned long val,
 								void *args);
 
-static DECLARE_MUTEX(kmmio_init_mutex);
+static DEFINE_MUTEX(kmmio_init_mutex);
 static DEFINE_SPINLOCK(kmmio_lock);
 
 /* These are protected by kmmio_lock */
@@ -90,7 +91,7 @@ static struct notifier_block nb_die = {
  */
 void reference_kmmio(void)
 {
-	down(&kmmio_init_mutex);
+	mutex_lock(&kmmio_init_mutex);
 	spin_lock_irq(&kmmio_lock);
 	if (!kmmio_initialized) {
 		int i;
@@ -101,7 +102,7 @@ void reference_kmmio(void)
 	}
 	kmmio_initialized++;
 	spin_unlock_irq(&kmmio_lock);
-	up(&kmmio_init_mutex);
+	mutex_unlock(&kmmio_init_mutex);
 }
 EXPORT_SYMBOL_GPL(reference_kmmio);

@@ -115,7 +116,7 @@ void unreference_kmmio(void)
 {
 	bool unreg = false;
 
-	down(&kmmio_init_mutex);
+	mutex_lock(&kmmio_init_mutex);
 	spin_lock_irq(&kmmio_lock);
 
 	if (kmmio_initialized == 1) {
@@ -128,7 +129,7 @@
 
 	if (unreg)
 		unregister_die_notifier(&nb_die); /* calls sync_rcu() */
-	up(&kmmio_init_mutex);
+	mutex_unlock(&kmmio_init_mutex);
 }
 EXPORT_SYMBOL(unreference_kmmio);
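
Note: DECLARE_MUTEX() actually declared a semaphore operated with down()/up(); since kmmio_init_mutex only serializes init and teardown, the dedicated mutex API is the better fit and gains mutex debugging coverage. A minimal sketch of the same reference-counted init pattern, with illustrative names:

#include <linux/kernel.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(init_mutex);
static int init_count;	/* number of active users, protected by init_mutex */

/* First caller performs the one-time setup; later callers just count. */
void subsys_get(void)
{
	mutex_lock(&init_mutex);
	if (init_count == 0)
		pr_info("subsys: first user, registering hooks\n");
	init_count++;
	mutex_unlock(&init_mutex);
}

/* Last caller tears the setup down again. */
void subsys_put(void)
{
	mutex_lock(&init_mutex);
	init_count--;
	if (init_count == 0)
		pr_info("subsys: last user gone, unregistering hooks\n");
	mutex_unlock(&init_mutex);
}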

@@ -244,17 +245,13 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	 * Preemption is now disabled to prevent process switch during
 	 * single stepping. We can only handle one active kmmio trace
 	 * per cpu, so ensure that we finish it before something else
-	 * gets to run.
-	 *
-	 * XXX what if an interrupt occurs between returning from
-	 * do_page_fault() and entering the single-step exception handler?
-	 * And that interrupt triggers a kmmio trap?
-	 * XXX If we tracing an interrupt service routine or whatever, is
-	 * this enough to keep it on the current cpu?
+	 * gets to run. We also hold the RCU read lock over single
+	 * stepping to avoid looking up the probe and kmmio_fault_page
+	 * again.
 	 */
 	preempt_disable();
-
 	rcu_read_lock();
 
 	faultpage = get_kmmio_fault_page(addr);
 	if (!faultpage) {
 		/*
@@ -287,14 +284,24 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 	if (ctx->probe && ctx->probe->pre_handler)
 		ctx->probe->pre_handler(ctx->probe, regs, addr);
 
+	/*
+	 * Enable single-stepping and disable interrupts for the faulting
+	 * context. Local interrupts must not get enabled during stepping.
+	 */
 	regs->flags |= TF_MASK;
 	regs->flags &= ~IF_MASK;
 
 	/* Now we set present bit in PTE and single step. */
 	disarm_kmmio_fault_page(ctx->fpage->page, NULL);
 
+	/*
+	 * If another cpu accesses the same page while we are stepping,
+	 * the access will not be caught. It will simply succeed and the
+	 * only downside is we lose the event. If this becomes a problem,
+	 * the user should drop to single cpu before tracing.
+	 */
+
 	put_cpu_var(kmmio_ctx);
-	rcu_read_unlock();
 	return 1;
 
 no_kmmio_ctx:
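
Note: the two new comments describe the fault/single-step round trip. Condensed into a sketch, where stub_arm()/stub_disarm() stand in for the real arm/disarm_kmmio_fault_page() PTE manipulation and TF_MASK/IF_MASK are the trap and interrupt flag masks used in the hunk above:

#include <asm/ptrace.h>
#include <asm/processor-flags.h>

/* Stand-ins for the PTE present-bit toggling done on kmmio fault pages. */
static void stub_arm(unsigned long addr)    { /* clear _PAGE_PRESENT, flush TLB */ }
static void stub_disarm(unsigned long addr) { /* set _PAGE_PRESENT, flush TLB */ }

/* Page fault side: let the faulting access run, but trap right after it. */
static void trace_fault(struct pt_regs *regs, unsigned long addr)
{
	regs->flags |= TF_MASK;		/* debug trap after one instruction */
	regs->flags &= ~IF_MASK;	/* keep irqs off while stepping */
	stub_disarm(addr);		/* make the page accessible again */
}

/* Debug trap side: re-hide the page and restore the saved flags. */
static void trace_post(struct pt_regs *regs, unsigned long addr,
		       unsigned long saved_flags)
{
	stub_arm(addr);			/* next access will fault again */
	regs->flags &= ~TF_MASK;
	regs->flags |= saved_flags;
}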
@@ -313,39 +320,23 @@ int kmmio_handler(struct pt_regs *regs, unsigned long addr)
 static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 {
 	int ret = 0;
-	struct kmmio_probe *probe;
-	struct kmmio_fault_page *faultpage;
 	struct kmmio_context *ctx = &get_cpu_var(kmmio_ctx);
 
 	if (!ctx->active)
 		goto out;
 
-	rcu_read_lock();
-
-	faultpage = get_kmmio_fault_page(ctx->addr);
-	probe = get_kmmio_probe(ctx->addr);
-	if (faultpage != ctx->fpage || probe != ctx->probe) {
-		/*
-		 * The trace setup changed after kmmio_handler() and before
-		 * running this respective post handler. User does not want
-		 * the result anymore.
-		 */
-		ctx->probe = NULL;
-		ctx->fpage = NULL;
-	}
-
 	if (ctx->probe && ctx->probe->post_handler)
 		ctx->probe->post_handler(ctx->probe, condition, regs);
 
-	if (ctx->fpage)
-		arm_kmmio_fault_page(ctx->fpage->page, NULL);
+	arm_kmmio_fault_page(ctx->fpage->page, NULL);
 
 	regs->flags &= ~TF_MASK;
 	regs->flags |= ctx->saved_flags;
 
 	/* These were acquired in kmmio_handler(). */
 	ctx->active--;
 	BUG_ON(ctx->active);
+	rcu_read_unlock();
 	preempt_enable_no_resched();
 
 	/*
@@ -355,8 +346,6 @@ static int post_kmmio_handler(unsigned long condition, struct pt_regs *regs)
 	 */
 	if (!(regs->flags & TF_MASK))
 		ret = 1;
-
-	rcu_read_unlock();
 out:
 	put_cpu_var(kmmio_ctx);
 	return ret;
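
Note: the re-lookup of probe and fault page disappears because the RCU read-side critical section now spans from the page fault to the debug trap, so the pointers found in kmmio_handler() cannot be freed in between. The lookup pattern itself, sketched with a hypothetical entry type:

#include <linux/list.h>
#include <linux/rcupdate.h>

struct trace_page {
	struct list_head list;
	unsigned long addr;		/* page-aligned address being traced */
};

static LIST_HEAD(trace_pages);		/* writers serialize with a spinlock */

/* Caller must hold rcu_read_lock(); the result is only valid inside it. */
static struct trace_page *find_trace_page(unsigned long addr)
{
	struct trace_page *t;

	list_for_each_entry_rcu(t, &trace_pages, list)
		if (t->addr == addr)
			return t;
	return NULL;
}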
@@ -411,23 +400,24 @@ static void release_kmmio_fault_page(unsigned long page,
 
 int register_kmmio_probe(struct kmmio_probe *p)
 {
+	unsigned long flags;
 	int ret = 0;
 	unsigned long size = 0;
 
-	spin_lock_irq(&kmmio_lock);
-	kmmio_count++;
+	spin_lock_irqsave(&kmmio_lock, flags);
 	if (get_kmmio_probe(p->addr)) {
 		ret = -EEXIST;
 		goto out;
 	}
+	kmmio_count++;
 	list_add_rcu(&p->list, &kmmio_probes);
 	while (size < p->len) {
 		if (add_kmmio_fault_page(p->addr + size))
 			pr_err("kmmio: Unable to set page fault.\n");
 		size += PAGE_SIZE;
 	}
 out:
-	spin_unlock_irq(&kmmio_lock);
+	spin_unlock_irqrestore(&kmmio_lock, flags);
 	/*
 	 * XXX: What should I do here?
 	 * Here was a call to global_flush_tlb(), but it does not exist
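
Note: the irqsave conversion matters because spin_unlock_irq() unconditionally re-enables interrupts, which is wrong if the caller already had them disabled; the irqsave pair restores exactly the state the caller entered with. The general pattern, as a sketch:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

/* Safe whether or not the caller runs with interrupts disabled. */
static void demo_update(void)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* saves current irq state */
	/* ... modify data protected by demo_lock ... */
	spin_unlock_irqrestore(&demo_lock, flags); /* restores it exactly */
}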
@@ -478,7 +468,8 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
 
 /*
  * Remove a kmmio probe. You have to synchronize_rcu() before you can be
- * sure that the callbacks will not be called anymore.
+ * sure that the callbacks will not be called anymore. Only after that
+ * you may actually release your struct kmmio_probe.
 *
 * Unregistering a kmmio fault page has three steps:
 * 1. release_kmmio_fault_page()
@@ -490,18 +481,19 @@ static void remove_kmmio_fault_pages(struct rcu_head *head)
  */
 void unregister_kmmio_probe(struct kmmio_probe *p)
 {
+	unsigned long flags;
 	unsigned long size = 0;
 	struct kmmio_fault_page *release_list = NULL;
 	struct kmmio_delayed_release *drelease;
 
-	spin_lock_irq(&kmmio_lock);
+	spin_lock_irqsave(&kmmio_lock, flags);
 	while (size < p->len) {
 		release_kmmio_fault_page(p->addr + size, &release_list);
 		size += PAGE_SIZE;
 	}
 	list_del_rcu(&p->list);
 	kmmio_count--;
-	spin_unlock_irq(&kmmio_lock);
+	spin_unlock_irqrestore(&kmmio_lock, flags);
 
 	drelease = kmalloc(sizeof(*drelease), GFP_ATOMIC);
 	if (!drelease) {
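
Note: a caller-side sketch of the contract documented above, where my_probe and stop_tracing() are hypothetical; the point is the synchronize_rcu() that must separate unregistration from freeing:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* my_probe is a hypothetical probe registered earlier. */
static struct kmmio_probe *my_probe;

static void stop_tracing(void)
{
	unregister_kmmio_probe(my_probe);
	synchronize_rcu();	/* all in-flight handlers have now finished */
	kfree(my_probe);	/* only now may the probe memory be released */
}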
(Diffs for the remaining four changed files are not shown.)
