lguest: get rid of lg variable assignments
We can save some lines of code by getting rid of the
"struct lguest *lg = cpu->lg;" assignments that are spread everywhere by now.

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Glauber de Oliveira Costa authored and Rusty Russell committed Jan 30, 2008
1 parent 934faab commit 382ac6b
Showing 7 changed files with 149 additions and 159 deletions.
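
The change below is purely mechanical: helpers that used to take a struct lguest * (or to shadow it with a local struct lguest *lg = cpu->lg;) now take the per-virtual-CPU struct lg_cpu * and reach the guest through cpu->lg directly. The following stand-alone sketch uses made-up, stripped-down types and a hypothetical report() helper — it is not code from the patch — but it shows the before/after shape of the refactoring:

#include <stdio.h>

/* Hypothetical, stripped-down stand-ins for the real lguest structures. */
struct lguest { const char *name; int dead; };
struct lg_cpu { struct lguest *lg; int id; };

/* Before: helpers take the guest, so callers keep a local "lg" around. */
static void report_old(struct lguest *lg, const char *msg)
{
        printf("guest %s: %s\n", lg->name, msg);
}

/* After: helpers take the vCPU and dereference cpu->lg where needed,
 * so the "struct lguest *lg = cpu->lg;" lines disappear from callers. */
static void report_new(struct lg_cpu *cpu, const char *msg)
{
        printf("guest %s (vcpu %d): %s\n", cpu->lg->name, cpu->id, msg);
}

int main(void)
{
        struct lguest g = { .name = "demo", .dead = 0 };
        struct lg_cpu cpu = { .lg = &g, .id = 0 };

        report_old(cpu.lg, "old calling convention");
        report_new(&cpu, "new calling convention");
        return 0;
}

In the patch itself the same shape repeats in __lgread(), __lgwrite(), do_hcall(), push_guest_stack(), write_timestamp() and friends: the struct lg_cpu * parameter replaces struct lguest *, and the local struct lguest *lg = cpu->lg; assignments are dropped.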
24 changes: 11 additions & 13 deletions drivers/lguest/core.c

@@ -151,23 +151,23 @@ int lguest_address_ok(const struct lguest *lg,
 /* This routine copies memory from the Guest. Here we can see how useful the
  * kill_lguest() routine we met in the Launcher can be: we return a random
  * value (all zeroes) instead of needing to return an error. */
-void __lgread(struct lguest *lg, void *b, unsigned long addr, unsigned bytes)
+void __lgread(struct lg_cpu *cpu, void *b, unsigned long addr, unsigned bytes)
 {
-        if (!lguest_address_ok(lg, addr, bytes)
-            || copy_from_user(b, lg->mem_base + addr, bytes) != 0) {
+        if (!lguest_address_ok(cpu->lg, addr, bytes)
+            || copy_from_user(b, cpu->lg->mem_base + addr, bytes) != 0) {
                 /* copy_from_user should do this, but as we rely on it... */
                 memset(b, 0, bytes);
-                kill_guest(lg, "bad read address %#lx len %u", addr, bytes);
+                kill_guest(cpu, "bad read address %#lx len %u", addr, bytes);
         }
 }

 /* This is the write (copy into guest) version. */
-void __lgwrite(struct lguest *lg, unsigned long addr, const void *b,
+void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b,
                unsigned bytes)
 {
-        if (!lguest_address_ok(lg, addr, bytes)
-            || copy_to_user(lg->mem_base + addr, b, bytes) != 0)
-                kill_guest(lg, "bad write address %#lx len %u", addr, bytes);
+        if (!lguest_address_ok(cpu->lg, addr, bytes)
+            || copy_to_user(cpu->lg->mem_base + addr, b, bytes) != 0)
+                kill_guest(cpu, "bad write address %#lx len %u", addr, bytes);
 }
 /*:*/

@@ -176,10 +176,8 @@ void __lgwrite(struct lguest *lg, unsigned long addr, const void *b,
  * going around and around until something interesting happens. */
 int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 {
-        struct lguest *lg = cpu->lg;
-
         /* We stop running once the Guest is dead. */
-        while (!lg->dead) {
+        while (!cpu->lg->dead) {
                 /* First we run any hypercalls the Guest wants done. */
                 if (cpu->hcall)
                         do_hypercalls(cpu);
@@ -212,7 +210,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)

                 /* Just make absolutely sure the Guest is still alive. One of
                  * those hypercalls could have been fatal, for example. */
-                if (lg->dead)
+                if (cpu->lg->dead)
                         break;

                 /* If the Guest asked to be stopped, we sleep. The Guest's
@@ -237,7 +235,7 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
                 lguest_arch_handle_trap(cpu);
         }

-        if (lg->dead == ERR_PTR(-ERESTART))
+        if (cpu->lg->dead == ERR_PTR(-ERESTART))
                 return -ERESTART;
         /* The Guest is dead => "No such file or directory" */
         return -ENOENT;
49 changes: 23 additions & 26 deletions drivers/lguest/hypercalls.c

@@ -31,8 +31,6 @@
  * Or gets killed. Or, in the case of LHCALL_CRASH, both. */
 static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
 {
-        struct lguest *lg = cpu->lg;
-
         switch (args->arg0) {
         case LHCALL_FLUSH_ASYNC:
                 /* This call does nothing, except by breaking out of the Guest
@@ -41,19 +39,19 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
         case LHCALL_LGUEST_INIT:
                 /* You can't get here unless you're already initialized. Don't
                  * do that. */
-                kill_guest(lg, "already have lguest_data");
+                kill_guest(cpu, "already have lguest_data");
                 break;
         case LHCALL_SHUTDOWN: {
                 /* Shutdown is such a trivial hypercall that we do it in four
                  * lines right here. */
                 char msg[128];
                 /* If the lgread fails, it will call kill_guest() itself; the
                  * kill_guest() with the message will be ignored. */
-                __lgread(lg, msg, args->arg1, sizeof(msg));
+                __lgread(cpu, msg, args->arg1, sizeof(msg));
                 msg[sizeof(msg)-1] = '\0';
-                kill_guest(lg, "CRASH: %s", msg);
+                kill_guest(cpu, "CRASH: %s", msg);
                 if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
-                        lg->dead = ERR_PTR(-ERESTART);
+                        cpu->lg->dead = ERR_PTR(-ERESTART);
                 break;
         }
         case LHCALL_FLUSH_TLB:
@@ -74,10 +72,10 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
                 guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
                 break;
         case LHCALL_SET_PTE:
-                guest_set_pte(lg, args->arg1, args->arg2, __pte(args->arg3));
+                guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
                 break;
         case LHCALL_SET_PMD:
-                guest_set_pmd(lg, args->arg1, args->arg2);
+                guest_set_pmd(cpu->lg, args->arg1, args->arg2);
                 break;
         case LHCALL_SET_CLOCKEVENT:
                 guest_set_clockevent(cpu, args->arg1);
@@ -96,7 +94,7 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
         default:
                 /* It should be an architecture-specific hypercall. */
                 if (lguest_arch_do_hcall(cpu, args))
-                        kill_guest(lg, "Bad hypercall %li\n", args->arg0);
+                        kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
         }
 }
 /*:*/
@@ -112,10 +110,9 @@ static void do_async_hcalls(struct lg_cpu *cpu)
 {
         unsigned int i;
         u8 st[LHCALL_RING_SIZE];
-        struct lguest *lg = cpu->lg;

         /* For simplicity, we copy the entire call status array in at once. */
-        if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
+        if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st)))
                 return;

         /* We process "struct lguest_data"s hcalls[] ring once. */
@@ -137,18 +134,18 @@ static void do_async_hcalls(struct lg_cpu *cpu)

                 /* Copy the hypercall arguments into a local copy of
                  * the hcall_args struct. */
-                if (copy_from_user(&args, &lg->lguest_data->hcalls[n],
+                if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
                                    sizeof(struct hcall_args))) {
-                        kill_guest(lg, "Fetching async hypercalls");
+                        kill_guest(cpu, "Fetching async hypercalls");
                         break;
                 }

                 /* Do the hypercall, same as a normal one. */
                 do_hcall(cpu, &args);

                 /* Mark the hypercall done. */
-                if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
-                        kill_guest(lg, "Writing result for async hypercall");
+                if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) {
+                        kill_guest(cpu, "Writing result for async hypercall");
                         break;
                 }

@@ -163,29 +160,28 @@ static void do_async_hcalls(struct lg_cpu *cpu)
  * Guest makes a hypercall, we end up here to set things up: */
 static void initialize(struct lg_cpu *cpu)
 {
-        struct lguest *lg = cpu->lg;
         /* You can't do anything until you're initialized. The Guest knows the
          * rules, so we're unforgiving here. */
         if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
-                kill_guest(lg, "hypercall %li before INIT", cpu->hcall->arg0);
+                kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
                 return;
         }

         if (lguest_arch_init_hypercalls(cpu))
-                kill_guest(lg, "bad guest page %p", lg->lguest_data);
+                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

         /* The Guest tells us where we're not to deliver interrupts by putting
          * the range of addresses into "struct lguest_data". */
-        if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
-            || get_user(lg->noirq_end, &lg->lguest_data->noirq_end))
-                kill_guest(lg, "bad guest page %p", lg->lguest_data);
+        if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
+            || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
+                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

         /* We write the current time into the Guest's data page once so it can
          * set its clock. */
-        write_timestamp(lg);
+        write_timestamp(cpu);

         /* page_tables.c will also do some setup. */
-        page_table_guest_data_init(lg);
+        page_table_guest_data_init(cpu);

         /* This is the one case where the above accesses might have been the
          * first write to a Guest page. This may have caused a copy-on-write
@@ -237,10 +233,11 @@ void do_hypercalls(struct lg_cpu *cpu)

 /* This routine supplies the Guest with time: it's used for wallclock time at
  * initial boot and as a rough time source if the TSC isn't available. */
-void write_timestamp(struct lguest *lg)
+void write_timestamp(struct lg_cpu *cpu)
 {
         struct timespec now;
         ktime_get_real_ts(&now);
-        if (copy_to_user(&lg->lguest_data->time, &now, sizeof(struct timespec)))
-                kill_guest(lg, "Writing timestamp");
+        if (copy_to_user(&cpu->lg->lguest_data->time,
+                         &now, sizeof(struct timespec)))
+                kill_guest(cpu, "Writing timestamp");
 }
54 changes: 26 additions & 28 deletions drivers/lguest/interrupts_and_traps.c

@@ -41,11 +41,11 @@ static int idt_present(u32 lo, u32 hi)

 /* We need a helper to "push" a value onto the Guest's stack, since that's a
  * big part of what delivering an interrupt does. */
-static void push_guest_stack(struct lguest *lg, unsigned long *gstack, u32 val)
+static void push_guest_stack(struct lg_cpu *cpu, unsigned long *gstack, u32 val)
 {
         /* Stack grows upwards: move stack then write value. */
         *gstack -= 4;
-        lgwrite(lg, *gstack, u32, val);
+        lgwrite(cpu, *gstack, u32, val);
 }

 /*H:210 The set_guest_interrupt() routine actually delivers the interrupt or
@@ -65,7 +65,6 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
         unsigned long gstack, origstack;
         u32 eflags, ss, irq_enable;
         unsigned long virtstack;
-        struct lguest *lg = cpu->lg;

         /* There are two cases for interrupts: one where the Guest is already
          * in the kernel, and a more complex one where the Guest is in
@@ -81,8 +80,8 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
                  * stack: when the Guest does an "iret" back from the interrupt
                  * handler the CPU will notice they're dropping privilege
                  * levels and expect these here. */
-                push_guest_stack(lg, &gstack, cpu->regs->ss);
-                push_guest_stack(lg, &gstack, cpu->regs->esp);
+                push_guest_stack(cpu, &gstack, cpu->regs->ss);
+                push_guest_stack(cpu, &gstack, cpu->regs->esp);
         } else {
                 /* We're staying on the same Guest (kernel) stack. */
                 virtstack = cpu->regs->esp;
@@ -96,20 +95,20 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
          * Guest's "irq_enabled" field into the eflags word: we saw the Guest
          * copy it back in "lguest_iret". */
         eflags = cpu->regs->eflags;
-        if (get_user(irq_enable, &lg->lguest_data->irq_enabled) == 0
+        if (get_user(irq_enable, &cpu->lg->lguest_data->irq_enabled) == 0
             && !(irq_enable & X86_EFLAGS_IF))
                 eflags &= ~X86_EFLAGS_IF;

         /* An interrupt is expected to push three things on the stack: the old
          * "eflags" word, the old code segment, and the old instruction
          * pointer. */
-        push_guest_stack(lg, &gstack, eflags);
-        push_guest_stack(lg, &gstack, cpu->regs->cs);
-        push_guest_stack(lg, &gstack, cpu->regs->eip);
+        push_guest_stack(cpu, &gstack, eflags);
+        push_guest_stack(cpu, &gstack, cpu->regs->cs);
+        push_guest_stack(cpu, &gstack, cpu->regs->eip);

         /* For the six traps which supply an error code, we push that, too. */
         if (has_err)
-                push_guest_stack(lg, &gstack, cpu->regs->errcode);
+                push_guest_stack(cpu, &gstack, cpu->regs->errcode);

         /* Now we've pushed all the old state, we change the stack, the code
          * segment and the address to execute. */
@@ -121,8 +120,8 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
         /* There are two kinds of interrupt handlers: 0xE is an "interrupt
          * gate" which expects interrupts to be disabled on entry. */
         if (idt_type(lo, hi) == 0xE)
-                if (put_user(0, &lg->lguest_data->irq_enabled))
-                        kill_guest(lg, "Disabling interrupts");
+                if (put_user(0, &cpu->lg->lguest_data->irq_enabled))
+                        kill_guest(cpu, "Disabling interrupts");
 }

 /*H:205
@@ -133,17 +132,16 @@ static void set_guest_interrupt(struct lg_cpu *cpu, u32 lo, u32 hi, int has_err)
 void maybe_do_interrupt(struct lg_cpu *cpu)
 {
         unsigned int irq;
-        struct lguest *lg = cpu->lg;
         DECLARE_BITMAP(blk, LGUEST_IRQS);
         struct desc_struct *idt;

         /* If the Guest hasn't even initialized yet, we can do nothing. */
-        if (!lg->lguest_data)
+        if (!cpu->lg->lguest_data)
                 return;

         /* Take our "irqs_pending" array and remove any interrupts the Guest
          * wants blocked: the result ends up in "blk". */
-        if (copy_from_user(&blk, lg->lguest_data->blocked_interrupts,
+        if (copy_from_user(&blk, cpu->lg->lguest_data->blocked_interrupts,
                            sizeof(blk)))
                 return;

@@ -157,19 +155,20 @@ void maybe_do_interrupt(struct lg_cpu *cpu)

         /* They may be in the middle of an iret, where they asked us never to
          * deliver interrupts. */
-        if (cpu->regs->eip >= lg->noirq_start && cpu->regs->eip < lg->noirq_end)
+        if (cpu->regs->eip >= cpu->lg->noirq_start &&
+            (cpu->regs->eip < cpu->lg->noirq_end))
                 return;

         /* If they're halted, interrupts restart them. */
         if (cpu->halted) {
                 /* Re-enable interrupts. */
-                if (put_user(X86_EFLAGS_IF, &lg->lguest_data->irq_enabled))
-                        kill_guest(lg, "Re-enabling interrupts");
+                if (put_user(X86_EFLAGS_IF, &cpu->lg->lguest_data->irq_enabled))
+                        kill_guest(cpu, "Re-enabling interrupts");
                 cpu->halted = 0;
         } else {
                 /* Otherwise we check if they have interrupts disabled. */
                 u32 irq_enabled;
-                if (get_user(irq_enabled, &lg->lguest_data->irq_enabled))
+                if (get_user(irq_enabled, &cpu->lg->lguest_data->irq_enabled))
                         irq_enabled = 0;
                 if (!irq_enabled)
                         return;
@@ -194,7 +193,7 @@ void maybe_do_interrupt(struct lg_cpu *cpu)
          * did this more often, but it can actually be quite slow: doing it
          * here is a compromise which means at least it gets updated every
          * timer interrupt. */
-        write_timestamp(lg);
+        write_timestamp(cpu);
 }
 /*:*/

@@ -315,10 +314,9 @@ void pin_stack_pages(struct lg_cpu *cpu)
 {
         unsigned int i;

-        struct lguest *lg = cpu->lg;
         /* Depending on the CONFIG_4KSTACKS option, the Guest can have one or
          * two pages of stack space. */
-        for (i = 0; i < lg->stack_pages; i++)
+        for (i = 0; i < cpu->lg->stack_pages; i++)
                 /* The stack grows *upwards*, so the address we're given is the
                  * start of the page after the kernel stack. Subtract one to
                  * get back onto the first stack page, and keep subtracting to
@@ -339,10 +337,10 @@ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)
         /* You are not allowed have a stack segment with privilege level 0: bad
          * Guest! */
         if ((seg & 0x3) != GUEST_PL)
-                kill_guest(cpu->lg, "bad stack segment %i", seg);
+                kill_guest(cpu, "bad stack segment %i", seg);
         /* We only expect one or two stack pages. */
         if (pages > 2)
-                kill_guest(cpu->lg, "bad stack pages %u", pages);
+                kill_guest(cpu, "bad stack pages %u", pages);
         /* Save where the stack is, and how many pages */
         cpu->ss1 = seg;
         cpu->esp1 = esp;
@@ -356,7 +354,7 @@ void guest_set_stack(struct lg_cpu *cpu, u32 seg, u32 esp, unsigned int pages)

 /*H:235 This is the routine which actually checks the Guest's IDT entry and
  * transfers it into the entry in "struct lguest": */
-static void set_trap(struct lguest *lg, struct desc_struct *trap,
+static void set_trap(struct lg_cpu *cpu, struct desc_struct *trap,
                      unsigned int num, u32 lo, u32 hi)
 {
         u8 type = idt_type(lo, hi);
@@ -369,7 +367,7 @@ static void set_trap(struct lguest *lg, struct desc_struct *trap,

         /* We only support interrupt and trap gates. */
         if (type != 0xE && type != 0xF)
-                kill_guest(lg, "bad IDT type %i", type);
+                kill_guest(cpu, "bad IDT type %i", type);

         /* We only copy the handler address, present bit, privilege level and
          * type. The privilege level controls where the trap can be triggered
@@ -399,9 +397,9 @@ void load_guest_idt_entry(struct lg_cpu *cpu, unsigned int num, u32 lo, u32 hi)

         /* Check that the Guest doesn't try to step outside the bounds. */
         if (num >= ARRAY_SIZE(cpu->arch.idt))
-                kill_guest(cpu->lg, "Setting idt entry %u", num);
+                kill_guest(cpu, "Setting idt entry %u", num);
         else
-                set_trap(cpu->lg, &cpu->arch.idt[num], num, lo, hi);
+                set_trap(cpu, &cpu->arch.idt[num], num, lo, hi);
 }

 /* The default entry for each interrupt points into the Switcher routines which
(Diffs for the remaining four changed files are not shown here.)
