Commit 27da283

---
r: 297100
b: refs/heads/master
c: 06ce2c6
h: refs/heads/master
v: v3

Paul Mackerras authored and Avi Kivity committed Mar 5, 2012
1 parent e0bb883 commit 27da283
Showing 4 changed files with 118 additions and 3 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 9d0ef5ea043d1242897d15c71bd1a15da79b4a5d
+refs/heads/master: 06ce2c63d933e347f8a199f123a8a293619ab3d2
18 changes: 18 additions & 0 deletions trunk/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -113,6 +113,11 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
         return 0;                /* error */
 }
 
+static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
+{
+        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
+}
+
 static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
 {
         unsigned int wimg = ptel & HPTE_R_WIMG;
@@ -139,6 +144,19 @@ static inline unsigned long hpte_cache_bits(unsigned long pte_val)
 #endif
 }
 
+static inline void lock_rmap(unsigned long *rmap)
+{
+        do {
+                while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
+                        cpu_relax();
+        } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
+}
+
+static inline void unlock_rmap(unsigned long *rmap)
+{
+        __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
+}
+
 static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
                                    unsigned long pagesize)
 {
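The lock_rmap()/unlock_rmap() pair added above is a classic bit spinlock: spin with a cheap read-only test until the lock bit looks clear, then attempt the atomic test-and-set, retrying if another CPU wins the race; the unlock is a single release-ordered clear. Below is a minimal userspace sketch of the same pattern using C11 atomics in place of the kernel's bitops; the names bit_spin_lock/bit_spin_unlock are invented for illustration and are not the kernel API.

#include <stdatomic.h>

#define RMAP_LOCK_BIT 63

/* Spin until the bit looks clear (like test_bit() + cpu_relax()),
 * then try to take it atomically (like test_and_set_bit_lock()). */
static void bit_spin_lock(_Atomic unsigned long *word)
{
        const unsigned long mask = 1ul << RMAP_LOCK_BIT;

        for (;;) {
                /* cheap read-only spin while the lock looks held */
                while (atomic_load_explicit(word, memory_order_relaxed) & mask)
                        ;
                /* attempt the acquire; the old value tells us who won */
                if (!(atomic_fetch_or_explicit(word, mask,
                                               memory_order_acquire) & mask))
                        return;
        }
}

/* Clear the bit with release ordering, like __clear_bit_unlock(). */
static void bit_spin_unlock(_Atomic unsigned long *word)
{
        atomic_fetch_and_explicit(word, ~(1ul << RMAP_LOCK_BIT),
                                  memory_order_release);
}

The read-only inner loop matters on real hardware: spinning on an atomic read-modify-write would bounce the cache line between CPUs, whereas a plain load lets waiters share it until the lock is actually released.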
17 changes: 16 additions & 1 deletion trunk/arch/powerpc/include/asm/kvm_host.h
@@ -170,12 +170,27 @@ struct kvmppc_rma_info {
 /*
  * The reverse mapping array has one entry for each HPTE,
  * which stores the guest's view of the second word of the HPTE
- * (including the guest physical address of the mapping).
+ * (including the guest physical address of the mapping),
+ * plus forward and backward pointers in a doubly-linked ring
+ * of HPTEs that map the same host page. The pointers in this
+ * ring are 32-bit HPTE indexes, to save space.
  */
 struct revmap_entry {
         unsigned long guest_rpte;
+        unsigned int forw, back;
 };
 
+/*
+ * We use the top bit of each memslot->rmap entry as a lock bit,
+ * and bit 32 as a present flag. The bottom 32 bits are the
+ * index in the guest HPT of a HPTE that points to the page.
+ */
+#define KVMPPC_RMAP_LOCK_BIT    63
+#define KVMPPC_RMAP_REF_BIT     33
+#define KVMPPC_RMAP_REFERENCED  (1ul << KVMPPC_RMAP_REF_BIT)
+#define KVMPPC_RMAP_PRESENT     0x100000000ul
+#define KVMPPC_RMAP_INDEX       0xfffffffful
+
 /* Low-order bits in kvm->arch.slot_phys[][] */
 #define KVMPPC_PAGE_ORDER_MASK  0x1f
 #define KVMPPC_PAGE_NO_CACHE    HPTE_R_I        /* 0x20 */
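To make the rmap bit layout above concrete: bit 63 is the lock bit, bit 33 the referenced bit, bit 32 the present flag, and the low 32 bits hold the guest HPT index of one HPTE in the chain. A standalone sketch of packing and unpacking such a word follows; the main() harness and the example index 0x1234 are illustrative only, and it assumes unsigned long is 64 bits, as on ppc64.

#include <assert.h>
#include <stdio.h>

#define KVMPPC_RMAP_REF_BIT     33
#define KVMPPC_RMAP_REFERENCED  (1ul << KVMPPC_RMAP_REF_BIT)
#define KVMPPC_RMAP_PRESENT     0x100000000ul
#define KVMPPC_RMAP_INDEX       0xfffffffful

int main(void)
{
        /* pack: present + referenced, chain head at HPTE index 0x1234 */
        unsigned long rmap = 0x1234ul | KVMPPC_RMAP_PRESENT |
                             KVMPPC_RMAP_REFERENCED;

        /* bit 32 (0x1_00000000) + bit 33 (0x2_00000000) + index */
        assert(rmap == 0x0000000300001234ul);

        /* unpack the head index, as the chain code does */
        printf("head index = 0x%lx\n", rmap & KVMPPC_RMAP_INDEX);
        return 0;
}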
84 changes: 83 additions & 1 deletion trunk/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -54,6 +54,70 @@ static void *real_vmalloc_addr(void *x)
         return __va(addr);
 }
 
+/*
+ * Add this HPTE into the chain for the real page.
+ * Must be called with the chain locked; it unlocks the chain.
+ */
+static void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
+                                unsigned long *rmap, long pte_index, int realmode)
+{
+        struct revmap_entry *head, *tail;
+        unsigned long i;
+
+        if (*rmap & KVMPPC_RMAP_PRESENT) {
+                i = *rmap & KVMPPC_RMAP_INDEX;
+                head = &kvm->arch.revmap[i];
+                if (realmode)
+                        head = real_vmalloc_addr(head);
+                tail = &kvm->arch.revmap[head->back];
+                if (realmode)
+                        tail = real_vmalloc_addr(tail);
+                rev->forw = i;
+                rev->back = head->back;
+                tail->forw = pte_index;
+                head->back = pte_index;
+        } else {
+                rev->forw = rev->back = pte_index;
+                i = pte_index;
+        }
+        smp_wmb();
+        *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
+}
+
+/* Remove this HPTE from the chain for a real page */
+static void remove_revmap_chain(struct kvm *kvm, long pte_index,
+                                unsigned long hpte_v)
+{
+        struct revmap_entry *rev, *next, *prev;
+        unsigned long gfn, ptel, head;
+        struct kvm_memory_slot *memslot;
+        unsigned long *rmap;
+
+        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+        ptel = rev->guest_rpte;
+        gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
+        memslot = builtin_gfn_to_memslot(kvm, gfn);
+        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+                return;
+
+        rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
+        lock_rmap(rmap);
+
+        head = *rmap & KVMPPC_RMAP_INDEX;
+        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
+        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
+        next->back = rev->back;
+        prev->forw = rev->forw;
+        if (head == pte_index) {
+                head = rev->forw;
+                if (head == pte_index)
+                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
+                else
+                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
+        }
+        unlock_rmap(rmap);
+}
+
 long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                     long pte_index, unsigned long pteh, unsigned long ptel)
 {
@@ -66,6 +130,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
         struct kvm_memory_slot *memslot;
         unsigned long *physp, pte_size;
         unsigned long is_io;
+        unsigned long *rmap;
         bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
 
         psize = hpte_page_size(pteh, ptel);
@@ -83,6 +148,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
         if (!slot_is_aligned(memslot, psize))
                 return H_PARAMETER;
         slot_fn = gfn - memslot->base_gfn;
+        rmap = &memslot->rmap[slot_fn];
 
         physp = kvm->arch.slot_phys[memslot->id];
         if (!physp)
@@ -164,13 +230,25 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
         }
 
         /* Save away the guest's idea of the second HPTE dword */
-        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
+        rev = &kvm->arch.revmap[pte_index];
+        if (realmode)
+                rev = real_vmalloc_addr(rev);
         if (rev)
                 rev->guest_rpte = g_ptel;
+
+        /* Link HPTE into reverse-map chain */
+        if (realmode)
+                rmap = real_vmalloc_addr(rmap);
+        lock_rmap(rmap);
+        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, realmode);
+
         hpte[1] = ptel;
+
+        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
         eieio();
         hpte[0] = pteh;
         asm volatile("ptesync" : : : "memory");
+
         vcpu->arch.gpr[4] = pte_index;
         return H_SUCCESS;
 }
@@ -220,6 +298,8 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
         vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
         vcpu->arch.gpr[5] = r = hpte[1];
         rb = compute_tlbie_rb(v, r, pte_index);
+        remove_revmap_chain(kvm, pte_index, v);
+        smp_wmb();
         hpte[0] = 0;
         if (!(flags & H_LOCAL)) {
                 while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
@@ -293,6 +373,8 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
                 flags |= (hp[1] >> 5) & 0x0c;
                 args[i * 2] = ((0x80 | flags) << 56) + pte_index;
                 tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
+                remove_revmap_chain(kvm, pte_index, hp[0]);
+                smp_wmb();
                 hp[0] = 0;
         }
         if (n_inval == 0)
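The two chain functions added in this file maintain a circular doubly-linked list threaded through the revmap array by 32-bit index rather than by pointer, which halves the footprint of the links. Stripped of the locking, barriers, and real-mode address translation, the pointer manipulation reduces to the standalone sketch below; ring_insert/ring_remove are invented names, not the kernel code.

#include <stdio.h>

struct ring_entry {
        unsigned int forw, back;        /* 32-bit indexes, as in revmap_entry */
};

static struct ring_entry ring[16];

/* Insert entry i at the tail of the ring headed by `head`, as the
 * KVMPPC_RMAP_PRESENT branch of kvmppc_add_revmap_chain() does. */
static unsigned int ring_insert(unsigned int head, unsigned int i)
{
        unsigned int tail = ring[head].back;

        ring[i].forw = head;
        ring[i].back = tail;
        ring[tail].forw = i;
        ring[head].back = i;
        return head;                    /* head is unchanged */
}

/* Unlink entry i and return the new head, as remove_revmap_chain()
 * does; the returned head equals i when the ring becomes empty. */
static unsigned int ring_remove(unsigned int head, unsigned int i)
{
        ring[ring[i].back].forw = ring[i].forw;
        ring[ring[i].forw].back = ring[i].back;
        if (head == i)
                head = ring[i].forw;
        return head;
}

int main(void)
{
        unsigned int head;

        ring[3].forw = ring[3].back = 3;        /* singleton ring {3} */
        head = ring_insert(3, 7);               /* ring: 3 <-> 7 */
        head = ring_insert(head, 9);            /* ring: 3 <-> 7 <-> 9 */
        head = ring_remove(head, 3);            /* head moves to 7 */
        printf("new head = %u\n", head);        /* prints 7 */
        return 0;
}

In the kernel version, the head index lives in the low 32 bits of the locked rmap word, so updating the head on removal is the masked update seen in remove_revmap_chain(), and the final store that rewrites the word also drops the lock bit.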
