powerpc: convert various functions to use ptdescs
In order to split struct ptdesc from struct page, convert various
functions to use ptdescs.
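
For orientation, a minimal sketch of the conversion pattern applied below, using only the ptdesc helpers that appear in the hunks (pagetable_alloc(), pagetable_pmd_ctor(), pagetable_pmd_dtor(), pagetable_free(), virt_to_ptdesc(), ptdesc_address()); the wrapper functions alloc_pmd_table()/free_pmd_table() are hypothetical stand-ins for illustration, not code from this patch:

/*
 * Illustrative sketch only: the struct page -> struct ptdesc conversion
 * pattern for a PMD page table. Wrapper names are hypothetical.
 */
static pmd_t *alloc_pmd_table(gfp_t gfp)
{
	struct ptdesc *ptdesc;				/* was: struct page *page */

	ptdesc = pagetable_alloc(gfp, 0);		/* was: alloc_page(gfp) */
	if (!ptdesc)
		return NULL;
	if (!pagetable_pmd_ctor(ptdesc)) {		/* was: pgtable_pmd_page_ctor(page) */
		pagetable_free(ptdesc);			/* was: __free_pages(page, 0) */
		return NULL;
	}
	return (pmd_t *)ptdesc_address(ptdesc);		/* was: page_address(page) */
}

static void free_pmd_table(pmd_t *pmd)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);	/* was: virt_to_page(pmd) */

	pagetable_pmd_dtor(ptdesc);			/* was: pgtable_pmd_page_dtor(page) */
	pagetable_free(ptdesc);				/* was: __free_page(page) */
}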

Link: https://lkml.kernel.org/r/20230807230513.102486-13-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Claudio Imbrenda <imbrenda@linux.ibm.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Geert Uytterhoeven <geert+renesas@glider.be>
Cc: Guo Ren <guoren@kernel.org>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Palmer Dabbelt <palmer@rivosinc.com>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Vishal Moola (Oracle) authored and Andrew Morton committed Aug 21, 2023
1 parent 7e11dca commit 4eaca96
Showing 3 changed files with 50 additions and 50 deletions.
10 changes: 5 additions & 5 deletions arch/powerpc/mm/book3s64/mmu_context.c
@@ -246,15 +246,15 @@ static void destroy_contexts(mm_context_t *ctx)
 static void pmd_frag_destroy(void *pmd_frag)
 {
 	int count;
-	struct page *page;
+	struct ptdesc *ptdesc;
 
-	page = virt_to_page(pmd_frag);
+	ptdesc = virt_to_ptdesc(pmd_frag);
 	/* drop all the pending references */
 	count = ((unsigned long)pmd_frag & ~PAGE_MASK) >> PMD_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (atomic_sub_and_test(PMD_FRAG_NR - count, &page->pt_frag_refcount)) {
-		pgtable_pmd_page_dtor(page);
-		__free_page(page);
+	if (atomic_sub_and_test(PMD_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
+		pagetable_pmd_dtor(ptdesc);
+		pagetable_free(ptdesc);
 	}
 }
 
32 changes: 16 additions & 16 deletions arch/powerpc/mm/book3s64/pgtable.c
@@ -384,22 +384,22 @@ static pmd_t *get_pmd_from_cache(struct mm_struct *mm)
 static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 {
 	void *ret = NULL;
-	struct page *page;
+	struct ptdesc *ptdesc;
 	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;
 
 	if (mm == &init_mm)
 		gfp &= ~__GFP_ACCOUNT;
-	page = alloc_page(gfp);
-	if (!page)
+	ptdesc = pagetable_alloc(gfp, 0);
+	if (!ptdesc)
 		return NULL;
-	if (!pgtable_pmd_page_ctor(page)) {
-		__free_pages(page, 0);
+	if (!pagetable_pmd_ctor(ptdesc)) {
+		pagetable_free(ptdesc);
 		return NULL;
 	}
 
-	atomic_set(&page->pt_frag_refcount, 1);
+	atomic_set(&ptdesc->pt_frag_refcount, 1);
 
-	ret = page_address(page);
+	ret = ptdesc_address(ptdesc);
 	/*
 	 * if we support only one fragment just return the
 	 * allocated page.
@@ -409,12 +409,12 @@ static pmd_t *__alloc_for_pmdcache(struct mm_struct *mm)
 
 	spin_lock(&mm->page_table_lock);
 	/*
-	 * If we find pgtable_page set, we return
+	 * If we find ptdesc_page set, we return
 	 * the allocated page with single fragment
 	 * count.
 	 */
 	if (likely(!mm->context.pmd_frag)) {
-		atomic_set(&page->pt_frag_refcount, PMD_FRAG_NR);
+		atomic_set(&ptdesc->pt_frag_refcount, PMD_FRAG_NR);
 		mm->context.pmd_frag = ret + PMD_FRAG_SIZE;
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -435,15 +435,15 @@ pmd_t *pmd_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr)
 
 void pmd_fragment_free(unsigned long *pmd)
 {
-	struct page *page = virt_to_page(pmd);
+	struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
 
-	if (PageReserved(page))
-		return free_reserved_page(page);
+	if (pagetable_is_reserved(ptdesc))
+		return free_reserved_ptdesc(ptdesc);
 
-	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
-		pgtable_pmd_page_dtor(page);
-		__free_page(page);
+	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
+		pagetable_pmd_dtor(ptdesc);
+		pagetable_free(ptdesc);
 	}
 }
 
58 changes: 29 additions & 29 deletions arch/powerpc/mm/pgtable-frag.c
@@ -18,15 +18,15 @@
 void pte_frag_destroy(void *pte_frag)
 {
 	int count;
-	struct page *page;
+	struct ptdesc *ptdesc;
 
-	page = virt_to_page(pte_frag);
+	ptdesc = virt_to_ptdesc(pte_frag);
 	/* drop all the pending references */
 	count = ((unsigned long)pte_frag & ~PAGE_MASK) >> PTE_FRAG_SIZE_SHIFT;
 	/* We allow PTE_FRAG_NR fragments from a PTE page */
-	if (atomic_sub_and_test(PTE_FRAG_NR - count, &page->pt_frag_refcount)) {
-		pgtable_pte_page_dtor(page);
-		__free_page(page);
+	if (atomic_sub_and_test(PTE_FRAG_NR - count, &ptdesc->pt_frag_refcount)) {
+		pagetable_pte_dtor(ptdesc);
+		pagetable_free(ptdesc);
 	}
 }
 
@@ -55,25 +55,25 @@ static pte_t *get_pte_from_cache(struct mm_struct *mm)
 static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 {
 	void *ret = NULL;
-	struct page *page;
+	struct ptdesc *ptdesc;
 
 	if (!kernel) {
-		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
-		if (!page)
+		ptdesc = pagetable_alloc(PGALLOC_GFP | __GFP_ACCOUNT, 0);
+		if (!ptdesc)
 			return NULL;
-		if (!pgtable_pte_page_ctor(page)) {
-			__free_page(page);
+		if (!pagetable_pte_ctor(ptdesc)) {
+			pagetable_free(ptdesc);
 			return NULL;
 		}
 	} else {
-		page = alloc_page(PGALLOC_GFP);
-		if (!page)
+		ptdesc = pagetable_alloc(PGALLOC_GFP, 0);
+		if (!ptdesc)
 			return NULL;
 	}
 
-	atomic_set(&page->pt_frag_refcount, 1);
+	atomic_set(&ptdesc->pt_frag_refcount, 1);
 
-	ret = page_address(page);
+	ret = ptdesc_address(ptdesc);
 	/*
 	 * if we support only one fragment just return the
 	 * allocated page.
@@ -82,12 +82,12 @@ static pte_t *__alloc_for_ptecache(struct mm_struct *mm, int kernel)
 		return ret;
 	spin_lock(&mm->page_table_lock);
 	/*
-	 * If we find pgtable_page set, we return
+	 * If we find ptdesc_page set, we return
 	 * the allocated page with single fragment
 	 * count.
 	 */
 	if (likely(!pte_frag_get(&mm->context))) {
-		atomic_set(&page->pt_frag_refcount, PTE_FRAG_NR);
+		atomic_set(&ptdesc->pt_frag_refcount, PTE_FRAG_NR);
 		pte_frag_set(&mm->context, ret + PTE_FRAG_SIZE);
 	}
 	spin_unlock(&mm->page_table_lock);
@@ -108,28 +108,28 @@ pte_t *pte_fragment_alloc(struct mm_struct *mm, int kernel)
 
 static void pte_free_now(struct rcu_head *head)
 {
-	struct page *page;
+	struct ptdesc *ptdesc;
 
-	page = container_of(head, struct page, rcu_head);
-	pgtable_pte_page_dtor(page);
-	__free_page(page);
+	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
+	pagetable_pte_dtor(ptdesc);
+	pagetable_free(ptdesc);
 }
 
 void pte_fragment_free(unsigned long *table, int kernel)
 {
-	struct page *page = virt_to_page(table);
+	struct ptdesc *ptdesc = virt_to_ptdesc(table);
 
-	if (PageReserved(page))
-		return free_reserved_page(page);
+	if (pagetable_is_reserved(ptdesc))
+		return free_reserved_ptdesc(ptdesc);
 
-	BUG_ON(atomic_read(&page->pt_frag_refcount) <= 0);
-	if (atomic_dec_and_test(&page->pt_frag_refcount)) {
+	BUG_ON(atomic_read(&ptdesc->pt_frag_refcount) <= 0);
+	if (atomic_dec_and_test(&ptdesc->pt_frag_refcount)) {
 		if (kernel)
-			__free_page(page);
-		else if (TestClearPageActive(page))
-			call_rcu(&page->rcu_head, pte_free_now);
+			pagetable_free(ptdesc);
+		else if (folio_test_clear_active(ptdesc_folio(ptdesc)))
+			call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
 		else
-			pte_free_now(&page->rcu_head);
+			pte_free_now(&ptdesc->pt_rcu_head);
 	}
 }
 
