
Merge branch 'core' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu into slab-struct_slab-part2-v1

Merge iommu tree for a series that removes usage of struct page
'freelist' field.
Vlastimil Babka committed Jan 6, 2022
2 parents b01af5c + aade40b commit 9cc960a
Showing 10 changed files with 314 additions and 469 deletions.
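The series being merged replaces the driver-private practice of chaining to-be-freed page-table pages through the struct page 'freelist' pointer with an ordinary list_head threaded through page->lru, released in one go with put_pages_list(). A minimal before/after sketch, condensed from the diff below (the helper names here are illustrative and are not part of the series itself):

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Old pattern: pages to be freed were strung together via page->freelist
 * and the chain was walked by hand once it was safe to free them. */
static void old_free_page_chain(struct page *freelist)
{
        while (freelist != NULL) {
                unsigned long p = (unsigned long)page_address(freelist);

                freelist = freelist->freelist;  /* next page in the chain */
                free_page(p);
        }
}

/* New pattern: gather pages on a plain list_head through page->lru ... */
static void new_queue_page(u64 *pt, struct list_head *freelist)
{
        struct page *p = virt_to_page(pt);

        list_add_tail(&p->lru, freelist);
}

/* ... and release the whole batch once it is safe to do so. */
static void new_free_all(struct list_head *freelist)
{
        put_pages_list(freelist);       /* drops a reference on each page */
}

The freeing is still deferred: as in the old code, pages are only handed back after the relevant IOTLB flushes; the list head simply replaces the hand-rolled chain.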
110 changes: 41 additions & 69 deletions drivers/iommu/amd/io_pgtable.c
@@ -74,87 +74,61 @@ static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
  *
  ****************************************************************************/
 
-static void free_page_list(struct page *freelist)
+static void free_pt_page(u64 *pt, struct list_head *freelist)
 {
-        while (freelist != NULL) {
-                unsigned long p = (unsigned long)page_address(freelist);
+        struct page *p = virt_to_page(pt);
 
-                freelist = freelist->freelist;
-                free_page(p);
-        }
+        list_add_tail(&p->lru, freelist);
 }
 
-static struct page *free_pt_page(unsigned long pt, struct page *freelist)
+static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
 {
-        struct page *p = virt_to_page((void *)pt);
+        u64 *p;
+        int i;
 
-        p->freelist = freelist;
+        for (i = 0; i < 512; ++i) {
+                /* PTE present? */
+                if (!IOMMU_PTE_PRESENT(pt[i]))
+                        continue;
 
-        return p;
-}
+                /* Large PTE? */
+                if (PM_PTE_LEVEL(pt[i]) == 0 ||
+                    PM_PTE_LEVEL(pt[i]) == 7)
+                        continue;
 
-#define DEFINE_FREE_PT_FN(LVL, FN) \
-static struct page *free_pt_##LVL (unsigned long __pt, struct page *freelist) \
-{ \
-        unsigned long p; \
-        u64 *pt; \
-        int i; \
- \
-        pt = (u64 *)__pt; \
- \
-        for (i = 0; i < 512; ++i) { \
-                /* PTE present? */ \
-                if (!IOMMU_PTE_PRESENT(pt[i])) \
-                        continue; \
- \
-                /* Large PTE? */ \
-                if (PM_PTE_LEVEL(pt[i]) == 0 || \
-                    PM_PTE_LEVEL(pt[i]) == 7) \
-                        continue; \
- \
-                p = (unsigned long)IOMMU_PTE_PAGE(pt[i]); \
-                freelist = FN(p, freelist); \
-        } \
- \
-        return free_pt_page((unsigned long)pt, freelist); \
-}
+                /*
+                 * Free the next level. No need to look at l1 tables here since
+                 * they can only contain leaf PTEs; just free them directly.
+                 */
+                p = IOMMU_PTE_PAGE(pt[i]);
+                if (lvl > 2)
+                        free_pt_lvl(p, freelist, lvl - 1);
+                else
+                        free_pt_page(p, freelist);
+        }
 
-DEFINE_FREE_PT_FN(l2, free_pt_page)
-DEFINE_FREE_PT_FN(l3, free_pt_l2)
-DEFINE_FREE_PT_FN(l4, free_pt_l3)
-DEFINE_FREE_PT_FN(l5, free_pt_l4)
-DEFINE_FREE_PT_FN(l6, free_pt_l5)
+        free_pt_page(pt, freelist);
+}
 
-static struct page *free_sub_pt(unsigned long root, int mode,
-                                struct page *freelist)
+static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
 {
         switch (mode) {
         case PAGE_MODE_NONE:
         case PAGE_MODE_7_LEVEL:
                 break;
         case PAGE_MODE_1_LEVEL:
-                freelist = free_pt_page(root, freelist);
+                free_pt_page(root, freelist);
                 break;
         case PAGE_MODE_2_LEVEL:
-                freelist = free_pt_l2(root, freelist);
-                break;
         case PAGE_MODE_3_LEVEL:
-                freelist = free_pt_l3(root, freelist);
-                break;
         case PAGE_MODE_4_LEVEL:
-                freelist = free_pt_l4(root, freelist);
-                break;
         case PAGE_MODE_5_LEVEL:
-                freelist = free_pt_l5(root, freelist);
-                break;
         case PAGE_MODE_6_LEVEL:
-                freelist = free_pt_l6(root, freelist);
+                free_pt_lvl(root, freelist, mode);
                 break;
         default:
                 BUG();
         }
-
-        return freelist;
 }
 
 void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
@@ -362,9 +336,9 @@ static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
         return pte;
 }
 
-static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
+static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
 {
-        unsigned long pt;
+        u64 *pt;
         int mode;
 
         while (cmpxchg64(pte, pteval, 0) != pteval) {
@@ -373,12 +347,12 @@ static struct page *free_clear_pte(u64 *pte, u64 pteval, struct page *freelist)
         }
 
         if (!IOMMU_PTE_PRESENT(pteval))
-                return freelist;
+                return;
 
-        pt = (unsigned long)IOMMU_PTE_PAGE(pteval);
+        pt = IOMMU_PTE_PAGE(pteval);
         mode = IOMMU_PTE_MODE(pteval);
 
-        return free_sub_pt(pt, mode, freelist);
+        free_sub_pt(pt, mode, freelist);
 }
 
 /*
@@ -392,7 +366,7 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
                          phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
         struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
-        struct page *freelist = NULL;
+        LIST_HEAD(freelist);
         bool updated = false;
         u64 __pte, *pte;
         int ret, i, count;
@@ -412,9 +386,9 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
                 goto out;
 
         for (i = 0; i < count; ++i)
-                freelist = free_clear_pte(&pte[i], pte[i], freelist);
+                free_clear_pte(&pte[i], pte[i], &freelist);
 
-        if (freelist != NULL)
+        if (!list_empty(&freelist))
                 updated = true;
 
         if (count > 1) {
@@ -449,7 +423,7 @@ static int iommu_v1_map_page(struct io_pgtable_ops *ops, unsigned long iova,
         }
 
         /* Everything flushed out, free pages now */
-        free_page_list(freelist);
+        put_pages_list(&freelist);
 
         return ret;
 }
@@ -511,8 +485,7 @@ static void v1_free_pgtable(struct io_pgtable *iop)
 {
         struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
         struct protection_domain *dom;
-        struct page *freelist = NULL;
-        unsigned long root;
+        LIST_HEAD(freelist);
 
         if (pgtable->mode == PAGE_MODE_NONE)
                 return;
@@ -529,10 +502,9 @@ static void v1_free_pgtable(struct io_pgtable *iop)
         BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
                pgtable->mode > PAGE_MODE_6_LEVEL);
 
-        root = (unsigned long)pgtable->root;
-        freelist = free_sub_pt(root, pgtable->mode, freelist);
+        free_sub_pt(pgtable->root, pgtable->mode, &freelist);
 
-        free_page_list(freelist);
+        put_pages_list(&freelist);
 }
 
 static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)