Skip to content

Commit

Permalink
iommu/amd: Adopt IO page table framework for AMD IOMMU v1 page table
Browse the repository at this point in the history
Switch to using IO page table framework for AMD IOMMU v1 page table.

Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Link: https://lore.kernel.org/r/20201215073705.123786-14-suravee.suthikulpanit@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
  • Loading branch information
Suravee Suthikulpanit authored and Joerg Roedel committed Jan 28, 2021
1 parent fd86c95 commit 89c9a09
Show file tree
Hide file tree
Showing 3 changed files with 39 additions and 12 deletions.
1 change: 1 addition & 0 deletions drivers/iommu/amd/amd_iommu.h
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ extern void amd_iommu_disable(void);
extern int amd_iommu_reenable(int);
extern int amd_iommu_enable_faulting(void);
extern int amd_iommu_guest_ir;
extern enum io_pgtable_fmt amd_iommu_pgtable;

/* IOMMUv2 specific functions */
struct iommu_domain;
Expand Down
2 changes: 2 additions & 0 deletions drivers/iommu/amd/init.c
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,8 @@ struct ivmd_header {
bool amd_iommu_dump;
bool amd_iommu_irq_remap __read_mostly;

enum io_pgtable_fmt amd_iommu_pgtable = AMD_IOMMU_V1;

int amd_iommu_guest_ir = AMD_IOMMU_GUEST_IR_VAPIC;
static int amd_iommu_xt_mode = IRQ_REMAP_XAPIC_MODE;

Expand Down
48 changes: 36 additions & 12 deletions drivers/iommu/amd/iommu.c
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/iova.h>
#include <linux/io-pgtable.h>
#include <asm/irq_remapping.h>
#include <asm/io_apic.h>
#include <asm/apic.h>
Expand Down Expand Up @@ -1900,7 +1901,7 @@ static void protection_domain_free(struct protection_domain *domain)
kfree(domain);
}

static int protection_domain_init(struct protection_domain *domain, int mode)
static int protection_domain_init_v1(struct protection_domain *domain, int mode)
{
u64 *pt_root = NULL;

Expand All @@ -1923,34 +1924,55 @@ static int protection_domain_init(struct protection_domain *domain, int mode)
return 0;
}

static struct protection_domain *protection_domain_alloc(int mode)
static struct protection_domain *protection_domain_alloc(unsigned int type)
{
struct io_pgtable_ops *pgtbl_ops;
struct protection_domain *domain;
int pgtable = amd_iommu_pgtable;
int mode = DEFAULT_PGTABLE_LEVEL;
int ret;

domain = kzalloc(sizeof(*domain), GFP_KERNEL);
if (!domain)
return NULL;

if (protection_domain_init(domain, mode))
/*
* Force IOMMU v1 page table when iommu=pt and
* when allocating domain for pass-through devices.
*/
if (type == IOMMU_DOMAIN_IDENTITY) {
pgtable = AMD_IOMMU_V1;
mode = PAGE_MODE_NONE;
} else if (type == IOMMU_DOMAIN_UNMANAGED) {
pgtable = AMD_IOMMU_V1;
}

switch (pgtable) {
case AMD_IOMMU_V1:
ret = protection_domain_init_v1(domain, mode);
break;
default:
ret = -EINVAL;
}

if (ret)
goto out_err;

return domain;
pgtbl_ops = alloc_io_pgtable_ops(pgtable, &domain->iop.pgtbl_cfg, domain);
if (!pgtbl_ops)
goto out_err;

return domain;
out_err:
kfree(domain);

return NULL;
}

static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
{
struct protection_domain *domain;
int mode = DEFAULT_PGTABLE_LEVEL;

if (type == IOMMU_DOMAIN_IDENTITY)
mode = PAGE_MODE_NONE;

domain = protection_domain_alloc(mode);
domain = protection_domain_alloc(type);
if (!domain)
return NULL;

Expand Down Expand Up @@ -2069,7 +2091,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
int prot = 0;
int ret = -EINVAL;

if (domain->iop.mode == PAGE_MODE_NONE)
if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return -EINVAL;

if (iommu_prot & IOMMU_READ)
Expand All @@ -2092,7 +2115,8 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova,
struct protection_domain *domain = to_pdomain(dom);
struct io_pgtable_ops *ops = &domain->iop.iop.ops;

if (domain->iop.mode == PAGE_MODE_NONE)
if ((amd_iommu_pgtable == AMD_IOMMU_V1) &&
(domain->iop.mode == PAGE_MODE_NONE))
return 0;

return (ops->unmap) ? ops->unmap(ops, iova, page_size, gather) : 0;
Expand Down

0 comments on commit 89c9a09

Please sign in to comment.