iommu/vt-d: Remove domain and devinfo mempool
The domain and devinfo memory blocks are only allocated during device
probe and released during remove. There's no hot-path context, hence
no need for memory pools.

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20220214025704.3184654-1-baolu.lu@linux.intel.com
Link: https://lore.kernel.org/r/20220301020159.633356-5-baolu.lu@linux.intel.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Lu Baolu authored and Joerg Roedel committed Mar 4, 2022
1 parent c8850a6 commit ee2653b
Showing 1 changed file with 5 additions and 99 deletions.
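
As context for the diff below, here is a minimal sketch of the allocation pattern the patch moves away from and toward. The struct and function names (example_devinfo, example_alloc_pooled, example_alloc_plain) are hypothetical, chosen only to illustrate the idea: a dedicated kmem_cache with GFP_ATOMIC pays off for objects churned on hot, non-sleeping paths, whereas objects allocated once at device probe and freed at remove can simply use kzalloc()/kfree() with GFP_KERNEL and need no cache setup or teardown.

#include <linux/slab.h>

/* Hypothetical stand-in for device_domain_info / dmar_domain. */
struct example_devinfo {
        int id;
        void *private;
};

/* Before: a dedicated slab cache, created at init time with
 * kmem_cache_create() and destroyed again on exit.
 */
static struct kmem_cache *example_cache;

static struct example_devinfo *example_alloc_pooled(void)
{
        /* GFP_ATOMIC is only required when the caller cannot sleep. */
        return kmem_cache_zalloc(example_cache, GFP_ATOMIC);
}

/* After: probe/remove context may sleep, so the general-purpose slab
 * allocator with GFP_KERNEL is sufficient.
 */
static struct example_devinfo *example_alloc_plain(void)
{
        return kzalloc(sizeof(struct example_devinfo), GFP_KERNEL);
}

static void example_free(struct example_devinfo *info)
{
        kfree(info);
}

The patch applies exactly this simplification: kzalloc()/kfree() replace the two kmem_cache pools, and the cache init/exit helpers along with their call sites in intel_iommu_init() are removed.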
104 changes: 5 additions & 99 deletions drivers/iommu/intel/iommu.c
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -452,9 +452,6 @@ static int __init intel_iommu_setup(char *str)
 }
 __setup("intel_iommu=", intel_iommu_setup);
 
-static struct kmem_cache *iommu_domain_cache;
-static struct kmem_cache *iommu_devinfo_cache;
-
 void *alloc_pgtable_page(int node)
 {
         struct page *page;
@@ -471,26 +468,6 @@ void free_pgtable_page(void *vaddr)
         free_page((unsigned long)vaddr);
 }
 
-static inline void *alloc_domain_mem(void)
-{
-        return kmem_cache_alloc(iommu_domain_cache, GFP_ATOMIC);
-}
-
-static void free_domain_mem(void *vaddr)
-{
-        kmem_cache_free(iommu_domain_cache, vaddr);
-}
-
-static inline void * alloc_devinfo_mem(void)
-{
-        return kmem_cache_alloc(iommu_devinfo_cache, GFP_ATOMIC);
-}
-
-static inline void free_devinfo_mem(void *vaddr)
-{
-        kmem_cache_free(iommu_devinfo_cache, vaddr);
-}
-
 static inline int domain_type_is_si(struct dmar_domain *domain)
 {
         return domain->domain.type == IOMMU_DOMAIN_IDENTITY;
@@ -1885,11 +1862,10 @@ static struct dmar_domain *alloc_domain(unsigned int type)
 {
         struct dmar_domain *domain;
 
-        domain = alloc_domain_mem();
+        domain = kzalloc(sizeof(*domain), GFP_KERNEL);
         if (!domain)
                 return NULL;
 
-        memset(domain, 0, sizeof(*domain));
         domain->nid = NUMA_NO_NODE;
         if (first_level_by_default(type))
                 domain->flags |= DOMAIN_FLAG_USE_FIRST_LEVEL;
@@ -1973,7 +1949,7 @@ static void domain_exit(struct dmar_domain *domain)
                 put_pages_list(&freelist);
         }
 
-        free_domain_mem(domain);
+        kfree(domain);
 }
 
 /*
@@ -2558,7 +2534,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
         unsigned long flags;
         int ret;
 
-        info = alloc_devinfo_mem();
+        info = kzalloc(sizeof(*info), GFP_KERNEL);
         if (!info)
                 return NULL;
 
@@ -2574,13 +2550,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
                 info->segment = pci_domain_nr(pdev->bus);
         }
 
-        info->ats_supported = info->pasid_supported = info->pri_supported = 0;
-        info->ats_enabled = info->pasid_enabled = info->pri_enabled = 0;
-        info->ats_qdep = 0;
         info->dev = dev;
         info->domain = domain;
         info->iommu = iommu;
-        info->pasid_table = NULL;
 
         if (dev && dev_is_pci(dev)) {
                 struct pci_dev *pdev = to_pci_dev(info->dev);
@@ -2610,7 +2582,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
         if (ret) {
                 spin_unlock_irqrestore(&device_domain_lock, flags);
-                free_devinfo_mem(info);
+                kfree(info);
                 return NULL;
         }
 
@@ -3343,65 +3315,6 @@ static int __init init_dmars(void)
         return ret;
 }
 
-static inline int iommu_domain_cache_init(void)
-{
-        int ret = 0;
-
-        iommu_domain_cache = kmem_cache_create("iommu_domain",
-                                               sizeof(struct dmar_domain),
-                                               0,
-                                               SLAB_HWCACHE_ALIGN,
-
-                                               NULL);
-        if (!iommu_domain_cache) {
-                pr_err("Couldn't create iommu_domain cache\n");
-                ret = -ENOMEM;
-        }
-
-        return ret;
-}
-
-static inline int iommu_devinfo_cache_init(void)
-{
-        int ret = 0;
-
-        iommu_devinfo_cache = kmem_cache_create("iommu_devinfo",
-                                                sizeof(struct device_domain_info),
-                                                0,
-                                                SLAB_HWCACHE_ALIGN,
-                                                NULL);
-        if (!iommu_devinfo_cache) {
-                pr_err("Couldn't create devinfo cache\n");
-                ret = -ENOMEM;
-        }
-
-        return ret;
-}
-
-static int __init iommu_init_mempool(void)
-{
-        int ret;
-
-        ret = iommu_domain_cache_init();
-        if (ret)
-                goto domain_error;
-
-        ret = iommu_devinfo_cache_init();
-        if (!ret)
-                return ret;
-
-        kmem_cache_destroy(iommu_domain_cache);
-domain_error:
-
-        return -ENOMEM;
-}
-
-static void __init iommu_exit_mempool(void)
-{
-        kmem_cache_destroy(iommu_devinfo_cache);
-        kmem_cache_destroy(iommu_domain_cache);
-}
-
 static void __init init_no_remapping_devices(void)
 {
         struct dmar_drhd_unit *drhd;
@@ -4253,12 +4166,6 @@ int __init intel_iommu_init(void)
         force_on = (!intel_iommu_tboot_noforce && tboot_force_iommu()) ||
                     platform_optin_force_iommu();
 
-        if (iommu_init_mempool()) {
-                if (force_on)
-                        panic("tboot: Failed to initialize iommu memory\n");
-                return -ENOMEM;
-        }
-
         down_write(&dmar_global_lock);
         if (dmar_table_init()) {
                 if (force_on)
@@ -4379,7 +4286,6 @@ int __init intel_iommu_init(void)
 out_free_dmar:
         intel_iommu_free_dmars();
         up_write(&dmar_global_lock);
-        iommu_exit_mempool();
         return ret;
 }
 
@@ -4436,7 +4342,7 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info)
         domain_detach_iommu(domain, iommu);
         spin_unlock_irqrestore(&iommu->lock, flags);
 
-        free_devinfo_mem(info);
+        kfree(info);
 }
 
 static void dmar_remove_one_dev_info(struct device *dev)
