Merge tag 'iommu-fixes-v4.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:
 "These fixes are all for the AMD IOMMU driver:

   - A regression with HSA caused by the conversion of the driver to
     default domains.  The fixes make sure that an HSA device can still
     be attached to an IOMMUv2 domain and that these domains also allow
     non-IOMMUv2 capable devices.

   - Fix iommu=pt mode, which did not work because the dma_ops were set
     to nommu_ops, which breaks devices that can only do 32-bit DMA.

   - Fix an issue with non-PCI devices not working, because there are no
     dma_ops for them.  This issue was discovered recently as new AMD
     x86 platforms have non-PCI devices too"
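
The second and third fixes converge in amd_iommu_init_dma_ops(). Below is a
condensed sketch of the new logic, restating the amd_iommu.c hunk further
down rather than adding anything beyond it (the rest of the function is
elided):

	int __init amd_iommu_init_dma_ops(void)
	{
		/*
		 * Keep SWIOTLB available in passthrough mode so that
		 * devices limited to 32-bit DMA can still bounce-buffer.
		 */
		swiotlb        = iommu_pass_through ? 1 : 0;
		iommu_detected = 1;

		/*
		 * If SWIOTLB stays disabled (the common case with the AMD
		 * IOMMU enabled), install global dma_ops as a fallback for
		 * devices this driver does not handle, e.g. non-PCI devices.
		 */
		if (!swiotlb)
			dma_ops = &nommu_dma_ops;

		/* ... remainder of the function unchanged ... */

		return 0;
	}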

* tag 'iommu-fixes-v4.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Allow non-ATS devices in IOMMUv2 domains
  iommu/amd: Set global dma_ops if swiotlb is disabled
  iommu/amd: Use swiotlb in passthrough mode
  iommu/amd: Allow non-IOMMUv2 devices in IOMMUv2 domains
  iommu/amd: Use iommu core for passthrough mode
  iommu/amd: Use iommu_attach_group()
Linus Torvalds committed Jul 31, 2015
2 parents 23ff9e1 + 1c1cc45 commit 5e49e0b
Showing 3 changed files with 51 additions and 81 deletions.
drivers/iommu/amd_iommu.c: 29 additions, 69 deletions
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
  * Domain for untranslated devices - only allocated
  * if iommu=pt passed on kernel cmd line.
  */
-static struct protection_domain *pt_domain;
-
 static const struct iommu_ops amd_iommu_ops;
 
 static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
 	struct protection_domain *domain; /* Domain the device is bound to */
 	u16 devid;			  /* PCI Device ID */
 	bool iommu_v2;			  /* Device can make use of IOMMUv2 */
-	bool passthrough;		  /* Default for device is pt_domain */
+	bool passthrough;		  /* Device is identity mapped */
 	struct {
 		bool enabled;
 		int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
 struct kmem_cache *amd_iommu_irq_cache;
 
 static void update_domain(struct protection_domain *domain);
-static int alloc_passthrough_domain(void);
 static int protection_domain_init(struct protection_domain *domain);
 
 /****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
 	dev_data = get_dev_data(dev);
 
 	if (domain->flags & PD_IOMMUV2_MASK) {
-		if (!dev_data->iommu_v2 || !dev_data->passthrough)
+		if (!dev_data->passthrough)
 			return -EINVAL;
 
-		if (pdev_iommuv2_enable(pdev) != 0)
-			return -EINVAL;
+		if (dev_data->iommu_v2) {
+			if (pdev_iommuv2_enable(pdev) != 0)
+				return -EINVAL;
 
-		dev_data->ats.enabled = true;
-		dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
-		dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+			dev_data->ats.enabled = true;
+			dev_data->ats.qdep    = pci_ats_queue_depth(pdev);
+			dev_data->pri_tlp     = pci_pri_tlp_required(pdev);
+		}
 	} else if (amd_iommu_iotlb_sup &&
 		   pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
 		dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
 	do_detach(head);
 
 	spin_unlock_irqrestore(&domain->lock, flags);
-
-	/*
-	 * If we run in passthrough mode the device must be assigned to the
-	 * passthrough domain if it is detached from any other domain.
-	 * Make sure we can deassign from the pt_domain itself.
-	 */
-	if (dev_data->passthrough &&
-	    (dev_data->domain == NULL && domain != pt_domain))
-		__attach_device(dev_data, pt_domain);
 }
 
 /*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
 	__detach_device(dev_data);
 	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
 
-	if (domain->flags & PD_IOMMUV2_MASK)
+	if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
 		pdev_iommuv2_disable(to_pci_dev(dev));
 	else if (dev_data->ats.enabled)
 		pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
 
 	BUG_ON(!dev_data);
 
-	if (dev_data->iommu_v2)
+	if (iommu_pass_through || dev_data->iommu_v2)
 		iommu_request_dm_for_dev(dev);
 
 	/* Domains are initialized for this device - have a look what we ended up with */
 	domain = iommu_get_domain_for_dev(dev);
-	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
+	if (domain->type == IOMMU_DOMAIN_IDENTITY)
 		dev_data->passthrough = true;
-		dev->archdata.dma_ops = &nommu_dma_ops;
-	} else {
+	else
 		dev->archdata.dma_ops = &amd_iommu_dma_ops;
-	}
 
 out:
 	iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
 
 int __init amd_iommu_init_dma_ops(void)
 {
+	swiotlb        = iommu_pass_through ? 1 : 0;
 	iommu_detected = 1;
-	swiotlb = 0;
+
+	/*
+	 * In case we don't initialize SWIOTLB (actually the common case
+	 * when AMD IOMMU is enabled), make sure there are global
+	 * dma_ops set as a fall-back for devices not handled by this
+	 * driver (for example non-PCI devices).
+	 */
+	if (!swiotlb)
+		dma_ops = &nommu_dma_ops;
 
 	amd_iommu_stats_init();
 
@@ -2947,21 +2944,6 @@ static struct protection_domain *protection_domain_alloc(void)
 	return NULL;
 }
 
-static int alloc_passthrough_domain(void)
-{
-	if (pt_domain != NULL)
-		return 0;
-
-	/* allocate passthrough domain */
-	pt_domain = protection_domain_alloc();
-	if (!pt_domain)
-		return -ENOMEM;
-
-	pt_domain->mode = PAGE_MODE_NONE;
-
-	return 0;
-}
-
 static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
 {
 	struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
 *
 *****************************************************************************/
 
-int __init amd_iommu_init_passthrough(void)
-{
-	struct iommu_dev_data *dev_data;
-	struct pci_dev *dev = NULL;
-	int ret;
-
-	ret = alloc_passthrough_domain();
-	if (ret)
-		return ret;
-
-	for_each_pci_dev(dev) {
-		if (!check_device(&dev->dev))
-			continue;
-
-		dev_data = get_dev_data(&dev->dev);
-		dev_data->passthrough = true;
-
-		attach_device(&dev->dev, pt_domain);
-	}
-
-	amd_iommu_stats_init();
-
-	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
-
-	return 0;
-}
-
 /* IOMMUv2 specific functions */
 int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
 {
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
 		struct amd_iommu *iommu;
 		int qdep;
 
-		BUG_ON(!dev_data->ats.enabled);
+		/*
+		 * There might be non-IOMMUv2 capable devices in an IOMMUv2
+		 * domain.
+		 */
+		if (!dev_data->ats.enabled)
+			continue;
 
 		qdep  = dev_data->ats.qdep;
 		iommu = amd_iommu_rlookup_table[dev_data->devid];
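
Taken together, the amd_iommu.c changes drop the driver-private pt_domain in
favor of the iommu core's identity (direct-mapped) domain and relax the rule
for attaching devices to an IOMMUv2 domain. A sketch of the resulting attach
rule, restating the attach_device() hunk above rather than introducing any
new behavior:

	/* Any identity-mapped device may enter an IOMMUv2 domain; only
	 * IOMMUv2-capable devices additionally get PRI/ATS set up. */
	if (domain->flags & PD_IOMMUV2_MASK) {
		if (!dev_data->passthrough)
			return -EINVAL;

		if (dev_data->iommu_v2) {
			if (pdev_iommuv2_enable(pdev) != 0)
				return -EINVAL;
			/* record ATS queue depth and PRI settings */
		}
	}

Consistent with that, __flush_pasid() now skips devices that do not have ATS
enabled instead of tripping a BUG_ON(), since non-IOMMUv2 devices may
legitimately be members of such a domain.
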
drivers/iommu/amd_iommu_init.c: 1 addition, 9 deletions
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
 	return true;
 }
 
-static int amd_iommu_init_dma(void)
-{
-	if (iommu_pass_through)
-		return amd_iommu_init_passthrough();
-	else
-		return amd_iommu_init_dma_ops();
-}
-
 /****************************************************************************
 *
 * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
 		break;
 	case IOMMU_INTERRUPTS_EN:
-		ret = amd_iommu_init_dma();
+		ret = amd_iommu_init_dma_ops();
 		init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
 		break;
 	case IOMMU_DMA_OPS:
drivers/iommu/amd_iommu_v2.c: 21 additions, 3 deletions
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
 
 static void free_device_state(struct device_state *dev_state)
 {
+	struct iommu_group *group;
+
 	/*
 	 * First detach device from domain - No more PRI requests will arrive
 	 * from that device after it is unbound from the IOMMUv2 domain.
	 */
-	iommu_detach_device(dev_state->domain, &dev_state->pdev->dev);
+	group = iommu_group_get(&dev_state->pdev->dev);
+	if (WARN_ON(!group))
+		return;
+
+	iommu_detach_group(dev_state->domain, group);
+
+	iommu_group_put(group);
 
 	/* Everything is down now, free the IOMMUv2 domain */
 	iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
 int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 {
 	struct device_state *dev_state;
+	struct iommu_group *group;
 	unsigned long flags;
 	int ret, tmp;
 	u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 	if (ret)
 		goto out_free_domain;
 
-	ret = iommu_attach_device(dev_state->domain, &pdev->dev);
-	if (ret != 0)
+	group = iommu_group_get(&pdev->dev);
+	if (!group)
 		goto out_free_domain;
 
+	ret = iommu_attach_group(dev_state->domain, group);
+	if (ret != 0)
+		goto out_drop_group;
+
+	iommu_group_put(group);
+
 	spin_lock_irqsave(&state_lock, flags);
 
 	if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
 
 	return 0;
 
+out_drop_group:
+	iommu_group_put(group);
+
 out_free_domain:
 	iommu_domain_free(dev_state->domain);
 
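
The amd_iommu_v2.c changes switch from the per-device attach API to the
group-based one, which is what lets an HSA device be placed back into an
IOMMUv2 domain now that the iommu core assigns a default domain to every
group. The general pattern, as used above (the error handling here is
illustrative, not lifted verbatim from the driver):

	struct iommu_group *group;
	int ret;

	/* Take a reference on the group the device belongs to. */
	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -ENODEV;

	/* Attach the whole group to the new domain. */
	ret = iommu_attach_group(domain, group);
	if (ret) {
		iommu_group_put(group);
		return ret;
	}

	/* The domain holds the attachment; drop our group reference. */
	iommu_group_put(group);

Attaching and detaching the group rather than the bare device keeps the
core's bookkeeping consistent, so on detach the core can move the group back
to its default domain instead of leaving the device without translation.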
