From 8dff954c0396017eed10d4c7552b7186a6f99005 Mon Sep 17 00:00:00 2001
From: Joerg Roedel
Date: Tue, 1 Sep 2009 16:00:35 +0200
Subject: [PATCH]

--- yaml ---
r: 158078
b: refs/heads/master
c: ac0101d396fee24994632f71b55b9f7f9ee16eff
h: refs/heads/master
v: v3
---
 [refs]                                       |   2 +-
 trunk/arch/x86/Kconfig                       |   1 +
 trunk/arch/x86/include/asm/amd_iommu_types.h |   8 +-
 trunk/arch/x86/kernel/amd_iommu.c            | 119 +++++--------------
 trunk/arch/x86/kernel/amd_iommu_init.c       |  34 ++----
 trunk/arch/x86/kernel/pci-dma.c              |   9 +-
 6 files changed, 51 insertions(+), 122 deletions(-)

diff --git a/[refs] b/[refs]
index 4e5ae76bfc82..eff61fed1b84 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 85da07c409daba3d067824f0051d58f70cb571a0
+refs/heads/master: ac0101d396fee24994632f71b55b9f7f9ee16eff
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 1d9c18aa17eb..13ffa5df37d7 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -586,6 +586,7 @@ config GART_IOMMU
 	bool "GART IOMMU support" if EMBEDDED
 	default y
 	select SWIOTLB
+	select AGP
 	depends on X86_64 && PCI
 	---help---
 	  Support for full DMA access of devices with 32bit memory access only
diff --git a/trunk/arch/x86/include/asm/amd_iommu_types.h b/trunk/arch/x86/include/asm/amd_iommu_types.h
index 3a6a3259e1eb..0c878caaa0a2 100644
--- a/trunk/arch/x86/include/asm/amd_iommu_types.h
+++ b/trunk/arch/x86/include/asm/amd_iommu_types.h
@@ -198,7 +198,7 @@ extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...)					\
 	do {								\
 		if (amd_iommu_dump)					\
-			printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
+			printk(KERN_INFO "AMD IOMMU: " format, ## arg);	\
 	} while(0);
 
 /*
@@ -337,9 +337,6 @@ struct amd_iommu {
 	/* if one, we need to send a completion wait command */
 	bool need_sync;
 
-	/* becomes true if a command buffer reset is running */
-	bool reset_in_progress;
-
 	/* default dma_ops domain for that IOMMU */
 	struct dma_ops_domain *default_dom;
 };
@@ -460,7 +457,4 @@ static inline void amd_iommu_stats_init(void) { }
 
 #endif /* CONFIG_AMD_IOMMU_STATS */
 
-/* some function prototypes */
-extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu);
-
 #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */
diff --git a/trunk/arch/x86/kernel/amd_iommu.c b/trunk/arch/x86/kernel/amd_iommu.c
index 8c93b7c7735e..6c99f5037801 100644
--- a/trunk/arch/x86/kernel/amd_iommu.c
+++ b/trunk/arch/x86/kernel/amd_iommu.c
@@ -41,7 +41,9 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);
 
+#ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
+#endif
 
 /*
  * general struct to manage commands sent to an IOMMU
@@ -59,7 +61,10 @@ static u64* alloc_pte(struct protection_domain *dom,
 static void dma_ops_reserve_addresses(struct dma_ops_domain *dom,
 				      unsigned long start_page,
 				      unsigned int pages);
-static void reset_iommu_command_buffer(struct amd_iommu *iommu);
+
+#ifndef BUS_NOTIFY_UNBOUND_DRIVER
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
+#endif
 
 #ifdef CONFIG_AMD_IOMMU_STATS
 
@@ -133,25 +138,7 @@ static int iommu_has_npcache(struct amd_iommu *iommu)
  *
 ****************************************************************************/
 
-static void dump_dte_entry(u16 devid)
-{
-	int i;
-
-	for (i = 0; i < 8; ++i)
-		pr_err("AMD-Vi: DTE[%d]: %08x\n", i,
-			amd_iommu_dev_table[devid].data[i]);
-}
-
-static void dump_command(unsigned long phys_addr)
-{
-	struct iommu_cmd *cmd = phys_to_virt(phys_addr);
-	int i;
-
-	for (i = 0; i < 4; ++i)
-		pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]);
-}
-
-static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
+static void iommu_print_event(void *__evt)
 {
 	u32 *event = __evt;
 	int type  = (event[1] >> EVENT_TYPE_SHIFT)  & EVENT_TYPE_MASK;
@@ -160,7 +147,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 	int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
 	u64 address = (u64)(((u64)event[3]) << 32) | event[2];
 
-	printk(KERN_ERR "AMD-Vi: Event logged [");
+	printk(KERN_ERR "AMD IOMMU: Event logged [");
 
 	switch (type) {
 	case EVENT_TYPE_ILL_DEV:
@@ -168,7 +155,6 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 		       "address=0x%016llx flags=0x%04x]\n",
 		       PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid),
 		       address, flags);
-		dump_dte_entry(devid);
 		break;
 	case EVENT_TYPE_IO_FAULT:
 		printk("IO_PAGE_FAULT device=%02x:%02x.%x "
@@ -190,8 +176,6 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
 		break;
 	case EVENT_TYPE_ILL_CMD:
 		printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address);
-		reset_iommu_command_buffer(iommu);
-		dump_command(address);
 		break;
 	case EVENT_TYPE_CMD_HARD_ERR:
 		printk("COMMAND_HARDWARE_ERROR address=0x%016llx "
@@ -225,7 +209,7 @@ static void iommu_poll_events(struct amd_iommu *iommu)
 	tail = readl(iommu->mmio_base + MMIO_EVT_TAIL_OFFSET);
 
 	while (head != tail) {
-		iommu_print_event(iommu, iommu->evt_buf + head);
+		iommu_print_event(iommu->evt_buf + head);
 		head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size;
 	}
 
@@ -312,11 +296,8 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu)
 	status &= ~MMIO_STATUS_COM_WAIT_INT_MASK;
 	writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET);
 
-	if (unlikely(i == EXIT_LOOP_COUNT)) {
-		spin_unlock(&iommu->lock);
-		reset_iommu_command_buffer(iommu);
-		spin_lock(&iommu->lock);
-	}
+	if (unlikely(i == EXIT_LOOP_COUNT))
+		panic("AMD IOMMU: Completion wait loop failed\n");
 }
 
 /*
@@ -463,68 +444,38 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
 	iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
 }
 
-/*
- * This function flushes one domain on one IOMMU
- */
-static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid)
-{
-	struct iommu_cmd cmd;
-	unsigned long flags;
-
-	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
-				      domid, 1, 1);
-
-	spin_lock_irqsave(&iommu->lock, flags);
-	__iommu_queue_command(iommu, &cmd);
-	__iommu_completion_wait(iommu);
-	__iommu_wait_for_completion(iommu);
-	spin_unlock_irqrestore(&iommu->lock, flags);
-}
-
-static void flush_all_domains_on_iommu(struct amd_iommu *iommu)
-{
-	int i;
-
-	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
-		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
-			continue;
-		flush_domain_on_iommu(iommu, i);
-	}
-
-}
-
 /*
  * This function is used to flush the IO/TLB for a given protection domain
  * on every IOMMU in the system
  */
 static void iommu_flush_domain(u16 domid)
 {
+	unsigned long flags;
 	struct amd_iommu *iommu;
+	struct iommu_cmd cmd;
 
 	INC_STATS_COUNTER(domain_flush_all);
 
-	for_each_iommu(iommu)
-		flush_domain_on_iommu(iommu, domid);
-}
-
-void amd_iommu_flush_all_domains(void)
-{
-	struct amd_iommu *iommu;
+	__iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS,
+				      domid, 1, 1);
 
-	for_each_iommu(iommu)
-		flush_all_domains_on_iommu(iommu);
+	for_each_iommu(iommu) {
+		spin_lock_irqsave(&iommu->lock, flags);
+		__iommu_queue_command(iommu, &cmd);
+		__iommu_completion_wait(iommu);
+		__iommu_wait_for_completion(iommu);
+		spin_unlock_irqrestore(&iommu->lock, flags);
+	}
 }
 
-static void flush_all_devices_for_iommu(struct amd_iommu *iommu)
+void amd_iommu_flush_all_domains(void)
 {
 	int i;
 
-	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
-		if (iommu != amd_iommu_rlookup_table[i])
+	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
+		if (!test_bit(i, amd_iommu_pd_alloc_bitmap))
 			continue;
-
-		iommu_queue_inv_dev_entry(iommu, i);
-		iommu_completion_wait(iommu);
+		iommu_flush_domain(i);
 	}
 }
 
@@ -534,6 +485,8 @@ void amd_iommu_flush_all_devices(void)
 	int i;
 
 	for (i = 0; i <= amd_iommu_last_bdf; ++i) {
+		if (amd_iommu_pd_table[i] == NULL)
+			continue;
 
 		iommu = amd_iommu_rlookup_table[i];
 		if (!iommu)
@@ -544,22 +497,6 @@ void amd_iommu_flush_all_devices(void)
 	}
 }
 
-static void reset_iommu_command_buffer(struct amd_iommu *iommu)
-{
-	pr_err("AMD-Vi: Resetting IOMMU command buffer\n");
-
-	if (iommu->reset_in_progress)
-		panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n");
-
-	iommu->reset_in_progress = true;
-
-	amd_iommu_reset_cmd_buffer(iommu);
-	flush_all_devices_for_iommu(iommu);
-	flush_all_domains_on_iommu(iommu);
-
-	iommu->reset_in_progress = false;
-}
-
 /****************************************************************************
  *
  * The functions below are used to create the page table mappings for
diff --git a/trunk/arch/x86/kernel/amd_iommu_init.c b/trunk/arch/x86/kernel/amd_iommu_init.c
index 779ace292475..c1b17e97252e 100644
--- a/trunk/arch/x86/kernel/amd_iommu_init.c
+++ b/trunk/arch/x86/kernel/amd_iommu_init.c
@@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit)
 /* Function to enable the hardware */
 static void iommu_enable(struct amd_iommu *iommu)
 {
-	printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n",
+	printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n",
 	       dev_name(&iommu->dev->dev), iommu->cap_ptr);
 
 	iommu_feature_enable(iommu, CONTROL_IOMMU_EN);
@@ -434,20 +434,6 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu)
 	return cmd_buf;
 }
 
-/*
- * This function resets the command buffer if the IOMMU stopped fetching
- * commands from it.
- */
-void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu)
-{
-	iommu_feature_disable(iommu, CONTROL_CMDBUF_EN);
-
-	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
-	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
-
-	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
-}
-
 /*
  * This function writes the command buffer address to the hardware and
  * enables it.
@@ -464,7 +450,11 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu)
 	memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET,
 		    &entry, sizeof(entry));
 
-	amd_iommu_reset_cmd_buffer(iommu);
+	/* set head and tail to zero manually */
+	writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET);
+	writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET);
+
+	iommu_feature_enable(iommu, CONTROL_CMDBUF_EN);
 }
 
 static void __init free_command_buffer(struct amd_iommu *iommu)
@@ -868,7 +858,7 @@ static int __init init_iommu_all(struct acpi_table_header *table)
 
 		switch (*p) {
 		case ACPI_IVHD_TYPE:
-			DUMP_printk("device: %02x:%02x.%01x cap: %04x "
+			DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x "
 				    "seg: %d flags: %01x info %04x\n",
 				    PCI_BUS(h->devid), PCI_SLOT(h->devid),
 				    PCI_FUNC(h->devid), h->cap_ptr,
@@ -912,7 +902,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu)
 
 	r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
 			IRQF_SAMPLE_RANDOM,
-			"AMD-Vi",
+			"AMD IOMMU",
 			NULL);
 
 	if (r) {
@@ -1160,7 +1150,7 @@ int __init amd_iommu_init(void)
 
 	if (no_iommu) {
-		printk(KERN_INFO "AMD-Vi disabled by kernel command line\n");
+		printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n");
 		return 0;
 	}
 
@@ -1258,16 +1248,16 @@ int __init amd_iommu_init(void)
 
 	enable_iommus();
 
-	printk(KERN_INFO "AMD-Vi: device isolation ");
+	printk(KERN_INFO "AMD IOMMU: device isolation ");
 	if (amd_iommu_isolate)
 		printk("enabled\n");
 	else
 		printk("disabled\n");
 
 	if (amd_iommu_unmap_flush)
-		printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n");
+		printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
 	else
-		printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
+		printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
 
 out:
 	return ret;
diff --git a/trunk/arch/x86/kernel/pci-dma.c b/trunk/arch/x86/kernel/pci-dma.c
index 1a041bcf506b..873aa079d166 100644
--- a/trunk/arch/x86/kernel/pci-dma.c
+++ b/trunk/arch/x86/kernel/pci-dma.c
@@ -32,7 +32,14 @@ int no_iommu __read_mostly;
 /* Set this to 1 if there is a HW IOMMU in the system */
 int iommu_detected __read_mostly = 0;
 
-int iommu_pass_through;
+/*
+ * This variable becomes 1 if iommu=pt is passed on the kernel command line.
+ * If this variable is 1, IOMMU implementations do no DMA translation for
+ * devices and allow every device to access the whole physical memory. This
+ * is useful if a user wants to use an IOMMU only for KVM device assignment
+ * to guests and not for driver DMA translation.
+ */
+int iommu_pass_through __read_mostly;
 
 dma_addr_t bad_dma_address __read_mostly = 0;
 EXPORT_SYMBOL(bad_dma_address);
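
A note on the iommu_enable_command_buffer() hunk: with amd_iommu_reset_cmd_buffer() gone, enabling the ring means zeroing the head and tail pointers and only then setting the enable bit, so hardware and driver agree the buffer is empty before fetching starts. The stand-alone C model below sketches that sequence; the register offsets, control-bit position, and model_iommu struct are illustrative assumptions, not the kernel's definitions.

/*
 * Stand-alone model (not kernel code) of the command-buffer enable
 * sequence. Offsets and bit positions are assumptions for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MMIO_CMD_HEAD_OFFSET	0x2000		/* assumed offset */
#define MMIO_CMD_TAIL_OFFSET	0x2008		/* assumed offset */
#define CONTROL_CMDBUF_EN	(1u << 12)	/* assumed bit position */
#define MMIO_SIZE		0x4000

struct model_iommu {
	uint8_t  mmio[MMIO_SIZE];	/* stands in for the mapped MMIO region */
	uint32_t control;
};

/* The kernel uses writel(); plain memcpy is enough for the model. */
static void mmio_write32(struct model_iommu *iommu, uint32_t off, uint32_t val)
{
	memcpy(iommu->mmio + off, &val, sizeof(val));
}

static void enable_command_buffer(struct model_iommu *iommu)
{
	/* set head and tail to zero manually, as the patched hunk does */
	mmio_write32(iommu, MMIO_CMD_HEAD_OFFSET, 0);
	mmio_write32(iommu, MMIO_CMD_TAIL_OFFSET, 0);

	/* only then let the IOMMU start fetching commands */
	iommu->control |= CONTROL_CMDBUF_EN;
}

int main(void)
{
	static struct model_iommu iommu;

	enable_command_buffer(&iommu);
	printf("control = %#x\n", (unsigned)iommu.control);
	return 0;
}

Zeroing both pointers before enabling matters because the hardware fetches from head toward tail; enabling with stale values could replay or skip queued commands.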
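The flush rework reads best end to end: iommu_flush_domain() now builds a single INVALIDATE_IOMMU_PAGES command covering all pages of the domain and queues it on every IOMMU under that IOMMU's lock, followed by a completion wait, while amd_iommu_flush_all_domains() walks the protection-domain bitmap (domain 0 is reserved, so the walk starts at 1) and flushes each allocated ID. Below is a minimal stand-alone model of that control flow; the bitmap size, IOMMU count, and command submission are reduced to stubs.

/*
 * Stand-alone model (not kernel code) of the reworked flush path.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_DOMAIN_ID	64	/* the real limit is much larger */
#define NR_IOMMUS	2	/* assumed system with two IOMMUs */

static bool pd_alloc_bitmap[MAX_DOMAIN_ID];

/* Build one invalidate-pages command, then queue it on every IOMMU and
 * wait for completion, mirroring the reworked iommu_flush_domain(). */
static void iommu_flush_domain(unsigned int domid)
{
	unsigned int i;

	/* stand-in for __iommu_build_inv_iommu_pages(&cmd, ..., domid, 1, 1) */
	printf("built INVALIDATE_IOMMU_PAGES for domain %u\n", domid);

	for (i = 0; i < NR_IOMMUS; ++i) {
		/* lock, queue, completion-wait, unlock in the real driver */
		printf("  queued on iommu %u, waited for completion\n", i);
	}
}

/* Domain 0 is reserved, so the walk starts at 1, as in the patch. */
static void flush_all_domains(void)
{
	unsigned int i;

	for (i = 1; i < MAX_DOMAIN_ID; ++i) {
		if (!pd_alloc_bitmap[i])
			continue;
		iommu_flush_domain(i);
	}
}

int main(void)
{
	pd_alloc_bitmap[3] = pd_alloc_bitmap[7] = true;
	flush_all_domains();
	return 0;
}

Building the command once and reusing it across IOMMUs is what lets the patch delete the per-IOMMU helper functions without changing behavior.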
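Finally, the new comment in pci-dma.c documents iommu_pass_through: with iommu=pt on the command line, IOMMU implementations skip DMA translation and let every device address physical memory directly, which suits setups that want the IOMMU only for KVM device assignment. A stand-alone model of that gate follows; translate() is a made-up stand-in for a real page-table walk, not kernel logic.

/*
 * Stand-alone model (not kernel code) of the iommu_pass_through gate.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

static int iommu_pass_through = 1;	/* models iommu=pt on the command line */

static dma_addr_t translate(uint64_t phys)
{
	return phys | 0x100000000ULL;	/* arbitrary remapped bus address */
}

static dma_addr_t map_for_device(uint64_t phys)
{
	if (iommu_pass_through)
		return (dma_addr_t)phys;	/* no translation: 1:1 access */
	return translate(phys);
}

int main(void)
{
	printf("dma addr: %#llx\n",
	       (unsigned long long)map_for_device(0x1000));
	return 0;
}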