From 4e8e01686ce121ce1f27f2d179e879487afec492 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 2 Sep 2009 14:24:08 +0200 Subject: [PATCH] --- yaml --- r: 158087 b: refs/heads/master c: 9355a08186e52b7c120adea91c984923b54efa10 h: refs/heads/master i: 158085: e4a0a28fbcb37d3cda6e9a15768dfec1aa86095b 158083: 99fdba140d93c653f194077a929c845738b812ea 158079: b07a46456d9a97750c812aaa998626844c66ac47 v: v3 --- [refs] | 2 +- trunk/arch/x86/Kconfig | 1 + trunk/arch/x86/include/asm/amd_iommu.h | 1 - trunk/arch/x86/include/asm/amd_iommu_types.h | 21 +- trunk/arch/x86/kernel/amd_iommu.c | 282 ++++--------------- trunk/arch/x86/kernel/amd_iommu_init.c | 42 +-- trunk/arch/x86/kernel/pci-dma.c | 9 +- 7 files changed, 84 insertions(+), 274 deletions(-) diff --git a/[refs] b/[refs] index c80ad8d3d60a..2d04c2055035 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 03362a05c55122baff3556109c922285299dfec4 +refs/heads/master: 9355a08186e52b7c120adea91c984923b54efa10 diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 1d9c18aa17eb..13ffa5df37d7 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -586,6 +586,7 @@ config GART_IOMMU bool "GART IOMMU support" if EMBEDDED default y select SWIOTLB + select AGP depends on X86_64 && PCI ---help--- Support for full DMA access of devices with 32bit memory access only diff --git a/trunk/arch/x86/include/asm/amd_iommu.h b/trunk/arch/x86/include/asm/amd_iommu.h index ac95995b7bad..bdf96f119f06 100644 --- a/trunk/arch/x86/include/asm/amd_iommu.h +++ b/trunk/arch/x86/include/asm/amd_iommu.h @@ -25,7 +25,6 @@ #ifdef CONFIG_AMD_IOMMU extern int amd_iommu_init(void); extern int amd_iommu_init_dma_ops(void); -extern int amd_iommu_init_passthrough(void); extern void amd_iommu_detect(void); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); extern void amd_iommu_flush_all_domains(void); diff --git a/trunk/arch/x86/include/asm/amd_iommu_types.h b/trunk/arch/x86/include/asm/amd_iommu_types.h index 86a56b49f2c6..7fce4ef77bdd 100644 --- a/trunk/arch/x86/include/asm/amd_iommu_types.h +++ b/trunk/arch/x86/include/asm/amd_iommu_types.h @@ -143,16 +143,24 @@ #define EVT_BUFFER_SIZE 8192 /* 512 entries */ #define EVT_LEN_MASK (0x9ULL << 56) -#define PAGE_MODE_NONE 0x00 #define PAGE_MODE_1_LEVEL 0x01 #define PAGE_MODE_2_LEVEL 0x02 #define PAGE_MODE_3_LEVEL 0x03 +#define PAGE_MODE_4_LEVEL 0x04 +#define PAGE_MODE_5_LEVEL 0x05 +#define PAGE_MODE_6_LEVEL 0x06 #define IOMMU_PDE_NL_0 0x000ULL #define IOMMU_PDE_NL_1 0x200ULL #define IOMMU_PDE_NL_2 0x400ULL #define IOMMU_PDE_NL_3 0x600ULL +#define PM_LEVEL_SHIFT(x) (12 + ((x) * 9)) +#define PM_LEVEL_SIZE(x) (((x) < 6) ? \ + ((1ULL << PM_LEVEL_SHIFT((x))) - 1): \ + (0xffffffffffffffffULL)) +#define PM_LEVEL_INDEX(x, a) (((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL) + #define IOMMU_PTE_L2_INDEX(address) (((address) >> 30) & 0x1ffULL) #define IOMMU_PTE_L1_INDEX(address) (((address) >> 21) & 0x1ffULL) #define IOMMU_PTE_L0_INDEX(address) (((address) >> 12) & 0x1ffULL) @@ -195,14 +203,11 @@ #define PD_DMA_OPS_MASK (1UL << 0) /* domain used for dma_ops */ #define PD_DEFAULT_MASK (1UL << 1) /* domain is a default dma_ops domain for an IOMMU */ -#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page - translation */ - extern bool amd_iommu_dump; #define DUMP_printk(format, arg...) 
\ do { \ if (amd_iommu_dump) \ - printk(KERN_INFO "AMD-Vi: " format, ## arg); \ + printk(KERN_INFO "AMD IOMMU: " format, ## arg); \ } while(0); /* @@ -341,9 +346,6 @@ struct amd_iommu { /* if one, we need to send a completion wait command */ bool need_sync; - /* becomes true if a command buffer reset is running */ - bool reset_in_progress; - /* default dma_ops domain for that IOMMU */ struct dma_ops_domain *default_dom; }; @@ -464,7 +466,4 @@ static inline void amd_iommu_stats_init(void) { } #endif /* CONFIG_AMD_IOMMU_STATS */ -/* some function prototypes */ -extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); - #endif /* _ASM_X86_AMD_IOMMU_TYPES_H */ diff --git a/trunk/arch/x86/kernel/amd_iommu.c b/trunk/arch/x86/kernel/amd_iommu.c index dc19ed43b54e..29bcd358b6ce 100644 --- a/trunk/arch/x86/kernel/amd_iommu.c +++ b/trunk/arch/x86/kernel/amd_iommu.c @@ -41,14 +41,9 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock); static LIST_HEAD(iommu_pd_list); static DEFINE_SPINLOCK(iommu_pd_list_lock); -/* - * Domain for untranslated devices - only allocated - * if iommu=pt passed on kernel cmd line. - */ -static struct protection_domain *pt_domain; - #ifdef CONFIG_IOMMU_API static struct iommu_ops amd_iommu_ops; +#endif /* * general struct to manage commands send to an IOMMU @@ -66,7 +61,12 @@ static u64* alloc_pte(struct protection_domain *dom, static void dma_ops_reserve_addresses(struct dma_ops_domain *dom, unsigned long start_page, unsigned int pages); -static void reset_iommu_command_buffer(struct amd_iommu *iommu); +static u64 *fetch_pte(struct protection_domain *domain, + unsigned long address); + +#ifndef BUS_NOTIFY_UNBOUND_DRIVER +#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 +#endif #ifdef CONFIG_AMD_IOMMU_STATS @@ -140,25 +140,7 @@ static int iommu_has_npcache(struct amd_iommu *iommu) * ****************************************************************************/ -static void dump_dte_entry(u16 devid) -{ - int i; - - for (i = 0; i < 8; ++i) - pr_err("AMD-Vi: DTE[%d]: %08x\n", i, - amd_iommu_dev_table[devid].data[i]); -} - -static void dump_command(unsigned long phys_addr) -{ - struct iommu_cmd *cmd = phys_to_virt(phys_addr); - int i; - - for (i = 0; i < 4; ++i) - pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); -} - -static void iommu_print_event(struct amd_iommu *iommu, void *__evt) +static void iommu_print_event(void *__evt) { u32 *event = __evt; int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK; @@ -167,7 +149,7 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK; u64 address = (u64)(((u64)event[3]) << 32) | event[2]; - printk(KERN_ERR "AMD-Vi: Event logged ["); + printk(KERN_ERR "AMD IOMMU: Event logged ["); switch (type) { case EVENT_TYPE_ILL_DEV: @@ -175,7 +157,6 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) "address=0x%016llx flags=0x%04x]\n", PCI_BUS(devid), PCI_SLOT(devid), PCI_FUNC(devid), address, flags); - dump_dte_entry(devid); break; case EVENT_TYPE_IO_FAULT: printk("IO_PAGE_FAULT device=%02x:%02x.%x " @@ -197,8 +178,6 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) break; case EVENT_TYPE_ILL_CMD: printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); - reset_iommu_command_buffer(iommu); - dump_command(address); break; case EVENT_TYPE_CMD_HARD_ERR: printk("COMMAND_HARDWARE_ERROR address=0x%016llx " @@ -232,7 +211,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) tail = readl(iommu->mmio_base + 
MMIO_EVT_TAIL_OFFSET); while (head != tail) { - iommu_print_event(iommu, iommu->evt_buf + head); + iommu_print_event(iommu->evt_buf + head); head = (head + EVENT_ENTRY_SIZE) % iommu->evt_buf_size; } @@ -319,11 +298,8 @@ static void __iommu_wait_for_completion(struct amd_iommu *iommu) status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); - if (unlikely(i == EXIT_LOOP_COUNT)) { - spin_unlock(&iommu->lock); - reset_iommu_command_buffer(iommu); - spin_lock(&iommu->lock); - } + if (unlikely(i == EXIT_LOOP_COUNT)) + panic("AMD IOMMU: Completion wait loop failed\n"); } /* @@ -470,68 +446,38 @@ static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid) iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1); } -/* - * This function flushes one domain on one IOMMU - */ -static void flush_domain_on_iommu(struct amd_iommu *iommu, u16 domid) -{ - struct iommu_cmd cmd; - unsigned long flags; - - __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, - domid, 1, 1); - - spin_lock_irqsave(&iommu->lock, flags); - __iommu_queue_command(iommu, &cmd); - __iommu_completion_wait(iommu); - __iommu_wait_for_completion(iommu); - spin_unlock_irqrestore(&iommu->lock, flags); -} - -static void flush_all_domains_on_iommu(struct amd_iommu *iommu) -{ - int i; - - for (i = 1; i < MAX_DOMAIN_ID; ++i) { - if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) - continue; - flush_domain_on_iommu(iommu, i); - } - -} - /* * This function is used to flush the IO/TLB for a given protection domain * on every IOMMU in the system */ static void iommu_flush_domain(u16 domid) { + unsigned long flags; struct amd_iommu *iommu; + struct iommu_cmd cmd; INC_STATS_COUNTER(domain_flush_all); - for_each_iommu(iommu) - flush_domain_on_iommu(iommu, domid); -} - -void amd_iommu_flush_all_domains(void) -{ - struct amd_iommu *iommu; + __iommu_build_inv_iommu_pages(&cmd, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, + domid, 1, 1); - for_each_iommu(iommu) - flush_all_domains_on_iommu(iommu); + for_each_iommu(iommu) { + spin_lock_irqsave(&iommu->lock, flags); + __iommu_queue_command(iommu, &cmd); + __iommu_completion_wait(iommu); + __iommu_wait_for_completion(iommu); + spin_unlock_irqrestore(&iommu->lock, flags); + } } -static void flush_all_devices_for_iommu(struct amd_iommu *iommu) +void amd_iommu_flush_all_domains(void) { int i; - for (i = 0; i <= amd_iommu_last_bdf; ++i) { - if (iommu != amd_iommu_rlookup_table[i]) + for (i = 1; i < MAX_DOMAIN_ID; ++i) { + if (!test_bit(i, amd_iommu_pd_alloc_bitmap)) continue; - - iommu_queue_inv_dev_entry(iommu, i); - iommu_completion_wait(iommu); + iommu_flush_domain(i); } } @@ -541,6 +487,8 @@ void amd_iommu_flush_all_devices(void) int i; for (i = 0; i <= amd_iommu_last_bdf; ++i) { + if (amd_iommu_pd_table[i] == NULL) + continue; iommu = amd_iommu_rlookup_table[i]; if (!iommu) @@ -551,22 +499,6 @@ void amd_iommu_flush_all_devices(void) } } -static void reset_iommu_command_buffer(struct amd_iommu *iommu) -{ - pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); - - if (iommu->reset_in_progress) - panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); - - iommu->reset_in_progress = true; - - amd_iommu_reset_cmd_buffer(iommu); - flush_all_devices_for_iommu(iommu); - flush_all_domains_on_iommu(iommu); - - iommu->reset_in_progress = false; -} - /**************************************************************************** * * The functions below are used the create the page table mappings for @@ -740,24 +672,24 @@ static int 
init_unity_mappings_for_device(struct dma_ops_domain *dma_dom, * This function checks if there is a PTE for a given dma address. If * there is one, it returns the pointer to it. */ -static u64* fetch_pte(struct protection_domain *domain, +static u64 *fetch_pte(struct protection_domain *domain, unsigned long address) { + int level; u64 *pte; - pte = &domain->pt_root[IOMMU_PTE_L2_INDEX(address)]; + level = domain->mode - 1; + pte = &domain->pt_root[PM_LEVEL_INDEX(level, address)]; - if (!IOMMU_PTE_PRESENT(*pte)) - return NULL; - - pte = IOMMU_PTE_PAGE(*pte); - pte = &pte[IOMMU_PTE_L1_INDEX(address)]; + while (level > 0) { + if (!IOMMU_PTE_PRESENT(*pte)) + return NULL; - if (!IOMMU_PTE_PRESENT(*pte)) - return NULL; + level -= 1; - pte = IOMMU_PTE_PAGE(*pte); - pte = &pte[IOMMU_PTE_L0_INDEX(address)]; + pte = IOMMU_PTE_PAGE(*pte); + pte = &pte[PM_LEVEL_INDEX(level, address)]; + } return pte; } @@ -1137,48 +1069,32 @@ static struct protection_domain *domain_for_device(u16 devid) * If a device is not yet associated with a domain, this function does * assigns it visible for the hardware */ -static void __attach_device(struct amd_iommu *iommu, - struct protection_domain *domain, - u16 devid) +static void attach_device(struct amd_iommu *iommu, + struct protection_domain *domain, + u16 devid) { - u64 pte_root; - - /* lock domain */ - spin_lock(&domain->lock); + unsigned long flags; + u64 pte_root = virt_to_phys(domain->pt_root); - pte_root = virt_to_phys(domain->pt_root); + domain->dev_cnt += 1; pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; - amd_iommu_dev_table[devid].data[2] = domain->id; - amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); + write_lock_irqsave(&amd_iommu_devtable_lock, flags); amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); + amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); + amd_iommu_dev_table[devid].data[2] = domain->id; amd_iommu_pd_table[devid] = domain; - - domain->dev_cnt += 1; - - /* ready */ - spin_unlock(&domain->lock); -} - -static void attach_device(struct amd_iommu *iommu, - struct protection_domain *domain, - u16 devid) -{ - unsigned long flags; - - write_lock_irqsave(&amd_iommu_devtable_lock, flags); - __attach_device(iommu, domain, devid); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - /* - * We might boot into a crash-kernel here. The crashed kernel - * left the caches in the IOMMU dirty. So we have to flush - * here to evict all dirty stuff. - */ + /* + * We might boot into a crash-kernel here. The crashed kernel + * left the caches in the IOMMU dirty. So we have to flush + * here to evict all dirty stuff. 
+ */ iommu_queue_inv_dev_entry(iommu, devid); iommu_flush_tlb_pde(iommu, domain->id); } @@ -1205,15 +1121,6 @@ static void __detach_device(struct protection_domain *domain, u16 devid) /* ready */ spin_unlock(&domain->lock); - - /* - * If we run in passthrough mode the device must be assigned to the - * passthrough domain if it is detached from any other domain - */ - if (iommu_pass_through) { - struct amd_iommu *iommu = amd_iommu_rlookup_table[devid]; - __attach_device(iommu, pt_domain, devid); - } } /* @@ -1259,8 +1166,6 @@ static int device_change_notifier(struct notifier_block *nb, case BUS_NOTIFY_UNBOUND_DRIVER: if (!domain) goto out; - if (iommu_pass_through) - break; detach_device(domain, devid); break; case BUS_NOTIFY_ADD_DEVICE: @@ -2085,47 +1990,19 @@ static void cleanup_domain(struct protection_domain *domain) write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); } -static void protection_domain_free(struct protection_domain *domain) -{ - if (!domain) - return; - - if (domain->id) - domain_id_free(domain->id); - - kfree(domain); -} - -static struct protection_domain *protection_domain_alloc(void) +static int amd_iommu_domain_init(struct iommu_domain *dom) { struct protection_domain *domain; domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) - return NULL; + return -ENOMEM; spin_lock_init(&domain->lock); + domain->mode = PAGE_MODE_3_LEVEL; domain->id = domain_id_alloc(); if (!domain->id) - goto out_err; - - return domain; - -out_err: - kfree(domain); - - return NULL; -} - -static int amd_iommu_domain_init(struct iommu_domain *dom) -{ - struct protection_domain *domain; - - domain = protection_domain_alloc(); - if (!domain) goto out_free; - - domain->mode = PAGE_MODE_3_LEVEL; domain->pt_root = (void *)get_zeroed_page(GFP_KERNEL); if (!domain->pt_root) goto out_free; @@ -2135,7 +2012,7 @@ static int amd_iommu_domain_init(struct iommu_domain *dom) return 0; out_free: - protection_domain_free(domain); + kfree(domain); return -ENOMEM; } @@ -2316,46 +2193,3 @@ static struct iommu_ops amd_iommu_ops = { .domain_has_cap = amd_iommu_domain_has_cap, }; -/***************************************************************************** - * - * The next functions do a basic initialization of IOMMU for pass through - * mode - * - * In passthrough mode the IOMMU is initialized and enabled but not used for - * DMA-API translation. 
- * - *****************************************************************************/ - -int __init amd_iommu_init_passthrough(void) -{ - struct pci_dev *dev = NULL; - u16 devid, devid2; - - /* allocate passthroug domain */ - pt_domain = protection_domain_alloc(); - if (!pt_domain) - return -ENOMEM; - - pt_domain->mode |= PAGE_MODE_NONE; - - while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { - struct amd_iommu *iommu; - - devid = calc_devid(dev->bus->number, dev->devfn); - if (devid > amd_iommu_last_bdf) - continue; - - devid2 = amd_iommu_alias_table[devid]; - - iommu = amd_iommu_rlookup_table[devid2]; - if (!iommu) - continue; - - __attach_device(iommu, pt_domain, devid); - __attach_device(iommu, pt_domain, devid2); - } - - pr_info("AMD-Vi: Initialized for Passthrough Mode\n"); - - return 0; -} diff --git a/trunk/arch/x86/kernel/amd_iommu_init.c b/trunk/arch/x86/kernel/amd_iommu_init.c index b4b61d462dcc..c1b17e97252e 100644 --- a/trunk/arch/x86/kernel/amd_iommu_init.c +++ b/trunk/arch/x86/kernel/amd_iommu_init.c @@ -252,7 +252,7 @@ static void __init iommu_feature_disable(struct amd_iommu *iommu, u8 bit) /* Function to enable the hardware */ static void iommu_enable(struct amd_iommu *iommu) { - printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", + printk(KERN_INFO "AMD IOMMU: Enabling IOMMU at %s cap 0x%hx\n", dev_name(&iommu->dev->dev), iommu->cap_ptr); iommu_feature_enable(iommu, CONTROL_IOMMU_EN); @@ -434,20 +434,6 @@ static u8 * __init alloc_command_buffer(struct amd_iommu *iommu) return cmd_buf; } -/* - * This function resets the command buffer if the IOMMU stopped fetching - * commands from it. - */ -void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) -{ - iommu_feature_disable(iommu, CONTROL_CMDBUF_EN); - - writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - - iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); -} - /* * This function writes the command buffer address to the hardware and * enables it. 
@@ -464,7 +450,11 @@ static void iommu_enable_command_buffer(struct amd_iommu *iommu) memcpy_toio(iommu->mmio_base + MMIO_CMD_BUF_OFFSET, &entry, sizeof(entry)); - amd_iommu_reset_cmd_buffer(iommu); + /* set head and tail to zero manually */ + writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); + writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + + iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); } static void __init free_command_buffer(struct amd_iommu *iommu) @@ -868,7 +858,7 @@ static int __init init_iommu_all(struct acpi_table_header *table) switch (*p) { case ACPI_IVHD_TYPE: - DUMP_printk("device: %02x:%02x.%01x cap: %04x " + DUMP_printk("IOMMU: device: %02x:%02x.%01x cap: %04x " "seg: %d flags: %01x info %04x\n", PCI_BUS(h->devid), PCI_SLOT(h->devid), PCI_FUNC(h->devid), h->cap_ptr, @@ -912,7 +902,7 @@ static int __init iommu_setup_msi(struct amd_iommu *iommu) r = request_irq(iommu->dev->irq, amd_iommu_int_handler, IRQF_SAMPLE_RANDOM, - "AMD-Vi", + "AMD IOMMU", NULL); if (r) { @@ -1160,7 +1150,7 @@ int __init amd_iommu_init(void) if (no_iommu) { - printk(KERN_INFO "AMD-Vi disabled by kernel command line\n"); + printk(KERN_INFO "AMD IOMMU disabled by kernel command line\n"); return 0; } @@ -1252,28 +1242,22 @@ int __init amd_iommu_init(void) if (ret) goto free; - if (iommu_pass_through) - ret = amd_iommu_init_passthrough(); - else - ret = amd_iommu_init_dma_ops(); + ret = amd_iommu_init_dma_ops(); if (ret) goto free; enable_iommus(); - if (iommu_pass_through) - goto out; - - printk(KERN_INFO "AMD-Vi: device isolation "); + printk(KERN_INFO "AMD IOMMU: device isolation "); if (amd_iommu_isolate) printk("enabled\n"); else printk("disabled\n"); if (amd_iommu_unmap_flush) - printk(KERN_INFO "AMD-Vi: IO/TLB flush on unmap enabled\n"); + printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n"); else - printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n"); + printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n"); out: return ret; diff --git a/trunk/arch/x86/kernel/pci-dma.c b/trunk/arch/x86/kernel/pci-dma.c index 873aa079d166..1a041bcf506b 100644 --- a/trunk/arch/x86/kernel/pci-dma.c +++ b/trunk/arch/x86/kernel/pci-dma.c @@ -32,14 +32,7 @@ int no_iommu __read_mostly; /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly = 0; -/* - * This variable becomes 1 if iommu=pt is passed on the kernel command line. - * If this variable is 1, IOMMU implementations do no DMA ranslation for - * devices and allow every device to access to whole physical memory. This is - * useful if a user want to use an IOMMU only for KVM device assignment to - * guests and not for driver dma translation. - */ -int iommu_pass_through __read_mostly; +int iommu_pass_through; dma_addr_t bad_dma_address __read_mostly = 0; EXPORT_SYMBOL(bad_dma_address);
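
The headline change in this patch is the reworked fetch_pte(): instead of a hard-coded
three-level walk (IOMMU_PTE_L2/L1/L0_INDEX), it loops from domain->mode - 1 down to
level 0 using the new PM_LEVEL_* macros, so page tables of one to six levels all go
through the same code path. Below is a minimal standalone sketch of that walk; the
PM_LEVEL_* macros are copied from the patch, while PTE_PRESENT()/PTE_PAGE() are
simplified stand-ins for IOMMU_PTE_PRESENT()/IOMMU_PTE_PAGE() (the in-kernel helper
translates the stored physical address through phys_to_virt, which a userspace sketch
cannot do):

	/*
	 * Sketch of the variable-depth walk in the reworked fetch_pte().
	 * PTE_PRESENT/PTE_PAGE are simplified stand-ins, not the kernel helpers.
	 */
	#include <stdint.h>
	#include <stddef.h>

	#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
	#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)

	#define PTE_PRESENT(pte)	((pte) & 1ULL)
	#define PTE_PAGE(pte)		((uint64_t *)(uintptr_t)((pte) & ~0xfffULL))

	/* mode is PAGE_MODE_1_LEVEL..PAGE_MODE_6_LEVEL; returns NULL if unmapped */
	static uint64_t *walk_pte(uint64_t *pt_root, int mode, uint64_t address)
	{
		int level = mode - 1;	/* index of the topmost level */
		uint64_t *pte = &pt_root[PM_LEVEL_INDEX(level, address)];

		while (level > 0) {
			if (!PTE_PRESENT(*pte))
				return NULL;	/* hole in the page table */

			level -= 1;
			pte = PTE_PAGE(*pte);	/* descend to the next table */
			pte = &pte[PM_LEVEL_INDEX(level, address)];
		}

		return pte;
	}

With mode == PAGE_MODE_3_LEVEL the first iteration indexes on address bits 38:30,
exactly what the old IOMMU_PTE_L2_INDEX() extracted, and PM_LEVEL_SIZE(3) caps the
translatable range at 2^39 - 1; each additional level widens the walk by nine more
address bits, which is why PM_LEVEL_SIZE() special-cases x >= 6 to the full 64-bit
space.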
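
attach_device() now fills the device table entry under the devtable lock in ascending
order: the page-table root and permission bits land in data[0]/data[1] before data[2]
sets the domain id. A sketch of the packing follows, with bit positions as defined in
amd_iommu_types.h to the best of my reading (valid and translation-valid in bits 0/1,
the paging mode in bits 11:9, read/write permission in bits 61/62); treat the
constants as illustrative rather than authoritative:

	/* Sketch of the DTE packing done in attach_device(); stand-in constants. */
	#include <stdint.h>

	#define DEV_ENTRY_MODE_MASK	0x07ULL
	#define DEV_ENTRY_MODE_SHIFT	9
	#define DTE_P	(1ULL << 0)	/* entry valid */
	#define DTE_TV	(1ULL << 1)	/* translation information valid */
	#define DTE_IR	(1ULL << 61)	/* read permission */
	#define DTE_IW	(1ULL << 62)	/* write permission */

	struct dev_table_entry { uint32_t data[8]; };

	static void set_dte(struct dev_table_entry *dte, uint64_t pt_root_phys,
			    int mode, uint16_t domain_id)
	{
		uint64_t pte_root = pt_root_phys;

		pte_root |= ((uint64_t)mode & DEV_ENTRY_MODE_MASK)
						<< DEV_ENTRY_MODE_SHIFT;
		pte_root |= DTE_IR | DTE_IW | DTE_P | DTE_TV;

		dte->data[0] = (uint32_t)pte_root;		/* lower_32_bits() */
		dte->data[1] = (uint32_t)(pte_root >> 32);	/* upper_32_bits() */
		dte->data[2] = domain_id;
	}

In the kernel the whole update sits inside write_lock_irqsave(&amd_iommu_devtable_lock),
and the device table entry plus TLB are flushed afterwards because a crashed previous
kernel may have left stale translations in the IOMMU caches.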
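
Finally, iommu_poll_events() is a conventional ring-buffer consumer: read the hardware
head and tail pointers, decode 16-byte entries until they meet, then store the new
head back so the IOMMU can reuse the slots. A compacted sketch, with the MMIO
accessors replaced by plain volatile pointers purely for illustration:

	/* Sketch of the event-log drain in iommu_poll_events(); simplified types. */
	#include <stdint.h>

	#define EVENT_ENTRY_SIZE 16	/* four u32 words per event */

	struct evt_log {
		uint8_t *buf;
		uint32_t buf_size;		/* ring size in bytes */
		volatile uint32_t *head_reg;	/* stand-ins for the MMIO regs */
		volatile uint32_t *tail_reg;
	};

	static void handle_event(uint8_t *evt)
	{
		(void)evt;	/* decode the four u32 words, as iommu_print_event() does */
	}

	static void poll_events(struct evt_log *log)
	{
		uint32_t head = *log->head_reg;
		uint32_t tail = *log->tail_reg;

		while (head != tail) {
			handle_event(log->buf + head);
			head = (head + EVENT_ENTRY_SIZE) % log->buf_size;
		}

		*log->head_reg = head;	/* hand the consumed slots back to hardware */
	}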