diff --git a/[refs] b/[refs] index 638ece9feda6..54e5eca5adf7 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 2c14ddc135d93cb4b33ee4bd4a2b05e21a4a762c +refs/heads/master: fffcda1183e93df84ad73ba7eb7782a5c354e2b3 diff --git a/trunk/Documentation/x86/x86_64/boot-options.txt b/trunk/Documentation/x86/x86_64/boot-options.txt index 092e596a1301..c54b4f503e2a 100644 --- a/trunk/Documentation/x86/x86_64/boot-options.txt +++ b/trunk/Documentation/x86/x86_64/boot-options.txt @@ -206,7 +206,7 @@ IOMMU (input/output memory management unit) (e.g. because you have < 3 GB memory). Kernel boot message: "PCI-DMA: Disabling IOMMU" - 2. : AMD GART based hardware IOMMU. + 2. : AMD GART based hardware IOMMU. Kernel boot message: "PCI-DMA: using GART IOMMU" 3. : Software IOMMU implementation. Used diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index 351c80fbba7e..8e256cc5dcd9 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -997,6 +997,9 @@ config IRQ_GT641XX config IRQ_GIC bool +config IRQ_CPU_OCTEON + bool + config MIPS_BOARDS_GEN bool @@ -1356,6 +1359,8 @@ config CPU_SB1 config CPU_CAVIUM_OCTEON bool "Cavium Octeon processor" depends on SYS_HAS_CPU_CAVIUM_OCTEON + select IRQ_CPU + select IRQ_CPU_OCTEON select CPU_HAS_PREFETCH select CPU_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_SMP diff --git a/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c b/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c index 5c956fe8760f..05f120ff90f9 100644 --- a/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c +++ b/trunk/arch/mips/alchemy/devboards/db1x00/board_setup.c @@ -127,10 +127,13 @@ const char *get_system_type(void) void __init board_setup(void) { unsigned long bcsr1, bcsr2; + u32 pin_func; bcsr1 = DB1000_BCSR_PHYS_ADDR; bcsr2 = DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS; + pin_func = 0; + #ifdef CONFIG_MIPS_DB1000 printk(KERN_INFO "AMD Alchemy Au1000/Db1000 Board\n"); #endif @@ -161,16 +164,12 @@ void __init board_setup(void) /* Not valid for Au1550 */ #if defined(CONFIG_IRDA) && \ (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) - { - u32 pin_func; - - /* Set IRFIRSEL instead of GPIO15 */ - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; - au_writel(pin_func, SYS_PINFUNC); - /* Power off until the driver is in use */ - bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, - BCSR_RESETS_IRDA_MODE_OFF); - } + /* Set IRFIRSEL instead of GPIO15 */ + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_IRF; + au_writel(pin_func, SYS_PINFUNC); + /* Power off until the driver is in use */ + bcsr_mod(BCSR_RESETS, BCSR_RESETS_IRDA_MODE_MASK, + BCSR_RESETS_IRDA_MODE_OFF); #endif bcsr_write(BCSR_PCMCIA, 0); /* turn off PCMCIA power */ @@ -178,35 +177,31 @@ void __init board_setup(void) alchemy_gpio1_input_enable(); #ifdef CONFIG_MIPS_MIRAGE - { - u32 pin_func; - - /* GPIO[20] is output */ - alchemy_gpio_direction_output(20, 0); + /* GPIO[20] is output */ + alchemy_gpio_direction_output(20, 0); - /* Set GPIO[210:208] instead of SSI_0 */ - pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; + /* Set GPIO[210:208] instead of SSI_0 */ + pin_func = au_readl(SYS_PINFUNC) | SYS_PF_S0; - /* Set GPIO[215:211] for LEDs */ - pin_func |= 5 << 2; + /* Set GPIO[215:211] for LEDs */ + pin_func |= 5 << 2; - /* Set GPIO[214:213] for more LEDs */ - pin_func |= 5 << 12; + /* Set GPIO[214:213] for more LEDs */ + pin_func |= 5 << 12; - /* Set GPIO[207:200] instead of PCMCIA/LCD */ - pin_func |= SYS_PF_LCD | SYS_PF_PC; - au_writel(pin_func, SYS_PINFUNC); + /* Set GPIO[207:200] 
instead of PCMCIA/LCD */ + pin_func |= SYS_PF_LCD | SYS_PF_PC; + au_writel(pin_func, SYS_PINFUNC); - /* - * Enable speaker amplifier. This should - * be part of the audio driver. - */ - alchemy_gpio_direction_output(209, 1); + /* + * Enable speaker amplifier. This should + * be part of the audio driver. + */ + alchemy_gpio_direction_output(209, 1); - pm_power_off = mirage_power_off; - _machine_halt = mirage_power_off; - _machine_restart = (void(*)(char *))mips_softreset; - } + pm_power_off = mirage_power_off; + _machine_halt = mirage_power_off; + _machine_restart = (void(*)(char *))mips_softreset; #endif #ifdef CONFIG_MIPS_BOSPORUS diff --git a/trunk/arch/mips/alchemy/xxs1500/init.c b/trunk/arch/mips/alchemy/xxs1500/init.c index 34a90a4bb6f4..15125c2fda7d 100644 --- a/trunk/arch/mips/alchemy/xxs1500/init.c +++ b/trunk/arch/mips/alchemy/xxs1500/init.c @@ -51,9 +51,10 @@ void __init prom_init(void) prom_init_cmdline(); memsize_str = prom_getenv("memsize"); - if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize)) + if (!memsize_str) memsize = 0x04000000; - + else + strict_strtoul(memsize_str, 0, &memsize); add_memory_region(0, memsize, BOOT_MEM_RAM); } diff --git a/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c b/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c index 9a6243676e22..88c9d963be88 100644 --- a/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c +++ b/trunk/arch/mips/boot/compressed/calc_vmlinuz_load_addr.c @@ -16,8 +16,8 @@ int main(int argc, char *argv[]) { - unsigned long long vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; struct stat sb; + uint64_t vmlinux_size, vmlinux_load_addr, vmlinuz_load_addr; if (argc != 3) { fprintf(stderr, "Usage: %s \n", diff --git a/trunk/arch/mips/cavium-octeon/Kconfig b/trunk/arch/mips/cavium-octeon/Kconfig index cad555ebeca3..caae22858163 100644 --- a/trunk/arch/mips/cavium-octeon/Kconfig +++ b/trunk/arch/mips/cavium-octeon/Kconfig @@ -1,7 +1,11 @@ -if CPU_CAVIUM_OCTEON +config CAVIUM_OCTEON_SPECIFIC_OPTIONS + bool "Enable Octeon specific options" + depends on CPU_CAVIUM_OCTEON + default "y" config CAVIUM_CN63XXP1 bool "Enable CN63XXP1 errata worarounds" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "n" help The CN63XXP1 chip requires build time workarounds to @@ -12,6 +16,7 @@ config CAVIUM_CN63XXP1 config CAVIUM_OCTEON_2ND_KERNEL bool "Build the kernel to be used as a 2nd kernel on the same chip" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "n" help This option configures this kernel to be linked at a different @@ -21,6 +26,7 @@ config CAVIUM_OCTEON_2ND_KERNEL config CAVIUM_OCTEON_HW_FIX_UNALIGNED bool "Enable hardware fixups of unaligned loads and stores" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "y" help Configure the Octeon hardware to automatically fix unaligned loads @@ -32,6 +38,7 @@ config CAVIUM_OCTEON_HW_FIX_UNALIGNED config CAVIUM_OCTEON_CVMSEG_SIZE int "Number of L1 cache lines reserved for CVMSEG memory" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS range 0 54 default 1 help @@ -43,6 +50,7 @@ config CAVIUM_OCTEON_CVMSEG_SIZE config CAVIUM_OCTEON_LOCK_L2 bool "Lock often used kernel code in the L2" + depends on CAVIUM_OCTEON_SPECIFIC_OPTIONS default "y" help Enable locking parts of the kernel into the L2 cache. 
@@ -85,6 +93,7 @@ config CAVIUM_OCTEON_LOCK_L2_MEMCPY config ARCH_SPARSEMEM_ENABLE def_bool y select SPARSEMEM_STATIC + depends on CPU_CAVIUM_OCTEON config CAVIUM_OCTEON_HELPER def_bool y @@ -98,8 +107,6 @@ config NEED_SG_DMA_LENGTH config SWIOTLB def_bool y + depends on CPU_CAVIUM_OCTEON select IOMMU_HELPER select NEED_SG_DMA_LENGTH - - -endif # CPU_CAVIUM_OCTEON diff --git a/trunk/arch/mips/include/asm/cache.h b/trunk/arch/mips/include/asm/cache.h index b4db69fbc40c..650ac9ba734c 100644 --- a/trunk/arch/mips/include/asm/cache.h +++ b/trunk/arch/mips/include/asm/cache.h @@ -17,6 +17,6 @@ #define SMP_CACHE_SHIFT L1_CACHE_SHIFT #define SMP_CACHE_BYTES L1_CACHE_BYTES -#define __read_mostly __attribute__((__section__(".data..read_mostly"))) +#define __read_mostly __attribute__((__section__(".data.read_mostly"))) #endif /* _ASM_CACHE_H */ diff --git a/trunk/arch/mips/include/asm/cevt-r4k.h b/trunk/arch/mips/include/asm/cevt-r4k.h index 65f9bdd02f1f..fa4328f9124f 100644 --- a/trunk/arch/mips/include/asm/cevt-r4k.h +++ b/trunk/arch/mips/include/asm/cevt-r4k.h @@ -14,9 +14,6 @@ #ifndef __ASM_CEVT_R4K_H #define __ASM_CEVT_R4K_H -#include -#include - DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); void mips_event_handler(struct clock_event_device *dev); diff --git a/trunk/arch/mips/include/asm/hugetlb.h b/trunk/arch/mips/include/asm/hugetlb.h index c565b7c3f0b5..f5e856015329 100644 --- a/trunk/arch/mips/include/asm/hugetlb.h +++ b/trunk/arch/mips/include/asm/hugetlb.h @@ -70,7 +70,6 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { - flush_tlb_mm(vma->vm_mm); } static inline int huge_pte_none(pte_t pte) diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h index ed72e6a26b73..32978d32561a 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm963xx_tag.h @@ -88,7 +88,7 @@ struct bcm_tag { char kernel_crc[CRC_LEN]; /* 228-235: Unused at present */ char reserved1[8]; - /* 236-239: CRC32 of header excluding last 20 bytes */ + /* 236-239: CRC32 of header excluding tagVersion */ char header_crc[CRC_LEN]; /* 240-255: Unused at present */ char reserved2[16]; diff --git a/trunk/arch/mips/jazz/jazzdma.c b/trunk/arch/mips/jazz/jazzdma.c index 2d8e447cb828..9ce9f64cb76f 100644 --- a/trunk/arch/mips/jazz/jazzdma.c +++ b/trunk/arch/mips/jazz/jazzdma.c @@ -211,7 +211,7 @@ EXPORT_SYMBOL(vdma_free); */ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) { - int first, pages; + int first, pages, npages; if (laddr > 0xffffff) { if (vdma_debug) @@ -228,7 +228,8 @@ int vdma_remap(unsigned long laddr, unsigned long paddr, unsigned long size) return -EINVAL; /* invalid physical address */ } - pages = (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; + npages = pages = + (((paddr & (VDMA_PAGESIZE - 1)) + size) >> 12) + 1; first = laddr >> 12; if (vdma_debug) printk("vdma_remap: first=%x, pages=%x\n", first, pages); diff --git a/trunk/arch/mips/jz4740/dma.c b/trunk/arch/mips/jz4740/dma.c index d7feb898692c..5ebe75a68350 100644 --- a/trunk/arch/mips/jz4740/dma.c +++ b/trunk/arch/mips/jz4740/dma.c @@ -242,7 +242,9 @@ EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) { - (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); + uint32_t status; + + status = 
jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); diff --git a/trunk/arch/mips/jz4740/time.c b/trunk/arch/mips/jz4740/time.c index eaa853a54af6..fe01678d94fd 100644 --- a/trunk/arch/mips/jz4740/time.c +++ b/trunk/arch/mips/jz4740/time.c @@ -89,7 +89,7 @@ static int jz4740_clockevent_set_next(unsigned long evt, static struct clock_event_device jz4740_clockevent = { .name = "jz4740-timer", - .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .features = CLOCK_EVT_FEAT_PERIODIC, .set_next_event = jz4740_clockevent_set_next, .set_mode = jz4740_clockevent_set_mode, .rating = 200, diff --git a/trunk/arch/mips/jz4740/timer.c b/trunk/arch/mips/jz4740/timer.c index 654d5c3900b6..b2c015129055 100644 --- a/trunk/arch/mips/jz4740/timer.c +++ b/trunk/arch/mips/jz4740/timer.c @@ -27,13 +27,11 @@ void jz4740_timer_enable_watchdog(void) { writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_CLEAR); } -EXPORT_SYMBOL_GPL(jz4740_timer_enable_watchdog); void jz4740_timer_disable_watchdog(void) { writel(BIT(16), jz4740_timer_base + JZ_REG_TIMER_STOP_SET); } -EXPORT_SYMBOL_GPL(jz4740_timer_disable_watchdog); void __init jz4740_timer_init(void) { diff --git a/trunk/arch/mips/kernel/ftrace.c b/trunk/arch/mips/kernel/ftrace.c index feb8021a305f..94ca2b018af7 100644 --- a/trunk/arch/mips/kernel/ftrace.c +++ b/trunk/arch/mips/kernel/ftrace.c @@ -23,7 +23,6 @@ #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ -#define JUMP_RANGE_MASK ((1UL << 28) - 1) #define INSN_NOP 0x00000000 /* nop */ #define INSN_JAL(addr) \ @@ -45,12 +44,12 @@ static inline void ftrace_dyn_arch_init_insns(void) /* jal (ftrace_caller + 8), jump over the first two instruction */ buf = (u32 *)&insn_jal_ftrace_caller; - uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); + uasm_i_jal(&buf, (FTRACE_ADDR + 8)); #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* j ftrace_graph_caller */ buf = (u32 *)&insn_j_ftrace_graph_caller; - uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); + uasm_i_j(&buf, (unsigned long)ftrace_graph_caller); #endif } diff --git a/trunk/arch/mips/kernel/ptrace.c b/trunk/arch/mips/kernel/ptrace.c index 584e6b55c865..d21c388c0116 100644 --- a/trunk/arch/mips/kernel/ptrace.c +++ b/trunk/arch/mips/kernel/ptrace.c @@ -540,8 +540,8 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit) secure_computing(regs->regs[2]); if (unlikely(current->audit_context) && entryexit) - audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]), - -regs->regs[2]); + audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]), + regs->regs[2]); if (!(current->ptrace & PT_PTRACED)) goto out; diff --git a/trunk/arch/mips/kernel/scall32-o32.S b/trunk/arch/mips/kernel/scall32-o32.S index 7f1377eb22d3..7f5468b38d4c 100644 --- a/trunk/arch/mips/kernel/scall32-o32.S +++ b/trunk/arch/mips/kernel/scall32-o32.S @@ -565,7 +565,7 @@ einval: li v0, -ENOSYS sys sys_ioprio_get 2 /* 4315 */ sys sys_utimensat 4 sys sys_signalfd 3 - sys sys_ni_syscall 0 /* was timerfd */ + sys sys_ni_syscall 0 sys sys_eventfd 1 sys sys_fallocate 6 /* 4320 */ sys sys_timerfd_create 2 diff --git a/trunk/arch/mips/kernel/scall64-64.S b/trunk/arch/mips/kernel/scall64-64.S index 7c0ef7f128bf..a2e1fcbc41dc 100644 --- a/trunk/arch/mips/kernel/scall64-64.S +++ b/trunk/arch/mips/kernel/scall64-64.S @@ -404,7 +404,7 @@ sys_call_table: PTR sys_ioprio_get PTR sys_utimensat 
/* 5275 */ PTR sys_signalfd - PTR sys_ni_syscall /* was timerfd */ + PTR sys_ni_syscall PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create /* 5280 */ diff --git a/trunk/arch/mips/kernel/scall64-n32.S b/trunk/arch/mips/kernel/scall64-n32.S index de6c5563beab..b2c7624995b8 100644 --- a/trunk/arch/mips/kernel/scall64-n32.S +++ b/trunk/arch/mips/kernel/scall64-n32.S @@ -403,7 +403,7 @@ EXPORT(sysn32_call_table) PTR sys_ioprio_get PTR compat_sys_utimensat PTR compat_sys_signalfd /* 6280 */ - PTR sys_ni_syscall /* was timerfd */ + PTR sys_ni_syscall PTR sys_eventfd PTR sys_fallocate PTR sys_timerfd_create diff --git a/trunk/arch/mips/kernel/scall64-o32.S b/trunk/arch/mips/kernel/scall64-o32.S index b0541dda8830..049a9c8c49a0 100644 --- a/trunk/arch/mips/kernel/scall64-o32.S +++ b/trunk/arch/mips/kernel/scall64-o32.S @@ -522,7 +522,7 @@ sys_call_table: PTR sys_ioprio_get /* 4315 */ PTR compat_sys_utimensat PTR compat_sys_signalfd - PTR sys_ni_syscall /* was timerfd */ + PTR sys_ni_syscall PTR sys_eventfd PTR sys32_fallocate /* 4320 */ PTR sys_timerfd_create diff --git a/trunk/arch/mips/kernel/vmlinux.lds.S b/trunk/arch/mips/kernel/vmlinux.lds.S index e4b0b0bec039..832afbb87588 100644 --- a/trunk/arch/mips/kernel/vmlinux.lds.S +++ b/trunk/arch/mips/kernel/vmlinux.lds.S @@ -74,7 +74,6 @@ SECTIONS INIT_TASK_DATA(PAGE_SIZE) NOSAVE_DATA CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) - READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) DATA_DATA CONSTRUCTORS } diff --git a/trunk/arch/mips/loongson/common/env.c b/trunk/arch/mips/loongson/common/env.c index d93830ad6113..11b193f848f8 100644 --- a/trunk/arch/mips/loongson/common/env.c +++ b/trunk/arch/mips/loongson/common/env.c @@ -29,10 +29,9 @@ unsigned long memsize, highmemsize; #define parse_even_earlier(res, option, p) \ do { \ - unsigned int tmp __maybe_unused; \ - \ + int ret; \ if (strncmp(option, (char *)p, strlen(option)) == 0) \ - tmp = strict_strtol((char *)p + strlen(option"="), 10, &res); \ + ret = strict_strtol((char *)p + strlen(option"="), 10, &res); \ } while (0) void __init prom_init_env(void) diff --git a/trunk/arch/mips/mm/c-r4k.c b/trunk/arch/mips/mm/c-r4k.c index 71bddf8f7d25..b4923a75cb4b 100644 --- a/trunk/arch/mips/mm/c-r4k.c +++ b/trunk/arch/mips/mm/c-r4k.c @@ -1075,6 +1075,7 @@ static int __cpuinit probe_scache(void) unsigned long flags, addr, begin, end, pow2; unsigned int config = read_c0_config(); struct cpuinfo_mips *c = ¤t_cpu_data; + int tmp; if (config & CONF_SC) return 0; @@ -1107,6 +1108,7 @@ static int __cpuinit probe_scache(void) /* Now search for the wrap around point. */ pow2 = (128 * 1024); + tmp = 0; for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { cache_op(Index_Load_Tag_SD, addr); __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... 
*/ diff --git a/trunk/arch/mips/mm/tlbex.c b/trunk/arch/mips/mm/tlbex.c index f5734c2c8097..5ef294fbb6e7 100644 --- a/trunk/arch/mips/mm/tlbex.c +++ b/trunk/arch/mips/mm/tlbex.c @@ -1151,8 +1151,8 @@ static void __cpuinit build_r4000_tlb_refill_handler(void) struct uasm_reloc *r = relocs; u32 *f; unsigned int final_len; - struct mips_huge_tlb_info htlb_info __maybe_unused; - enum vmalloc64_mode vmalloc_mode __maybe_unused; + struct mips_huge_tlb_info htlb_info; + enum vmalloc64_mode vmalloc_mode; memset(tlb_handler, 0, sizeof(tlb_handler)); memset(labels, 0, sizeof(labels)); diff --git a/trunk/arch/mips/mti-malta/malta-init.c b/trunk/arch/mips/mti-malta/malta-init.c index 31180c321a1a..414f0c99b196 100644 --- a/trunk/arch/mips/mti-malta/malta-init.c +++ b/trunk/arch/mips/mti-malta/malta-init.c @@ -193,6 +193,8 @@ extern struct plat_smp_ops msmtc_smp_ops; void __init prom_init(void) { + int result; + prom_argc = fw_arg0; _prom_argv = (int *) fw_arg1; _prom_envp = (int *) fw_arg2; @@ -358,14 +360,20 @@ void __init prom_init(void) #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif -#ifdef CONFIG_MIPS_CMP /* Early detection of CMP support */ - if (gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ)) + result = gcmp_probe(GCMP_BASE_ADDR, GCMP_ADDRSPACE_SZ); + +#ifdef CONFIG_MIPS_CMP + if (result) register_smp_ops(&cmp_smp_ops); - else #endif #ifdef CONFIG_MIPS_MT_SMP +#ifdef CONFIG_MIPS_CMP + if (!result) register_smp_ops(&vsmp_smp_ops); +#else + register_smp_ops(&vsmp_smp_ops); +#endif #endif #ifdef CONFIG_MIPS_MT_SMTC register_smp_ops(&msmtc_smp_ops); diff --git a/trunk/arch/mips/mti-malta/malta-int.c b/trunk/arch/mips/mti-malta/malta-int.c index e85c977328da..9027061f0ead 100644 --- a/trunk/arch/mips/mti-malta/malta-int.c +++ b/trunk/arch/mips/mti-malta/malta-int.c @@ -56,6 +56,7 @@ static DEFINE_RAW_SPINLOCK(mips_irq_lock); static inline int mips_pcibios_iack(void) { int irq; + u32 dummy; /* * Determine highest priority pending interrupt by performing @@ -82,7 +83,7 @@ static inline int mips_pcibios_iack(void) BONITO_PCIMAP_CFG = 0x20000; /* Flush Bonito register block */ - (void) BONITO_PCIMAP_CFG; + dummy = BONITO_PCIMAP_CFG; iob(); /* sync */ irq = __raw_readl((u32 *)_pcictrl_bonito_pcicfg); diff --git a/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c b/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c index 98fd0099d964..f9b9dcdfa9dd 100644 --- a/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c +++ b/trunk/arch/mips/pmc-sierra/msp71xx/msp_irq_per.c @@ -97,7 +97,7 @@ static int msp_per_irq_set_affinity(struct irq_data *d, static struct irq_chip msp_per_irq_controller = { .name = "MSP_PER", - .irq_enable = unmask_per_irq, + .irq_enable = unmask_per_irq. 
.irq_disable = mask_per_irq, .irq_ack = msp_per_irq_ack, #ifdef CONFIG_SMP diff --git a/trunk/arch/mips/power/hibernate.S b/trunk/arch/mips/power/hibernate.S index f8a751c03282..dbb5c7b4b70f 100644 --- a/trunk/arch/mips/power/hibernate.S +++ b/trunk/arch/mips/power/hibernate.S @@ -35,7 +35,7 @@ LEAF(swsusp_arch_resume) 0: PTR_L t1, PBE_ADDRESS(t0) /* source */ PTR_L t2, PBE_ORIG_ADDRESS(t0) /* destination */ - PTR_ADDU t3, t1, PAGE_SIZE + PTR_ADDIU t3, t1, PAGE_SIZE 1: REG_L t8, (t1) REG_S t8, (t2) diff --git a/trunk/arch/mips/sgi-ip22/ip22-platform.c b/trunk/arch/mips/sgi-ip22/ip22-platform.c index 698904daf901..deddbf0ebe5c 100644 --- a/trunk/arch/mips/sgi-ip22/ip22-platform.c +++ b/trunk/arch/mips/sgi-ip22/ip22-platform.c @@ -132,7 +132,7 @@ static struct platform_device eth1_device = { */ static int __init sgiseeq_devinit(void) { - unsigned int pbdma __maybe_unused; + unsigned int tmp; int res, i; eth0_pd.hpc = hpc3c0; @@ -151,7 +151,7 @@ static int __init sgiseeq_devinit(void) /* Second HPC is missing? */ if (ip22_is_fullhouse() || - get_dbe(pbdma, (unsigned int *)&hpc3c1->pbdma[1])) + get_dbe(tmp, (unsigned int *)&hpc3c1->pbdma[1])) return 0; sgimc->giopar |= SGIMC_GIOPAR_MASTEREXP1 | SGIMC_GIOPAR_EXP164 | diff --git a/trunk/arch/mips/sgi-ip22/ip22-time.c b/trunk/arch/mips/sgi-ip22/ip22-time.c index 1a94c9894188..603fc91c1030 100644 --- a/trunk/arch/mips/sgi-ip22/ip22-time.c +++ b/trunk/arch/mips/sgi-ip22/ip22-time.c @@ -32,7 +32,7 @@ static unsigned long dosample(void) { u32 ct0, ct1; - u8 msb; + u8 msb, lsb; /* Start the counter. */ sgint->tcword = (SGINT_TCWORD_CNT2 | SGINT_TCWORD_CALL | @@ -46,7 +46,7 @@ static unsigned long dosample(void) /* Latch and spin until top byte of counter2 is zero */ do { writeb(SGINT_TCWORD_CNT2 | SGINT_TCWORD_CLAT, &sgint->tcword); - (void) readb(&sgint->tcnt2); + lsb = readb(&sgint->tcnt2); msb = readb(&sgint->tcnt2); ct1 = read_c0_count(); } while (msb); diff --git a/trunk/arch/mips/sgi-ip27/ip27-hubio.c b/trunk/arch/mips/sgi-ip27/ip27-hubio.c index cd0d5b06cd83..a1fa4abb3f6a 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-hubio.c +++ b/trunk/arch/mips/sgi-ip27/ip27-hubio.c @@ -29,6 +29,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, unsigned long xtalk_addr, size_t size) { nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); + volatile hubreg_t junk; unsigned i; /* use small-window mapping if possible */ @@ -63,7 +64,7 @@ unsigned long hub_pio_map(cnodeid_t cnode, xwidgetnum_t widget, * after we write it. 
*/ IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr); - (void) HUB_L(IIO_ITTE_GET(nasid, i)); + junk = HUB_L(IIO_ITTE_GET(nasid, i)); return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE); } diff --git a/trunk/arch/mips/sgi-ip27/ip27-klnuma.c b/trunk/arch/mips/sgi-ip27/ip27-klnuma.c index 1d1919a44e88..c3d30a88daf3 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/trunk/arch/mips/sgi-ip27/ip27-klnuma.c @@ -54,8 +54,11 @@ void __init setup_replication_mask(void) static __init void set_ktext_source(nasid_t client_nasid, nasid_t server_nasid) { + cnodeid_t client_cnode; kern_vars_t *kvp; + client_cnode = NASID_TO_COMPACT_NODEID(client_nasid); + kvp = &hub_data(client_nasid)->kern_vars; KERN_VARS_ADDR(client_nasid) = (unsigned long)kvp; diff --git a/trunk/arch/mips/sni/time.c b/trunk/arch/mips/sni/time.c index 0904d4d30cb3..c76151b56568 100644 --- a/trunk/arch/mips/sni/time.c +++ b/trunk/arch/mips/sni/time.c @@ -95,7 +95,7 @@ static void __init sni_a20r_timer_setup(void) static __init unsigned long dosample(void) { u32 ct0, ct1; - volatile u8 msb; + volatile u8 msb, lsb; /* Start the counter. */ outb_p(0x34, 0x43); @@ -108,7 +108,7 @@ static __init unsigned long dosample(void) /* Latch and spin until top byte of counter0 is zero */ do { outb(0x00, 0x43); - (void) inb(0x40); + lsb = inb(0x40); msb = inb(0x40); ct1 = read_c0_count(); } while (msb); diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 8cc29da2d689..cc6c53a95bfd 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -690,7 +690,6 @@ config AMD_IOMMU bool "AMD IOMMU support" select SWIOTLB select PCI_MSI - select PCI_IOV depends on X86_64 && PCI && ACPI ---help--- With this option you can enable support for AMD IOMMU hardware in diff --git a/trunk/arch/x86/include/asm/amd_iommu_proto.h b/trunk/arch/x86/include/asm/amd_iommu_proto.h index 55d95eb789b3..916bc8111a01 100644 --- a/trunk/arch/x86/include/asm/amd_iommu_proto.h +++ b/trunk/arch/x86/include/asm/amd_iommu_proto.h @@ -19,12 +19,13 @@ #ifndef _ASM_X86_AMD_IOMMU_PROTO_H #define _ASM_X86_AMD_IOMMU_PROTO_H -#include +struct amd_iommu; extern int amd_iommu_init_dma_ops(void); extern int amd_iommu_init_passthrough(void); -extern irqreturn_t amd_iommu_int_thread(int irq, void *data); extern irqreturn_t amd_iommu_int_handler(int irq, void *data); +extern void amd_iommu_flush_all_domains(void); +extern void amd_iommu_flush_all_devices(void); extern void amd_iommu_apply_erratum_63(u16 devid); extern void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu); extern int amd_iommu_init_devices(void); @@ -43,12 +44,4 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev) (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); } -static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) -{ - if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) - return false; - - return !!(iommu->features & f); -} - #endif /* _ASM_X86_AMD_IOMMU_PROTO_H */ diff --git a/trunk/arch/x86/include/asm/amd_iommu_types.h b/trunk/arch/x86/include/asm/amd_iommu_types.h index 4c9982995414..e3509fc303bf 100644 --- a/trunk/arch/x86/include/asm/amd_iommu_types.h +++ b/trunk/arch/x86/include/asm/amd_iommu_types.h @@ -68,25 +68,12 @@ #define MMIO_CONTROL_OFFSET 0x0018 #define MMIO_EXCL_BASE_OFFSET 0x0020 #define MMIO_EXCL_LIMIT_OFFSET 0x0028 -#define MMIO_EXT_FEATURES 0x0030 #define MMIO_CMD_HEAD_OFFSET 0x2000 #define MMIO_CMD_TAIL_OFFSET 0x2008 #define MMIO_EVT_HEAD_OFFSET 0x2010 #define MMIO_EVT_TAIL_OFFSET 0x2018 #define MMIO_STATUS_OFFSET 0x2020 - -/* Extended Feature Bits 
*/ -#define FEATURE_PREFETCH (1ULL<<0) -#define FEATURE_PPR (1ULL<<1) -#define FEATURE_X2APIC (1ULL<<2) -#define FEATURE_NX (1ULL<<3) -#define FEATURE_GT (1ULL<<4) -#define FEATURE_IA (1ULL<<6) -#define FEATURE_GA (1ULL<<7) -#define FEATURE_HE (1ULL<<8) -#define FEATURE_PC (1ULL<<9) - /* MMIO status bits */ #define MMIO_STATUS_COM_WAIT_INT_MASK 0x04 @@ -126,9 +113,7 @@ /* command specific defines */ #define CMD_COMPL_WAIT 0x01 #define CMD_INV_DEV_ENTRY 0x02 -#define CMD_INV_IOMMU_PAGES 0x03 -#define CMD_INV_IOTLB_PAGES 0x04 -#define CMD_INV_ALL 0x08 +#define CMD_INV_IOMMU_PAGES 0x03 #define CMD_COMPL_WAIT_STORE_MASK 0x01 #define CMD_COMPL_WAIT_INT_MASK 0x02 @@ -230,8 +215,6 @@ #define IOMMU_PTE_IR (1ULL << 61) #define IOMMU_PTE_IW (1ULL << 62) -#define DTE_FLAG_IOTLB 0x01 - #define IOMMU_PAGE_MASK (((1ULL << 52) - 1) & ~0xfffULL) #define IOMMU_PTE_PRESENT(pte) ((pte) & IOMMU_PTE_P) #define IOMMU_PTE_PAGE(pte) (phys_to_virt((pte) & IOMMU_PAGE_MASK)) @@ -244,7 +227,6 @@ /* IOMMU capabilities */ #define IOMMU_CAP_IOTLB 24 #define IOMMU_CAP_NPCACHE 26 -#define IOMMU_CAP_EFR 27 #define MAX_DOMAIN_ID 65536 @@ -267,8 +249,6 @@ extern bool amd_iommu_dump; /* global flag if IOMMUs cache non-present entries */ extern bool amd_iommu_np_cache; -/* Only true if all IOMMUs support device IOTLBs */ -extern bool amd_iommu_iotlb_sup; /* * Make iterating over all IOMMUs easier @@ -391,9 +371,6 @@ struct amd_iommu { /* flags read from acpi table */ u8 acpi_flags; - /* Extended features */ - u64 features; - /* * Capability pointer. There could be more than one IOMMU per PCI * device function if there are more than one AMD IOMMU capability @@ -432,6 +409,9 @@ struct amd_iommu { /* if one, we need to send a completion wait command */ bool need_sync; + /* becomes true if a command buffer reset is running */ + bool reset_in_progress; + /* default dma_ops domain for that IOMMU */ struct dma_ops_domain *default_dom; diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index 7338ef2218bc..97ebf82e0b7f 100644 --- a/trunk/arch/x86/kernel/Makefile +++ b/trunk/arch/x86/kernel/Makefile @@ -117,7 +117,7 @@ obj-$(CONFIG_OF) += devicetree.o ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_AUDIT) += audit_64.o - obj-$(CONFIG_GART_IOMMU) += pci-gart_64.o aperture_64.o + obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu_init.o amd_iommu.o diff --git a/trunk/arch/x86/kernel/pci-gart_64.c b/trunk/arch/x86/kernel/amd_gart_64.c similarity index 100% rename from trunk/arch/x86/kernel/pci-gart_64.c rename to trunk/arch/x86/kernel/amd_gart_64.c diff --git a/trunk/arch/x86/kernel/amd_iommu.c b/trunk/arch/x86/kernel/amd_iommu.c index 873e7e1ead7b..57ca77787220 100644 --- a/trunk/arch/x86/kernel/amd_iommu.c +++ b/trunk/arch/x86/kernel/amd_iommu.c @@ -18,7 +18,6 @@ */ #include -#include #include #include #include @@ -26,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -36,7 +34,7 @@ #define CMD_SET_TYPE(cmd, t) ((cmd)->data[1] |= ((t) << 28)) -#define LOOP_TIMEOUT 100000 +#define EXIT_LOOP_COUNT 10000000 static DEFINE_RWLOCK(amd_iommu_devtable_lock); @@ -59,6 +57,7 @@ struct iommu_cmd { u32 data[4]; }; +static void reset_iommu_command_buffer(struct amd_iommu *iommu); static void update_domain(struct protection_domain *domain); /**************************************************************************** @@ -323,6 +322,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt) break; 
case EVENT_TYPE_ILL_CMD: printk("ILLEGAL_COMMAND_ERROR address=0x%016llx]\n", address); + iommu->reset_in_progress = true; + reset_iommu_command_buffer(iommu); dump_command(address); break; case EVENT_TYPE_CMD_HARD_ERR: @@ -366,7 +367,7 @@ static void iommu_poll_events(struct amd_iommu *iommu) spin_unlock_irqrestore(&iommu->lock, flags); } -irqreturn_t amd_iommu_int_thread(int irq, void *data) +irqreturn_t amd_iommu_int_handler(int irq, void *data) { struct amd_iommu *iommu; @@ -376,300 +377,192 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data) return IRQ_HANDLED; } -irqreturn_t amd_iommu_int_handler(int irq, void *data) -{ - return IRQ_WAKE_THREAD; -} - /**************************************************************************** * * IOMMU command queuing functions * ****************************************************************************/ -static int wait_on_sem(volatile u64 *sem) -{ - int i = 0; - - while (*sem == 0 && i < LOOP_TIMEOUT) { - udelay(1); - i += 1; - } - - if (i == LOOP_TIMEOUT) { - pr_alert("AMD-Vi: Completion-Wait loop timed out\n"); - return -EIO; - } - - return 0; -} - -static void copy_cmd_to_buffer(struct amd_iommu *iommu, - struct iommu_cmd *cmd, - u32 tail) +/* + * Writes the command to the IOMMUs command buffer and informs the + * hardware about the new command. Must be called with iommu->lock held. + */ +static int __iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) { + u32 tail, head; u8 *target; + WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); + tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); target = iommu->cmd_buf + tail; - tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - - /* Copy command to buffer */ - memcpy(target, cmd, sizeof(*cmd)); - - /* Tell the IOMMU about it */ + memcpy_toio(target, cmd, sizeof(*cmd)); + tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; + head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); + if (tail == head) + return -ENOMEM; writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); -} - -static void build_completion_wait(struct iommu_cmd *cmd, u64 address) -{ - WARN_ON(address & 0x7ULL); - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = lower_32_bits(__pa(address)) | CMD_COMPL_WAIT_STORE_MASK; - cmd->data[1] = upper_32_bits(__pa(address)); - cmd->data[2] = 1; - CMD_SET_TYPE(cmd, CMD_COMPL_WAIT); -} - -static void build_inv_dte(struct iommu_cmd *cmd, u16 devid) -{ - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = devid; - CMD_SET_TYPE(cmd, CMD_INV_DEV_ENTRY); -} - -static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, - size_t size, u16 domid, int pde) -{ - u64 pages; - int s; - - pages = iommu_num_pages(address, size, PAGE_SIZE); - s = 0; - - if (pages > 1) { - /* - * If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } - - address &= PAGE_MASK; - - memset(cmd, 0, sizeof(*cmd)); - cmd->data[1] |= domid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); - if (s) /* size bit - we flush more than one 4kb page */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; - if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ - cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; -} - -static void build_inv_iotlb_pages(struct iommu_cmd *cmd, u16 devid, int qdep, - u64 address, size_t size) -{ - u64 pages; - int s; - - pages = iommu_num_pages(address, size, PAGE_SIZE); - s = 0; - - if (pages > 1) { - /* - * 
If we have to flush more than one page, flush all - * TLB entries for this domain - */ - address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; - s = 1; - } - - address &= PAGE_MASK; - memset(cmd, 0, sizeof(*cmd)); - cmd->data[0] = devid; - cmd->data[0] |= (qdep & 0xff) << 24; - cmd->data[1] = devid; - cmd->data[2] = lower_32_bits(address); - cmd->data[3] = upper_32_bits(address); - CMD_SET_TYPE(cmd, CMD_INV_IOTLB_PAGES); - if (s) - cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; -} - -static void build_inv_all(struct iommu_cmd *cmd) -{ - memset(cmd, 0, sizeof(*cmd)); - CMD_SET_TYPE(cmd, CMD_INV_ALL); + return 0; } /* - * Writes the command to the IOMMUs command buffer and informs the - * hardware about the new command. + * General queuing function for commands. Takes iommu->lock and calls + * __iommu_queue_command(). */ static int iommu_queue_command(struct amd_iommu *iommu, struct iommu_cmd *cmd) { - u32 left, tail, head, next_tail; unsigned long flags; + int ret; - WARN_ON(iommu->cmd_buf_size & CMD_BUFFER_UNINITIALIZED); - -again: spin_lock_irqsave(&iommu->lock, flags); + ret = __iommu_queue_command(iommu, cmd); + if (!ret) + iommu->need_sync = true; + spin_unlock_irqrestore(&iommu->lock, flags); - head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - next_tail = (tail + sizeof(*cmd)) % iommu->cmd_buf_size; - left = (head - next_tail) % iommu->cmd_buf_size; - - if (left <= 2) { - struct iommu_cmd sync_cmd; - volatile u64 sem = 0; - int ret; - - build_completion_wait(&sync_cmd, (u64)&sem); - copy_cmd_to_buffer(iommu, &sync_cmd, tail); + return ret; +} - spin_unlock_irqrestore(&iommu->lock, flags); +/* + * This function waits until an IOMMU has completed a completion + * wait command + */ +static void __iommu_wait_for_completion(struct amd_iommu *iommu) +{ + int ready = 0; + unsigned status = 0; + unsigned long i = 0; - if ((ret = wait_on_sem(&sem)) != 0) - return ret; + INC_STATS_COUNTER(compl_wait); - goto again; + while (!ready && (i < EXIT_LOOP_COUNT)) { + ++i; + /* wait for the bit to become one */ + status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET); + ready = status & MMIO_STATUS_COM_WAIT_INT_MASK; } - copy_cmd_to_buffer(iommu, cmd, tail); - - /* We need to sync now to make sure all commands are processed */ - iommu->need_sync = true; - - spin_unlock_irqrestore(&iommu->lock, flags); + /* set bit back to zero */ + status &= ~MMIO_STATUS_COM_WAIT_INT_MASK; + writel(status, iommu->mmio_base + MMIO_STATUS_OFFSET); - return 0; + if (unlikely(i == EXIT_LOOP_COUNT)) + iommu->reset_in_progress = true; } /* * This function queues a completion wait command into the command * buffer of an IOMMU */ -static int iommu_completion_wait(struct amd_iommu *iommu) +static int __iommu_completion_wait(struct amd_iommu *iommu) { struct iommu_cmd cmd; - volatile u64 sem = 0; - int ret; - if (!iommu->need_sync) - return 0; + memset(&cmd, 0, sizeof(cmd)); + cmd.data[0] = CMD_COMPL_WAIT_INT_MASK; + CMD_SET_TYPE(&cmd, CMD_COMPL_WAIT); - build_completion_wait(&cmd, (u64)&sem); - - ret = iommu_queue_command(iommu, &cmd); - if (ret) - return ret; - - return wait_on_sem(&sem); + return __iommu_queue_command(iommu, &cmd); } -static int iommu_flush_dte(struct amd_iommu *iommu, u16 devid) +/* + * This function is called whenever we need to ensure that the IOMMU has + * completed execution of all commands we sent. It sends a + * COMPLETION_WAIT command and waits for it to finish. 
The IOMMU informs + * us about that by writing a value to a physical address we pass with + * the command. + */ +static int iommu_completion_wait(struct amd_iommu *iommu) { - struct iommu_cmd cmd; + int ret = 0; + unsigned long flags; - build_inv_dte(&cmd, devid); + spin_lock_irqsave(&iommu->lock, flags); - return iommu_queue_command(iommu, &cmd); -} + if (!iommu->need_sync) + goto out; -static void iommu_flush_dte_all(struct amd_iommu *iommu) -{ - u32 devid; + ret = __iommu_completion_wait(iommu); - for (devid = 0; devid <= 0xffff; ++devid) - iommu_flush_dte(iommu, devid); + iommu->need_sync = false; - iommu_completion_wait(iommu); -} + if (ret) + goto out; -/* - * This function uses heavy locking and may disable irqs for some time. But - * this is no issue because it is only called during resume. - */ -static void iommu_flush_tlb_all(struct amd_iommu *iommu) -{ - u32 dom_id; + __iommu_wait_for_completion(iommu); - for (dom_id = 0; dom_id <= 0xffff; ++dom_id) { - struct iommu_cmd cmd; - build_inv_iommu_pages(&cmd, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, - dom_id, 1); - iommu_queue_command(iommu, &cmd); - } +out: + spin_unlock_irqrestore(&iommu->lock, flags); - iommu_completion_wait(iommu); + if (iommu->reset_in_progress) + reset_iommu_command_buffer(iommu); + + return 0; } -static void iommu_flush_all(struct amd_iommu *iommu) +static void iommu_flush_complete(struct protection_domain *domain) { - struct iommu_cmd cmd; - - build_inv_all(&cmd); + int i; - iommu_queue_command(iommu, &cmd); - iommu_completion_wait(iommu); -} + for (i = 0; i < amd_iommus_present; ++i) { + if (!domain->dev_iommu[i]) + continue; -void iommu_flush_all_caches(struct amd_iommu *iommu) -{ - if (iommu_feature(iommu, FEATURE_IA)) { - iommu_flush_all(iommu); - } else { - iommu_flush_dte_all(iommu); - iommu_flush_tlb_all(iommu); + /* + * Devices of this domain are behind this IOMMU + * We need to wait for completion of all commands. 
+ */ + iommu_completion_wait(amd_iommus[i]); } } /* - * Command send function for flushing on-device TLB + * Command send function for invalidating a device table entry */ -static int device_flush_iotlb(struct device *dev, u64 address, size_t size) +static int iommu_flush_device(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); struct amd_iommu *iommu; struct iommu_cmd cmd; u16 devid; - int qdep; - qdep = pci_ats_queue_depth(pdev); devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; - build_inv_iotlb_pages(&cmd, devid, qdep, address, size); + /* Build command */ + memset(&cmd, 0, sizeof(cmd)); + CMD_SET_TYPE(&cmd, CMD_INV_DEV_ENTRY); + cmd.data[0] = devid; return iommu_queue_command(iommu, &cmd); } +static void __iommu_build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address, + u16 domid, int pde, int s) +{ + memset(cmd, 0, sizeof(*cmd)); + address &= PAGE_MASK; + CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES); + cmd->data[1] |= domid; + cmd->data[2] = lower_32_bits(address); + cmd->data[3] = upper_32_bits(address); + if (s) /* size bit - we flush more than one 4kb page */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK; + if (pde) /* PDE bit - we wan't flush everything not only the PTEs */ + cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK; +} + /* - * Command send function for invalidating a device table entry + * Generic command send function for invalidaing TLB entries */ -static int device_flush_dte(struct device *dev) +static int iommu_queue_inv_iommu_pages(struct amd_iommu *iommu, + u64 address, u16 domid, int pde, int s) { - struct amd_iommu *iommu; - struct pci_dev *pdev; - u16 devid; + struct iommu_cmd cmd; int ret; - pdev = to_pci_dev(dev); - devid = get_device_id(dev); - iommu = amd_iommu_rlookup_table[devid]; + __iommu_build_inv_iommu_pages(&cmd, address, domid, pde, s); - ret = iommu_flush_dte(iommu, devid); - if (ret) - return ret; - - if (pci_ats_enabled(pdev)) - ret = device_flush_iotlb(dev, 0, ~0UL); + ret = iommu_queue_command(iommu, &cmd); return ret; } @@ -679,14 +572,23 @@ static int device_flush_dte(struct device *dev) * It invalidates a single PTE if the range to flush is within a single * page. Otherwise it flushes the whole TLB of the IOMMU. 
*/ -static void __domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size, int pde) +static void __iommu_flush_pages(struct protection_domain *domain, + u64 address, size_t size, int pde) { - struct iommu_dev_data *dev_data; - struct iommu_cmd cmd; - int ret = 0, i; + int s = 0, i; + unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE); + + address &= PAGE_MASK; + + if (pages > 1) { + /* + * If we have to flush more than one page, flush all + * TLB entries for this domain + */ + address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS; + s = 1; + } - build_inv_iommu_pages(&cmd, address, size, domain->id, pde); for (i = 0; i < amd_iommus_present; ++i) { if (!domain->dev_iommu[i]) @@ -696,70 +598,101 @@ static void __domain_flush_pages(struct protection_domain *domain, * Devices of this domain are behind this IOMMU * We need a TLB flush */ - ret |= iommu_queue_command(amd_iommus[i], &cmd); - } - - list_for_each_entry(dev_data, &domain->dev_list, list) { - struct pci_dev *pdev = to_pci_dev(dev_data->dev); - - if (!pci_ats_enabled(pdev)) - continue; - - ret |= device_flush_iotlb(dev_data->dev, address, size); + iommu_queue_inv_iommu_pages(amd_iommus[i], address, + domain->id, pde, s); } - WARN_ON(ret); + return; } -static void domain_flush_pages(struct protection_domain *domain, - u64 address, size_t size) +static void iommu_flush_pages(struct protection_domain *domain, + u64 address, size_t size) { - __domain_flush_pages(domain, address, size, 0); + __iommu_flush_pages(domain, address, size, 0); } /* Flush the whole IO/TLB for a given protection domain */ -static void domain_flush_tlb(struct protection_domain *domain) +static void iommu_flush_tlb(struct protection_domain *domain) { - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); + __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0); } /* Flush the whole IO/TLB for a given protection domain - including PDE */ -static void domain_flush_tlb_pde(struct protection_domain *domain) +static void iommu_flush_tlb_pde(struct protection_domain *domain) { - __domain_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); + __iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1); } -static void domain_flush_complete(struct protection_domain *domain) + +/* + * This function flushes the DTEs for all devices in domain + */ +static void iommu_flush_domain_devices(struct protection_domain *domain) { - int i; + struct iommu_dev_data *dev_data; + unsigned long flags; - for (i = 0; i < amd_iommus_present; ++i) { - if (!domain->dev_iommu[i]) - continue; + spin_lock_irqsave(&domain->lock, flags); - /* - * Devices of this domain are behind this IOMMU - * We need to wait for completion of all commands. - */ - iommu_completion_wait(amd_iommus[i]); + list_for_each_entry(dev_data, &domain->dev_list, list) + iommu_flush_device(dev_data->dev); + + spin_unlock_irqrestore(&domain->lock, flags); +} + +static void iommu_flush_all_domain_devices(void) +{ + struct protection_domain *domain; + unsigned long flags; + + spin_lock_irqsave(&amd_iommu_pd_lock, flags); + + list_for_each_entry(domain, &amd_iommu_pd_list, list) { + iommu_flush_domain_devices(domain); + iommu_flush_complete(domain); } + + spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); } +void amd_iommu_flush_all_devices(void) +{ + iommu_flush_all_domain_devices(); +} /* - * This function flushes the DTEs for all devices in domain + * This function uses heavy locking and may disable irqs for some time. 
But + * this is no issue because it is only called during resume. */ -static void domain_flush_devices(struct protection_domain *domain) +void amd_iommu_flush_all_domains(void) { - struct iommu_dev_data *dev_data; + struct protection_domain *domain; unsigned long flags; - spin_lock_irqsave(&domain->lock, flags); + spin_lock_irqsave(&amd_iommu_pd_lock, flags); - list_for_each_entry(dev_data, &domain->dev_list, list) - device_flush_dte(dev_data->dev); + list_for_each_entry(domain, &amd_iommu_pd_list, list) { + spin_lock(&domain->lock); + iommu_flush_tlb_pde(domain); + iommu_flush_complete(domain); + spin_unlock(&domain->lock); + } - spin_unlock_irqrestore(&domain->lock, flags); + spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); +} + +static void reset_iommu_command_buffer(struct amd_iommu *iommu) +{ + pr_err("AMD-Vi: Resetting IOMMU command buffer\n"); + + if (iommu->reset_in_progress) + panic("AMD-Vi: ILLEGAL_COMMAND_ERROR while resetting command buffer\n"); + + amd_iommu_reset_cmd_buffer(iommu); + amd_iommu_flush_all_devices(); + amd_iommu_flush_all_domains(); + + iommu->reset_in_progress = false; } /**************************************************************************** @@ -1477,22 +1410,17 @@ static bool dma_ops_domain(struct protection_domain *domain) return domain->flags & PD_DMA_OPS_MASK; } -static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) +static void set_dte_entry(u16 devid, struct protection_domain *domain) { u64 pte_root = virt_to_phys(domain->pt_root); - u32 flags = 0; pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT; pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV; - if (ats) - flags |= DTE_FLAG_IOTLB; - - amd_iommu_dev_table[devid].data[3] |= flags; - amd_iommu_dev_table[devid].data[2] = domain->id; - amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); - amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); + amd_iommu_dev_table[devid].data[2] = domain->id; + amd_iommu_dev_table[devid].data[1] = upper_32_bits(pte_root); + amd_iommu_dev_table[devid].data[0] = lower_32_bits(pte_root); } static void clear_dte_entry(u16 devid) @@ -1509,42 +1437,34 @@ static void do_attach(struct device *dev, struct protection_domain *domain) { struct iommu_dev_data *dev_data; struct amd_iommu *iommu; - struct pci_dev *pdev; - bool ats = false; u16 devid; devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; dev_data = get_dev_data(dev); - pdev = to_pci_dev(dev); - - if (amd_iommu_iotlb_sup) - ats = pci_ats_enabled(pdev); /* Update data structures */ dev_data->domain = domain; list_add(&dev_data->list, &domain->dev_list); - set_dte_entry(devid, domain, ats); + set_dte_entry(devid, domain); /* Do reference counting */ domain->dev_iommu[iommu->index] += 1; domain->dev_cnt += 1; /* Flush the DTE entry */ - device_flush_dte(dev); + iommu_flush_device(dev); } static void do_detach(struct device *dev) { struct iommu_dev_data *dev_data; struct amd_iommu *iommu; - struct pci_dev *pdev; u16 devid; devid = get_device_id(dev); iommu = amd_iommu_rlookup_table[devid]; dev_data = get_dev_data(dev); - pdev = to_pci_dev(dev); /* decrease reference counters */ dev_data->domain->dev_iommu[iommu->index] -= 1; @@ -1556,7 +1476,7 @@ static void do_detach(struct device *dev) clear_dte_entry(devid); /* Flush the DTE entry */ - device_flush_dte(dev); + iommu_flush_device(dev); } /* @@ -1619,13 +1539,9 @@ static int __attach_device(struct device *dev, static int attach_device(struct device *dev, struct 
protection_domain *domain) { - struct pci_dev *pdev = to_pci_dev(dev); unsigned long flags; int ret; - if (amd_iommu_iotlb_sup) - pci_enable_ats(pdev, PAGE_SHIFT); - write_lock_irqsave(&amd_iommu_devtable_lock, flags); ret = __attach_device(dev, domain); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); @@ -1635,7 +1551,7 @@ static int attach_device(struct device *dev, * left the caches in the IOMMU dirty. So we have to flush * here to evict all dirty stuff. */ - domain_flush_tlb_pde(domain); + iommu_flush_tlb_pde(domain); return ret; } @@ -1682,16 +1598,12 @@ static void __detach_device(struct device *dev) */ static void detach_device(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); unsigned long flags; /* lock device table */ write_lock_irqsave(&amd_iommu_devtable_lock, flags); __detach_device(dev); write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); - - if (amd_iommu_iotlb_sup && pci_ats_enabled(pdev)) - pci_disable_ats(pdev); } /* @@ -1780,7 +1692,7 @@ static int device_change_notifier(struct notifier_block *nb, goto out; } - device_flush_dte(dev); + iommu_flush_device(dev); iommu_completion_wait(iommu); out: @@ -1841,9 +1753,8 @@ static void update_device_table(struct protection_domain *domain) struct iommu_dev_data *dev_data; list_for_each_entry(dev_data, &domain->dev_list, list) { - struct pci_dev *pdev = to_pci_dev(dev_data->dev); u16 devid = get_device_id(dev_data->dev); - set_dte_entry(devid, domain, pci_ats_enabled(pdev)); + set_dte_entry(devid, domain); } } @@ -1853,9 +1764,8 @@ static void update_domain(struct protection_domain *domain) return; update_device_table(domain); - - domain_flush_devices(domain); - domain_flush_tlb_pde(domain); + iommu_flush_domain_devices(domain); + iommu_flush_tlb_pde(domain); domain->updated = false; } @@ -2014,10 +1924,10 @@ static dma_addr_t __map_single(struct device *dev, ADD_STATS_COUNTER(alloced_io_mem, size); if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) { - domain_flush_tlb(&dma_dom->domain); + iommu_flush_tlb(&dma_dom->domain); dma_dom->need_flush = false; } else if (unlikely(amd_iommu_np_cache)) - domain_flush_pages(&dma_dom->domain, address, size); + iommu_flush_pages(&dma_dom->domain, address, size); out: return address; @@ -2066,7 +1976,7 @@ static void __unmap_single(struct dma_ops_domain *dma_dom, dma_ops_free_addresses(dma_dom, dma_addr, pages); if (amd_iommu_unmap_flush || dma_dom->need_flush) { - domain_flush_pages(&dma_dom->domain, flush_addr, size); + iommu_flush_pages(&dma_dom->domain, flush_addr, size); dma_dom->need_flush = false; } } @@ -2102,7 +2012,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, if (addr == DMA_ERROR_CODE) goto out; - domain_flush_complete(domain); + iommu_flush_complete(domain); out: spin_unlock_irqrestore(&domain->lock, flags); @@ -2129,7 +2039,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, __unmap_single(domain->priv, dma_addr, size, dir); - domain_flush_complete(domain); + iommu_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); } @@ -2194,7 +2104,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, goto unmap; } - domain_flush_complete(domain); + iommu_flush_complete(domain); out: spin_unlock_irqrestore(&domain->lock, flags); @@ -2240,7 +2150,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, s->dma_address = s->dma_length = 0; } - domain_flush_complete(domain); + iommu_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); } 
@@ -2290,7 +2200,7 @@ static void *alloc_coherent(struct device *dev, size_t size, goto out_free; } - domain_flush_complete(domain); + iommu_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); @@ -2322,7 +2232,7 @@ static void free_coherent(struct device *dev, size_t size, __unmap_single(domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); - domain_flush_complete(domain); + iommu_flush_complete(domain); spin_unlock_irqrestore(&domain->lock, flags); @@ -2566,7 +2476,7 @@ static void amd_iommu_detach_device(struct iommu_domain *dom, if (!iommu) return; - device_flush_dte(dev); + iommu_flush_device(dev); iommu_completion_wait(iommu); } @@ -2632,7 +2542,7 @@ static int amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, unmap_size = iommu_unmap_page(domain, iova, page_size); mutex_unlock(&domain->api_lock); - domain_flush_tlb_pde(domain); + iommu_flush_tlb_pde(domain); return get_order(unmap_size); } diff --git a/trunk/arch/x86/kernel/amd_iommu_init.c b/trunk/arch/x86/kernel/amd_iommu_init.c index 9179c21120a8..246d727b65b7 100644 --- a/trunk/arch/x86/kernel/amd_iommu_init.c +++ b/trunk/arch/x86/kernel/amd_iommu_init.c @@ -137,7 +137,6 @@ int amd_iommus_present; /* IOMMUs have a non-present cache? */ bool amd_iommu_np_cache __read_mostly; -bool amd_iommu_iotlb_sup __read_mostly = true; /* * The ACPI table parsing functions set this variable on an error @@ -181,12 +180,6 @@ static u32 dev_table_size; /* size of the device table */ static u32 alias_table_size; /* size of the alias table */ static u32 rlookup_table_size; /* size if the rlookup table */ -/* - * This function flushes all internal caches of - * the IOMMU used by this driver. - */ -extern void iommu_flush_all_caches(struct amd_iommu *iommu); - static inline void update_last_devid(u16 devid) { if (devid > amd_iommu_last_bdf) @@ -300,23 +293,9 @@ static void iommu_feature_disable(struct amd_iommu *iommu, u8 bit) /* Function to enable the hardware */ static void iommu_enable(struct amd_iommu *iommu) { - static const char * const feat_str[] = { - "PreF", "PPR", "X2APIC", "NX", "GT", "[5]", - "IA", "GA", "HE", "PC", NULL - }; - int i; - - printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx", + printk(KERN_INFO "AMD-Vi: Enabling IOMMU at %s cap 0x%hx\n", dev_name(&iommu->dev->dev), iommu->cap_ptr); - if (iommu->cap & (1 << IOMMU_CAP_EFR)) { - printk(KERN_CONT " extended features: "); - for (i = 0; feat_str[i]; ++i) - if (iommu_feature(iommu, (1ULL << i))) - printk(KERN_CONT " %s", feat_str[i]); - } - printk(KERN_CONT "\n"); - iommu_feature_enable(iommu, CONTROL_IOMMU_EN); } @@ -672,7 +651,7 @@ static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) static void __init init_iommu_from_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; - u32 range, misc, low, high; + u32 range, misc; int i, j; pci_read_config_dword(iommu->dev, cap_ptr + MMIO_CAP_HDR_OFFSET, @@ -688,15 +667,6 @@ static void __init init_iommu_from_pci(struct amd_iommu *iommu) MMIO_GET_LD(range)); iommu->evt_msi_num = MMIO_MSI_NUM(misc); - if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) - amd_iommu_iotlb_sup = false; - - /* read extended feature bits */ - low = readl(iommu->mmio_base + MMIO_EXT_FEATURES); - high = readl(iommu->mmio_base + MMIO_EXT_FEATURES + 4); - - iommu->features = ((u64)high << 32) | low; - if (!is_rd890_iommu(iommu->dev)) return; @@ -1034,11 +1004,10 @@ static int iommu_setup_msi(struct amd_iommu *iommu) if (pci_enable_msi(iommu->dev)) return 1; - r = request_threaded_irq(iommu->dev->irq, - 
amd_iommu_int_handler,
-                amd_iommu_int_thread,
-                0, "AMD-Vi",
-                iommu->dev);
+    r = request_irq(iommu->dev->irq, amd_iommu_int_handler,
+            IRQF_SAMPLE_RANDOM,
+            "AMD-Vi",
+            NULL);
 
     if (r) {
         pci_disable_msi(iommu->dev);
@@ -1275,7 +1244,6 @@ static void enable_iommus(void)
         iommu_set_exclusion_range(iommu);
         iommu_init_msi(iommu);
         iommu_enable(iommu);
-        iommu_flush_all_caches(iommu);
     }
 }
 
@@ -1306,8 +1274,8 @@ static void amd_iommu_resume(void)
      * we have to flush after the IOMMUs are enabled because a
      * disabled IOMMU will never execute the commands we send
      */
-    for_each_iommu(iommu)
-        iommu_flush_all_caches(iommu);
+    amd_iommu_flush_all_devices();
+    amd_iommu_flush_all_domains();
 }
 
 static int amd_iommu_suspend(void)
diff --git a/trunk/arch/x86/kernel/pci-iommu_table.c b/trunk/arch/x86/kernel/pci-iommu_table.c
index 35ccf75696eb..55d745ec1181 100644
--- a/trunk/arch/x86/kernel/pci-iommu_table.c
+++ b/trunk/arch/x86/kernel/pci-iommu_table.c
@@ -50,14 +50,20 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
                 struct iommu_table_entry *finish)
 {
     struct iommu_table_entry *p, *q, *x;
+    char sym_p[KSYM_SYMBOL_LEN];
+    char sym_q[KSYM_SYMBOL_LEN];
 
     /* Simple cyclic dependency checker. */
     for (p = start; p < finish; p++) {
         q = find_dependents_of(start, finish, p);
         x = find_dependents_of(start, finish, q);
         if (p == x) {
-            printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %pS depends on %pS and vice-versa. BREAKING IT.\n",
-                p->detect, q->detect);
+            sprint_symbol(sym_p, (unsigned long)p->detect);
+            sprint_symbol(sym_q, (unsigned long)q->detect);
+
+            printk(KERN_ERR "CYCLIC DEPENDENCY FOUND! %s depends" \
+                " on %s and vice-versa. BREAKING IT.\n",
+                sym_p, sym_q);
             /* Heavy handed way..*/
             x->depend = 0;
         }
@@ -66,8 +72,12 @@ void __init check_iommu_entries(struct iommu_table_entry *start,
     for (p = start; p < finish; p++) {
         q = find_dependents_of(p, finish, p);
         if (q && q > p) {
-            printk(KERN_ERR "EXECUTION ORDER INVALID! %pS should be called before %pS!\n",
-                p->detect, q->detect);
+            sprint_symbol(sym_p, (unsigned long)p->detect);
+            sprint_symbol(sym_q, (unsigned long)q->detect);
+
+            printk(KERN_ERR "EXECUTION ORDER INVALID! %s "\
+                "should be called before %s!\n",
+                sym_p, sym_q);
         }
     }
 }
diff --git a/trunk/drivers/pci/intel-iommu.c b/trunk/drivers/pci/intel-iommu.c
index 6af6b628175b..d552d2c77844 100644
--- a/trunk/drivers/pci/intel-iommu.c
+++ b/trunk/drivers/pci/intel-iommu.c
@@ -39,7 +39,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include "pci.h"
diff --git a/trunk/drivers/pci/iov.c b/trunk/drivers/pci/iov.c
index 42fae4776515..553d8ee55c1c 100644
--- a/trunk/drivers/pci/iov.c
+++ b/trunk/drivers/pci/iov.c
@@ -13,7 +13,6 @@
 #include
 #include
 #include
-#include
 #include "pci.h"
 
 #define VIRTFN_ID_LEN 16
diff --git a/trunk/drivers/pci/pci.h b/trunk/drivers/pci/pci.h
index 4020025f854e..a6ec200fe5ee 100644
--- a/trunk/drivers/pci/pci.h
+++ b/trunk/drivers/pci/pci.h
@@ -250,6 +250,15 @@ struct pci_sriov {
     u8 __iomem *mstate;     /* VF Migration State Array */
 };
 
+/* Address Translation Service */
+struct pci_ats {
+    int pos;        /* capability position */
+    int stu;        /* Smallest Translation Unit */
+    int qdep;       /* Invalidate Queue Depth */
+    int ref_cnt;    /* Physical Function reference count */
+    unsigned int is_enabled:1;  /* Enable bit is set */
+};
+
 #ifdef CONFIG_PCI_IOV
 extern int pci_iov_init(struct pci_dev *dev);
 extern void pci_iov_release(struct pci_dev *dev);
@@ -260,6 +269,19 @@ extern resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev,
 extern void pci_restore_iov_state(struct pci_dev *dev);
 extern int pci_iov_bus_range(struct pci_bus *bus);
+extern int pci_enable_ats(struct pci_dev *dev, int ps);
+extern void pci_disable_ats(struct pci_dev *dev);
+extern int pci_ats_queue_depth(struct pci_dev *dev);
+/**
+ * pci_ats_enabled - query the ATS status
+ * @dev: the PCI device
+ *
+ * Returns 1 if ATS capability is enabled, or 0 if not.
+ */
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+    return dev->ats && dev->ats->is_enabled;
+}
 #else
 static inline int pci_iov_init(struct pci_dev *dev)
 {
@@ -282,6 +304,21 @@ static inline int pci_iov_bus_range(struct pci_bus *bus)
     return 0;
 }
 
+static inline int pci_enable_ats(struct pci_dev *dev, int ps)
+{
+    return -ENODEV;
+}
+static inline void pci_disable_ats(struct pci_dev *dev)
+{
+}
+static inline int pci_ats_queue_depth(struct pci_dev *dev)
+{
+    return -ENODEV;
+}
+static inline int pci_ats_enabled(struct pci_dev *dev)
+{
+    return 0;
+}
 #endif /* CONFIG_PCI_IOV */
 
 static inline resource_size_t pci_resource_alignment(struct pci_dev *dev,
diff --git a/trunk/fs/nilfs2/alloc.c b/trunk/fs/nilfs2/alloc.c
index f7684483785e..0a0a66d98cce 100644
--- a/trunk/fs/nilfs2/alloc.c
+++ b/trunk/fs/nilfs2/alloc.c
@@ -646,7 +646,7 @@ int nilfs_palloc_freev(struct inode *inode, __u64 *entry_nrs, size_t nitems)
     unsigned long group, group_offset;
     int i, j, n, ret;
 
-    for (i = 0; i < nitems; i = j) {
+    for (i = 0; i < nitems; i += n) {
         group = nilfs_palloc_group(inode, entry_nrs[i], &group_offset);
         ret = nilfs_palloc_get_desc_block(inode, group, 0, &desc_bh);
         if (ret < 0)
diff --git a/trunk/fs/xfs/linux-2.6/xfs_sync.c b/trunk/fs/xfs/linux-2.6/xfs_sync.c
index 3e898a48122d..e4f9c1b0836c 100644
--- a/trunk/fs/xfs/linux-2.6/xfs_sync.c
+++ b/trunk/fs/xfs/linux-2.6/xfs_sync.c
@@ -926,7 +926,6 @@ xfs_reclaim_inodes_ag(
                     XFS_LOOKUP_BATCH,
                     XFS_ICI_RECLAIM_TAG);
         if (!nr_found) {
-            done = 1;
             rcu_read_unlock();
             break;
         }
diff --git a/trunk/fs/xfs/xfs_trans_ail.c b/trunk/fs/xfs/xfs_trans_ail.c
index 5fc2380092c8..acdb92f14d51 100644
--- a/trunk/fs/xfs/xfs_trans_ail.c
+++ b/trunk/fs/xfs/xfs_trans_ail.c
@@ -346,23 +346,20 @@ xfs_ail_delete(
  */
 STATIC void
 xfs_ail_worker(
-    struct work_struct  *work)
+    struct work_struct *work)
 {
-    struct xfs_ail      *ailp = container_of(to_delayed_work(work),
+    struct xfs_ail  *ailp = container_of(to_delayed_work(work),
                     struct xfs_ail, xa_work);
-    xfs_mount_t         *mp = ailp->xa_mount;
+    long        tout;
+    xfs_lsn_t   target = ailp->xa_target;
+    xfs_lsn_t   lsn;
+    xfs_log_item_t  *lip;
+    int         flush_log, count, stuck;
+    xfs_mount_t *mp = ailp->xa_mount;
     struct xfs_ail_cursor   *cur = &ailp->xa_cursors;
-    xfs_log_item_t      *lip;
-    xfs_lsn_t           lsn;
-    xfs_lsn_t           target;
-    long                tout = 10;
-    int                 flush_log = 0;
-    int                 stuck = 0;
-    int                 count = 0;
-    int                 push_xfsbufd = 0;
+    int         push_xfsbufd = 0;
 
     spin_lock(&ailp->xa_lock);
-    target = ailp->xa_target;
     xfs_trans_ail_cursor_init(ailp, cur);
     lip = xfs_trans_ail_cursor_first(ailp, cur, ailp->xa_last_pushed_lsn);
     if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
@@ -371,7 +368,8 @@ xfs_ail_worker(
          */
         xfs_trans_ail_cursor_done(ailp, cur);
         spin_unlock(&ailp->xa_lock);
-        goto out_done;
+        ailp->xa_last_pushed_lsn = 0;
+        return;
     }
 
     XFS_STATS_INC(xs_push_ail);
@@ -388,7 +386,8 @@ xfs_ail_worker(
      * lots of contention on the AIL lists.
      */
     lsn = lip->li_lsn;
-    while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
+    flush_log = stuck = count = 0;
+    while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
         int lock_result;
         /*
          * If we can lock the item without sleeping, unlock the AIL
@@ -481,25 +480,21 @@ xfs_ail_worker(
     }
 
     /* assume we have more work to do in a short while */
-out_done:
+    tout = 10;
     if (!count) {
         /* We're past our target or empty, so idle */
         ailp->xa_last_pushed_lsn = 0;
 
         /*
-         * We clear the XFS_AIL_PUSHING_BIT first before checking
-         * whether the target has changed. If the target has changed,
-         * this pushes the requeue race directly onto the result of the
-         * atomic test/set bit, so we are guaranteed that either the
-         * the pusher that changed the target or ourselves will requeue
-         * the work (but not both).
+         * Check for an updated push target before clearing the
+         * XFS_AIL_PUSHING_BIT. If the target changed, we've got more
+         * work to do. Wait a bit longer before starting that work.
          */
-        clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
         smp_rmb();
-        if (XFS_LSN_CMP(ailp->xa_target, target) == 0 ||
-            test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
+        if (ailp->xa_target == target) {
+            clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags);
             return;
-
+        }
         tout = 50;
     } else if (XFS_LSN_CMP(lsn, target) >= 0) {
         /*
@@ -558,7 +553,7 @@ xfs_ail_push(
      * the XFS_AIL_PUSHING_BIT.
      */
     smp_wmb();
-    xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn);
+    ailp->xa_target = threshold_lsn;
     if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags))
         queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
 }
diff --git a/trunk/include/linux/pci-ats.h b/trunk/include/linux/pci-ats.h
deleted file mode 100644
index 655824fa4c76..000000000000
--- a/trunk/include/linux/pci-ats.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#ifndef LINUX_PCI_ATS_H
-#define LINUX_PCI_ATS_H
-
-/* Address Translation Service */
-struct pci_ats {
-    int pos;        /* capability position */
-    int stu;        /* Smallest Translation Unit */
-    int qdep;       /* Invalidate Queue Depth */
-    int ref_cnt;    /* Physical Function reference count */
-    unsigned int is_enabled:1;  /* Enable bit is set */
-};
-
-#ifdef CONFIG_PCI_IOV
-
-extern int pci_enable_ats(struct pci_dev *dev, int ps);
-extern void pci_disable_ats(struct pci_dev *dev);
-extern int pci_ats_queue_depth(struct pci_dev *dev);
-/**
- * pci_ats_enabled - query the ATS status
- * @dev: the PCI device
- *
- * Returns 1 if ATS capability is enabled, or 0 if not.
- */
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
-    return dev->ats && dev->ats->is_enabled;
-}
-
-#else /* CONFIG_PCI_IOV */
-
-static inline int pci_enable_ats(struct pci_dev *dev, int ps)
-{
-    return -ENODEV;
-}
-
-static inline void pci_disable_ats(struct pci_dev *dev)
-{
-}
-
-static inline int pci_ats_queue_depth(struct pci_dev *dev)
-{
-    return -ENODEV;
-}
-
-static inline int pci_ats_enabled(struct pci_dev *dev)
-{
-    return 0;
-}
-
-#endif /* CONFIG_PCI_IOV */
-
-#endif /* LINUX_PCI_ATS_H*/
diff --git a/trunk/lib/dma-debug.c b/trunk/lib/dma-debug.c
index db07bfd9298e..4bfb0471f106 100644
--- a/trunk/lib/dma-debug.c
+++ b/trunk/lib/dma-debug.c
@@ -649,7 +649,7 @@ static int dma_debug_fs_init(void)
     return -ENOMEM;
 }
 
-static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
+static int device_dma_allocations(struct device *dev)
 {
     struct dma_debug_entry *entry;
     unsigned long flags;
@@ -660,10 +660,8 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o
     for (i = 0; i < HASH_SIZE; ++i) {
         spin_lock(&dma_entry_hash[i].lock);
         list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
-            if (entry->dev == dev) {
+            if (entry->dev == dev)
                 count += 1;
-                *out_entry = entry;
-            }
         }
         spin_unlock(&dma_entry_hash[i].lock);
     }
@@ -676,7 +674,6 @@ static int device_dma_allocations(struct device *dev, struct dma_debug_entry **o
 static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
 {
     struct device *dev = data;
-    struct dma_debug_entry *uninitialized_var(entry);
     int count;
 
     if (global_disable)
@@ -684,17 +681,12 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
 
     switch (action) {
     case BUS_NOTIFY_UNBOUND_DRIVER:
-        count = device_dma_allocations(dev, &entry);
+        count = device_dma_allocations(dev);
         if (count == 0)
             break;
-        err_printk(dev, entry, "DMA-API: device driver has pending "
+        err_printk(dev, NULL, "DMA-API: device driver has pending "
                 "DMA allocations while released from device "
-                "[count=%d]\n"
-                "One of leaked entries details: "
-                "[device address=0x%016llx] [size=%llu bytes] "
-                "[mapped with %s] [mapped as %s]\n",
-            count, entry->dev_addr, entry->size,
-            dir2name[entry->direction], type2name[entry->type]);
+                "[count=%d]\n", count);
         break;
     default:
         break;
diff --git a/trunk/net/9p/client.c b/trunk/net/9p/client.c
index a9aa2dd66482..77367745be9b 100644
--- a/trunk/net/9p/client.c
+++ b/trunk/net/9p/client.c
@@ -614,7 +614,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
 
     err = c->trans_mod->request(c, req);
     if (err < 0) {
-        if (err != -ERESTARTSYS && err != -EFAULT)
+        if (err != -ERESTARTSYS)
             c->status = Disconnected;
         goto reterr;
     }
diff --git a/trunk/net/9p/trans_common.c b/trunk/net/9p/trans_common.c
index 9a70ebdec56e..e883172f9aa2 100644
--- a/trunk/net/9p/trans_common.c
+++ b/trunk/net/9p/trans_common.c
@@ -63,7 +63,7 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
            int nr_pages, u8 rw)
 {
     uint32_t first_page_bytes = 0;
-    int32_t pdata_mapped_pages;
+    uint32_t pdata_mapped_pages;
     struct trans_rpage_info *rpinfo;
 
     *pdata_off = (__force size_t)req->tc->pubuf & (PAGE_SIZE-1);
@@ -75,9 +75,14 @@ p9_payload_gup(struct p9_req_t *req, size_t *pdata_off, int *pdata_len,
         rpinfo = req->tc->private;
         pdata_mapped_pages = get_user_pages_fast((unsigned long)req->tc->pubuf,
                 nr_pages, rw, &rpinfo->rp_data[0]);
-        if (pdata_mapped_pages <= 0)
-            return pdata_mapped_pages;
+        if (pdata_mapped_pages < 0) {
+            printk(KERN_ERR "get_user_pages_fast failed:%d udata:%p"
+                    "nr_pages:%d\n", pdata_mapped_pages,
+                    req->tc->pubuf, nr_pages);
+            pdata_mapped_pages = 0;
+            return -EIO;
+        }
 
         rpinfo->rp_nr_pages = pdata_mapped_pages;
         if (*pdata_off) {
             *pdata_len = first_page_bytes;