From 18cb304e8e3ba816136197fb09967b3d9b6d4bdf Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Fri, 24 May 2013 10:45:59 -0700
Subject: [PATCH]

--- yaml ---
r: 376195
b: refs/heads/master
c: a8432588fc1e406e800e8bf88bebf4cd390b7f46
h: refs/heads/master
i:
  376193: c6c87e8a49c10eb2050ab364f9340fe4cba79bfb
  376191: 2bf3d94ce25996b5b31843746b490f21a88668b1
v: v3
---
 [refs] | 2 +- .../Documentation/ABI/testing/sysfs-class-mtd | 6 +- trunk/Documentation/acpi/enumeration.txt | 77 + .../devicetree/bindings/dma/atmel-dma.txt | 35 +- .../devicetree/bindings/mips/ralink.txt | 17 + .../devicetree/bindings/mtd/gpmc-nand.txt | 28 +- .../devicetree/bindings/mtd/partition.txt | 36 +- .../devicetree/bindings/net/gpmc-eth.txt | 56 +- .../bindings/thermal/armada-thermal.txt | 22 + .../devicetree/bindings/vendor-prefixes.txt | 1 + .../exynos/hdmi.txt => video/exynos_hdmi.txt} | 0 .../hdmiddc.txt => video/exynos_hdmiddc.txt} | 0 .../hdmiphy.txt => video/exynos_hdmiphy.txt} | 0 .../mixer.txt => video/exynos_mixer.txt} | 0 .../Documentation/devicetree/usage-model.txt | 8 +- trunk/Documentation/dmatest.txt | 81 + trunk/Documentation/filesystems/btrfs.txt | 180 +- trunk/Documentation/filesystems/f2fs.txt | 4 +- trunk/Documentation/gpio.txt | 10 +- trunk/Documentation/kernel-parameters.txt | 21 + .../Documentation/kernel-per-CPU-kthreads.txt | 202 + trunk/Documentation/power/devices.txt | 15 +- trunk/Documentation/power/interface.txt | 4 +- trunk/Documentation/power/notifiers.txt | 6 +- trunk/Documentation/power/states.txt | 30 +- .../thermal/exynos_thermal_emulation | 8 +- trunk/Documentation/thermal/sysfs-api.txt | 28 +- trunk/Documentation/xtensa/mmu.txt | 46 + trunk/Documentation/zh_CN/gpio.txt | 8 +- trunk/MAINTAINERS | 45 +- trunk/Makefile | 4 +- trunk/arch/Kconfig | 3 + trunk/arch/alpha/Kconfig | 3 - trunk/arch/arc/Kconfig | 35 +- trunk/arch/arc/Makefile | 15 +- trunk/arch/arc/boot/Makefile | 19 +- trunk/arch/arc/boot/dts/Makefile | 4 +- trunk/arch/arc/boot/dts/abilis_tb100.dtsi | 340 ++ trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts | 127 + trunk/arch/arc/boot/dts/abilis_tb101.dtsi | 349 ++ trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts | 127 + trunk/arch/arc/boot/dts/abilis_tb10x.dtsi | 247 + trunk/arch/arc/boot/dts/nsimosci.dts | 77 + trunk/arch/arc/configs/fpga_defconfig | 3 +- trunk/arch/arc/configs/nsimosci_defconfig | 75 + trunk/arch/arc/configs/tb10x_defconfig | 117 + trunk/arch/arc/include/asm/Kbuild | 1 - trunk/arch/arc/include/asm/cache.h | 3 - trunk/arch/arc/include/asm/cacheflush.h | 72 +- trunk/arch/arc/include/asm/irq.h | 3 +- trunk/arch/arc/include/asm/page.h | 16 +- trunk/arch/arc/include/asm/pgtable.h | 3 + trunk/arch/arc/include/asm/serial.h | 10 + trunk/arch/arc/include/asm/shmparam.h | 18 + trunk/arch/arc/include/asm/tlb.h | 27 +- trunk/arch/arc/kernel/asm-offsets.c | 2 +- trunk/arch/arc/kernel/clk.c | 2 +- trunk/arch/arc/kernel/disasm.c | 2 +- trunk/arch/arc/kernel/entry.S | 8 +- trunk/arch/arc/kernel/irq.c | 25 +- trunk/arch/arc/kernel/kprobes.c | 1 - trunk/arch/arc/kernel/module.c | 4 +- trunk/arch/arc/kernel/setup.c | 36 +- trunk/arch/arc/kernel/time.c | 1 - trunk/arch/arc/kernel/traps.c | 24 +- trunk/arch/arc/kernel/troubleshoot.c | 50 +- trunk/arch/arc/mm/Makefile | 2 +- trunk/arch/arc/mm/cache_arc700.c | 470 +- trunk/arch/arc/mm/extable.c | 4 +- trunk/arch/arc/mm/fault.c | 1 - trunk/arch/arc/mm/init.c | 3 - trunk/arch/arc/mm/ioremap.c | 2 +- trunk/arch/arc/mm/mmap.c | 78 + trunk/arch/arc/mm/tlb.c | 47 +- trunk/arch/arc/plat-arcfpga/platform.c | 12 + trunk/arch/arc/plat-tb10x/Kconfig 
| 29 + trunk/arch/arc/plat-tb10x/Makefile | 21 + trunk/arch/arc/plat-tb10x/tb10x.c | 71 + trunk/arch/arm/Kconfig | 7 +- trunk/arch/arm/Makefile | 2 +- trunk/arch/arm/boot/dts/am33xx.dtsi | 12 + trunk/arch/arm/boot/dts/cros5250-common.dtsi | 8 + .../arch/arm/boot/dts/exynos5250-smdk5250.dts | 2 +- trunk/arch/arm/boot/dts/exynos5250-snow.dts | 2 +- trunk/arch/arm/boot/dts/omap3-beagle-xm.dts | 1 + trunk/arch/arm/boot/dts/omap3-evm.dts | 1 + trunk/arch/arm/boot/dts/omap3-overo.dtsi | 1 + trunk/arch/arm/boot/dts/omap3.dtsi | 1 - trunk/arch/arm/boot/dts/omap36xx.dtsi | 6 +- trunk/arch/arm/boot/dts/omap4-sdp.dts | 14 +- trunk/arch/arm/boot/dts/omap4-var-som.dts | 2 +- trunk/arch/arm/boot/dts/omap4460.dtsi | 6 +- trunk/arch/arm/common/mcpm_platsmp.c | 3 - trunk/arch/arm/configs/omap1_defconfig | 1 - trunk/arch/arm/configs/omap2plus_defconfig | 5 +- trunk/arch/arm/include/asm/cmpxchg.h | 8 +- trunk/arch/arm/kernel/devtree.c | 7 + trunk/arch/arm/kernel/setup.c | 13 +- trunk/arch/arm/kernel/smp.c | 2 +- .../arm/mach-exynos/include/mach/regs-pmu.h | 1 + trunk/arch/arm/mach-exynos/pmu.c | 5 +- trunk/arch/arm/mach-imx/Kconfig | 5 +- trunk/arch/arm/mach-imx/Makefile | 1 - trunk/arch/arm/mach-imx/common.h | 1 + trunk/arch/arm/mach-imx/headsmp.S | 2 +- trunk/arch/arm/mach-imx/hotplug.c | 2 + trunk/arch/arm/mach-imx/iram_alloc.c | 73 - trunk/arch/arm/mach-omap1/dma.c | 8 +- trunk/arch/arm/mach-omap2/Kconfig | 2 - trunk/arch/arm/mach-omap2/Makefile | 8 +- trunk/arch/arm/mach-omap2/board-omap3beagle.c | 6 +- .../arm/mach-omap2/board-rx51-peripherals.c | 4 +- trunk/arch/arm/mach-omap2/dma.c | 4 + trunk/arch/arm/mach-omap2/gpmc.c | 38 +- trunk/arch/arm/mach-omap2/id.c | 13 +- trunk/arch/arm/mach-omap2/mux34xx.h | 6 +- trunk/arch/arm/mach-omap2/omap_device.c | 2 +- trunk/arch/arm/mach-omap2/soc.h | 2 + trunk/arch/arm/mach-prima2/Kconfig | 2 +- trunk/arch/arm/mach-pxa/Kconfig | 1 - trunk/arch/arm/mach-spear/spear13xx.c | 2 + trunk/arch/arm/mach-tegra/Kconfig | 1 + trunk/arch/arm/mach-tegra/tegra2_emc.c | 5 - trunk/arch/arm/mach-ux500/Kconfig | 2 + trunk/arch/arm/mach-ux500/board-mop500.c | 4 +- trunk/arch/arm/mach-ux500/cpu-db8500.c | 2 +- trunk/arch/arm/plat-orion/Makefile | 2 +- trunk/arch/arm/plat-orion/gpio.c | 2 +- trunk/arch/arm/plat-samsung/adc.c | 5 - trunk/arch/arm/xen/enlighten.c | 33 +- trunk/arch/arm64/Kconfig | 9 +- trunk/arch/arm64/boot/dts/foundation-v8.dts | 2 +- trunk/arch/arm64/include/asm/assembler.h | 2 +- trunk/arch/arm64/include/asm/system_misc.h | 2 +- trunk/arch/arm64/include/asm/unistd32.h | 2 +- trunk/arch/arm64/kernel/debug-monitors.c | 2 - trunk/arch/arm64/kernel/early_printk.c | 5 +- trunk/arch/arm64/kernel/process.c | 8 +- trunk/arch/arm64/kernel/setup.c | 12 +- trunk/arch/arm64/kernel/sys32.S | 7 - trunk/arch/arm64/lib/bitops.S | 10 +- trunk/arch/arm64/mm/cache.S | 2 +- trunk/arch/arm64/mm/fault.c | 3 +- trunk/arch/arm64/mm/proc.S | 3 +- trunk/arch/avr32/Kconfig | 8 +- trunk/arch/avr32/include/asm/Kbuild | 1 + trunk/arch/avr32/include/asm/numnodes.h | 7 - trunk/arch/avr32/include/asm/param.h | 9 - trunk/arch/avr32/include/uapi/asm/Kbuild | 1 + trunk/arch/avr32/include/uapi/asm/param.h | 18 - trunk/arch/avr32/kernel/module.c | 2 +- trunk/arch/blackfin/Kconfig | 5 +- trunk/arch/blackfin/Makefile | 6 +- trunk/arch/blackfin/boot/Makefile | 16 +- trunk/arch/blackfin/include/asm/atomic.h | 2 + trunk/arch/blackfin/include/asm/bfin_sdh.h | 31 +- trunk/arch/blackfin/include/asm/bitops.h | 1 + .../blackfin/include/asm/def_LPBlackfin.h | 2 + trunk/arch/blackfin/include/asm/mem_init.h | 9 + 
.../blackfin/kernel/cplb-nompu/cplbinit.c | 16 +- .../arch/blackfin/kernel/cplb-nompu/cplbmgr.c | 27 +- trunk/arch/blackfin/kernel/cplbinfo.c | 9 +- trunk/arch/blackfin/kernel/setup.c | 2 +- trunk/arch/blackfin/mach-bf537/boards/stamp.c | 1 + trunk/arch/blackfin/mach-bf538/boards/ezkit.c | 1 - .../mach-bf609/include/mach/cdefBF60x_base.h | 2 + trunk/arch/cris/Kconfig | 1 - trunk/arch/cris/arch-v32/drivers/Kconfig | 1 - trunk/arch/hexagon/Kconfig | 5 - trunk/arch/hexagon/kernel/vm_entry.S | 12 +- trunk/arch/ia64/Kconfig | 3 - trunk/arch/m68k/Kconfig | 3 - trunk/arch/m68k/Kconfig.cpu | 15 +- trunk/arch/m68k/Kconfig.machine | 16 +- trunk/arch/m68k/Makefile | 1 + trunk/arch/m68k/include/asm/commproc.h | 17 - trunk/arch/m68k/include/asm/dbg.h | 6 - trunk/arch/m68k/include/asm/dma.h | 2 +- trunk/arch/m68k/include/asm/m53xxacr.h | 4 +- .../include/asm/{m532xsim.h => m53xxsim.h} | 12 +- trunk/arch/m68k/include/asm/m54xxacr.h | 7 +- trunk/arch/m68k/include/asm/mcfgpio.h | 10 +- trunk/arch/m68k/include/asm/mcfsim.h | 4 +- trunk/arch/m68k/include/asm/mcftimer.h | 2 +- trunk/arch/m68k/platform/coldfire/Makefile | 2 +- .../platform/coldfire/{m532x.c => m53xx.c} | 23 +- trunk/arch/m68k/platform/coldfire/timers.c | 2 +- trunk/arch/metag/Kconfig | 3 - trunk/arch/microblaze/Kconfig | 3 - trunk/arch/microblaze/configs/mmu_defconfig | 1 - trunk/arch/microblaze/include/asm/pci.h | 2 - trunk/arch/microblaze/include/asm/uaccess.h | 30 +- trunk/arch/microblaze/kernel/cpu/cpuinfo.c | 5 + trunk/arch/microblaze/kernel/head.S | 20 +- trunk/arch/microblaze/kernel/intc.c | 2 + trunk/arch/microblaze/kernel/process.c | 5 + trunk/arch/microblaze/mm/init.c | 9 +- trunk/arch/microblaze/pci/pci-common.c | 1 - trunk/arch/mips/Kbuild | 4 + trunk/arch/mips/Kconfig | 62 +- trunk/arch/mips/Makefile | 1 + trunk/arch/mips/alchemy/Kconfig | 3 - trunk/arch/mips/alchemy/Platform | 22 +- trunk/arch/mips/alchemy/board-gpr.c | 1 + trunk/arch/mips/alchemy/common/time.c | 1 + trunk/arch/mips/ar7/memory.c | 1 - trunk/arch/mips/ath79/setup.c | 17 +- trunk/arch/mips/bcm63xx/Kconfig | 4 + .../arch/mips/bcm63xx/boards/board_bcm963xx.c | 6 +- trunk/arch/mips/bcm63xx/clk.c | 43 +- trunk/arch/mips/bcm63xx/cpu.c | 142 +- trunk/arch/mips/bcm63xx/dev-flash.c | 6 + trunk/arch/mips/bcm63xx/dev-spi.c | 26 +- trunk/arch/mips/bcm63xx/irq.c | 22 + trunk/arch/mips/bcm63xx/prom.c | 2 + trunk/arch/mips/bcm63xx/reset.c | 28 + trunk/arch/mips/bcm63xx/setup.c | 5 +- trunk/arch/mips/cavium-octeon/octeon-irq.c | 5 +- trunk/arch/mips/cobalt/reset.c | 1 + trunk/arch/mips/configs/db1000_defconfig | 1 - trunk/arch/mips/configs/db1235_defconfig | 1 - trunk/arch/mips/configs/lemote2f_defconfig | 1 - trunk/arch/mips/configs/malta_defconfig | 69 +- trunk/arch/mips/configs/malta_kvm_defconfig | 456 ++ .../mips/configs/malta_kvm_guest_defconfig | 453 ++ trunk/arch/mips/configs/maltaaprp_defconfig | 195 + trunk/arch/mips/configs/maltasmtc_defconfig | 196 + trunk/arch/mips/configs/maltasmvp_defconfig | 199 + trunk/arch/mips/configs/maltaup_defconfig | 194 + trunk/arch/mips/configs/sead3_defconfig | 3 - trunk/arch/mips/configs/sead3micro_defconfig | 122 + trunk/arch/mips/fw/lib/Makefile | 2 + trunk/arch/mips/fw/lib/cmdline.c | 101 + trunk/arch/mips/include/asm/asm.h | 2 + trunk/arch/mips/include/asm/bootinfo.h | 1 + trunk/arch/mips/include/asm/branch.h | 40 +- trunk/arch/mips/include/asm/clock.h | 2 - trunk/arch/mips/include/asm/cpu-features.h | 3 + trunk/arch/mips/include/asm/dma-coherence.h | 15 + trunk/arch/mips/include/asm/dma-mapping.h | 1 + 
trunk/arch/mips/include/asm/fpu_emulator.h | 6 + trunk/arch/mips/include/asm/fw/fw.h | 47 + trunk/arch/mips/include/asm/gic.h | 16 +- trunk/arch/mips/include/asm/hazards.h | 371 +- trunk/arch/mips/include/asm/idle.h | 23 + trunk/arch/mips/include/asm/inst.h | 12 + trunk/arch/mips/include/asm/io.h | 2 +- trunk/arch/mips/include/asm/irqflags.h | 153 +- trunk/arch/mips/include/asm/kvm_host.h | 667 +++ .../include/asm/mach-bcm63xx/bcm63xx_clk.h | 11 - .../include/asm/mach-bcm63xx/bcm63xx_cpu.h | 141 +- .../asm/mach-bcm63xx/bcm63xx_dev_spi.h | 11 +- .../include/asm/mach-bcm63xx/bcm63xx_gpio.h | 2 + .../include/asm/mach-bcm63xx/bcm63xx_regs.h | 105 +- .../mips/include/asm/mach-bcm63xx/ioremap.h | 1 + .../include/asm/mach-generic/dma-coherence.h | 5 +- .../mips/include/asm/mach-generic/spaces.h | 9 +- .../asm/mach-loongson/cpu-feature-overrides.h | 1 + .../mips/include/asm/mach-ralink/mt7620.h | 84 + .../mips/include/asm/mach-ralink/rt288x.h | 53 + .../rt288x/cpu-feature-overrides.h | 56 + .../mips/include/asm/mach-ralink/rt305x.h | 27 + .../rt305x/cpu-feature-overrides.h | 56 + .../mips/include/asm/mach-ralink/rt3883.h | 252 + .../rt3883/cpu-feature-overrides.h | 55 + .../asm/mach-sead3/cpu-feature-overrides.h | 4 + .../mips/include/asm/mips-boards/generic.h | 3 + .../arch/mips/include/asm/mips-boards/prom.h | 47 - trunk/arch/mips/include/asm/mips_machine.h | 4 - trunk/arch/mips/include/asm/mipsregs.h | 19 + trunk/arch/mips/include/asm/mmu_context.h | 21 +- .../arch/mips/include/asm/netlogic/haldefs.h | 92 +- .../mips/include/asm/netlogic/mips-extns.h | 20 +- .../mips/include/asm/netlogic/xlp-hal/pic.h | 53 - .../mips/include/asm/netlogic/xlp-hal/usb.h | 64 - trunk/arch/mips/include/asm/page.h | 17 +- trunk/arch/mips/include/asm/pgtable.h | 1 + trunk/arch/mips/include/asm/processor.h | 6 +- trunk/arch/mips/include/asm/prom.h | 3 + trunk/arch/mips/include/asm/sn/sn_private.h | 2 +- trunk/arch/mips/include/asm/sn/types.h | 1 - trunk/arch/mips/include/asm/spinlock.h | 120 +- trunk/arch/mips/include/asm/stackframe.h | 12 +- trunk/arch/mips/include/asm/thread_info.h | 8 +- trunk/arch/mips/include/asm/time.h | 8 +- trunk/arch/mips/include/asm/uaccess.h | 25 +- trunk/arch/mips/include/asm/uasm.h | 84 +- trunk/arch/mips/include/uapi/asm/inst.h | 564 +++ trunk/arch/mips/include/uapi/asm/kvm.h | 55 + trunk/arch/mips/include/uapi/asm/unistd.h | 5 +- trunk/arch/mips/kernel/Makefile | 9 +- trunk/arch/mips/kernel/asm-offsets.c | 66 + trunk/arch/mips/kernel/binfmt_elfo32.c | 4 + trunk/arch/mips/kernel/branch.c | 178 + trunk/arch/mips/kernel/cevt-gic.c | 104 + trunk/arch/mips/kernel/cevt-r4k.c | 13 +- trunk/arch/mips/kernel/cpu-probe.c | 201 +- trunk/arch/mips/kernel/crash_dump.c | 1 + trunk/arch/mips/kernel/csrc-gic.c | 13 +- trunk/arch/mips/kernel/genex.S | 79 +- trunk/arch/mips/kernel/idle.c | 244 + trunk/arch/mips/kernel/irq-gic.c | 47 + trunk/arch/mips/kernel/kprobes.c | 5 +- trunk/arch/mips/kernel/linux32.c | 7 - trunk/arch/mips/kernel/mips_machine.c | 22 +- trunk/arch/mips/kernel/proc.c | 7 +- trunk/arch/mips/kernel/process.c | 154 +- trunk/arch/mips/kernel/prom.c | 33 + trunk/arch/mips/kernel/scall32-o32.S | 9 + trunk/arch/mips/kernel/scall64-64.S | 1 + trunk/arch/mips/kernel/scall64-o32.S | 2 +- trunk/arch/mips/kernel/setup.c | 22 + trunk/arch/mips/kernel/signal.c | 9 + trunk/arch/mips/kernel/smp-mt.c | 3 +- trunk/arch/mips/kernel/smp.c | 2 + trunk/arch/mips/kernel/smtc-asm.S | 3 + trunk/arch/mips/kernel/smtc.c | 5 +- trunk/arch/mips/kernel/traps.c | 321 +- trunk/arch/mips/kernel/unaligned.c | 1489 
+++++- trunk/arch/mips/kvm/00README.txt | 31 + trunk/arch/mips/kvm/Kconfig | 49 + trunk/arch/mips/kvm/Makefile | 13 + trunk/arch/mips/kvm/kvm_cb.c | 14 + trunk/arch/mips/kvm/kvm_locore.S | 650 +++ trunk/arch/mips/kvm/kvm_mips.c | 958 ++++ trunk/arch/mips/kvm/kvm_mips_comm.h | 23 + trunk/arch/mips/kvm/kvm_mips_commpage.c | 37 + trunk/arch/mips/kvm/kvm_mips_dyntrans.c | 149 + trunk/arch/mips/kvm/kvm_mips_emul.c | 1829 ++++++++ trunk/arch/mips/kvm/kvm_mips_int.c | 243 + trunk/arch/mips/kvm/kvm_mips_int.h | 49 + trunk/arch/mips/kvm/kvm_mips_opcode.h | 24 + trunk/arch/mips/kvm/kvm_mips_stats.c | 82 + trunk/arch/mips/kvm/kvm_tlb.c | 949 ++++ trunk/arch/mips/kvm/kvm_trap_emul.c | 482 ++ trunk/arch/mips/kvm/trace.h | 46 + trunk/arch/mips/lantiq/xway/gptu.c | 6 +- trunk/arch/mips/lib/bitops.c | 14 +- trunk/arch/mips/lib/memset.S | 84 +- trunk/arch/mips/lib/mips-atomic.c | 149 +- trunk/arch/mips/lib/strlen_user.S | 9 +- trunk/arch/mips/lib/strncpy_user.S | 32 +- trunk/arch/mips/lib/strnlen_user.S | 2 +- trunk/arch/mips/loongson/common/Makefile | 2 +- trunk/arch/mips/loongson/common/reset.c | 1 + trunk/arch/mips/loongson1/common/reset.c | 1 + trunk/arch/mips/math-emu/cp1emu.c | 919 +++- trunk/arch/mips/math-emu/dsemul.c | 30 +- trunk/arch/mips/mm/Makefile | 4 +- trunk/arch/mips/mm/c-r4k.c | 30 +- trunk/arch/mips/mm/cache.c | 1 + trunk/arch/mips/mm/dma-default.c | 25 +- trunk/arch/mips/mm/page.c | 10 + trunk/arch/mips/mm/tlb-r4k.c | 2 + trunk/arch/mips/mm/tlbex.c | 53 +- trunk/arch/mips/mm/uasm-micromips.c | 221 + trunk/arch/mips/mm/uasm-mips.c | 205 + trunk/arch/mips/mm/uasm.c | 326 +- trunk/arch/mips/mti-malta/Makefile | 5 +- trunk/arch/mips/mti-malta/Platform | 6 +- trunk/arch/mips/mti-malta/malta-cmdline.c | 59 - trunk/arch/mips/mti-malta/malta-display.c | 38 +- trunk/arch/mips/mti-malta/malta-init.c | 153 +- trunk/arch/mips/mti-malta/malta-int.c | 4 +- trunk/arch/mips/mti-malta/malta-memory.c | 104 +- trunk/arch/mips/mti-malta/malta-setup.c | 87 +- trunk/arch/mips/mti-malta/malta-time.c | 55 +- trunk/arch/mips/mti-sead3/Makefile | 8 +- trunk/arch/mips/mti-sead3/leds-sead3.c | 24 +- trunk/arch/mips/mti-sead3/sead3-cmdline.c | 46 - trunk/arch/mips/mti-sead3/sead3-console.c | 2 +- trunk/arch/mips/mti-sead3/sead3-display.c | 1 - trunk/arch/mips/mti-sead3/sead3-init.c | 130 +- trunk/arch/mips/mti-sead3/sead3-int.c | 1 - trunk/arch/mips/mti-sead3/sead3-setup.c | 4 - trunk/arch/mips/mti-sead3/sead3-time.c | 1 - trunk/arch/mips/netlogic/Kconfig | 17 +- trunk/arch/mips/netlogic/common/smp.c | 21 +- trunk/arch/mips/netlogic/dts/Makefile | 1 + trunk/arch/mips/netlogic/dts/xlp_evp.dts | 2 +- trunk/arch/mips/netlogic/dts/xlp_svp.dts | 124 + trunk/arch/mips/netlogic/xlp/nlm_hal.c | 62 +- trunk/arch/mips/netlogic/xlp/setup.c | 23 +- trunk/arch/mips/netlogic/xlp/usb-init.c | 49 +- trunk/arch/mips/netlogic/xlr/setup.c | 1 + trunk/arch/mips/oprofile/op_model_mipsxx.c | 2 +- trunk/arch/mips/pci/pci-ar71xx.c | 6 +- trunk/arch/mips/pci/pci-ar724x.c | 18 +- trunk/arch/mips/pci/pci-bcm63xx.c | 11 +- trunk/arch/mips/pmcs-msp71xx/msp_prom.c | 2 +- trunk/arch/mips/pmcs-msp71xx/msp_setup.c | 1 + trunk/arch/mips/powertv/init.c | 3 +- trunk/arch/mips/powertv/init.h | 2 + trunk/arch/mips/powertv/memory.c | 1 - trunk/arch/mips/powertv/powertv_setup.c | 1 - trunk/arch/mips/ralink/Kconfig | 23 + trunk/arch/mips/ralink/Makefile | 3 + trunk/arch/mips/ralink/Platform | 18 + trunk/arch/mips/ralink/common.h | 11 +- trunk/arch/mips/ralink/dts/Makefile | 3 + trunk/arch/mips/ralink/dts/mt7620a.dtsi | 58 + 
trunk/arch/mips/ralink/dts/mt7620a_eval.dts | 16 + trunk/arch/mips/ralink/dts/rt2880.dtsi | 58 + trunk/arch/mips/ralink/dts/rt2880_eval.dts | 46 + trunk/arch/mips/ralink/dts/rt3050.dtsi | 62 +- trunk/arch/mips/ralink/dts/rt3052_eval.dts | 16 +- trunk/arch/mips/ralink/dts/rt3883.dtsi | 58 + trunk/arch/mips/ralink/dts/rt3883_eval.dts | 16 + trunk/arch/mips/ralink/early_printk.c | 4 + trunk/arch/mips/ralink/irq.c | 5 + trunk/arch/mips/ralink/mt7620.c | 234 + trunk/arch/mips/ralink/of.c | 9 + trunk/arch/mips/ralink/rt288x.c | 143 + trunk/arch/mips/ralink/rt305x.c | 70 +- trunk/arch/mips/ralink/rt3883.c | 246 + trunk/arch/mips/sgi-ip27/ip27-klnuma.c | 2 +- trunk/arch/mips/sgi-ip27/ip27-memory.c | 16 +- trunk/arch/mips/sgi-ip27/ip27-timer.c | 2 +- trunk/arch/mips/txx9/generic/setup.c | 3 +- trunk/arch/mips/vr41xx/common/pmu.c | 1 + trunk/arch/mips/wrppmc/reset.c | 1 + trunk/arch/openrisc/Kconfig | 3 - trunk/arch/parisc/Kconfig | 2 +- trunk/arch/parisc/Makefile | 21 +- trunk/arch/parisc/include/asm/hardirq.h | 9 + trunk/arch/parisc/include/asm/processor.h | 3 + trunk/arch/parisc/kernel/entry.S | 155 +- trunk/arch/parisc/kernel/irq.c | 101 +- trunk/arch/parisc/kernel/sys_parisc32.c | 8 - trunk/arch/parisc/mm/init.c | 4 +- trunk/arch/powerpc/Kconfig | 5 - trunk/arch/powerpc/Kconfig.debug | 23 + trunk/arch/powerpc/configs/ps3_defconfig | 1 - .../powerpc/include/asm/context_tracking.h | 10 + trunk/arch/powerpc/include/asm/firmware.h | 4 +- trunk/arch/powerpc/include/asm/hw_irq.h | 5 +- trunk/arch/powerpc/include/asm/opal.h | 5 +- trunk/arch/powerpc/include/asm/pci-bridge.h | 2 + trunk/arch/powerpc/include/asm/pgalloc-64.h | 2 +- trunk/arch/powerpc/include/asm/processor.h | 6 + .../arch/powerpc/include/asm/pte-hash64-64k.h | 2 +- trunk/arch/powerpc/include/asm/rtas.h | 2 + trunk/arch/powerpc/include/asm/thread_info.h | 7 +- trunk/arch/powerpc/include/asm/udbg.h | 1 + trunk/arch/powerpc/kernel/asm-offsets.c | 6 + trunk/arch/powerpc/kernel/cpu_setup_power.S | 8 +- trunk/arch/powerpc/kernel/entry_32.S | 2 - trunk/arch/powerpc/kernel/entry_64.S | 35 +- trunk/arch/powerpc/kernel/exceptions-64e.S | 8 +- trunk/arch/powerpc/kernel/machine_kexec_64.c | 4 + trunk/arch/powerpc/kernel/misc_32.S | 11 + trunk/arch/powerpc/kernel/misc_64.S | 11 + trunk/arch/powerpc/kernel/pci-common.c | 12 +- trunk/arch/powerpc/kernel/pci_64.c | 10 + trunk/arch/powerpc/kernel/pci_dn.c | 8 + trunk/arch/powerpc/kernel/ppc_ksyms.c | 3 +- trunk/arch/powerpc/kernel/process.c | 8 + trunk/arch/powerpc/kernel/ptrace.c | 5 + trunk/arch/powerpc/kernel/rtas.c | 113 + trunk/arch/powerpc/kernel/rtas_flash.c | 10 +- trunk/arch/powerpc/kernel/signal.c | 7 +- trunk/arch/powerpc/kernel/sys_ppc32.c | 8 - trunk/arch/powerpc/kernel/traps.c | 80 +- trunk/arch/powerpc/kernel/udbg.c | 3 + trunk/arch/powerpc/mm/fault.c | 41 +- trunk/arch/powerpc/mm/hash_utils_64.c | 36 +- trunk/arch/powerpc/mm/init_64.c | 3 +- trunk/arch/powerpc/perf/core-book3s.c | 280 +- trunk/arch/powerpc/platforms/40x/Kconfig | 1 - trunk/arch/powerpc/platforms/44x/Kconfig | 1 - trunk/arch/powerpc/platforms/85xx/Kconfig | 3 +- trunk/arch/powerpc/platforms/86xx/Kconfig | 3 - trunk/arch/powerpc/platforms/8xx/Kconfig | 1 - trunk/arch/powerpc/platforms/Kconfig | 6 +- trunk/arch/powerpc/platforms/powernv/Kconfig | 1 + trunk/arch/powerpc/platforms/powernv/opal.c | 30 +- .../arch/powerpc/platforms/powernv/pci-ioda.c | 36 +- trunk/arch/powerpc/platforms/powernv/pci.c | 18 +- trunk/arch/powerpc/platforms/powernv/pci.h | 2 + .../arch/powerpc/platforms/powernv/powernv.h | 2 + 
trunk/arch/powerpc/platforms/powernv/setup.c | 16 +- trunk/arch/powerpc/platforms/powernv/smp.c | 62 +- trunk/arch/powerpc/platforms/pseries/Kconfig | 1 + trunk/arch/powerpc/platforms/pseries/msi.c | 75 +- .../arch/powerpc/platforms/pseries/suspend.c | 22 + trunk/arch/powerpc/platforms/wsp/ics.c | 2 +- trunk/arch/powerpc/sysdev/Makefile | 2 + trunk/arch/powerpc/sysdev/ehv_pic.c | 2 +- trunk/arch/powerpc/sysdev/mpic.c | 2 +- trunk/arch/powerpc/sysdev/udbg_memcons.c | 105 + trunk/arch/powerpc/sysdev/xics/ics-opal.c | 2 +- trunk/arch/s390/Kconfig | 1 - trunk/arch/s390/include/asm/ftrace.h | 12 +- trunk/arch/s390/include/asm/page.h | 20 +- trunk/arch/s390/include/asm/pgtable.h | 4 +- trunk/arch/s390/kernel/compat_wrapper.S | 13 - trunk/arch/s390/kernel/dis.c | 2 + trunk/arch/s390/kernel/ftrace.c | 9 +- trunk/arch/s390/kernel/mcount.S | 2 + trunk/arch/s390/kernel/mcount64.S | 2 + trunk/arch/s390/kernel/smp.c | 2 +- trunk/arch/s390/kernel/syscalls.S | 4 +- trunk/arch/s390/mm/pgtable.c | 3 +- trunk/arch/sh/Kconfig | 3 - trunk/arch/sh/boards/mach-sdk7786/Makefile | 2 +- trunk/arch/sh/boards/mach-x3proto/Makefile | 2 +- trunk/arch/sh/kernel/cpu/sh2a/Makefile | 2 +- trunk/arch/sh/kernel/cpu/sh3/Makefile | 2 +- trunk/arch/sh/kernel/cpu/sh4a/Makefile | 2 +- trunk/arch/sparc/Kconfig | 5 - trunk/arch/sparc/kernel/sys32.S | 9 - trunk/arch/sparc/kernel/systbls_64.S | 2 +- trunk/arch/tile/Kconfig | 14 +- trunk/arch/tile/include/hv/hypervisor.h | 27 +- trunk/arch/tile/kernel/head_32.S | 2 +- trunk/arch/tile/kernel/head_64.S | 12 +- trunk/arch/tile/lib/spinlock_32.c | 2 +- trunk/arch/unicore32/Kconfig | 6 +- trunk/arch/unicore32/kernel/sys.c | 10 +- trunk/arch/x86/Kconfig | 4 - trunk/arch/x86/ia32/sys_ia32.c | 9 - trunk/arch/x86/include/asm/sys_ia32.h | 3 - trunk/arch/x86/include/asm/syscalls.h | 4 +- trunk/arch/x86/include/uapi/asm/msr-index.h | 3 + trunk/arch/x86/kernel/head64.c | 2 +- trunk/arch/x86/kernel/microcode_intel_early.c | 5 +- trunk/arch/x86/kernel/process.c | 5 +- trunk/arch/x86/kernel/vm86_32.c | 38 +- trunk/arch/x86/kvm/emulate.c | 42 +- trunk/arch/x86/kvm/vmx.c | 6 + trunk/arch/x86/kvm/x86.c | 40 +- trunk/arch/x86/mm/init.c | 19 +- trunk/arch/x86/pci/xen.c | 5 +- trunk/arch/x86/syscalls/syscall_32.tbl | 2 +- trunk/arch/x86/xen/enlighten.c | 50 +- trunk/arch/x86/xen/spinlock.c | 2 +- trunk/arch/xtensa/Kconfig | 60 +- trunk/arch/xtensa/boot/boot-elf/Makefile | 1 + trunk/arch/xtensa/boot/boot-elf/boot.lds.S | 64 +- trunk/arch/xtensa/boot/boot-elf/bootstrap.S | 101 +- trunk/arch/xtensa/boot/boot-redboot/boot.ld | 2 +- trunk/arch/xtensa/boot/boot-uboot/Makefile | 6 +- trunk/arch/xtensa/configs/iss_defconfig | 1 - trunk/arch/xtensa/configs/s6105_defconfig | 1 - trunk/arch/xtensa/include/asm/Kbuild | 1 + trunk/arch/xtensa/include/asm/ftrace.h | 34 +- .../arch/xtensa/include/asm/initialize_mmu.h | 107 + trunk/arch/xtensa/include/asm/irqflags.h | 5 +- trunk/arch/xtensa/include/asm/linkage.h | 16 - trunk/arch/xtensa/include/asm/stacktrace.h | 36 + trunk/arch/xtensa/include/asm/timex.h | 9 +- trunk/arch/xtensa/include/asm/traps.h | 5 +- trunk/arch/xtensa/include/asm/vectors.h | 125 + trunk/arch/xtensa/kernel/Makefile | 8 +- trunk/arch/xtensa/kernel/entry.S | 84 +- trunk/arch/xtensa/kernel/head.S | 39 +- trunk/arch/xtensa/kernel/platform.c | 1 + trunk/arch/xtensa/kernel/stacktrace.c | 120 + trunk/arch/xtensa/kernel/traps.c | 67 +- trunk/arch/xtensa/kernel/vectors.S | 16 +- trunk/arch/xtensa/kernel/vmlinux.lds.S | 48 +- trunk/arch/xtensa/kernel/xtensa_ksyms.c | 5 + trunk/arch/xtensa/mm/mmu.c | 14 +- 
trunk/arch/xtensa/oprofile/backtrace.c | 4 +- trunk/arch/xtensa/platforms/iss/console.c | 12 +- .../platforms/iss/include/platform/simcall.h | 24 +- trunk/arch/xtensa/platforms/iss/setup.c | 8 +- trunk/arch/xtensa/platforms/iss/simdisk.c | 8 +- trunk/arch/xtensa/platforms/xt2000/setup.c | 2 + trunk/arch/xtensa/platforms/xtfpga/setup.c | 2 + trunk/drivers/acpi/ac.c | 33 + trunk/drivers/acpi/acpica/exfldio.c | 14 +- trunk/drivers/acpi/acpica/nsinit.c | 1 + trunk/drivers/acpi/acpica/utosi.c | 3 +- trunk/drivers/acpi/ec.c | 4 +- trunk/drivers/acpi/pci_root.c | 4 +- trunk/drivers/acpi/processor_driver.c | 8 +- trunk/drivers/acpi/processor_idle.c | 29 +- trunk/drivers/acpi/scan.c | 3 +- trunk/drivers/acpi/video.c | 8 + trunk/drivers/ata/pata_ep93xx.c | 5 - trunk/drivers/base/bus.c | 1 + trunk/drivers/base/core.c | 6 +- trunk/drivers/base/power/common.c | 12 +- trunk/drivers/bcma/driver_mips.c | 2 +- trunk/drivers/bcma/scan.c | 2 + trunk/drivers/block/Makefile | 1 + trunk/drivers/block/{nvme.c => nvme-core.c} | 594 ++- trunk/drivers/block/nvme-scsi.c | 3053 ++++++++++++ trunk/drivers/block/rbd.c | 935 ++-- trunk/drivers/char/hw_random/mxc-rnga.c | 6 - trunk/drivers/char/hw_random/omap-rng.c | 5 - trunk/drivers/char/ipmi/ipmi_bt_sm.c | 4 +- trunk/drivers/char/ipmi/ipmi_devintf.c | 14 +- trunk/drivers/char/ipmi/ipmi_msghandler.c | 3 +- trunk/drivers/char/ipmi/ipmi_si_intf.c | 16 +- trunk/drivers/char/lp.c | 3 + trunk/drivers/char/ttyprintk.c | 2 +- trunk/drivers/cpufreq/Kconfig | 2 +- trunk/drivers/cpufreq/Kconfig.arm | 15 +- trunk/drivers/cpufreq/arm_big_little.c | 7 +- trunk/drivers/cpufreq/arm_big_little.h | 5 + trunk/drivers/cpufreq/arm_big_little_dt.c | 9 +- trunk/drivers/cpufreq/cpufreq-cpu0.c | 27 +- trunk/drivers/cpufreq/cpufreq.c | 10 +- trunk/drivers/cpufreq/cpufreq_governor.c | 11 +- trunk/drivers/cpufreq/cpufreq_governor.h | 1 + trunk/drivers/cpufreq/cpufreq_ondemand.c | 1 - trunk/drivers/cpufreq/cpufreq_stats.c | 7 +- trunk/drivers/cpufreq/intel_pstate.c | 122 +- trunk/drivers/cpufreq/kirkwood-cpufreq.c | 4 - trunk/drivers/cpufreq/loongson2_cpufreq.c | 2 + trunk/drivers/crypto/nx/nx-aes-cbc.c | 1 + trunk/drivers/crypto/nx/nx-aes-ecb.c | 1 + trunk/drivers/crypto/nx/nx-aes-gcm.c | 2 +- trunk/drivers/crypto/nx/nx-sha256.c | 8 +- trunk/drivers/crypto/nx/nx-sha512.c | 7 +- trunk/drivers/crypto/nx/nx.c | 38 +- trunk/drivers/dma/Kconfig | 16 +- trunk/drivers/dma/Makefile | 3 +- trunk/drivers/dma/acpi-dma.c | 279 ++ trunk/drivers/dma/at_hdmac.c | 97 +- trunk/drivers/dma/at_hdmac_regs.h | 4 + trunk/drivers/dma/coh901318.c | 4 +- trunk/drivers/dma/dmaengine.c | 17 +- trunk/drivers/dma/dmatest.c | 887 +++- trunk/drivers/dma/dw_dmac.c | 203 +- trunk/drivers/dma/dw_dmac_regs.h | 6 +- trunk/drivers/dma/imx-dma.c | 7 +- trunk/drivers/dma/imx-sdma.c | 4 +- trunk/drivers/dma/ioat/dma.c | 8 +- trunk/drivers/dma/ioat/dma.h | 53 +- trunk/drivers/dma/ioat/dma_v2.h | 2 + trunk/drivers/dma/ioat/dma_v3.c | 912 +++- trunk/drivers/dma/ioat/hw.h | 88 +- trunk/drivers/dma/ioat/pci.c | 20 + trunk/drivers/dma/ioat/registers.h | 4 + trunk/drivers/dma/ipu/ipu_idmac.c | 6 +- trunk/drivers/dma/of-dma.c | 96 +- trunk/drivers/dma/omap-dma.c | 38 +- trunk/drivers/dma/pch_dma.c | 2 +- trunk/drivers/dma/pl330.c | 10 +- trunk/drivers/dma/sh/Kconfig | 24 + trunk/drivers/dma/sh/Makefile | 3 +- trunk/drivers/dma/sh/sudmac.c | 428 ++ trunk/drivers/dma/sirf-dma.c | 24 +- trunk/drivers/dma/tegra20-apb-dma.c | 92 +- trunk/drivers/dma/timb_dma.c | 2 +- trunk/drivers/dma/txx9dmac.c | 8 +- trunk/drivers/edac/edac_mc_sysfs.c | 18 +- 
trunk/drivers/extcon/Kconfig | 2 +- trunk/drivers/firewire/core-cdev.c | 27 +- trunk/drivers/firewire/core-device.c | 4 +- trunk/drivers/firewire/net.c | 7 +- trunk/drivers/firewire/ohci.c | 270 +- trunk/drivers/firewire/sbp2.c | 10 +- trunk/drivers/gpio/Kconfig | 3 +- trunk/drivers/gpio/gpio-langwell.c | 17 +- trunk/drivers/gpio/gpio-lpc32xx.c | 2 +- trunk/drivers/gpio/gpio-ml-ioh.c | 3 +- trunk/drivers/gpio/gpio-mvebu.c | 5 - trunk/drivers/gpio/gpio-mxs.c | 3 +- trunk/drivers/gpio/gpio-omap.c | 48 +- trunk/drivers/gpio/gpio-pch.c | 3 +- trunk/drivers/gpio/gpio-sch.c | 6 +- trunk/drivers/gpio/gpio-tegra.c | 5 - trunk/drivers/gpio/gpio-viperboard.c | 3 +- trunk/drivers/gpu/drm/drm_crtc.c | 5 + trunk/drivers/gpu/drm/drm_crtc_helper.c | 27 +- trunk/drivers/gpu/drm/drm_drv.c | 20 +- trunk/drivers/gpu/drm/drm_encoder_slave.c | 6 +- trunk/drivers/gpu/drm/drm_mm.c | 34 +- trunk/drivers/gpu/drm/drm_modes.c | 1 + trunk/drivers/gpu/drm/exynos/exynos_hdmi.c | 5 - trunk/drivers/gpu/drm/i915/i915_gem.c | 8 +- trunk/drivers/gpu/drm/i915/i915_gem_gtt.c | 15 +- trunk/drivers/gpu/drm/i915/i915_reg.h | 2 - trunk/drivers/gpu/drm/i915/intel_ddi.c | 5 + trunk/drivers/gpu/drm/i915/intel_dp.c | 77 +- trunk/drivers/gpu/drm/i915/intel_drv.h | 1 + trunk/drivers/gpu/drm/i915/intel_fb.c | 16 +- trunk/drivers/gpu/drm/i915/intel_pm.c | 44 +- trunk/drivers/gpu/drm/mgag200/mgag200_mode.c | 90 +- .../gpu/drm/nouveau/core/engine/device/nvc0.c | 2 - .../gpu/drm/nouveau/core/engine/fifo/nv50.c | 2 + .../gpu/drm/nouveau/core/engine/fifo/nvc0.c | 10 + .../gpu/drm/nouveau/core/engine/fifo/nve0.c | 3 + .../gpu/drm/nouveau/core/subdev/bios/init.c | 4 +- .../gpu/drm/nouveau/core/subdev/ltcg/nvc0.c | 17 +- trunk/drivers/gpu/drm/nouveau/nouveau_drm.c | 25 +- trunk/drivers/gpu/drm/qxl/qxl_cmd.c | 29 +- trunk/drivers/gpu/drm/qxl/qxl_display.c | 17 +- trunk/drivers/gpu/drm/qxl/qxl_drv.h | 7 - trunk/drivers/gpu/drm/qxl/qxl_ioctl.c | 1 + trunk/drivers/gpu/drm/radeon/atombios_crtc.c | 6 - trunk/drivers/gpu/drm/radeon/evergreen.c | 31 +- trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c | 7 +- trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c | 2 +- trunk/drivers/gpu/drm/radeon/r600_hdmi.c | 9 +- trunk/drivers/gpu/drm/radeon/radeon.h | 2 + trunk/drivers/gpu/drm/radeon/radeon_asic.c | 22 +- trunk/drivers/gpu/drm/radeon/radeon_bios.c | 28 +- trunk/drivers/gpu/drm/radeon/radeon_device.c | 1 + trunk/drivers/gpu/drm/radeon/radeon_drv.c | 12 +- trunk/drivers/gpu/drm/radeon/radeon_family.h | 1 + .../gpu/drm/radeon/radeon_legacy_crtc.c | 4 - trunk/drivers/gpu/drm/radeon/radeon_mode.h | 1 - trunk/drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- trunk/drivers/gpu/drm/radeon/si.c | 370 +- trunk/drivers/gpu/drm/radeon/sid.h | 1 + trunk/drivers/gpu/host1x/drm/dc.c | 5 - trunk/drivers/hid/hid-core.c | 3 +- trunk/drivers/hid/hid-debug.c | 15 +- trunk/drivers/hid/hid-steelseries.c | 9 +- trunk/drivers/hv/channel_mgmt.c | 2 +- trunk/drivers/hwmon/abituguru.c | 16 +- trunk/drivers/hwmon/iio_hwmon.c | 8 +- trunk/drivers/hwmon/nct6775.c | 6 +- trunk/drivers/hwmon/tmp401.c | 2 +- trunk/drivers/i2c/busses/Kconfig | 4 +- .../drivers/i2c/busses/i2c-designware-core.c | 14 +- .../drivers/i2c/busses/i2c-designware-core.h | 2 + .../i2c/busses/i2c-designware-platdrv.c | 1 + trunk/drivers/i2c/busses/i2c-i801.c | 6 +- trunk/drivers/i2c/busses/i2c-mv64xxx.c | 8 +- trunk/drivers/i2c/busses/i2c-s3c2410.c | 5 - trunk/drivers/i2c/busses/i2c-sirf.c | 6 - trunk/drivers/i2c/busses/i2c-tegra.c | 5 - trunk/drivers/i2c/i2c-core.c | 3 +- trunk/drivers/i2c/muxes/Kconfig | 4 +- 
trunk/drivers/idle/intel_idle.c | 21 + trunk/drivers/iio/adc/exynos_adc.c | 8 +- .../iio/common/st_sensors/st_sensors_core.c | 2 + trunk/drivers/iio/dac/Kconfig | 6 +- trunk/drivers/infiniband/core/iwcm.c | 2 + trunk/drivers/infiniband/core/verbs.c | 3 +- .../infiniband/hw/cxgb3/iwch_provider.c | 2 +- trunk/drivers/infiniband/hw/cxgb4/qp.c | 22 +- .../drivers/infiniband/hw/ipath/ipath_verbs.c | 19 +- trunk/drivers/infiniband/hw/mlx4/cq.c | 21 + trunk/drivers/infiniband/hw/mlx4/qp.c | 6 + trunk/drivers/infiniband/hw/qib/qib_sysfs.c | 6 +- trunk/drivers/infiniband/hw/qib/qib_verbs.c | 3 +- .../drivers/infiniband/ulp/ipoib/ipoib_main.c | 2 +- .../drivers/infiniband/ulp/iser/iscsi_iser.c | 24 +- .../drivers/infiniband/ulp/iser/iscsi_iser.h | 24 +- .../drivers/infiniband/ulp/iser/iser_memory.c | 3 +- .../drivers/infiniband/ulp/iser/iser_verbs.c | 36 +- trunk/drivers/infiniband/ulp/srpt/ib_srpt.c | 2 +- trunk/drivers/input/keyboard/Kconfig | 6 +- trunk/drivers/input/misc/Kconfig | 8 +- trunk/drivers/input/mouse/Kconfig | 2 +- trunk/drivers/input/tablet/wacom_wac.c | 89 +- trunk/drivers/input/tablet/wacom_wac.h | 1 + trunk/drivers/input/touchscreen/egalax_ts.c | 2 +- trunk/drivers/isdn/capi/kcapi.c | 6 +- trunk/drivers/leds/Kconfig | 6 +- trunk/drivers/leds/leds-gpio.c | 9 +- trunk/drivers/lguest/page_tables.c | 1 + trunk/drivers/md/dm-bufio.c | 24 +- trunk/drivers/md/dm-cache-metadata.c | 4 +- trunk/drivers/md/dm-cache-policy.h | 4 +- trunk/drivers/md/dm-cache-target.c | 100 +- trunk/drivers/md/dm-mpath.c | 1 + trunk/drivers/md/dm-snap.c | 1 + trunk/drivers/md/dm-stripe.c | 11 +- trunk/drivers/md/dm-table.c | 2 +- trunk/drivers/md/dm-thin-metadata.c | 36 +- trunk/drivers/md/dm-thin-metadata.h | 7 + trunk/drivers/md/dm-thin.c | 200 +- .../md/persistent-data/dm-space-map-disk.c | 3 +- .../persistent-data/dm-space-map-metadata.c | 127 +- .../drivers/md/persistent-data/dm-space-map.h | 23 + trunk/drivers/memory/emif.c | 6 - trunk/drivers/mfd/Kconfig | 3 +- trunk/drivers/mfd/ab8500-core.c | 24 +- trunk/drivers/mfd/ab8500-debugfs.c | 16 +- trunk/drivers/mfd/ab8500-gpadc.c | 7 +- trunk/drivers/mfd/ab8500-sysctrl.c | 15 +- trunk/drivers/mfd/abx500-core.c | 2 +- trunk/drivers/mfd/cros_ec_spi.c | 6 +- trunk/drivers/mfd/db8500-prcmu.c | 3 + trunk/drivers/mfd/intel_msic.c | 5 - trunk/drivers/mfd/si476x-cmd.c | 152 +- trunk/drivers/misc/atmel-ssc.c | 5 - trunk/drivers/misc/dummy-irq.c | 6 +- trunk/drivers/misc/mei/bus.c | 2 + trunk/drivers/misc/mei/main.c | 17 +- trunk/drivers/misc/vmw_vmci/Kconfig | 2 +- trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c | 2 +- trunk/drivers/mmc/host/mmci.c | 9 +- trunk/drivers/mtd/Kconfig | 13 - trunk/drivers/mtd/Makefile | 3 +- trunk/drivers/mtd/chips/Kconfig | 1 - trunk/drivers/mtd/devices/Kconfig | 64 - trunk/drivers/mtd/devices/Makefile | 5 - trunk/drivers/mtd/devices/bcm47xxsflash.c | 13 +- trunk/drivers/mtd/devices/bcm47xxsflash.h | 59 + trunk/drivers/mtd/devices/doc2000.c | 1178 ----- trunk/drivers/mtd/devices/doc2001.c | 824 ---- trunk/drivers/mtd/devices/doc2001plus.c | 1080 ----- trunk/drivers/mtd/devices/docecc.c | 521 --- trunk/drivers/mtd/devices/docg3.c | 15 +- trunk/drivers/mtd/devices/docprobe.c | 325 -- trunk/drivers/mtd/devices/elm.c | 9 +- trunk/drivers/mtd/devices/m25p80.c | 25 +- trunk/drivers/mtd/devices/mtd_dataflash.c | 4 +- trunk/drivers/mtd/maps/Kconfig | 79 +- trunk/drivers/mtd/maps/Makefile | 8 - trunk/drivers/mtd/maps/bfin-async-flash.c | 3 +- trunk/drivers/mtd/maps/ck804xrom.c | 3 +- trunk/drivers/mtd/maps/dbox2-flash.c | 123 - 
trunk/drivers/mtd/maps/dc21285.c | 3 +- trunk/drivers/mtd/maps/dilnetpc.c | 496 -- trunk/drivers/mtd/maps/dmv182.c | 146 - trunk/drivers/mtd/maps/gpio-addr-flash.c | 3 +- trunk/drivers/mtd/maps/h720x-flash.c | 120 - trunk/drivers/mtd/maps/impa7.c | 7 +- trunk/drivers/mtd/maps/intel_vr_nor.c | 4 +- trunk/drivers/mtd/maps/ixp2000.c | 253 - trunk/drivers/mtd/maps/ixp4xx.c | 2 +- trunk/drivers/mtd/maps/lantiq-flash.c | 3 +- trunk/drivers/mtd/maps/mbx860.c | 98 - trunk/drivers/mtd/maps/pci.c | 3 +- trunk/drivers/mtd/maps/physmap.c | 17 +- trunk/drivers/mtd/maps/physmap_of.c | 16 +- trunk/drivers/mtd/maps/plat-ram.c | 2 +- trunk/drivers/mtd/maps/pxa2xx-flash.c | 4 +- trunk/drivers/mtd/maps/rbtx4939-flash.c | 5 +- trunk/drivers/mtd/maps/rpxlite.c | 64 - trunk/drivers/mtd/maps/sa1100-flash.c | 2 +- trunk/drivers/mtd/maps/solutionengine.c | 2 +- trunk/drivers/mtd/maps/tqm8xxl.c | 249 - trunk/drivers/mtd/maps/tsunami_flash.c | 5 +- trunk/drivers/mtd/mtdchar.c | 52 +- trunk/drivers/mtd/mtdcore.c | 26 +- trunk/drivers/mtd/mtdcore.h | 30 +- trunk/drivers/mtd/mtdpart.c | 4 +- trunk/drivers/mtd/nand/Kconfig | 32 +- trunk/drivers/mtd/nand/Makefile | 3 - trunk/drivers/mtd/nand/atmel_nand.c | 15 +- trunk/drivers/mtd/nand/bf5xx_nand.c | 16 +- trunk/drivers/mtd/nand/cafe_nand.c | 10 +- trunk/drivers/mtd/nand/davinci_nand.c | 16 +- trunk/drivers/mtd/nand/denali_dt.c | 18 +- trunk/drivers/mtd/nand/docg4.c | 13 +- trunk/drivers/mtd/nand/fsmc_nand.c | 13 +- trunk/drivers/mtd/nand/gpio.c | 8 +- trunk/drivers/mtd/nand/h1910.c | 167 - trunk/drivers/mtd/nand/lpc32xx_mlc.c | 9 +- trunk/drivers/mtd/nand/nand_base.c | 233 +- trunk/drivers/mtd/nand/nand_bbt.c | 25 - trunk/drivers/mtd/nand/nand_ids.c | 242 +- trunk/drivers/mtd/nand/nandsim.c | 24 +- trunk/drivers/mtd/nand/nuc900_nand.c | 9 - trunk/drivers/mtd/nand/omap2.c | 9 +- trunk/drivers/mtd/nand/orion_nand.c | 13 +- trunk/drivers/mtd/nand/ppchameleonevb.c | 403 -- trunk/drivers/mtd/nand/pxa3xx_nand.c | 2 +- trunk/drivers/mtd/nand/rtc_from4.c | 624 --- trunk/drivers/mtd/nand/sh_flctl.c | 16 +- trunk/drivers/mtd/nand/sm_common.c | 62 +- trunk/drivers/mtd/nand/txx9ndfmc.c | 13 +- trunk/drivers/mtd/ofpart.c | 7 +- trunk/drivers/mtd/onenand/Kconfig | 7 - trunk/drivers/mtd/onenand/Makefile | 3 - trunk/drivers/mtd/onenand/omap2.c | 14 +- trunk/drivers/mtd/onenand/onenand_sim.c | 564 --- trunk/drivers/net/bonding/bond_3ad.c | 21 +- trunk/drivers/net/bonding/bond_3ad.h | 2 + trunk/drivers/net/bonding/bond_main.c | 26 +- trunk/drivers/net/bonding/bond_procfs.c | 2 +- trunk/drivers/net/bonding/bond_sysfs.c | 13 +- trunk/drivers/net/caif/Kconfig | 2 +- trunk/drivers/net/ethernet/3com/3c59x.c | 25 +- trunk/drivers/net/ethernet/adi/bfin_mac.c | 3 +- .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 9 +- trunk/drivers/net/ethernet/broadcom/tg3.c | 61 +- trunk/drivers/net/ethernet/brocade/bna/bnad.c | 5 +- trunk/drivers/net/ethernet/cadence/Kconfig | 3 +- trunk/drivers/net/ethernet/cadence/macb.c | 18 +- trunk/drivers/net/ethernet/cadence/macb.h | 7 + trunk/drivers/net/ethernet/calxeda/Kconfig | 2 +- .../net/ethernet/emulex/benet/be_cmds.c | 18 +- .../net/ethernet/emulex/benet/be_main.c | 48 +- trunk/drivers/net/ethernet/freescale/fec.h | 10 +- .../drivers/net/ethernet/freescale/fec_main.c | 70 +- .../net/ethernet/freescale/gianfar_ptp.c | 1 + trunk/drivers/net/ethernet/ibm/emac/core.c | 36 +- trunk/drivers/net/ethernet/icplus/ipg.h | 86 +- .../net/ethernet/marvell/mv643xx_eth.c | 6 +- .../net/ethernet/mellanox/mlx4/en_ethtool.c | 2 +- .../net/ethernet/mellanox/mlx4/en_netdev.c | 16 +- 
.../net/ethernet/mellanox/mlx4/en_resources.c | 2 +- trunk/drivers/net/ethernet/mellanox/mlx4/eq.c | 4 +- trunk/drivers/net/ethernet/mellanox/mlx4/fw.c | 4 +- .../drivers/net/ethernet/mellanox/mlx4/mcg.c | 120 +- .../drivers/net/ethernet/mellanox/mlx4/mlx4.h | 79 - .../ethernet/mellanox/mlx4/resource_tracker.c | 29 +- .../drivers/net/ethernet/mellanox/mlx4/srq.c | 15 + .../net/ethernet/qlogic/qlcnic/qlcnic.h | 8 + .../ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 95 +- .../ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 4 +- .../ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 24 +- .../ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 54 +- .../net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 49 +- .../net/ethernet/qlogic/qlcnic/qlcnic_hw.h | 2 +- .../net/ethernet/qlogic/qlcnic/qlcnic_main.c | 111 +- .../qlogic/qlcnic/qlcnic_sriov_common.c | 9 +- .../ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 3 - .../net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 3 + .../net/ethernet/qlogic/qlge/qlge_main.c | 7 + trunk/drivers/net/ethernet/realtek/8139cp.c | 1 + trunk/drivers/net/ethernet/realtek/r8169.c | 41 +- trunk/drivers/net/ethernet/renesas/sh_eth.c | 5 - trunk/drivers/net/ethernet/sfc/efx.c | 8 +- trunk/drivers/net/ethernet/sfc/net_driver.h | 31 +- trunk/drivers/net/ethernet/sfc/ptp.c | 4 +- trunk/drivers/net/ethernet/sfc/rx.c | 8 +- .../net/ethernet/stmicro/stmmac/Kconfig | 2 +- trunk/drivers/net/ethernet/tile/tilegx.c | 2 +- .../drivers/net/ethernet/toshiba/spider_net.c | 3 +- trunk/drivers/net/irda/bfin_sir.c | 6 +- trunk/drivers/net/macvlan.c | 7 +- trunk/drivers/net/ntb_netdev.c | 2 + trunk/drivers/net/phy/Kconfig | 2 +- trunk/drivers/net/usb/cdc_ether.c | 7 + trunk/drivers/net/usb/qmi_wwan.c | 8 + trunk/drivers/net/usb/rtl8150.c | 100 +- trunk/drivers/net/usb/sierra_net.c | 38 +- trunk/drivers/net/usb/usbnet.c | 79 +- trunk/drivers/net/virtio_net.c | 11 +- trunk/drivers/net/vxlan.c | 17 +- trunk/drivers/net/wireless/ath/ath5k/base.c | 5 + trunk/drivers/net/wireless/ath/ath9k/Kconfig | 2 +- .../net/wireless/ath/ath9k/ar9003_calib.c | 2 +- .../net/wireless/ath/ath9k/ar9485_initvals.h | 2 +- .../wireless/ath/ath9k/ar9565_1p0_initvals.h | 138 +- trunk/drivers/net/wireless/ath/ath9k/ath9k.h | 4 +- trunk/drivers/net/wireless/ath/ath9k/debug.c | 8 + trunk/drivers/net/wireless/ath/ath9k/debug.h | 5 + trunk/drivers/net/wireless/ath/ath9k/init.c | 10 +- trunk/drivers/net/wireless/ath/ath9k/main.c | 18 +- trunk/drivers/net/wireless/ath/ath9k/xmit.c | 74 +- trunk/drivers/net/wireless/b43/dma.c | 19 + trunk/drivers/net/wireless/b43/dma.h | 4 +- trunk/drivers/net/wireless/b43/main.c | 43 +- .../wireless/brcm80211/brcmfmac/wl_cfg80211.c | 7 +- .../drivers/net/wireless/iwlegacy/4965-mac.c | 3 +- trunk/drivers/net/wireless/iwlegacy/common.c | 2 +- .../drivers/net/wireless/iwlwifi/mvm/fw-api.h | 27 + .../net/wireless/iwlwifi/mvm/mac-ctxt.c | 12 +- .../net/wireless/iwlwifi/mvm/mac80211.c | 17 +- trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h | 1 + trunk/drivers/net/wireless/iwlwifi/mvm/ops.c | 1 + trunk/drivers/net/wireless/iwlwifi/mvm/scan.c | 6 + trunk/drivers/net/wireless/iwlwifi/mvm/sta.c | 13 +- trunk/drivers/net/wireless/iwlwifi/mvm/sta.h | 2 - trunk/drivers/net/wireless/iwlwifi/mvm/tx.c | 48 +- trunk/drivers/net/wireless/mac80211_hwsim.c | 18 +- trunk/drivers/net/wireless/mwifiex/cfg80211.c | 3 - trunk/drivers/net/wireless/mwifiex/cmdevt.c | 1 + trunk/drivers/net/wireless/mwifiex/main.c | 1 + .../drivers/net/wireless/mwifiex/sta_ioctl.c | 21 +- .../net/wireless/rtlwifi/rtl8188ee/trx.h | 4 +- .../net/wireless/rtlwifi/rtl8192cu/sw.c | 1 + 
trunk/drivers/ntb/ntb_hw.c | 10 +- trunk/drivers/ntb/ntb_transport.c | 175 +- trunk/drivers/of/base.c | 4 +- trunk/drivers/of/of_mdio.c | 11 +- trunk/drivers/pci/bus.c | 1 + trunk/drivers/pci/hotplug/acpiphp_glue.c | 14 + trunk/drivers/pci/msi.c | 6 +- trunk/drivers/pci/probe.c | 1 - trunk/drivers/pcmcia/m8xx_pcmcia.c | 140 - trunk/drivers/pinctrl/pinctrl-abx500.c | 30 +- trunk/drivers/pinctrl/pinctrl-coh901.c | 5 - trunk/drivers/pinctrl/pinctrl-exynos5440.c | 5 - trunk/drivers/pinctrl/pinctrl-lantiq.c | 3 +- trunk/drivers/pinctrl/pinctrl-samsung.c | 5 - trunk/drivers/pinctrl/pinctrl-single.c | 3 +- trunk/drivers/pinctrl/pinctrl-xway.c | 4 - trunk/drivers/pinctrl/sh-pfc/Kconfig | 26 +- trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c | 2 +- trunk/drivers/platform/x86/Kconfig | 8 + trunk/drivers/platform/x86/Makefile | 2 + trunk/drivers/platform/x86/asus-nb-wmi.c | 9 + trunk/drivers/platform/x86/dell-laptop.c | 10 +- trunk/drivers/platform/x86/dell-wmi-aio.c | 53 +- trunk/drivers/platform/x86/hp-wmi.c | 24 + trunk/drivers/platform/x86/hp_accel.c | 3 +- trunk/drivers/platform/x86/ideapad-laptop.c | 6 +- trunk/drivers/platform/x86/pvpanic.c | 124 + trunk/drivers/platform/x86/samsung-q10.c | 5 +- trunk/drivers/platform/x86/sony-laptop.c | 20 +- trunk/drivers/pwm/pwm-imx.c | 5 - trunk/drivers/pwm/pwm-puv3.c | 5 - trunk/drivers/pwm/pwm-pxa.c | 5 - trunk/drivers/pwm/pwm-tegra.c | 5 - trunk/drivers/pwm/pwm-tiecap.c | 5 - trunk/drivers/pwm/pwm-tiehrpwm.c | 5 - trunk/drivers/pwm/pwm-tipwmss.c | 5 - trunk/drivers/pwm/pwm-vt8500.c | 5 - trunk/drivers/regulator/Kconfig | 2 +- trunk/drivers/rtc/Kconfig | 2 - trunk/drivers/rtc/rtc-nuc900.c | 5 - trunk/drivers/rtc/rtc-omap.c | 5 - trunk/drivers/rtc/rtc-s3c.c | 5 - trunk/drivers/rtc/rtc-tegra.c | 6 - trunk/drivers/rtc/rtc-tile.c | 1 + trunk/drivers/s390/block/xpram.c | 1 + trunk/drivers/s390/cio/chp.c | 36 + trunk/drivers/s390/cio/chsc.h | 4 +- trunk/drivers/scsi/Kconfig | 2 + trunk/drivers/scsi/aic94xx/aic94xx_dev.c | 24 +- trunk/drivers/scsi/aic94xx/aic94xx_hwi.c | 2 +- trunk/drivers/scsi/aic94xx/aic94xx_tmf.c | 2 +- trunk/drivers/scsi/be2iscsi/be.h | 2 +- trunk/drivers/scsi/be2iscsi/be_cmds.c | 172 +- trunk/drivers/scsi/be2iscsi/be_cmds.h | 27 +- trunk/drivers/scsi/be2iscsi/be_iscsi.c | 70 +- trunk/drivers/scsi/be2iscsi/be_iscsi.h | 2 +- trunk/drivers/scsi/be2iscsi/be_main.c | 375 +- trunk/drivers/scsi/be2iscsi/be_main.h | 29 +- trunk/drivers/scsi/be2iscsi/be_mgmt.c | 43 +- trunk/drivers/scsi/be2iscsi/be_mgmt.h | 35 +- trunk/drivers/scsi/bnx2fc/bnx2fc.h | 8 +- trunk/drivers/scsi/bnx2fc/bnx2fc_els.c | 2 +- trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 55 +- trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c | 8 +- trunk/drivers/scsi/bnx2fc/bnx2fc_io.c | 9 +- trunk/drivers/scsi/bnx2fc/bnx2fc_tgt.c | 2 +- trunk/drivers/scsi/csiostor/csio_lnode.h | 2 +- trunk/drivers/scsi/csiostor/csio_rnode.h | 2 +- trunk/drivers/scsi/fnic/fnic.h | 34 +- trunk/drivers/scsi/fnic/fnic_fcs.c | 564 ++- trunk/drivers/scsi/fnic/fnic_fip.h | 68 + trunk/drivers/scsi/fnic/fnic_main.c | 51 +- trunk/drivers/scsi/fnic/vnic_dev.c | 10 + trunk/drivers/scsi/fnic/vnic_dev.h | 2 + trunk/drivers/scsi/fnic/vnic_devcmd.h | 67 + trunk/drivers/scsi/ibmvscsi/ibmvfc.c | 85 +- trunk/drivers/scsi/ibmvscsi/ibmvfc.h | 7 +- trunk/drivers/scsi/ipr.c | 16 +- trunk/drivers/scsi/ipr.h | 2 +- trunk/drivers/scsi/isci/remote_device.c | 4 +- trunk/drivers/scsi/isci/remote_device.h | 2 +- trunk/drivers/scsi/isci/request.c | 6 +- trunk/drivers/scsi/isci/task.c | 2 +- trunk/drivers/scsi/libsas/sas_ata.c | 18 +- 
trunk/drivers/scsi/libsas/sas_discover.c | 34 +- trunk/drivers/scsi/libsas/sas_expander.c | 110 +- trunk/drivers/scsi/libsas/sas_internal.h | 10 +- trunk/drivers/scsi/libsas/sas_port.c | 2 +- trunk/drivers/scsi/lpfc/lpfc.h | 24 +- trunk/drivers/scsi/lpfc/lpfc_attr.c | 166 +- trunk/drivers/scsi/lpfc/lpfc_bsg.c | 39 +- trunk/drivers/scsi/lpfc/lpfc_crtn.h | 1 + trunk/drivers/scsi/lpfc/lpfc_ct.c | 3 +- trunk/drivers/scsi/lpfc/lpfc_els.c | 113 +- trunk/drivers/scsi/lpfc/lpfc_hbadisc.c | 91 +- trunk/drivers/scsi/lpfc/lpfc_hw.h | 1 + trunk/drivers/scsi/lpfc/lpfc_hw4.h | 7 +- trunk/drivers/scsi/lpfc/lpfc_init.c | 607 ++- trunk/drivers/scsi/lpfc/lpfc_logmsg.h | 1 + trunk/drivers/scsi/lpfc/lpfc_mbox.c | 9 +- trunk/drivers/scsi/lpfc/lpfc_mem.c | 14 +- trunk/drivers/scsi/lpfc/lpfc_nportdisc.c | 25 +- trunk/drivers/scsi/lpfc/lpfc_scsi.c | 798 +++- trunk/drivers/scsi/lpfc/lpfc_sli.c | 106 +- trunk/drivers/scsi/lpfc/lpfc_sli4.h | 21 +- trunk/drivers/scsi/lpfc/lpfc_version.h | 2 +- trunk/drivers/scsi/lpfc/lpfc_vport.c | 25 +- trunk/drivers/scsi/lpfc/lpfc_vport.h | 1 + .../drivers/scsi/megaraid/megaraid_sas_base.c | 3 +- trunk/drivers/scsi/mvsas/mv_init.c | 2 +- trunk/drivers/scsi/mvsas/mv_sas.c | 16 +- trunk/drivers/scsi/mvsas/mv_sas.h | 4 +- trunk/drivers/scsi/pm8001/Makefile | 7 +- trunk/drivers/scsi/pm8001/pm8001_ctl.c | 74 +- trunk/drivers/scsi/pm8001/pm8001_defs.h | 34 +- trunk/drivers/scsi/pm8001/pm8001_hwi.c | 817 +++- trunk/drivers/scsi/pm8001/pm8001_hwi.h | 4 +- trunk/drivers/scsi/pm8001/pm8001_init.c | 383 +- trunk/drivers/scsi/pm8001/pm8001_sas.c | 119 +- trunk/drivers/scsi/pm8001/pm8001_sas.h | 181 +- trunk/drivers/scsi/pm8001/pm80xx_hwi.c | 4130 +++++++++++++++++ trunk/drivers/scsi/pm8001/pm80xx_hwi.h | 1523 ++++++ trunk/drivers/scsi/qla2xxx/Kconfig | 4 +- trunk/drivers/scsi/qla2xxx/qla_mr.c | 6 +- trunk/drivers/scsi/qla2xxx/qla_os.c | 2 +- trunk/drivers/scsi/qla4xxx/ql4_iocb.c | 1 + trunk/drivers/scsi/qla4xxx/ql4_os.c | 45 +- trunk/drivers/scsi/qla4xxx/ql4_version.h | 2 +- trunk/drivers/scsi/scsi_debug.c | 97 +- trunk/drivers/scsi/scsi_error.c | 37 +- trunk/drivers/scsi/scsi_lib.c | 9 +- trunk/drivers/scsi/scsi_pm.c | 84 +- trunk/drivers/scsi/scsi_transport_iscsi.c | 94 +- trunk/drivers/scsi/sd.c | 42 +- trunk/drivers/scsi/sd.h | 1 + trunk/drivers/scsi/sd_dif.c | 8 - trunk/drivers/scsi/ufs/Kconfig | 11 + trunk/drivers/scsi/ufs/Makefile | 1 + trunk/drivers/scsi/ufs/ufshcd-pltfrm.c | 217 + trunk/drivers/scsi/ufs/ufshcd.c | 2 +- trunk/drivers/spi/Kconfig | 8 +- trunk/drivers/spi/spi-atmel.c | 51 +- trunk/drivers/spi/spi-davinci.c | 2 +- trunk/drivers/spi/spi-tegra20-sflash.c | 5 - trunk/drivers/spi/spi.c | 9 +- trunk/drivers/ssb/driver_mipscore.c | 2 +- trunk/drivers/staging/Kconfig | 4 +- trunk/drivers/staging/android/Kconfig | 2 +- trunk/drivers/staging/android/logger.c | 4 +- trunk/drivers/staging/android/logger.h | 2 +- trunk/drivers/staging/comedi/Kconfig | 9 + trunk/drivers/staging/comedi/comedi_buf.c | 12 + trunk/drivers/staging/comedi/comedi_fops.c | 3 - .../drivers/staging/comedi/drivers/ni_labpc.c | 8 +- .../drivers/staging/comedi/drivers/ni_labpc.h | 1 + .../staging/comedi/drivers/ni_mio_common.c | 20 +- trunk/drivers/staging/dwc2/Kconfig | 2 +- trunk/drivers/staging/dwc2/hcd_intr.c | 2 - trunk/drivers/staging/dwc2/platform.c | 13 +- trunk/drivers/staging/gdm72xx/Kconfig | 2 +- trunk/drivers/staging/iio/accel/Kconfig | 2 +- trunk/drivers/staging/iio/adc/Kconfig | 2 +- trunk/drivers/staging/iio/adc/mxs-lradc.c | 5 +- trunk/drivers/staging/iio/addac/Kconfig | 2 +- 
.../drivers/staging/iio/light/tsl2x7x_core.c | 9 +- trunk/drivers/staging/iio/resolver/Kconfig | 4 +- trunk/drivers/staging/iio/trigger/Kconfig | 2 +- trunk/drivers/staging/imx-drm/Kconfig | 5 + trunk/drivers/staging/imx-drm/imx-tve.c | 4 +- trunk/drivers/staging/media/solo6x10/Kconfig | 1 + trunk/drivers/staging/nvec/nvec.c | 26 +- trunk/drivers/staging/nvec/nvec.h | 5 +- trunk/drivers/staging/nvec/nvec_kbd.c | 10 +- trunk/drivers/staging/nvec/nvec_power.c | 1 + trunk/drivers/staging/nvec/nvec_ps2.c | 8 +- trunk/drivers/staging/sep/Kconfig | 2 +- .../staging/ste_rmi4/synaptics_i2c_rmi4.c | 6 +- trunk/drivers/staging/vt6656/hostap.c | 2 +- trunk/drivers/staging/vt6656/iwctl.c | 6 + .../staging/zcache/ramster/ramster-howto.txt | 366 ++ trunk/drivers/staging/zcache/zcache-main.c | 8 +- trunk/drivers/target/iscsi/iscsi_target.c | 63 +- .../drivers/target/iscsi/iscsi_target_erl1.c | 7 +- .../target/iscsi/iscsi_target_parameters.c | 8 +- .../target/iscsi/iscsi_target_parameters.h | 4 +- trunk/drivers/target/target_core_configfs.c | 11 +- trunk/drivers/target/target_core_device.c | 14 - trunk/drivers/target/target_core_file.c | 9 +- trunk/drivers/target/target_core_iblock.c | 2 + trunk/drivers/target/target_core_internal.h | 1 - trunk/drivers/target/target_core_rd.c | 21 +- trunk/drivers/target/target_core_rd.h | 1 + trunk/drivers/target/target_core_transport.c | 13 +- trunk/drivers/thermal/Kconfig | 28 +- trunk/drivers/thermal/Makefile | 10 +- trunk/drivers/thermal/armada_thermal.c | 222 + trunk/drivers/thermal/cpu_cooling.c | 295 +- .../drivers/thermal/db8500_cpufreq_cooling.c | 2 +- trunk/drivers/thermal/db8500_thermal.c | 19 +- trunk/drivers/thermal/dove_thermal.c | 11 +- trunk/drivers/thermal/exynos_thermal.c | 201 +- trunk/drivers/thermal/fair_share.c | 15 +- trunk/drivers/thermal/kirkwood_thermal.c | 12 +- trunk/drivers/thermal/rcar_thermal.c | 34 +- trunk/drivers/thermal/step_wise.c | 26 +- .../thermal/{thermal_sys.c => thermal_core.c} | 197 +- trunk/drivers/thermal/thermal_core.h | 27 + trunk/drivers/thermal/user_space.c | 15 +- trunk/drivers/tty/ehv_bytechan.c | 1 + trunk/drivers/tty/mxser.c | 11 +- trunk/drivers/tty/n_tty.c | 8 + trunk/drivers/tty/rocket.c | 288 +- trunk/drivers/tty/serial/68328serial.c | 1 - trunk/drivers/tty/serial/8250/8250_dw.c | 7 +- trunk/drivers/tty/serial/amba-pl011.c | 2 +- trunk/drivers/tty/serial/bcm63xx_uart.c | 1 - trunk/drivers/tty/serial/mcf.c | 4 +- trunk/drivers/tty/serial/mpc52xx_uart.c | 11 +- trunk/drivers/tty/serial/nwpserial.c | 2 +- trunk/drivers/tty/serial/samsung.c | 1 + trunk/drivers/tty/tty_audit.c | 104 +- trunk/drivers/tty/vt/vt.c | 14 +- trunk/drivers/tty/vt/vt_ioctl.c | 67 +- trunk/drivers/uio/Kconfig | 1 + trunk/drivers/usb/atm/cxacru.c | 3 +- trunk/drivers/usb/chipidea/Kconfig | 2 +- trunk/drivers/usb/chipidea/ci13xxx_imx.c | 15 +- trunk/drivers/usb/chipidea/core.c | 5 - trunk/drivers/usb/core/Kconfig | 2 +- trunk/drivers/usb/core/quirks.c | 3 + trunk/drivers/usb/dwc3/Kconfig | 6 +- trunk/drivers/usb/dwc3/dwc3-exynos.c | 6 +- trunk/drivers/usb/gadget/Kconfig | 1 - trunk/drivers/usb/gadget/atmel_usba_udc.c | 2 - trunk/drivers/usb/gadget/bcm63xx_udc.c | 11 - trunk/drivers/usb/gadget/configfs.c | 8 +- trunk/drivers/usb/gadget/dummy_hcd.c | 5 +- trunk/drivers/usb/gadget/f_ecm.c | 1 + trunk/drivers/usb/gadget/f_subset.c | 1 + trunk/drivers/usb/gadget/f_uac2.c | 2 - trunk/drivers/usb/gadget/fusb300_udc.c | 4 +- trunk/drivers/usb/gadget/imx_udc.c | 2 - trunk/drivers/usb/gadget/m66592-udc.c | 4 +- trunk/drivers/usb/gadget/pxa25x_udc.c | 1 - 
trunk/drivers/usb/gadget/r8a66597-udc.c | 4 +- trunk/drivers/usb/gadget/s3c-hsotg.c | 2 +- trunk/drivers/usb/gadget/s3c2410_udc.c | 3 +- trunk/drivers/usb/gadget/zero.c | 8 +- trunk/drivers/usb/host/Kconfig | 17 +- trunk/drivers/usb/host/ehci-atmel.c | 6 +- trunk/drivers/usb/host/ehci-hcd.c | 17 - trunk/drivers/usb/host/ehci-omap.c | 8 +- trunk/drivers/usb/host/ehci-orion.c | 6 +- trunk/drivers/usb/host/ehci-s5p.c | 5 +- trunk/drivers/usb/host/ehci-spear.c | 6 +- trunk/drivers/usb/host/ehci-tegra.c | 6 +- trunk/drivers/usb/host/ehci-tilegx.c | 7 +- trunk/drivers/usb/host/isp1760-hcd.c | 2 +- trunk/drivers/usb/host/isp1760-if.c | 4 +- trunk/drivers/usb/host/ohci-at91.c | 6 +- trunk/drivers/usb/host/ohci-exynos.c | 4 +- trunk/drivers/usb/host/ohci-hcd.c | 34 +- trunk/drivers/usb/host/ohci-nxp.c | 50 +- trunk/drivers/usb/host/ohci-omap3.c | 8 +- trunk/drivers/usb/host/ohci-pxa27x.c | 6 +- trunk/drivers/usb/host/ohci-spear.c | 6 +- trunk/drivers/usb/host/ohci-tilegx.c | 7 +- trunk/drivers/usb/host/oxu210hp-hcd.c | 2 +- trunk/drivers/usb/host/sl811-hcd.c | 2 +- trunk/drivers/usb/host/uhci-hub.c | 3 +- trunk/drivers/usb/host/uhci-platform.c | 6 +- trunk/drivers/usb/host/uhci-q.c | 2 +- trunk/drivers/usb/host/xhci-mem.c | 17 +- trunk/drivers/usb/musb/musb_dsps.c | 1 + trunk/drivers/usb/musb/omap2430.c | 3 +- trunk/drivers/usb/phy/Kconfig | 7 +- trunk/drivers/usb/phy/phy-ab8500-usb.c | 2 - trunk/drivers/usb/phy/phy-fsl-usb.c | 1 + trunk/drivers/usb/phy/phy-gpio-vbus-usb.c | 3 +- trunk/drivers/usb/phy/phy-isp1301.c | 1 + trunk/drivers/usb/phy/phy-mv-u3d-usb.c | 5 - trunk/drivers/usb/phy/phy-mv-usb.c | 3 - trunk/drivers/usb/phy/phy-mxs-usb.c | 8 +- trunk/drivers/usb/phy/phy-nop.c | 2 - trunk/drivers/usb/phy/phy-samsung-usb2.c | 5 - trunk/drivers/usb/phy/phy-samsung-usb3.c | 5 - trunk/drivers/usb/serial/ftdi_sio.c | 30 +- trunk/drivers/usb/serial/ftdi_sio_ids.h | 2 + trunk/drivers/usb/serial/generic.c | 31 + trunk/drivers/usb/serial/io_ti.c | 22 +- trunk/drivers/usb/serial/option.c | 13 +- trunk/drivers/usb/serial/ti_usb_3410_5052.c | 23 +- trunk/drivers/usb/serial/usb-serial.c | 30 +- trunk/drivers/usb/storage/realtek_cr.c | 8 +- trunk/drivers/vhost/vringh.c | 3 + trunk/drivers/video/Kconfig | 4 +- trunk/drivers/video/au1100fb.c | 22 +- trunk/drivers/video/backlight/Kconfig | 4 +- trunk/drivers/video/console/Makefile | 2 + trunk/drivers/video/mxsfb.c | 8 +- trunk/drivers/video/omap2/dss/hdmi.c | 4 - trunk/drivers/video/omap2/vrfb.c | 5 - trunk/drivers/w1/masters/Kconfig | 2 +- trunk/drivers/w1/masters/omap_hdq.c | 5 - trunk/drivers/watchdog/ath79_wdt.c | 13 +- trunk/drivers/watchdog/davinci_wdt.c | 14 +- trunk/drivers/watchdog/imx2_wdt.c | 5 - trunk/drivers/watchdog/s3c2410_wdt.c | 7 +- trunk/drivers/watchdog/shwdt.c | 7 +- trunk/drivers/watchdog/watchdog_dev.c | 3 +- trunk/drivers/xen/Kconfig | 9 +- trunk/drivers/xen/balloon.c | 3 +- trunk/drivers/xen/events.c | 3 +- trunk/drivers/xen/privcmd.c | 2 +- trunk/drivers/xen/tmem.c | 87 +- trunk/drivers/xen/xen-selfballoon.c | 47 +- trunk/drivers/xen/xenbus/xenbus_dev_backend.c | 21 +- trunk/fs/btrfs/Kconfig | 22 +- trunk/fs/btrfs/backref.c | 88 +- trunk/fs/btrfs/backref.h | 3 - trunk/fs/btrfs/btrfs_inode.h | 2 +- trunk/fs/btrfs/check-integrity.c | 2 +- trunk/fs/btrfs/compression.c | 14 +- trunk/fs/btrfs/compression.h | 2 - trunk/fs/btrfs/ctree.c | 386 +- trunk/fs/btrfs/ctree.h | 153 +- trunk/fs/btrfs/delayed-inode.c | 66 +- trunk/fs/btrfs/delayed-ref.c | 30 +- trunk/fs/btrfs/delayed-ref.h | 1 + trunk/fs/btrfs/dev-replace.c | 5 + 
trunk/fs/btrfs/dir-item.c | 11 +- trunk/fs/btrfs/disk-io.c | 459 +- trunk/fs/btrfs/disk-io.h | 5 +- trunk/fs/btrfs/extent-tree.c | 631 ++- trunk/fs/btrfs/extent_io.c | 448 +- trunk/fs/btrfs/extent_io.h | 46 +- trunk/fs/btrfs/extent_map.c | 23 +- trunk/fs/btrfs/extent_map.h | 3 +- trunk/fs/btrfs/file-item.c | 102 +- trunk/fs/btrfs/file.c | 37 +- trunk/fs/btrfs/free-space-cache.c | 639 ++- trunk/fs/btrfs/free-space-cache.h | 7 + trunk/fs/btrfs/inode-item.c | 17 +- trunk/fs/btrfs/inode-map.c | 8 +- trunk/fs/btrfs/inode.c | 262 +- trunk/fs/btrfs/ioctl.c | 118 +- trunk/fs/btrfs/locking.c | 4 +- trunk/fs/btrfs/ordered-data.c | 28 +- trunk/fs/btrfs/ordered-data.h | 3 +- trunk/fs/btrfs/print-tree.c | 9 +- trunk/fs/btrfs/print-tree.h | 2 +- trunk/fs/btrfs/qgroup.c | 840 +++- trunk/fs/btrfs/raid56.c | 16 +- trunk/fs/btrfs/reada.c | 5 +- trunk/fs/btrfs/relocation.c | 116 +- trunk/fs/btrfs/root-tree.c | 7 +- trunk/fs/btrfs/scrub.c | 140 +- trunk/fs/btrfs/send.c | 32 +- trunk/fs/btrfs/send.h | 1 - trunk/fs/btrfs/super.c | 108 +- trunk/fs/btrfs/transaction.c | 95 +- trunk/fs/btrfs/transaction.h | 3 +- trunk/fs/btrfs/tree-log.c | 390 +- trunk/fs/btrfs/tree-log.h | 3 - trunk/fs/btrfs/ulist.c | 58 +- trunk/fs/btrfs/ulist.h | 6 + trunk/fs/btrfs/volumes.c | 151 +- trunk/fs/btrfs/volumes.h | 33 +- trunk/fs/btrfs/xattr.c | 4 +- trunk/fs/ecryptfs/crypto.c | 141 +- trunk/fs/ecryptfs/ecryptfs_kernel.h | 3 +- trunk/fs/ecryptfs/read_write.c | 9 +- trunk/fs/ext4/ext4.h | 8 +- trunk/fs/ext4/extents.c | 9 +- trunk/fs/ext4/extents_status.c | 17 +- trunk/fs/ext4/extents_status.h | 3 +- trunk/fs/ext4/file.c | 4 +- trunk/fs/ext4/inode.c | 85 +- trunk/fs/ext4/mballoc.c | 6 +- trunk/fs/ext4/page-io.c | 121 +- trunk/fs/f2fs/checkpoint.c | 63 +- trunk/fs/f2fs/data.c | 202 +- trunk/fs/f2fs/debug.c | 10 +- trunk/fs/f2fs/dir.c | 110 +- trunk/fs/f2fs/f2fs.h | 93 +- trunk/fs/f2fs/file.c | 116 +- trunk/fs/f2fs/gc.c | 123 +- trunk/fs/f2fs/gc.h | 12 +- trunk/fs/f2fs/inode.c | 68 +- trunk/fs/f2fs/namei.c | 80 +- trunk/fs/f2fs/node.c | 411 +- trunk/fs/f2fs/node.h | 20 +- trunk/fs/f2fs/recovery.c | 83 +- trunk/fs/f2fs/segment.c | 137 +- trunk/fs/f2fs/segment.h | 41 +- trunk/fs/f2fs/super.c | 67 +- trunk/fs/f2fs/xattr.c | 28 +- trunk/fs/gfs2/Kconfig | 2 +- trunk/fs/gfs2/lops.c | 2 +- trunk/fs/gfs2/quota.c | 4 +- trunk/fs/gfs2/rgrp.c | 9 +- trunk/fs/namei.c | 2 +- trunk/fs/nfs/nfs4_fs.h | 3 +- trunk/fs/nfs/nfs4filelayout.h | 2 + trunk/fs/nfs/nfs4filelayoutdev.c | 26 +- trunk/fs/nfs/nfs4proc.c | 119 +- trunk/fs/nfs/nfs4state.c | 11 +- trunk/fs/nfs/nfs4xdr.c | 2 +- trunk/fs/nfs/super.c | 48 +- trunk/fs/nfsd/nfs4proc.c | 15 +- trunk/fs/nfsd/nfs4recover.c | 12 +- trunk/fs/notify/fanotify/fanotify_user.c | 17 + trunk/fs/pstore/ram.c | 2 + trunk/fs/romfs/mmap-nommu.c | 5 +- trunk/fs/xfs/Kconfig | 13 + trunk/fs/xfs/mrlock.h | 12 +- trunk/fs/xfs/xfs.h | 5 + trunk/fs/xfs/xfs_alloc_btree.c | 4 +- trunk/fs/xfs/xfs_bmap_btree.c | 4 +- trunk/fs/xfs/xfs_btree.h | 2 +- trunk/fs/xfs/xfs_dir2_node.c | 4 +- trunk/fs/xfs/xfs_ialloc_btree.c | 4 +- trunk/fs/xfs/xfs_inode.c | 2 +- trunk/fs/xfs/xfs_ioctl.c | 14 +- trunk/fs/xfs/xfs_ioctl32.c | 14 +- trunk/fs/xfs/xfs_linux.h | 24 +- trunk/fs/xfs/xfs_message.c | 8 + trunk/fs/xfs/xfs_message.h | 1 + trunk/fs/xfs/xfs_trans.h | 4 +- trunk/include/acpi/acpiosxf.h | 2 +- trunk/include/acpi/processor.h | 10 +- trunk/include/drm/drmP.h | 3 +- trunk/include/drm/drm_fb_helper.h | 15 +- trunk/include/drm/drm_os_linux.h | 9 - trunk/include/drm/drm_pciids.h | 6 + trunk/include/linux/acpi_dma.h | 116 + 
trunk/include/linux/audit.h | 48 +- trunk/include/linux/bcma/bcma.h | 5 +- trunk/include/linux/brcmphy.h | 5 + trunk/include/linux/compat.h | 2 + trunk/include/linux/cpu_cooling.h | 25 +- trunk/include/linux/cpuidle.h | 2 +- trunk/include/linux/device-mapper.h | 15 + trunk/include/linux/dmaengine.h | 15 +- trunk/include/linux/f2fs_fs.h | 17 +- trunk/include/linux/ftrace.h | 4 + trunk/include/linux/ftrace_event.h | 1 + trunk/include/linux/gpio.h | 6 +- trunk/include/linux/hid.h | 2 +- trunk/include/linux/journal-head.h | 8 +- trunk/include/linux/kref.h | 33 + trunk/include/linux/mfd/abx500/ab8500.h | 2 - trunk/include/linux/mlx4/device.h | 104 +- trunk/include/linux/mlx4/qp.h | 29 +- trunk/include/linux/mlx4/srq.h | 2 + trunk/include/linux/mtd/mtd.h | 8 +- trunk/include/linux/mtd/nand.h | 121 +- trunk/include/linux/mtd/physmap.h | 2 +- trunk/include/linux/mtd/plat-ram.h | 4 +- trunk/include/linux/netdevice.h | 11 + trunk/include/linux/nfs_xdr.h | 2 +- trunk/include/linux/nvme.h | 158 +- trunk/include/linux/of_dma.h | 10 +- trunk/include/linux/of_platform.h | 5 +- trunk/include/linux/pci-acpi.h | 2 + trunk/include/linux/pci_ids.h | 6 +- trunk/include/linux/pinctrl/pinconf-generic.h | 12 +- trunk/include/linux/platform_data/elm.h | 2 +- trunk/include/linux/platform_data/imx-iram.h | 41 - trunk/include/linux/printk.h | 1 + trunk/include/linux/sched.h | 1 + trunk/include/linux/socket.h | 2 - trunk/include/linux/spi/spi.h | 4 +- trunk/include/linux/sudmac.h | 52 + trunk/include/linux/thermal.h | 15 +- trunk/include/linux/time.h | 4 - trunk/include/linux/tty.h | 6 +- trunk/include/linux/uio.h | 3 + trunk/include/linux/usb/gadget.h | 5 +- trunk/include/linux/usb/serial.h | 4 + trunk/include/linux/usb/usbnet.h | 5 + trunk/include/linux/vt_kern.h | 2 +- trunk/include/net/mac80211.h | 12 +- trunk/include/net/netfilter/nf_log.h | 3 +- trunk/include/net/netfilter/nfnetlink_log.h | 3 +- trunk/include/net/sock.h | 12 + trunk/include/scsi/libsas.h | 4 +- trunk/include/scsi/osd_protocol.h | 2 +- trunk/include/scsi/sas.h | 22 +- trunk/include/scsi/sas_ata.h | 4 +- trunk/include/scsi/scsi_device.h | 16 +- trunk/include/scsi/scsi_transport_iscsi.h | 8 +- trunk/include/scsi/scsi_transport_sas.h | 7 - trunk/include/sound/tlv.h | 6 +- trunk/include/target/target_core_base.h | 5 - trunk/include/trace/events/ext4.h | 4 +- trunk/include/trace/events/f2fs.h | 682 +++ trunk/include/uapi/linux/audit.h | 4 +- trunk/include/uapi/linux/btrfs.h | 32 +- trunk/include/uapi/linux/if_cablemodem.h | 12 +- trunk/include/uapi/linux/virtio_console.h | 2 +- trunk/ipc/sem.c | 14 + trunk/ipc/shm.c | 8 +- trunk/kernel/audit.c | 516 +- trunk/kernel/audit.h | 156 + trunk/kernel/auditfilter.c | 360 +- trunk/kernel/auditsc.c | 407 +- trunk/kernel/cpu/idle.c | 2 + trunk/kernel/events/core.c | 240 +- trunk/kernel/kmod.c | 5 + trunk/kernel/module.c | 21 +- trunk/kernel/params.c | 5 +- trunk/kernel/rcutree_plugin.h | 4 +- trunk/kernel/sys_ni.c | 1 + trunk/kernel/sysctl_binary.c | 4 +- trunk/kernel/time/Kconfig | 5 - trunk/kernel/time/tick-broadcast.c | 10 +- trunk/kernel/time/tick-sched.c | 3 +- trunk/kernel/timer.c | 2 +- trunk/kernel/trace/Kconfig | 2 +- trunk/kernel/trace/ftrace.c | 126 +- trunk/kernel/trace/trace_events.c | 54 +- trunk/kernel/trace/trace_events_filter.c | 4 + trunk/kernel/trace/trace_kprobe.c | 320 +- trunk/kernel/workqueue.c | 19 +- trunk/lib/Makefile | 2 +- trunk/lib/iovec.c | 53 + trunk/lib/klist.c | 2 +- trunk/mm/mmap.c | 8 +- trunk/mm/page_alloc.c | 2 +- trunk/mm/slab_common.c | 20 +- trunk/net/802/mrp.c | 4 +- 
trunk/net/batman-adv/distributed-arp-table.c | 13 + trunk/net/batman-adv/main.c | 19 +- trunk/net/batman-adv/network-coding.c | 8 +- trunk/net/batman-adv/originator.c | 16 + trunk/net/batman-adv/originator.h | 1 + trunk/net/batman-adv/soft-interface.c | 1 + trunk/net/batman-adv/translation-table.c | 7 +- trunk/net/bridge/netfilter/ebt_log.c | 11 +- trunk/net/bridge/netfilter/ebt_ulog.c | 18 +- trunk/net/ceph/osd_client.c | 5 +- trunk/net/core/dev.c | 11 + trunk/net/core/iovec.c | 50 - trunk/net/core/sock.c | 12 - trunk/net/ipv4/gre.c | 8 +- trunk/net/ipv4/ip_gre.c | 3 +- trunk/net/ipv4/ip_output.c | 2 +- trunk/net/ipv4/netfilter/ipt_ULOG.c | 13 +- trunk/net/ipv4/tcp.c | 29 +- trunk/net/ipv4/tcp_input.c | 23 +- trunk/net/ipv4/tcp_output.c | 10 +- trunk/net/ipv4/udp.c | 4 +- trunk/net/ipv6/ip6_gre.c | 2 + trunk/net/ipv6/ip6_output.c | 2 +- trunk/net/ipv6/tcp_ipv6.c | 12 + trunk/net/ipv6/udp.c | 13 +- trunk/net/ipv6/udp_impl.h | 2 + trunk/net/ipv6/udplite.c | 2 +- trunk/net/ipv6/xfrm6_policy.c | 4 +- trunk/net/irda/irlap_frame.c | 2 +- trunk/net/mac80211/ieee80211_i.h | 1 + trunk/net/mac80211/mlme.c | 61 +- trunk/net/mac80211/rate.c | 9 +- trunk/net/mac80211/rx.c | 3 + trunk/net/mac80211/tkip.c | 4 +- trunk/net/mac80211/util.c | 7 + trunk/net/netfilter/nf_log.c | 7 +- trunk/net/netfilter/nfnetlink_log.c | 6 +- trunk/net/netfilter/nfnetlink_queue_core.c | 2 + trunk/net/netfilter/xt_LOG.c | 13 +- trunk/net/netfilter/xt_NFLOG.c | 3 +- trunk/net/netfilter/xt_TCPOPTSTRIP.c | 17 +- trunk/net/netlabel/netlabel_domainhash.c | 69 + trunk/net/socket.c | 6 +- trunk/net/sunrpc/auth_gss/auth_gss.c | 3 +- trunk/net/sunrpc/auth_gss/gss_rpc_xdr.c | 58 +- trunk/net/sunrpc/clnt.c | 2 +- trunk/net/wireless/core.c | 17 +- trunk/net/wireless/nl80211.c | 4 + trunk/net/wireless/sme.c | 3 +- trunk/net/wireless/trace.h | 23 +- trunk/net/xfrm/xfrm_output.c | 1 + trunk/scripts/package/Makefile | 2 +- trunk/sound/aoa/fabrics/layout.c | 8 + trunk/sound/aoa/soundbus/i2sbus/core.c | 3 +- trunk/sound/atmel/abdac.c | 2 - trunk/sound/atmel/ac97c.c | 2 - trunk/sound/mips/hal2.c | 1 - trunk/sound/mips/sgio2audio.c | 1 - trunk/sound/oss/Kconfig | 1 + trunk/sound/pci/hda/hda_codec.c | 7 +- trunk/sound/pci/hda/hda_generic.c | 9 +- trunk/sound/pci/hda/hda_intel.c | 2 +- trunk/sound/pci/hda/patch_conexant.c | 17 +- trunk/sound/pci/hda/patch_hdmi.c | 54 +- trunk/sound/pci/hda/patch_realtek.c | 1 + trunk/sound/soc/codecs/ab8500-codec.h | 36 +- trunk/sound/soc/codecs/da7213.c | 8 +- trunk/sound/soc/codecs/wm0010.c | 1 + trunk/sound/soc/codecs/wm8994.c | 1 + trunk/sound/soc/davinci/davinci-mcasp.c | 7 +- trunk/sound/soc/fsl/imx-ssi.c | 6 - trunk/sound/soc/kirkwood/kirkwood-i2s.c | 5 - trunk/sound/soc/soc-dapm.c | 4 +- trunk/sound/usb/proc.c | 22 +- .../perf/scripts/python/net_dropmonitor.py | 39 +- trunk/tools/power/x86/turbostat/turbostat.c | 54 +- trunk/virt/kvm/kvm_main.c | 20 +- 1575 files changed, 54025 insertions(+), 23136 deletions(-) create mode 100644 trunk/Documentation/devicetree/bindings/mips/ralink.txt create mode 100644 trunk/Documentation/devicetree/bindings/thermal/armada-thermal.txt rename trunk/Documentation/devicetree/bindings/{drm/exynos/hdmi.txt => video/exynos_hdmi.txt} (100%) rename trunk/Documentation/devicetree/bindings/{drm/exynos/hdmiddc.txt => video/exynos_hdmiddc.txt} (100%) rename trunk/Documentation/devicetree/bindings/{drm/exynos/hdmiphy.txt => video/exynos_hdmiphy.txt} (100%) rename trunk/Documentation/devicetree/bindings/{drm/exynos/mixer.txt => video/exynos_mixer.txt} (100%) create mode 100644 
trunk/Documentation/dmatest.txt create mode 100644 trunk/Documentation/kernel-per-CPU-kthreads.txt create mode 100644 trunk/Documentation/xtensa/mmu.txt create mode 100644 trunk/arch/arc/boot/dts/abilis_tb100.dtsi create mode 100644 trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts create mode 100644 trunk/arch/arc/boot/dts/abilis_tb101.dtsi create mode 100644 trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts create mode 100644 trunk/arch/arc/boot/dts/abilis_tb10x.dtsi create mode 100644 trunk/arch/arc/boot/dts/nsimosci.dts create mode 100644 trunk/arch/arc/configs/nsimosci_defconfig create mode 100644 trunk/arch/arc/configs/tb10x_defconfig create mode 100644 trunk/arch/arc/include/asm/shmparam.h create mode 100644 trunk/arch/arc/mm/mmap.c create mode 100644 trunk/arch/arc/plat-tb10x/Kconfig create mode 100644 trunk/arch/arc/plat-tb10x/Makefile create mode 100644 trunk/arch/arc/plat-tb10x/tb10x.c delete mode 100644 trunk/arch/arm/mach-imx/iram_alloc.c delete mode 100644 trunk/arch/avr32/include/asm/numnodes.h delete mode 100644 trunk/arch/avr32/include/asm/param.h delete mode 100644 trunk/arch/avr32/include/uapi/asm/param.h delete mode 100644 trunk/arch/m68k/include/asm/dbg.h rename trunk/arch/m68k/include/asm/{m532xsim.h => m53xxsim.h} (99%) rename trunk/arch/m68k/platform/coldfire/{m532x.c => m53xx.c} (98%) create mode 100644 trunk/arch/mips/configs/malta_kvm_defconfig create mode 100644 trunk/arch/mips/configs/malta_kvm_guest_defconfig create mode 100644 trunk/arch/mips/configs/maltaaprp_defconfig create mode 100644 trunk/arch/mips/configs/maltasmtc_defconfig create mode 100644 trunk/arch/mips/configs/maltasmvp_defconfig create mode 100644 trunk/arch/mips/configs/maltaup_defconfig create mode 100644 trunk/arch/mips/configs/sead3micro_defconfig create mode 100644 trunk/arch/mips/fw/lib/cmdline.c create mode 100644 trunk/arch/mips/include/asm/dma-coherence.h create mode 100644 trunk/arch/mips/include/asm/fw/fw.h create mode 100644 trunk/arch/mips/include/asm/idle.h create mode 100644 trunk/arch/mips/include/asm/kvm_host.h delete mode 100644 trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h create mode 100644 trunk/arch/mips/include/asm/mach-ralink/mt7620.h create mode 100644 trunk/arch/mips/include/asm/mach-ralink/rt288x.h create mode 100644 trunk/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h create mode 100644 trunk/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h create mode 100644 trunk/arch/mips/include/asm/mach-ralink/rt3883.h create mode 100644 trunk/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h delete mode 100644 trunk/arch/mips/include/asm/mips-boards/prom.h delete mode 100644 trunk/arch/mips/include/asm/netlogic/xlp-hal/usb.h create mode 100644 trunk/arch/mips/include/uapi/asm/kvm.h create mode 100644 trunk/arch/mips/kernel/cevt-gic.c create mode 100644 trunk/arch/mips/kernel/idle.c create mode 100644 trunk/arch/mips/kvm/00README.txt create mode 100644 trunk/arch/mips/kvm/Kconfig create mode 100644 trunk/arch/mips/kvm/Makefile create mode 100644 trunk/arch/mips/kvm/kvm_cb.c create mode 100644 trunk/arch/mips/kvm/kvm_locore.S create mode 100644 trunk/arch/mips/kvm/kvm_mips.c create mode 100644 trunk/arch/mips/kvm/kvm_mips_comm.h create mode 100644 trunk/arch/mips/kvm/kvm_mips_commpage.c create mode 100644 trunk/arch/mips/kvm/kvm_mips_dyntrans.c create mode 100644 trunk/arch/mips/kvm/kvm_mips_emul.c create mode 100644 trunk/arch/mips/kvm/kvm_mips_int.c create mode 100644 trunk/arch/mips/kvm/kvm_mips_int.h create mode 100644 
trunk/arch/mips/kvm/kvm_mips_opcode.h create mode 100644 trunk/arch/mips/kvm/kvm_mips_stats.c create mode 100644 trunk/arch/mips/kvm/kvm_tlb.c create mode 100644 trunk/arch/mips/kvm/kvm_trap_emul.c create mode 100644 trunk/arch/mips/kvm/trace.h create mode 100644 trunk/arch/mips/mm/uasm-micromips.c create mode 100644 trunk/arch/mips/mm/uasm-mips.c delete mode 100644 trunk/arch/mips/mti-malta/malta-cmdline.c delete mode 100644 trunk/arch/mips/mti-sead3/sead3-cmdline.c create mode 100644 trunk/arch/mips/netlogic/dts/xlp_svp.dts create mode 100644 trunk/arch/mips/ralink/dts/mt7620a.dtsi create mode 100644 trunk/arch/mips/ralink/dts/mt7620a_eval.dts create mode 100644 trunk/arch/mips/ralink/dts/rt2880.dtsi create mode 100644 trunk/arch/mips/ralink/dts/rt2880_eval.dts create mode 100644 trunk/arch/mips/ralink/dts/rt3883.dtsi create mode 100644 trunk/arch/mips/ralink/dts/rt3883_eval.dts create mode 100644 trunk/arch/mips/ralink/mt7620.c create mode 100644 trunk/arch/mips/ralink/rt288x.c create mode 100644 trunk/arch/mips/ralink/rt3883.c create mode 100644 trunk/arch/powerpc/include/asm/context_tracking.h create mode 100644 trunk/arch/powerpc/sysdev/udbg_memcons.c delete mode 100644 trunk/arch/xtensa/include/asm/linkage.h create mode 100644 trunk/arch/xtensa/include/asm/stacktrace.h create mode 100644 trunk/arch/xtensa/include/asm/vectors.h create mode 100644 trunk/arch/xtensa/kernel/stacktrace.c rename trunk/drivers/block/{nvme.c => nvme-core.c} (79%) create mode 100644 trunk/drivers/block/nvme-scsi.c create mode 100644 trunk/drivers/dma/acpi-dma.c create mode 100644 trunk/drivers/dma/sh/Kconfig create mode 100644 trunk/drivers/dma/sh/sudmac.c delete mode 100644 trunk/drivers/mtd/devices/doc2000.c delete mode 100644 trunk/drivers/mtd/devices/doc2001.c delete mode 100644 trunk/drivers/mtd/devices/doc2001plus.c delete mode 100644 trunk/drivers/mtd/devices/docecc.c delete mode 100644 trunk/drivers/mtd/devices/docprobe.c delete mode 100644 trunk/drivers/mtd/maps/dbox2-flash.c delete mode 100644 trunk/drivers/mtd/maps/dilnetpc.c delete mode 100644 trunk/drivers/mtd/maps/dmv182.c delete mode 100644 trunk/drivers/mtd/maps/h720x-flash.c delete mode 100644 trunk/drivers/mtd/maps/ixp2000.c delete mode 100644 trunk/drivers/mtd/maps/mbx860.c delete mode 100644 trunk/drivers/mtd/maps/rpxlite.c delete mode 100644 trunk/drivers/mtd/maps/tqm8xxl.c delete mode 100644 trunk/drivers/mtd/nand/h1910.c delete mode 100644 trunk/drivers/mtd/nand/ppchameleonevb.c delete mode 100644 trunk/drivers/mtd/nand/rtc_from4.c delete mode 100644 trunk/drivers/mtd/onenand/onenand_sim.c create mode 100644 trunk/drivers/platform/x86/pvpanic.c create mode 100644 trunk/drivers/scsi/fnic/fnic_fip.h create mode 100644 trunk/drivers/scsi/pm8001/pm80xx_hwi.c create mode 100644 trunk/drivers/scsi/pm8001/pm80xx_hwi.h create mode 100644 trunk/drivers/scsi/ufs/ufshcd-pltfrm.c create mode 100644 trunk/drivers/staging/zcache/ramster/ramster-howto.txt create mode 100644 trunk/drivers/thermal/armada_thermal.c rename trunk/drivers/thermal/{thermal_sys.c => thermal_core.c} (89%) create mode 100644 trunk/include/linux/acpi_dma.h delete mode 100644 trunk/include/linux/platform_data/imx-iram.h create mode 100644 trunk/include/linux/sudmac.h create mode 100644 trunk/include/trace/events/f2fs.h create mode 100644 trunk/lib/iovec.c diff --git a/[refs] b/[refs] index 13823bb3847d..ad00a0f7308b 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: c2b93e0699723700f886ce17bb65ffd771195a6d +refs/heads/master: 
a8432588fc1e406e800e8bf88bebf4cd390b7f46 diff --git a/trunk/Documentation/ABI/testing/sysfs-class-mtd b/trunk/Documentation/ABI/testing/sysfs-class-mtd index 938ef71e2035..3105644b3bfc 100644 --- a/trunk/Documentation/ABI/testing/sysfs-class-mtd +++ b/trunk/Documentation/ABI/testing/sysfs-class-mtd @@ -14,8 +14,7 @@ Description: The /sys/class/mtd/mtd{0,1,2,3,...} directories correspond to each /dev/mtdX character device. These may represent physical/simulated flash devices, partitions on a flash - device, or concatenated flash devices. They exist regardless - of whether CONFIG_MTD_CHAR is actually enabled. + device, or concatenated flash devices. What: /sys/class/mtd/mtdXro/ Date: April 2009 @@ -23,8 +22,7 @@ KernelVersion: 2.6.29 Contact: linux-mtd@lists.infradead.org Description: These directories provide the corresponding read-only device - nodes for /sys/class/mtd/mtdX/ . They are only created - (for the benefit of udev) if CONFIG_MTD_CHAR is enabled. + nodes for /sys/class/mtd/mtdX/ . What: /sys/class/mtd/mtdX/dev Date: April 2009 diff --git a/trunk/Documentation/acpi/enumeration.txt b/trunk/Documentation/acpi/enumeration.txt index b0d541042ac6..d9be7a97dff3 100644 --- a/trunk/Documentation/acpi/enumeration.txt +++ b/trunk/Documentation/acpi/enumeration.txt @@ -66,6 +66,83 @@ the ACPI device explicitly to acpi_platform_device_ids list defined in drivers/acpi/acpi_platform.c. This limitation is only for the platform devices, SPI and I2C devices are created automatically as described below. +DMA support +~~~~~~~~~~~ +DMA controllers enumerated via ACPI should be registered in the system to +provide generic access to their resources. For example, a driver that would +like to be accessible to slave devices via generic API call +dma_request_slave_channel() must register itself at the end of the probe +function like this: + + err = devm_acpi_dma_controller_register(dev, xlate_func, dw); + /* Handle the error if it's not a case of !CONFIG_ACPI */ + +and implement custom xlate function if needed (usually acpi_dma_simple_xlate() +is enough) which converts the FixedDMA resource provided by struct +acpi_dma_spec into the corresponding DMA channel. A piece of code for that case +could look like: + + #ifdef CONFIG_ACPI + struct filter_args { + /* Provide necessary information for the filter_func */ + ... + }; + + static bool filter_func(struct dma_chan *chan, void *param) + { + /* Choose the proper channel */ + ... + } + + static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec, + struct acpi_dma *adma) + { + dma_cap_mask_t cap; + struct filter_args args; + + /* Prepare arguments for filter_func */ + ... + return dma_request_channel(cap, filter_func, &args); + } + #else + static struct dma_chan *xlate_func(struct acpi_dma_spec *dma_spec, + struct acpi_dma *adma) + { + return NULL; + } + #endif + +dma_request_slave_channel() will call xlate_func() for each registered DMA +controller. In the xlate function the proper channel must be chosen based on +information in struct acpi_dma_spec and the properties of the controller +provided by struct acpi_dma. + +Clients must call dma_request_slave_channel() with the string parameter that +corresponds to a specific FixedDMA resource. By default "tx" means the first +entry of the FixedDMA resource array, "rx" means the second entry. The table +below shows a layout: + + Device (I2C0) + { + ... 
+ Method (_CRS, 0, NotSerialized) + { + Name (DBUF, ResourceTemplate () + { + FixedDMA (0x0018, 0x0004, Width32bit, _Y48) + FixedDMA (0x0019, 0x0005, Width32bit, ) + }) + ... + } + } + +So, the FixedDMA with request line 0x0018 is "tx" and next one is "rx" in +this example. + +In robust cases the client unfortunately needs to call +acpi_dma_request_slave_chan_by_index() directly and therefore choose the +specific FixedDMA resource by its index. + SPI serial bus support ~~~~~~~~~~~~~~~~~~~~~~ Slave devices behind SPI bus have SpiSerialBus resource attached to them. diff --git a/trunk/Documentation/devicetree/bindings/dma/atmel-dma.txt b/trunk/Documentation/devicetree/bindings/dma/atmel-dma.txt index 3c046ee6e8b5..c80e8a3402f0 100644 --- a/trunk/Documentation/devicetree/bindings/dma/atmel-dma.txt +++ b/trunk/Documentation/devicetree/bindings/dma/atmel-dma.txt @@ -1,14 +1,39 @@ * Atmel Direct Memory Access Controller (DMA) Required properties: -- compatible: Should be "atmel,-dma" -- reg: Should contain DMA registers location and length -- interrupts: Should contain DMA interrupt +- compatible: Should be "atmel,-dma". +- reg: Should contain DMA registers location and length. +- interrupts: Should contain DMA interrupt. +- #dma-cells: Must be <2>, used to represent the number of integer cells in +the dmas property of client devices. -Examples: +Example: -dma@ffffec00 { +dma0: dma@ffffec00 { compatible = "atmel,at91sam9g45-dma"; reg = <0xffffec00 0x200>; interrupts = <21>; + #dma-cells = <2>; +}; + +DMA clients connected to the Atmel DMA controller must use the format +described in the dma.txt file, using a three-cell specifier for each channel: +a phandle plus two interger cells. +The three cells in order are: + +1. A phandle pointing to the DMA controller. +2. The memory interface (16 most significant bits), the peripheral interface +(16 less significant bits). +3. The peripheral identifier for the hardware handshaking interface. The +identifier can be different for tx and rx. + +Example: + +i2c0@i2c@f8010000 { + compatible = "atmel,at91sam9x5-i2c"; + reg = <0xf8010000 0x100>; + interrupts = <9 4 6>; + dmas = <&dma0 1 7>, + <&dma0 1 8>; + dma-names = "tx", "rx"; }; diff --git a/trunk/Documentation/devicetree/bindings/mips/ralink.txt b/trunk/Documentation/devicetree/bindings/mips/ralink.txt new file mode 100644 index 000000000000..b35a8d04f8b6 --- /dev/null +++ b/trunk/Documentation/devicetree/bindings/mips/ralink.txt @@ -0,0 +1,17 @@ +Ralink MIPS SoC device tree bindings + +1. SoCs + +Each device tree must specify a compatible value for the Ralink SoC +it uses in the compatible property of the root node. 
The compatible +value must be one of the following values: + + ralink,rt2880-soc + ralink,rt3050-soc + ralink,rt3052-soc + ralink,rt3350-soc + ralink,rt3352-soc + ralink,rt3883-soc + ralink,rt5350-soc + ralink,mt7620a-soc + ralink,mt7620n-soc diff --git a/trunk/Documentation/devicetree/bindings/mtd/gpmc-nand.txt b/trunk/Documentation/devicetree/bindings/mtd/gpmc-nand.txt index e7f8d7ed47eb..6a983c1d87cd 100644 --- a/trunk/Documentation/devicetree/bindings/mtd/gpmc-nand.txt +++ b/trunk/Documentation/devicetree/bindings/mtd/gpmc-nand.txt @@ -56,20 +56,20 @@ Example for an AM33xx board: nand-bus-width = <16>; ti,nand-ecc-opt = "bch8"; - gpmc,sync-clk = <0>; - gpmc,cs-on = <0>; - gpmc,cs-rd-off = <44>; - gpmc,cs-wr-off = <44>; - gpmc,adv-on = <6>; - gpmc,adv-rd-off = <34>; - gpmc,adv-wr-off = <44>; - gpmc,we-off = <40>; - gpmc,oe-off = <54>; - gpmc,access = <64>; - gpmc,rd-cycle = <82>; - gpmc,wr-cycle = <82>; - gpmc,wr-access = <40>; - gpmc,wr-data-mux-bus = <0>; + gpmc,sync-clk-ps = <0>; + gpmc,cs-on-ns = <0>; + gpmc,cs-rd-off-ns = <44>; + gpmc,cs-wr-off-ns = <44>; + gpmc,adv-on-ns = <6>; + gpmc,adv-rd-off-ns = <34>; + gpmc,adv-wr-off-ns = <44>; + gpmc,we-off-ns = <40>; + gpmc,oe-off-ns = <54>; + gpmc,access-ns = <64>; + gpmc,rd-cycle-ns = <82>; + gpmc,wr-cycle-ns = <82>; + gpmc,wr-access-ns = <40>; + gpmc,wr-data-mux-bus-ns = <0>; #address-cells = <1>; #size-cells = <1>; diff --git a/trunk/Documentation/devicetree/bindings/mtd/partition.txt b/trunk/Documentation/devicetree/bindings/mtd/partition.txt index 6e1f61f1e789..9315ac96b49b 100644 --- a/trunk/Documentation/devicetree/bindings/mtd/partition.txt +++ b/trunk/Documentation/devicetree/bindings/mtd/partition.txt @@ -5,8 +5,12 @@ on platforms which have strong conventions about which portions of a flash are used for what purposes, but which don't use an on-flash partition table such as RedBoot. -#address-cells & #size-cells must both be present in the mtd device and be -equal to 1. +#address-cells & #size-cells must both be present in the mtd device. There are +two valid values for both: +<1>: for partitions that require a single 32-bit cell to represent their + size/address (aka the value is below 4 GiB) +<2>: for partitions that require two 32-bit cells to represent their + size/address (aka the value is 4 GiB or greater). Required properties: - reg : The partition's offset and size within the mtd bank. @@ -36,3 +40,31 @@ flash@0 { reg = <0x0100000 0x200000>; }; }; + +flash@1 { + #address-cells = <1>; + #size-cells = <2>; + + /* a 4 GiB partition */ + partition@0 { + label = "filesystem"; + reg = <0x00000000 0x1 0x00000000>; + }; +}; + +flash@2 { + #address-cells = <2>; + #size-cells = <2>; + + /* an 8 GiB partition */ + partition@0 { + label = "filesystem #1"; + reg = <0x0 0x00000000 0x2 0x00000000>; + }; + + /* a 4 GiB partition */ + partition@200000000 { + label = "filesystem #2"; + reg = <0x2 0x00000000 0x1 0x00000000>; + }; +}; diff --git a/trunk/Documentation/devicetree/bindings/net/gpmc-eth.txt b/trunk/Documentation/devicetree/bindings/net/gpmc-eth.txt index 24cb4e46f675..ace4a64b3695 100644 --- a/trunk/Documentation/devicetree/bindings/net/gpmc-eth.txt +++ b/trunk/Documentation/devicetree/bindings/net/gpmc-eth.txt @@ -26,16 +26,16 @@ Required properties: - bank-width: Address width of the device in bytes. GPMC supports 8-bit and 16-bit devices and so must be either 1 or 2 bytes. - compatible: Compatible string property for the ethernet child device. 
-- gpmc,cs-on: Chip-select assertion time -- gpmc,cs-rd-off: Chip-select de-assertion time for reads -- gpmc,cs-wr-off: Chip-select de-assertion time for writes -- gpmc,oe-on: Output-enable assertion time -- gpmc,oe-off Output-enable de-assertion time -- gpmc,we-on: Write-enable assertion time -- gpmc,we-off: Write-enable de-assertion time -- gpmc,access: Start cycle to first data capture (read access) -- gpmc,rd-cycle: Total read cycle time -- gpmc,wr-cycle: Total write cycle time +- gpmc,cs-on-ns: Chip-select assertion time +- gpmc,cs-rd-off-ns: Chip-select de-assertion time for reads +- gpmc,cs-wr-off-ns: Chip-select de-assertion time for writes +- gpmc,oe-on-ns: Output-enable assertion time +- gpmc,oe-off-ns: Output-enable de-assertion time +- gpmc,we-on-ns: Write-enable assertion time +- gpmc,we-off-ns: Write-enable de-assertion time +- gpmc,access-ns: Start cycle to first data capture (read access) +- gpmc,rd-cycle-ns: Total read cycle time +- gpmc,wr-cycle-ns: Total write cycle time - reg: Chip-select, base address (relative to chip-select) and size of the memory mapped for the device. Note that base address will be typically 0 as this @@ -65,24 +65,24 @@ gpmc: gpmc@6e000000 { bank-width = <2>; gpmc,mux-add-data; - gpmc,cs-on = <0>; - gpmc,cs-rd-off = <186>; - gpmc,cs-wr-off = <186>; - gpmc,adv-on = <12>; - gpmc,adv-rd-off = <48>; - gpmc,adv-wr-off = <48>; - gpmc,oe-on = <54>; - gpmc,oe-off = <168>; - gpmc,we-on = <54>; - gpmc,we-off = <168>; - gpmc,rd-cycle = <186>; - gpmc,wr-cycle = <186>; - gpmc,access = <114>; - gpmc,page-burst-access = <6>; - gpmc,bus-turnaround = <12>; - gpmc,cycle2cycle-delay = <18>; - gpmc,wr-data-mux-bus = <90>; - gpmc,wr-access = <186>; + gpmc,cs-on-ns = <0>; + gpmc,cs-rd-off-ns = <186>; + gpmc,cs-wr-off-ns = <186>; + gpmc,adv-on-ns = <12>; + gpmc,adv-rd-off-ns = <48>; + gpmc,adv-wr-off-ns = <48>; + gpmc,oe-on-ns = <54>; + gpmc,oe-off-ns = <168>; + gpmc,we-on-ns = <54>; + gpmc,we-off-ns = <168>; + gpmc,rd-cycle-ns = <186>; + gpmc,wr-cycle-ns = <186>; + gpmc,access-ns = <114>; + gpmc,page-burst-access-ns = <6>; + gpmc,bus-turnaround-ns = <12>; + gpmc,cycle2cycle-delay-ns = <18>; + gpmc,wr-data-mux-bus-ns = <90>; + gpmc,wr-access-ns = <186>; gpmc,cycle2cycle-samecsen; gpmc,cycle2cycle-diffcsen; diff --git a/trunk/Documentation/devicetree/bindings/thermal/armada-thermal.txt b/trunk/Documentation/devicetree/bindings/thermal/armada-thermal.txt new file mode 100644 index 000000000000..fff93d5f92de --- /dev/null +++ b/trunk/Documentation/devicetree/bindings/thermal/armada-thermal.txt @@ -0,0 +1,22 @@ +* Marvell Armada 370/XP thermal management + +Required properties: + +- compatible: Should be set to one of the following: + marvell,armada370-thermal + marvell,armadaxp-thermal + +- reg: Device's register space. + Two entries are expected, see the examples below. + The first one is required for the sensor register; + the second one is required for the control register + to be used for sensor initialization (a.k.a. calibration). + +Example: + + thermal@d0018300 { + compatible = "marvell,armada370-thermal"; + reg = <0xd0018300 0x4 + 0xd0018304 0x4>; + status = "okay"; + }; diff --git a/trunk/Documentation/devicetree/bindings/vendor-prefixes.txt b/trunk/Documentation/devicetree/bindings/vendor-prefixes.txt index 4d1919bf2332..6931c4348d24 100644 --- a/trunk/Documentation/devicetree/bindings/vendor-prefixes.txt +++ b/trunk/Documentation/devicetree/bindings/vendor-prefixes.txt @@ -42,6 +42,7 @@ onnn ON Semiconductor Corp. 
picochip Picochip Ltd powervr PowerVR (deprecated, use img) qcom Qualcomm, Inc. +ralink Mediatek/Ralink Technology Corp. ramtron Ramtron International realtek Realtek Semiconductor Corp. renesas Renesas Electronics Corporation diff --git a/trunk/Documentation/devicetree/bindings/drm/exynos/hdmi.txt b/trunk/Documentation/devicetree/bindings/video/exynos_hdmi.txt similarity index 100% rename from trunk/Documentation/devicetree/bindings/drm/exynos/hdmi.txt rename to trunk/Documentation/devicetree/bindings/video/exynos_hdmi.txt diff --git a/trunk/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt b/trunk/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt similarity index 100% rename from trunk/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt rename to trunk/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt diff --git a/trunk/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt b/trunk/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt similarity index 100% rename from trunk/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt rename to trunk/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt diff --git a/trunk/Documentation/devicetree/bindings/drm/exynos/mixer.txt b/trunk/Documentation/devicetree/bindings/video/exynos_mixer.txt similarity index 100% rename from trunk/Documentation/devicetree/bindings/drm/exynos/mixer.txt rename to trunk/Documentation/devicetree/bindings/video/exynos_mixer.txt diff --git a/trunk/Documentation/devicetree/usage-model.txt b/trunk/Documentation/devicetree/usage-model.txt index ef9d06c9f8fd..0efedaad5165 100644 --- a/trunk/Documentation/devicetree/usage-model.txt +++ b/trunk/Documentation/devicetree/usage-model.txt @@ -191,9 +191,11 @@ Linux it will look something like this: }; The bootargs property contains the kernel arguments, and the initrd-* -properties define the address and size of an initrd blob. The -chosen node may also optionally contain an arbitrary number of -additional properties for platform-specific configuration data. +properties define the address and size of an initrd blob. Note that +initrd-end is the first address after the initrd image, so this doesn't +match the usual semantic of struct resource. The chosen node may also +optionally contain an arbitrary number of additional properties for +platform-specific configuration data. During early boot, the architecture setup code calls of_scan_flat_dt() several times with different helper callbacks to parse device tree diff --git a/trunk/Documentation/dmatest.txt b/trunk/Documentation/dmatest.txt new file mode 100644 index 000000000000..279ac0a8c5b1 --- /dev/null +++ b/trunk/Documentation/dmatest.txt @@ -0,0 +1,81 @@ + DMA Test Guide + ============== + + Andy Shevchenko + +This small document introduces how to test DMA drivers using dmatest module. + + Part 1 - How to build the test module + +The menuconfig contains an option that could be found by following path: + Device Drivers -> DMA Engine support -> DMA Test client + +In the configuration file the option called CONFIG_DMATEST. The dmatest could +be built as module or inside kernel. Let's consider those cases. + + Part 2 - When dmatest is built as a module... + +After mounting debugfs and loading the module, the /sys/kernel/debug/dmatest +folder with nodes will be created. They are the same as module parameters with +addition of the 'run' node that controls run and stop phases of the test. + +Note that in this case test will not run on load automatically. 
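+A quick way to get to that point, assuming debugfs is not already mounted
+(the mount point matches the debugfs paths used below):
+
+    % mount -t debugfs none /sys/kernel/debug
+    % modprobe dmatest
+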
+ +Example of usage: + % echo dma0chan0 > /sys/kernel/debug/dmatest/channel + % echo 2000 > /sys/kernel/debug/dmatest/timeout + % echo 1 > /sys/kernel/debug/dmatest/iterations + % echo 1 > /sys/kernel/debug/dmatest/run + +Hint: available channel list could be extracted by running the following +command: + % ls -1 /sys/class/dma/ + +After a while you will start to get messages about current status or error like +in the original code. + +Note that running a new test will stop any in progress test. + +The following command should return actual state of the test. + % cat /sys/kernel/debug/dmatest/run + +To wait for test done the user may perform a busy loop that checks the state. + + % while [ $(cat /sys/kernel/debug/dmatest/run) = "Y" ] + > do + > echo -n "." + > sleep 1 + > done + > echo + + Part 3 - When built-in in the kernel... + +The module parameters that is supplied to the kernel command line will be used +for the first performed test. After user gets a control, the test could be +interrupted or re-run with same or different parameters. For the details see +the above section "Part 2 - When dmatest is built as a module..." + +In both cases the module parameters are used as initial values for the test case. +You always could check them at run-time by running + % grep -H . /sys/module/dmatest/parameters/* + + Part 4 - Gathering the test results + +The module provides a storage for the test results in the memory. The gathered +data could be used after test is done. + +The special file 'results' in the debugfs represents gathered data of the in +progress test. The messages collected are printed to the kernel log as well. + +Example of output: + % cat /sys/kernel/debug/dmatest/results + dma0chan0-copy0: #1: No errors with src_off=0x7bf dst_off=0x8ad len=0x3fea (0) + +The message format is unified across the different types of errors. A number in +the parens represents additional information, e.g. error code, error counter, +or status. + +Comparison between buffers is stored to the dedicated structure. + +Note that the verify result is now accessible only via file 'results' in the +debugfs. diff --git a/trunk/Documentation/filesystems/btrfs.txt b/trunk/Documentation/filesystems/btrfs.txt index 7671352216f1..b349d57b76ea 100644 --- a/trunk/Documentation/filesystems/btrfs.txt +++ b/trunk/Documentation/filesystems/btrfs.txt @@ -1,8 +1,8 @@ - BTRFS - ===== +BTRFS +===== -Btrfs is a new copy on write filesystem for Linux aimed at +Btrfs is a copy on write filesystem for Linux aimed at implementing advanced features while focusing on fault tolerance, repair and easy administration. Initially developed by Oracle, Btrfs is licensed under the GPL and open for contribution from anyone. @@ -34,9 +34,175 @@ The main Btrfs features include: * Online filesystem defragmentation +Mount Options +============= - MAILING LIST - ============ +When mounting a btrfs filesystem, the following option are accepted. +Unless otherwise specified, all options default to off. + + alloc_start= + Debugging option to force all block allocations above a certain + byte threshold on each block device. The value is specified in + bytes, optionally with a K, M, or G suffix, case insensitive. + Default is 1MB. + + autodefrag + Detect small random writes into files and queue them up for the + defrag process. Works best for small files; Not well suited for + large database workloads. 
+ + check_int + check_int_data + check_int_print_mask= + These debugging options control the behavior of the integrity checking + module (the BTRFS_FS_CHECK_INTEGRITY config option required). + + check_int enables the integrity checker module, which examines all + block write requests to ensure on-disk consistency, at a large + memory and CPU cost. + + check_int_data includes extent data in the integrity checks, and + implies the check_int option. + + check_int_print_mask takes a bitmask of BTRFSIC_PRINT_MASK_* values + as defined in fs/btrfs/check-integrity.c, to control the integrity + checker module behavior. + + See comments at the top of fs/btrfs/check-integrity.c for more info. + + compress + compress= + compress-force + compress-force= + Control BTRFS file data compression. Type may be specified as "zlib" + "lzo" or "no" (for no compression, used for remounting). If no type + is specified, zlib is used. If compress-force is specified, + all files will be compressed, whether or not they compress well. + If compression is enabled, nodatacow and nodatasum are disabled. + + degraded + Allow mounts to continue with missing devices. A read-write mount may + fail with too many devices missing, for example if a stripe member + is completely missing. + + device= + Specify a device during mount so that ioctls on the control device + can be avoided. Especialy useful when trying to mount a multi-device + setup as root. May be specified multiple times for multiple devices. + + discard + Issue frequent commands to let the block device reclaim space freed by + the filesystem. This is useful for SSD devices, thinly provisioned + LUNs and virtual machine images, but may have a significant + performance impact. (The fstrim command is also available to + initiate batch trims from userspace). + + enospc_debug + Debugging option to be more verbose in some ENOSPC conditions. + + fatal_errors= + Action to take when encountering a fatal error: + "bug" - BUG() on a fatal error. This is the default. + "panic" - panic() on a fatal error. + + flushoncommit + The 'flushoncommit' mount option forces any data dirtied by a write in a + prior transaction to commit as part of the current commit. This makes + the committed state a fully consistent view of the file system from the + application's perspective (i.e., it includes all completed file system + operations). This was previously the behavior only when a snapshot is + created. + + inode_cache + Enable free inode number caching. Defaults to off due to an overflow + problem when the free space crcs don't fit inside a single page. + + max_inline= + Specify the maximum amount of space, in bytes, that can be inlined in + a metadata B-tree leaf. The value is specified in bytes, optionally + with a K, M, or G suffix, case insensitive. In practice, this value + is limited by the root sector size, with some space unavailable due + to leaf headers. For a 4k sectorsize, max inline data is ~3900 bytes. + + metadata_ratio= + Specify that 1 metadata chunk should be allocated after every + data chunks. Off by default. + + noacl + Disable support for Posix Access Control Lists (ACLs). See the + acl(5) manual page for more information about ACLs. + + nobarrier + Disables the use of block layer write barriers. Write barriers ensure + that certain IOs make it through the device cache and are on persistent + storage. If used on a device with a volatile (non-battery-backed) + write-back cache, this option will lead to filesystem corruption on a + system crash or power loss. 
+ + nodatacow + Disable data copy-on-write for newly created files. Implies nodatasum, + and disables all compression. + + nodatasum + Disable data checksumming for newly created files. + + notreelog + Disable the tree logging used for fsync and O_SYNC writes. + + recovery + Enable autorecovery attempts if a bad tree root is found at mount time. + Currently this scans a list of several previous tree roots and tries to + use the first readable. + + skip_balance + Skip automatic resume of interrupted balance operation after mount. + May be resumed with "btrfs balance resume." + + space_cache (*) + Enable the on-disk freespace cache. + nospace_cache + Disable freespace cache loading without clearing the cache. + clear_cache + Force clearing and rebuilding of the disk space cache if something + has gone wrong. + + ssd + nossd + ssd_spread + Options to control ssd allocation schemes. By default, BTRFS will + enable or disable ssd allocation heuristics depending on whether a + rotational or nonrotational disk is in use. The ssd and nossd options + can override this autodetection. + + The ssd_spread mount option attempts to allocate into big chunks + of unused space, and may perform better on low-end ssds. ssd_spread + implies ssd, enabling all other ssd heuristics as well. + + subvol= + Mount subvolume at rather than the root subvolume. is + relative to the top level subvolume. + + subvolid= + Mount subvolume specified by an ID number rather than the root subvolume. + This allows mounting of subvolumes which are not in the root of the mounted + filesystem. + You can use "btrfs subvolume list" to see subvolume ID numbers. + + subvolrootid= (deprecated) + Mount subvolume specified by rather than the root subvolume. + This allows mounting of subvolumes which are not in the root of the mounted + filesystem. + You can use "btrfs subvolume show " to see the object ID for a subvolume. + + thread_pool= + The number of worker threads to allocate. The default number is equal + to the number of CPUs + 2, or 8, whichever is smaller. + + user_subvol_rm_allowed + Allow subvolumes to be deleted by a non-root user. Use with caution. + +MAILING LIST +============ There is a Btrfs mailing list hosted on vger.kernel.org. You can find details on how to subscribe here: @@ -49,8 +215,8 @@ http://dir.gmane.org/gmane.comp.file-systems.btrfs - IRC - === +IRC +=== Discussion of Btrfs also occurs on the #btrfs channel of the Freenode IRC network. diff --git a/trunk/Documentation/filesystems/f2fs.txt b/trunk/Documentation/filesystems/f2fs.txt index dcf338e62b71..bd3c56c67380 100644 --- a/trunk/Documentation/filesystems/f2fs.txt +++ b/trunk/Documentation/filesystems/f2fs.txt @@ -146,7 +146,7 @@ USAGE Format options -------------- --l [label] : Give a volume label, up to 256 unicode name. +-l [label] : Give a volume label, up to 512 unicode name. -a [0 or 1] : Split start location of each area for heap-based allocation. 1 is set by default, which performs this. -o [int] : Set overprovision ratio in percent over volume size. @@ -156,6 +156,8 @@ Format options -z [int] : Set the number of sections per zone. 1 is set by default. -e [str] : Set basic extension list. e.g. "mp3,gif,mov" +-t [0 or 1] : Disable discard command or not. + 1 is set by default, which conducts discard. 
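+For instance, a volume could be formatted with a label, a 5% overprovision
+area and a basic extension list as follows (the device node /dev/sdb1 is
+only an illustration):
+
+    % mkfs.f2fs -l media -o 5 -e "mp3,gif,mov" /dev/sdb1
+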
================================================================================ DESIGN diff --git a/trunk/Documentation/gpio.txt b/trunk/Documentation/gpio.txt index 77a1d11af723..6f83fa965b4b 100644 --- a/trunk/Documentation/gpio.txt +++ b/trunk/Documentation/gpio.txt @@ -72,11 +72,11 @@ in this document, but drivers acting as clients to the GPIO interface must not care how it's implemented.) That said, if the convention is supported on their platform, drivers should -use it when possible. Platforms must declare GENERIC_GPIO support in their -Kconfig (boolean true), and provide an file. Drivers that can't -work without standard GPIO calls should have Kconfig entries which depend -on GENERIC_GPIO. The GPIO calls are available, either as "real code" or as -optimized-away stubs, when drivers use the include file: +use it when possible. Platforms must select ARCH_REQUIRE_GPIOLIB or +ARCH_WANT_OPTIONAL_GPIOLIB in their Kconfig. Drivers that can't work without +standard GPIO calls should have Kconfig entries which depend on GPIOLIB. The +GPIO calls are available, either as "real code" or as optimized-away stubs, +when drivers use the include file: #include diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index c3bfacb92910..6e3b18a8afc6 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -3005,6 +3005,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted. Force threading of all interrupt handlers except those marked explicitly IRQF_NO_THREAD. + tmem [KNL,XEN] + Enable the Transcendent memory driver if built-in. + + tmem.cleancache=0|1 [KNL, XEN] + Default is on (1). Disable the usage of the cleancache + API to send anonymous pages to the hypervisor. + + tmem.frontswap=0|1 [KNL, XEN] + Default is on (1). Disable the usage of the frontswap + API to send swap pages to the hypervisor. If disabled + the selfballooning and selfshrinking are force disabled. + + tmem.selfballooning=0|1 [KNL, XEN] + Default is on (1). Disable the driving of swap pages + to the hypervisor. + + tmem.selfshrinking=0|1 [KNL, XEN] + Default is on (1). Partial swapoff that immediately + transfers pages from Xen hypervisor back to the + kernel based on different criteria. + topology= [S390] Format: {off | on} Specify if the kernel should make use of the cpu diff --git a/trunk/Documentation/kernel-per-CPU-kthreads.txt b/trunk/Documentation/kernel-per-CPU-kthreads.txt new file mode 100644 index 000000000000..cbf7ae412da4 --- /dev/null +++ b/trunk/Documentation/kernel-per-CPU-kthreads.txt @@ -0,0 +1,202 @@ +REDUCING OS JITTER DUE TO PER-CPU KTHREADS + +This document lists per-CPU kthreads in the Linux kernel and presents +options to control their OS jitter. Note that non-per-CPU kthreads are +not listed here. To reduce OS jitter from non-per-CPU kthreads, bind +them to a "housekeeping" CPU dedicated to such work. + + +REFERENCES + +o Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs. + +o Documentation/cgroups: Using cgroups to bind tasks to sets of CPUs. + +o man taskset: Using the taskset command to bind tasks to sets + of CPUs. + +o man sched_setaffinity: Using the sched_setaffinity() system + call to bind tasks to sets of CPUs. + +o /sys/devices/system/cpu/cpuN/online: Control CPU N's hotplug state, + writing "0" to offline and "1" to online. 
+ +o In order to locate kernel-generated OS jitter on CPU N: + + cd /sys/kernel/debug/tracing + echo 1 > max_graph_depth # Increase the "1" for more detail + echo function_graph > current_tracer + # run workload + cat per_cpu/cpuN/trace + + +KTHREADS + +Name: ehca_comp/%u +Purpose: Periodically process Infiniband-related work. +To reduce its OS jitter, do any of the following: +1. Don't use eHCA Infiniband hardware, instead choosing hardware + that does not require per-CPU kthreads. This will prevent these + kthreads from being created in the first place. (This will + work for most people, as this hardware, though important, is + relatively old and is produced in relatively low unit volumes.) +2. Do all eHCA-Infiniband-related work on other CPUs, including + interrupts. +3. Rework the eHCA driver so that its per-CPU kthreads are + provisioned only on selected CPUs. + + +Name: irq/%d-%s +Purpose: Handle threaded interrupts. +To reduce its OS jitter, do the following: +1. Use irq affinity to force the irq threads to execute on + some other CPU. + +Name: kcmtpd_ctr_%d +Purpose: Handle Bluetooth work. +To reduce its OS jitter, do one of the following: +1. Don't use Bluetooth, in which case these kthreads won't be + created in the first place. +2. Use irq affinity to force Bluetooth-related interrupts to + occur on some other CPU and furthermore initiate all + Bluetooth activity on some other CPU. + +Name: ksoftirqd/%u +Purpose: Execute softirq handlers when threaded or when under heavy load. +To reduce its OS jitter, each softirq vector must be handled +separately as follows: +TIMER_SOFTIRQ: Do all of the following: +1. To the extent possible, keep the CPU out of the kernel when it + is non-idle, for example, by avoiding system calls and by forcing + both kernel threads and interrupts to execute elsewhere. +2. Build with CONFIG_HOTPLUG_CPU=y. After boot completes, force + the CPU offline, then bring it back online. This forces + recurring timers to migrate elsewhere. If you are concerned + with multiple CPUs, force them all offline before bringing the + first one back online. Once you have onlined the CPUs in question, + do not offline any other CPUs, because doing so could force the + timer back onto one of the CPUs in question. +NET_TX_SOFTIRQ and NET_RX_SOFTIRQ: Do all of the following: +1. Force networking interrupts onto other CPUs. +2. Initiate any network I/O on other CPUs. +3. Once your application has started, prevent CPU-hotplug operations + from being initiated from tasks that might run on the CPU to + be de-jittered. (It is OK to force this CPU offline and then + bring it back online before you start your application.) +BLOCK_SOFTIRQ: Do all of the following: +1. Force block-device interrupts onto some other CPU. +2. Initiate any block I/O on other CPUs. +3. Once your application has started, prevent CPU-hotplug operations + from being initiated from tasks that might run on the CPU to + be de-jittered. (It is OK to force this CPU offline and then + bring it back online before you start your application.) +BLOCK_IOPOLL_SOFTIRQ: Do all of the following: +1. Force block-device interrupts onto some other CPU. +2. Initiate any block I/O and block-I/O polling on other CPUs. +3. Once your application has started, prevent CPU-hotplug operations + from being initiated from tasks that might run on the CPU to + be de-jittered. (It is OK to force this CPU offline and then + bring it back online before you start your application.) +TASKLET_SOFTIRQ: Do one or more of the following: +1. 
Avoid use of drivers that use tasklets. (Such drivers will contain + calls to things like tasklet_schedule().) +2. Convert all drivers that you must use from tasklets to workqueues. +3. Force interrupts for drivers using tasklets onto other CPUs, + and also do I/O involving these drivers on other CPUs. +SCHED_SOFTIRQ: Do all of the following: +1. Avoid sending scheduler IPIs to the CPU to be de-jittered, + for example, ensure that at most one runnable kthread is present + on that CPU. If a thread that expects to run on the de-jittered + CPU awakens, the scheduler will send an IPI that can result in + a subsequent SCHED_SOFTIRQ. +2. Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y, + CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU + to be de-jittered is marked as an adaptive-ticks CPU using the + "nohz_full=" boot parameter. This reduces the number of + scheduler-clock interrupts that the de-jittered CPU receives, + minimizing its chances of being selected to do the load balancing + work that runs in SCHED_SOFTIRQ context. +3. To the extent possible, keep the CPU out of the kernel when it + is non-idle, for example, by avoiding system calls and by + forcing both kernel threads and interrupts to execute elsewhere. + This further reduces the number of scheduler-clock interrupts + received by the de-jittered CPU. +HRTIMER_SOFTIRQ: Do all of the following: +1. To the extent possible, keep the CPU out of the kernel when it + is non-idle. For example, avoid system calls and force both + kernel threads and interrupts to execute elsewhere. +2. Build with CONFIG_HOTPLUG_CPU=y. Once boot completes, force the + CPU offline, then bring it back online. This forces recurring + timers to migrate elsewhere. If you are concerned with multiple + CPUs, force them all offline before bringing the first one + back online. Once you have onlined the CPUs in question, do not + offline any other CPUs, because doing so could force the timer + back onto one of the CPUs in question. +RCU_SOFTIRQ: Do at least one of the following: +1. Offload callbacks and keep the CPU in either dyntick-idle or + adaptive-ticks state by doing all of the following: + a. Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y, + CONFIG_NO_HZ_FULL=y, and, in addition ensure that the CPU + to be de-jittered is marked as an adaptive-ticks CPU using + the "nohz_full=" boot parameter. Bind the rcuo kthreads + to housekeeping CPUs, which can tolerate OS jitter. + b. To the extent possible, keep the CPU out of the kernel + when it is non-idle, for example, by avoiding system + calls and by forcing both kernel threads and interrupts + to execute elsewhere. +2. Enable RCU to do its processing remotely via dyntick-idle by + doing all of the following: + a. Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y. + b. Ensure that the CPU goes idle frequently, allowing other + CPUs to detect that it has passed through an RCU quiescent + state. If the kernel is built with CONFIG_NO_HZ_FULL=y, + userspace execution also allows other CPUs to detect that + the CPU in question has passed through a quiescent state. + c. To the extent possible, keep the CPU out of the kernel + when it is non-idle, for example, by avoiding system + calls and by forcing both kernel threads and interrupts + to execute elsewhere. + +Name: rcuc/%u +Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels. +To reduce its OS jitter, do at least one of the following: +1. Build the kernel with CONFIG_PREEMPT=n. 
This prevents these + kthreads from being created in the first place, and also obviates + the need for RCU priority boosting. This approach is feasible + for workloads that do not require high degrees of responsiveness. +2. Build the kernel with CONFIG_RCU_BOOST=n. This prevents these + kthreads from being created in the first place. This approach + is feasible only if your workload never requires RCU priority + boosting, for example, if you ensure frequent idle time on all + CPUs that might execute within the kernel. +3. Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y, + which offloads all RCU callbacks to kthreads that can be moved + off of CPUs susceptible to OS jitter. This approach prevents the + rcuc/%u kthreads from having any work to do, so that they are + never awakened. +4. Ensure that the CPU never enters the kernel, and, in particular, + avoid initiating any CPU hotplug operations on this CPU. This is + another way of preventing any callbacks from being queued on the + CPU, again preventing the rcuc/%u kthreads from having any work + to do. + +Name: rcuob/%d, rcuop/%d, and rcuos/%d +Purpose: Offload RCU callbacks from the corresponding CPU. +To reduce its OS jitter, do at least one of the following: +1. Use affinity, cgroups, or other mechanism to force these kthreads + to execute on some other CPU. +2. Build with CONFIG_RCU_NOCB_CPUS=n, which will prevent these + kthreads from being created in the first place. However, please + note that this will not eliminate OS jitter, but will instead + shift it to RCU_SOFTIRQ. + +Name: watchdog/%u +Purpose: Detect software lockups on each CPU. +To reduce its OS jitter, do at least one of the following: +1. Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these + kthreads from being created in the first place. +2. Echo a zero to /proc/sys/kernel/watchdog to disable the + watchdog timer. +3. Echo a large number of /proc/sys/kernel/watchdog_thresh in + order to reduce the frequency of OS jitter due to the watchdog + timer down to a level that is acceptable for your workload. diff --git a/trunk/Documentation/power/devices.txt b/trunk/Documentation/power/devices.txt index 504dfe4d52eb..a66c9821b5ce 100644 --- a/trunk/Documentation/power/devices.txt +++ b/trunk/Documentation/power/devices.txt @@ -268,7 +268,7 @@ situations. System Power Management Phases ------------------------------ Suspending or resuming the system is done in several phases. Different phases -are used for standby or memory sleep states ("suspend-to-RAM") and the +are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the hibernation state ("suspend-to-disk"). Each phase involves executing callbacks for every device before the next phase begins. Not all busses or classes support all these callbacks and not all drivers use all the callbacks. The @@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one. Entering System Suspend ----------------------- -When the system goes into the standby or memory sleep state, the phases are: +When the system goes into the freeze, standby or memory sleep state, +the phases are: prepare, suspend, suspend_late, suspend_noirq. @@ -368,7 +369,7 @@ the devices that were suspended. Leaving System Suspend ---------------------- -When resuming from standby or memory sleep, the phases are: +When resuming from freeze, standby or memory sleep, the phases are: resume_noirq, resume_early, resume, complete. @@ -433,8 +434,8 @@ the system log. 
Entering Hibernation -------------------- -Hibernating the system is more complicated than putting it into the standby or -memory sleep state, because it involves creating and saving a system image. +Hibernating the system is more complicated than putting it into the other +sleep states, because it involves creating and saving a system image. Therefore there are more phases for hibernation, with a different set of callbacks. These phases always run after tasks have been frozen and memory has been freed. @@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state. At this point the system image is saved, and the devices then need to be prepared for the upcoming system shutdown. This is much like suspending them -before putting the system into the standby or memory sleep state, and the phases -are similar. +before putting the system into the freeze, standby or memory sleep state, +and the phases are similar. 9. The prepare phase is discussed above. diff --git a/trunk/Documentation/power/interface.txt b/trunk/Documentation/power/interface.txt index c537834af005..f1f0f59a7c47 100644 --- a/trunk/Documentation/power/interface.txt +++ b/trunk/Documentation/power/interface.txt @@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs is mounted at /sys). /sys/power/state controls system power state. Reading from this file -returns what states are supported, which is hard-coded to 'standby' -(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' +returns what states are supported, which is hard-coded to 'freeze', +'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk' (Suspend-to-Disk). Writing to this file one of those strings causes the system to diff --git a/trunk/Documentation/power/notifiers.txt b/trunk/Documentation/power/notifiers.txt index c2a4a346c0d9..a81fa254303d 100644 --- a/trunk/Documentation/power/notifiers.txt +++ b/trunk/Documentation/power/notifiers.txt @@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose. The subsystems or drivers having such needs can register suspend notifiers that will be called upon the following events by the PM core: -PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will - be frozen immediately. +PM_HIBERNATION_PREPARE The system is going to hibernate, tasks will be frozen + immediately. This is different from PM_SUSPEND_PREPARE + below because here we do additional work between notifiers + and drivers freezing. PM_POST_HIBERNATION The system memory state has been restored from a hibernation image or an error occurred during diff --git a/trunk/Documentation/power/states.txt b/trunk/Documentation/power/states.txt index 4416b28630df..442d43df9b25 100644 --- a/trunk/Documentation/power/states.txt +++ b/trunk/Documentation/power/states.txt @@ -2,12 +2,26 @@ System Power Management States -The kernel supports three power management states generically, though -each is dependent on platform support code to implement the low-level -details for each state. This file describes each state, what they are +The kernel supports four power management states generically, though +one is generic and the other three are dependent on platform support +code to implement the low-level details for each state. 
+This file describes each state, what they are commonly called, what ACPI state they map to, and what string to write to /sys/power/state to enter that state +state: Freeze / Low-Power Idle +ACPI state: S0 +String: "freeze" + +This state is a generic, pure software, light-weight, low-power state. +It allows more energy to be saved relative to idle by freezing user +space and putting all I/O devices into low-power states (possibly +lower-power than available at run time), such that the processors can +spend more time in their idle states. +This state can be used for platforms without Standby/Suspend-to-RAM +support, or it can be used in addition to Suspend-to-RAM (memory sleep) +to provide reduced resume latency. + State: Standby / Power-On Suspend ACPI State: S1 @@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which also offers low power savings, but low resume latency. Not all devices support D1, and those that don't are left on. -A transition from Standby to the On state should take about 1-2 -seconds. - State: Suspend-to-RAM ACPI State: S3 @@ -42,9 +53,6 @@ transition back to the On state. For at least ACPI, STR requires some minimal boot-strapping code to resume the system from STR. This may be true on other platforms. -A transition from Suspend-to-RAM to the On state should take about -3-5 seconds. - State: Suspend-to-disk ACPI State: S4 @@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering down offers greater savings, and allows this mechanism to work on any system. However, entering a real low-power state allows the user to trigger wake up events (e.g. pressing a key or opening a laptop lid). - -A transition from Suspend-to-Disk to the On state should take about 30 -seconds, though it's typically a bit more with the current -implementation. diff --git a/trunk/Documentation/thermal/exynos_thermal_emulation b/trunk/Documentation/thermal/exynos_thermal_emulation index b73bbfb697bb..36a3e79c1203 100644 --- a/trunk/Documentation/thermal/exynos_thermal_emulation +++ b/trunk/Documentation/thermal/exynos_thermal_emulation @@ -13,11 +13,11 @@ Thermal emulation mode supports software debug for TMU's operation. User can set manually with software code and TMU will read current temperature from user value not from sensor's value. -Enabling CONFIG_EXYNOS_THERMAL_EMUL option will make this support in available. -When it's enabled, sysfs node will be created under -/sys/bus/platform/devices/'exynos device name'/ with name of 'emulation'. +Enabling CONFIG_THERMAL_EMULATION option will make this support available. +When it's enabled, sysfs node will be created as +/sys/devices/virtual/thermal/thermal_zone'zone id'/emul_temp. -The sysfs node, 'emulation', will contain value 0 for the initial state. When you input any +The sysfs node, 'emul_node', will contain value 0 for the initial state. When you input any temperature you want to update to sysfs node, it automatically enable emulation mode and current temperature will be changed into it. (Exynos also supports user changable delay time which would be used to delay of diff --git a/trunk/Documentation/thermal/sysfs-api.txt b/trunk/Documentation/thermal/sysfs-api.txt index 6859661c9d31..a71bd5b90fe8 100644 --- a/trunk/Documentation/thermal/sysfs-api.txt +++ b/trunk/Documentation/thermal/sysfs-api.txt @@ -31,15 +31,17 @@ temperature) and throttle appropriate devices. 1. 
thermal sysfs driver interface functions 1.1 thermal zone device interface -1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *name, +1.1.1 struct thermal_zone_device *thermal_zone_device_register(char *type, int trips, int mask, void *devdata, - struct thermal_zone_device_ops *ops) + struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp, + int passive_delay, int polling_delay)) This interface function adds a new thermal zone device (sensor) to /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the thermal cooling devices registered at the same time. - name: the thermal zone name. + type: the thermal zone type. trips: the total number of trip points this thermal zone supports. mask: Bit string: If 'n'th bit is set, then trip point 'n' is writeable. devdata: device private data @@ -57,6 +59,12 @@ temperature) and throttle appropriate devices. will be fired. .set_emul_temp: set the emulation temperature which helps in debugging different threshold temperature points. + tzp: thermal zone platform parameters. + passive_delay: number of milliseconds to wait between polls when + performing passive cooling. + polling_delay: number of milliseconds to wait between polls when checking + whether trip points have been crossed (0 for interrupt driven systems). + 1.1.2 void thermal_zone_device_unregister(struct thermal_zone_device *tz) @@ -265,6 +273,10 @@ emul_temp Unit: millidegree Celsius WO, Optional + WARNING: Be careful while enabling this option on production systems, + because userland can easily disable the thermal policy by simply + flooding this sysfs node with low temperature values. + ***************************** * Cooling device attributes * ***************************** @@ -363,7 +375,7 @@ This function returns the thermal_instance corresponding to a given {thermal_zone, cooling_device, trip_point} combination. Returns NULL if such an instance does not exist. -5.3:notify_thermal_framework: +5.3:thermal_notify_framework: This function handles the trip events from sensor drivers. It starts throttling the cooling devices according to the policy configured. For CRITICAL and HOT trip points, this notifies the respective drivers, @@ -375,11 +387,3 @@ platform data is provided, this uses the step_wise throttling policy. This function serves as an arbitrator to set the state of a cooling device. It sets the cooling device to the deepest cooling state if possible. - -5.5:thermal_register_governor: -This function lets the various thermal governors to register themselves -with the Thermal framework. At run time, depending on a zone's platform -data, a particular governor is used for throttling. - -5.6:thermal_unregister_governor: -This function unregisters a governor from the thermal framework. diff --git a/trunk/Documentation/xtensa/mmu.txt b/trunk/Documentation/xtensa/mmu.txt new file mode 100644 index 000000000000..2b1af7606d57 --- /dev/null +++ b/trunk/Documentation/xtensa/mmu.txt @@ -0,0 +1,46 @@ +MMUv3 initialization sequence. + +The code in the initialize_mmu macro sets up MMUv3 memory mapping +identically to MMUv2 fixed memory mapping. 
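[Editor's illustration, not part of the patch] Referring back to the thermal sysfs-api.txt hunk above, which documents the extended thermal_zone_device_register() prototype: the sketch below shows how a sensor driver might call the new eight-argument form. Only the prototype and the meaning of passive_delay/polling_delay come from the text above; the ops contents, delay values and "my_" names are assumptions for the example.

	/* Hedged sketch of the registration call documented above. */
	#include <linux/err.h>
	#include <linux/thermal.h>

	static int my_get_temp(struct thermal_zone_device *tz,
			       unsigned long *temp)
	{
		*temp = 45000;	/* millidegree Celsius, from the sensor */
		return 0;
	}

	static struct thermal_zone_device_ops my_ops = {
		.get_temp = my_get_temp,
	};

	static struct thermal_zone_device *my_tz;

	static int my_register(void)
	{
		/* type, trips, writeable-trip mask, devdata, ops, tzp,
		 * passive_delay (ms), polling_delay (ms; 0 means the zone
		 * is interrupt driven, as described above).
		 */
		my_tz = thermal_zone_device_register("my_sensor", 0, 0, NULL,
						     &my_ops, NULL, 0, 1000);
		if (IS_ERR(my_tz))
			return PTR_ERR(my_tz);
		return 0;
	}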
Depending on +CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX symbol this code is +located in one of the following address ranges: + + 0xF0000000..0xFFFFFFFF (will keep same address in MMU v2 layout; + typically ROM) + 0x00000000..0x07FFFFFF (system RAM; this code is actually linked + at 0xD0000000..0xD7FFFFFF [cached] + or 0xD8000000..0xDFFFFFFF [uncached]; + in any case, initially runs elsewhere + than linked, so have to be careful) + +The code has the following assumptions: + This code fragment is run only on an MMU v3. + TLBs are in their reset state. + ITLBCFG and DTLBCFG are zero (reset state). + RASID is 0x04030201 (reset state). + PS.RING is zero (reset state). + LITBASE is zero (reset state, PC-relative literals); required to be PIC. + +TLB setup proceeds along the following steps. + + Legend: + VA = virtual address (two upper nibbles of it); + PA = physical address (two upper nibbles of it); + pc = physical range that contains this code; + +After step 2, we jump to virtual address in 0x40000000..0x5fffffff +that corresponds to next instruction to execute in this code. +After step 4, we jump to intended (linked) address of this code. + + Step 0 Step1 Step 2 Step3 Step 4 Step5 + ============ ===== ============ ===== ============ ===== + VA PA PA VA PA PA VA PA PA + ------ -- -- ------ -- -- ------ -- -- + E0..FF -> E0 -> E0 E0..FF -> E0 F0..FF -> F0 -> F0 + C0..DF -> C0 -> C0 C0..DF -> C0 E0..EF -> F0 -> F0 + A0..BF -> A0 -> A0 A0..BF -> A0 D8..DF -> 00 -> 00 + 80..9F -> 80 -> 80 80..9F -> 80 D0..D7 -> 00 -> 00 + 60..7F -> 60 -> 60 60..7F -> 60 + 40..5F -> 40 40..5F -> pc -> pc 40..5F -> pc + 20..3F -> 20 -> 20 20..3F -> 20 + 00..1F -> 00 -> 00 00..1F -> 00 diff --git a/trunk/Documentation/zh_CN/gpio.txt b/trunk/Documentation/zh_CN/gpio.txt index 4fa7b4e6f856..d5b8f01833f4 100644 --- a/trunk/Documentation/zh_CN/gpio.txt +++ b/trunk/Documentation/zh_CN/gpio.txt @@ -84,10 +84,10 @@ GPIO 公约 控制器的抽象函数来实现它。(有一些可选的代码能支持这种策略的实现,本文档 后面会介绍,但作为 GPIO 接口的客户端驱动程序必须与它的实现无关。) -也就是说,如果在他们的平台上支持这个公约,驱动应尽可能的使用它。平台 -必须在 Kconfig 中声明对 GENERIC_GPIO的支持 (布尔型 true),并提供 -一个 文件。那些调用标准 GPIO 函数的驱动应该在 Kconfig -入口中声明依赖GENERIC_GPIO。当驱动包含文件: +也就是说,如果在他们的平台上支持这个公约,驱动应尽可能的使用它。同时,平台 +必须在 Kconfig 中选择 ARCH_REQUIRE_GPIOLIB 或者 ARCH_WANT_OPTIONAL_GPIOLIB +选项。那些调用标准 GPIO 函数的驱动应该在 Kconfig 入口中声明依赖GENERIC_GPIO。 +当驱动包含文件: #include diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 5f5c895e6621..b645a3d5fd30 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -3865,9 +3865,16 @@ M: K. Y. 
Srinivasan M: Haiyang Zhang L: devel@linuxdriverproject.org S: Maintained -F: drivers/hv/ +F: arch/x86/include/asm/mshyperv.h +F: arch/x86/include/uapi/asm/hyperv.h +F: arch/x86/kernel/cpu/mshyperv.c F: drivers/hid/hid-hyperv.c +F: drivers/hv/ F: drivers/net/hyperv/ +F: drivers/scsi/storvsc_drv.c +F: drivers/video/hyperv_fb.c +F: include/linux/hyperv.h +F: tools/hv/ I2C OVER PARALLEL PORT M: Jean Delvare @@ -4641,12 +4648,13 @@ F: include/linux/sunrpc/ F: include/uapi/linux/sunrpc/ KERNEL VIRTUAL MACHINE (KVM) -M: Marcelo Tosatti M: Gleb Natapov +M: Paolo Bonzini L: kvm@vger.kernel.org -W: http://kvm.qumranet.com +W: http://linux-kvm.org S: Supported -F: Documentation/*/kvm.txt +F: Documentation/*/kvm*.txt +F: Documentation/virtual/kvm/ F: arch/*/kvm/ F: arch/*/include/asm/kvm* F: include/linux/kvm* @@ -4976,6 +4984,13 @@ S: Maintained F: Documentation/hwmon/lm90 F: drivers/hwmon/lm90.c +LM95234 HARDWARE MONITOR DRIVER +M: Guenter Roeck +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/lm95234 +F: drivers/hwmon/lm95234.c + LME2510 MEDIA DRIVER M: Malcolm Priestley L: linux-media@vger.kernel.org @@ -5509,18 +5524,18 @@ F: Documentation/networking/s2io.txt F: Documentation/networking/vxge.txt F: drivers/net/ethernet/neterion/ -NETFILTER/IPTABLES/IPCHAINS -P: Harald Welte -P: Jozsef Kadlecsik +NETFILTER/IPTABLES M: Pablo Neira Ayuso M: Patrick McHardy +M: Jozsef Kadlecsik L: netfilter-devel@vger.kernel.org L: netfilter@vger.kernel.org L: coreteam@netfilter.org W: http://www.netfilter.org/ W: http://www.iptables.org/ -T: git git://1984.lsi.us.es/nf -T: git git://1984.lsi.us.es/nf-next +Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/ +T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git S: Supported F: include/linux/netfilter* F: include/linux/netfilter/ @@ -7854,7 +7869,7 @@ L: linux-scsi@vger.kernel.org L: target-devel@vger.kernel.org L: http://groups.google.com/group/linux-iscsi-target-dev W: http://www.linux-iscsi.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master +T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master S: Supported F: drivers/target/ F: include/target/ @@ -8029,11 +8044,14 @@ F: arch/xtensa/ THERMAL M: Zhang Rui +M: Eduardo Valentin L: linux-pm@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git +Q: https://patchwork.kernel.org/project/linux-pm/list/ S: Supported F: drivers/thermal/ F: include/linux/thermal.h +F: include/linux/cpu_cooling.h THINGM BLINK(1) USB RGB LED DRIVER M: Vivien Didelot @@ -8179,6 +8197,13 @@ F: drivers/mmc/host/sh_mobile_sdhi.c F: include/linux/mmc/tmio.h F: include/linux/mmc/sh_mobile_sdhi.h +TMP401 HARDWARE MONITOR DRIVER +M: Guenter Roeck +L: lm-sensors@lm-sensors.org +S: Maintained +F: Documentation/hwmon/tmp401 +F: drivers/hwmon/tmp401.c + TMPFS (SHMEM FILESYSTEM) M: Hugh Dickins L: linux-mm@kvack.org diff --git a/trunk/Makefile b/trunk/Makefile index a3a834b11a97..93875f5122a3 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,7 +1,7 @@ VERSION = 3 -PATCHLEVEL = 9 +PATCHLEVEL = 10 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc2 NAME = Unicycling Gorilla # *DOCUMENTATION* diff --git a/trunk/arch/Kconfig b/trunk/arch/Kconfig index dd0e8eb8042f..a4429bcd609e 100644 --- a/trunk/arch/Kconfig +++ b/trunk/arch/Kconfig @@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS config GENERIC_SMP_IDLE_THREAD bool +config GENERIC_IDLE_POLL_SETUP + 
bool + # Select if arch init_task initializer is different to init/init_task.c config ARCH_INIT_TASK bool diff --git a/trunk/arch/alpha/Kconfig b/trunk/arch/alpha/Kconfig index 8629127640cf..837a1f2d8b96 100644 --- a/trunk/arch/alpha/Kconfig +++ b/trunk/arch/alpha/Kconfig @@ -55,9 +55,6 @@ config GENERIC_CALIBRATE_DELAY bool default y -config GENERIC_GPIO - bool - config ZONE_DMA bool default y diff --git a/trunk/arch/arc/Kconfig b/trunk/arch/arc/Kconfig index e6f4eca09ee3..5917099470ea 100644 --- a/trunk/arch/arc/Kconfig +++ b/trunk/arch/arc/Kconfig @@ -16,8 +16,6 @@ config ARC select GENERIC_FIND_FIRST_BIT # for now, we don't need GENERIC_IRQ_PROBE, CONFIG_GENERIC_IRQ_CHIP select GENERIC_IRQ_SHOW - select GENERIC_KERNEL_EXECVE - select GENERIC_KERNEL_THREAD select GENERIC_PENDING_IRQ if SMP select GENERIC_SMP_IDLE_THREAD select HAVE_ARCH_KGDB @@ -61,9 +59,6 @@ config GENERIC_CALIBRATE_DELAY config GENERIC_HWEIGHT def_bool y -config BINFMT_ELF - def_bool y - config STACKTRACE_SUPPORT def_bool y select STACKTRACE @@ -82,6 +77,7 @@ menu "ARC Architecture Configuration" menu "ARC Platform/SoC/Board" source "arch/arc/plat-arcfpga/Kconfig" +source "arch/arc/plat-tb10x/Kconfig" #New platform adds here endmenu @@ -134,9 +130,6 @@ if SMP config ARC_HAS_COH_CACHES def_bool n -config ARC_HAS_COH_LLSC - def_bool n - config ARC_HAS_COH_RTSC def_bool n @@ -189,6 +182,10 @@ config ARC_CACHE_PAGES Note that Global I/D ENABLE + Per Page DISABLE works but corollary Global DISABLE + Per Page ENABLE won't work +config ARC_CACHE_VIPT_ALIASING + bool "Support VIPT Aliasing D$" + default n + endif #ARC_CACHE config ARC_HAS_ICCM @@ -304,6 +301,9 @@ config ARC_FPU_SAVE_RESTORE based on actual usage of FPU by a task. Thus our implemn does this for all tasks in system. +config ARC_CANT_LLSC + def_bool n + menuconfig ARC_CPU_REL_4_10 bool "Enable support for Rel 4.10 features" default n @@ -314,9 +314,7 @@ menuconfig ARC_CPU_REL_4_10 config ARC_HAS_LLSC bool "Insn: LLOCK/SCOND (efficient atomic ops)" default y - depends on ARC_CPU_770 - # if SMP, enable LLSC ONLY if ARC implementation has coherent atomics - depends on !SMP || ARC_HAS_COH_LLSC + depends on ARC_CPU_770 && !ARC_CANT_LLSC config ARC_HAS_SWAPE bool "Insn: SWAPE (endian-swap)" @@ -415,13 +413,6 @@ config ARC_DBG_TLB_MISS_COUNT Counts number of I and D TLB Misses and exports them via Debugfs The counters can be cleared via Debugfs as well -config CMDLINE - string "Kernel command line to built-in" - default "print-fatal-signals=1" - help - The default command line which will be appended to the optional - u-boot provided command line (see below) - config CMDLINE_UBOOT bool "Support U-boot kernel command line passing" default n @@ -430,8 +421,8 @@ config CMDLINE_UBOOT command line from the U-boot environment to the Linux kernel then switch this option on. ARC U-boot will setup the cmdline in RAM/flash and set r2 to point - to it. kernel startup code will copy the string into cmdline buffer - and also append CONFIG_CMDLINE. + to it. kernel startup code will append this to DeviceTree + /bootargs provided cmdline args. 
config ARC_BUILTIN_DTB_NAME string "Built in DTB" @@ -441,6 +432,10 @@ config ARC_BUILTIN_DTB_NAME source "kernel/Kconfig.preempt" +menu "Executable file formats" +source "fs/Kconfig.binfmt" +endmenu + endmenu # "ARC Architecture Configuration" source "mm/Kconfig" diff --git a/trunk/arch/arc/Makefile b/trunk/arch/arc/Makefile index 92379c7cbc1a..183397fd289e 100644 --- a/trunk/arch/arc/Makefile +++ b/trunk/arch/arc/Makefile @@ -8,6 +8,10 @@ UTS_MACHINE := arc +ifeq ($(CROSS_COMPILE),) +CROSS_COMPILE := arc-elf32- +endif + KBUILD_DEFCONFIG := fpga_defconfig cflags-y += -mA7 -fno-common -pipe -fno-builtin -D__linux__ @@ -87,20 +91,23 @@ core-y += arch/arc/ core-y += arch/arc/boot/dts/ core-$(CONFIG_ARC_PLAT_FPGA_LEGACY) += arch/arc/plat-arcfpga/ +core-$(CONFIG_ARC_PLAT_TB10X) += arch/arc/plat-tb10x/ drivers-$(CONFIG_OPROFILE) += arch/arc/oprofile/ libs-y += arch/arc/lib/ $(LIBGCC) +boot := arch/arc/boot + #default target for make without any arguements. -KBUILD_IMAGE := bootpImage +KBUILD_IMAGE := bootpImage all: $(KBUILD_IMAGE) -boot := arch/arc/boot - bootpImage: vmlinux -uImage: vmlinux +boot_targets += uImage uImage.bin uImage.gz + +$(boot_targets): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ %.dtb %.dtb.S %.dtb.o: scripts diff --git a/trunk/arch/arc/boot/Makefile b/trunk/arch/arc/boot/Makefile index 7d514c24e095..e597cb34c16a 100644 --- a/trunk/arch/arc/boot/Makefile +++ b/trunk/arch/arc/boot/Makefile @@ -3,7 +3,6 @@ targets := vmlinux.bin vmlinux.bin.gz uImage # uImage build relies on mkimage being availble on your host for ARC target # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage # and make sure it's reacable from your PATH -MKIMAGE := $(srctree)/scripts/mkuboot.sh OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S @@ -12,7 +11,12 @@ LINUX_START_TEXT = $$(readelf -h vmlinux | \ UIMAGE_LOADADDR = $(CONFIG_LINUX_LINK_BASE) UIMAGE_ENTRYADDR = $(LINUX_START_TEXT) -UIMAGE_COMPRESSION = gzip + +suffix-y := bin +suffix-$(CONFIG_KERNEL_GZIP) := gz + +targets += uImage uImage.bin uImage.gz +extra-y += vmlinux.bin vmlinux.bin.gz $(obj)/vmlinux.bin: vmlinux FORCE $(call if_changed,objcopy) @@ -20,7 +24,12 @@ $(obj)/vmlinux.bin: vmlinux FORCE $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) -$(obj)/uImage: $(obj)/vmlinux.bin.gz FORCE - $(call if_changed,uimage) +$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE + $(call if_changed,uimage,none) + +$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE + $(call if_changed,uimage,gzip) -PHONY += FORCE +$(obj)/uImage: $(obj)/uImage.$(suffix-y) + @ln -sf $(notdir $<) $@ + @echo ' Image $@ is ready' diff --git a/trunk/arch/arc/boot/dts/Makefile b/trunk/arch/arc/boot/dts/Makefile index 5776835d583f..faf240e29ec2 100644 --- a/trunk/arch/arc/boot/dts/Makefile +++ b/trunk/arch/arc/boot/dts/Makefile @@ -8,6 +8,8 @@ endif obj-y += $(builtindtb-y).dtb.o targets += $(builtindtb-y).dtb +.SECONDARY: $(obj)/$(builtindtb-y).dtb.S + dtbs: $(addprefix $(obj)/, $(builtindtb-y).dtb) -clean-files := *.dtb +clean-files := *.dtb *.dtb.S diff --git a/trunk/arch/arc/boot/dts/abilis_tb100.dtsi b/trunk/arch/arc/boot/dts/abilis_tb100.dtsi new file mode 100644 index 000000000000..941ad118a7e7 --- /dev/null +++ b/trunk/arch/arc/boot/dts/abilis_tb100.dtsi @@ -0,0 +1,340 @@ +/* + * Abilis Systems TB100 SOC device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public 
License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/include/ "abilis_tb10x.dtsi" + +/* interrupt specifiers + * -------------------- + * 0: rising, 1: low, 2: high, 3: falling, + */ + +/ { + clock-frequency = <500000000>; /* 500 MHZ */ + + soc100 { + bus-frequency = <166666666>; + + pll0: oscillator { + clock-frequency = <1000000000>; + }; + cpu_clk: clkdiv_cpu { + clock-mult = <1>; + clock-div = <2>; + }; + ahb_clk: clkdiv_ahb { + clock-mult = <1>; + clock-div = <6>; + }; + + iomux: iomux@FF10601c { + /* Port 1 */ + pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ + pingrp = "mis0_pins"; + }; + pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */ + pingrp = "mis1_pins"; + }; + pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */ + pingrp = "gpioa_pins"; + }; + pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */ + pingrp = "mip1_pins"; + }; + /* Port 2 */ + pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */ + pingrp = "mis2_pins"; + }; + pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */ + pingrp = "mis3_pins"; + }; + pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */ + pingrp = "gpioc_pins"; + }; + pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */ + pingrp = "mip3_pins"; + }; + /* Port 3 */ + pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */ + pingrp = "mis4_pins"; + }; + pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */ + pingrp = "mis5_pins"; + }; + pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */ + pingrp = "gpioe_pins"; + }; + pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */ + pingrp = "mip5_pins"; + }; + /* Port 4 */ + pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */ + pingrp = "mis6_pins"; + }; + pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */ + pingrp = "mis7_pins"; + }; + pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */ + pingrp = "gpiog_pins"; + }; + pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */ + pingrp = "mip7_pins"; + }; + /* Port 5 */ + pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */ + pingrp = "gpioj_pins"; + }; + pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */ + pingrp = "gpiok_pins"; + }; + pctl_ciplus: pctl-ciplus { /* CI+ interface */ + pingrp = "ciplus_pins"; + }; + pctl_mcard: pctl-mcard { /* M-Card interface */ + pingrp = "mcard_pins"; + }; + /* Port 6 */ + pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */ + pingrp = "mop_pins"; + }; + pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */ + pingrp = "mos0_pins"; + }; + pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */ + pingrp = "mos1_pins"; + }; + pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */ + pingrp = "mos2_pins"; + }; + pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */ + pingrp = "mos3_pins"; + }; + /* Port 7 */ + pctl_uart0: pctl-uart0 { /* UART 0 */ + pingrp = "uart0_pins"; + }; + pctl_uart1: pctl-uart1 { /* UART 1 */ + pingrp = "uart1_pins"; + }; + pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */ + pingrp = "gpiol_pins"; + }; + pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */ + pingrp = "gpiom_pins"; + }; + /* Port 8 */ + pctl_spi3: pctl-spi3 { + pingrp = "spi3_pins"; + }; + /* Port 9 */ + pctl_spi1: pctl-spi1 { + pingrp = "spi1_pins"; + }; 
+ pctl_gpio_n: pctl-gpio-n { + pingrp = "gpion_pins"; + }; + /* Unmuxed GPIOs */ + pctl_gpio_b: pctl-gpio-b { + pingrp = "gpiob_pins"; + }; + pctl_gpio_d: pctl-gpio-d { + pingrp = "gpiod_pins"; + }; + pctl_gpio_f: pctl-gpio-f { + pingrp = "gpiof_pins"; + }; + pctl_gpio_h: pctl-gpio-h { + pingrp = "gpioh_pins"; + }; + pctl_gpio_i: pctl-gpio-i { + pingrp = "gpioi_pins"; + }; + }; + + gpioa: gpio@FF140000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF140000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <0>; + gpio-pins = <&pctl_gpio_a>; + }; + gpiob: gpio@FF141000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF141000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <3>; + gpio-pins = <&pctl_gpio_b>; + }; + gpioc: gpio@FF142000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF142000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <5>; + gpio-pins = <&pctl_gpio_c>; + }; + gpiod: gpio@FF143000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF143000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <8>; + gpio-pins = <&pctl_gpio_d>; + }; + gpioe: gpio@FF144000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF144000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <10>; + gpio-pins = <&pctl_gpio_e>; + }; + gpiof: gpio@FF145000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF145000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <13>; + gpio-pins = <&pctl_gpio_f>; + }; + gpiog: gpio@FF146000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF146000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <15>; + gpio-pins = <&pctl_gpio_g>; + }; + gpioh: gpio@FF147000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF147000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <18>; + gpio-pins = <&pctl_gpio_h>; + }; + gpioi: gpio@FF148000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF148000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <20>; + gpio-pins = <&pctl_gpio_i>; + }; + gpioj: gpio@FF149000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF149000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <32>; + gpio-pins = <&pctl_gpio_j>; + }; + gpiok: gpio@FF14a000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14A000 0x1000>; + gpio-controller; + #gpio-cells = 
<1>; + gpio-base = <64>; + gpio-pins = <&pctl_gpio_k>; + }; + gpiol: gpio@FF14b000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14B000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <86>; + gpio-pins = <&pctl_gpio_l>; + }; + gpiom: gpio@FF14c000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14C000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <90>; + gpio-pins = <&pctl_gpio_m>; + }; + gpion: gpio@FF14d000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14D000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <94>; + gpio-pins = <&pctl_gpio_n>; + }; + }; +}; diff --git a/trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts b/trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts new file mode 100644 index 000000000000..c0fd3623c393 --- /dev/null +++ b/trunk/arch/arc/boot/dts/abilis_tb100_dvk.dts @@ -0,0 +1,127 @@ +/* + * Abilis Systems TB100 Development Kit PCB device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/dts-v1/; + +/include/ "abilis_tb100.dtsi" + +/ { + chosen { + bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8"; + }; + + aliases { }; + + memory { + device_type = "memory"; + reg = <0x80000000 0x08000000>; /* 128M */ + }; + + soc100 { + uart@FF100000 { + pinctrl-names = "abilis,simple-default"; + pinctrl-0 = <&pctl_uart0>; + }; + ethernet@FE100000 { + phy-mode = "rgmii"; + }; + + i2c0: i2c@FF120000 { + sda-hold-time = <432>; + }; + i2c1: i2c@FF121000 { + sda-hold-time = <432>; + }; + i2c2: i2c@FF122000 { + sda-hold-time = <432>; + }; + i2c3: i2c@FF123000 { + sda-hold-time = <432>; + }; + i2c4: i2c@FF124000 { + sda-hold-time = <432>; + }; + + leds { + compatible = "gpio-leds"; + power { + label = "Power"; + gpios = <&gpioi 0>; + linux,default-trigger = "default-on"; + }; + heartbeat { + label = "Heartbeat"; + gpios = <&gpioi 1>; + linux,default-trigger = "heartbeat"; + }; + led2 { + label = "LED2"; + gpios = <&gpioi 2>; + default-state = "off"; + }; + led3 { + label = "LED3"; + gpios = <&gpioi 3>; + default-state = "off"; + }; + led4 { + label = "LED4"; + gpios = <&gpioi 4>; + default-state = "off"; + }; + led5 { + label = "LED5"; + gpios = <&gpioi 5>; + default-state = "off"; + }; + led6 { + label = "LED6"; + gpios = <&gpioi 6>; + default-state = "off"; + }; + led7 { + label = "LED7"; + gpios = <&gpioi 7>; + default-state = "off"; + }; + led8 { + label = "LED8"; + gpios = <&gpioi 8>; + default-state = "off"; + }; + led9 { + label = "LED9"; + gpios = <&gpioi 9>; + default-state = "off"; + }; + led10 { 
+ label = "LED10"; + gpios = <&gpioi 10>; + default-state = "off"; + }; + led11 { + label = "LED11"; + gpios = <&gpioi 11>; + default-state = "off"; + }; + }; + }; +}; diff --git a/trunk/arch/arc/boot/dts/abilis_tb101.dtsi b/trunk/arch/arc/boot/dts/abilis_tb101.dtsi new file mode 100644 index 000000000000..fd25c212049f --- /dev/null +++ b/trunk/arch/arc/boot/dts/abilis_tb101.dtsi @@ -0,0 +1,349 @@ +/* + * Abilis Systems TB101 SOC device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/include/ "abilis_tb10x.dtsi" + +/* interrupt specifiers + * -------------------- + * 0: rising, 1: low, 2: high, 3: falling, + */ + +/ { + clock-frequency = <500000000>; /* 500 MHZ */ + + soc100 { + bus-frequency = <166666666>; + + pll0: oscillator { + clock-frequency = <1000000000>; + }; + cpu_clk: clkdiv_cpu { + clock-mult = <1>; + clock-div = <2>; + }; + ahb_clk: clkdiv_ahb { + clock-mult = <1>; + clock-div = <6>; + }; + + iomux: iomux@FF10601c { + /* Port 1 */ + pctl_tsin_s0: pctl-tsin-s0 { /* Serial TS-in 0 */ + pingrp = "mis0_pins"; + }; + pctl_tsin_s1: pctl-tsin-s1 { /* Serial TS-in 1 */ + pingrp = "mis1_pins"; + }; + pctl_gpio_a: pctl-gpio-a { /* GPIO bank A */ + pingrp = "gpioa_pins"; + }; + pctl_tsin_p1: pctl-tsin-p1 { /* Parallel TS-in 1 */ + pingrp = "mip1_pins"; + }; + /* Port 2 */ + pctl_tsin_s2: pctl-tsin-s2 { /* Serial TS-in 2 */ + pingrp = "mis2_pins"; + }; + pctl_tsin_s3: pctl-tsin-s3 { /* Serial TS-in 3 */ + pingrp = "mis3_pins"; + }; + pctl_gpio_c: pctl-gpio-c { /* GPIO bank C */ + pingrp = "gpioc_pins"; + }; + pctl_tsin_p3: pctl-tsin-p3 { /* Parallel TS-in 3 */ + pingrp = "mip3_pins"; + }; + /* Port 3 */ + pctl_tsin_s4: pctl-tsin-s4 { /* Serial TS-in 4 */ + pingrp = "mis4_pins"; + }; + pctl_tsin_s5: pctl-tsin-s5 { /* Serial TS-in 5 */ + pingrp = "mis5_pins"; + }; + pctl_gpio_e: pctl-gpio-e { /* GPIO bank E */ + pingrp = "gpioe_pins"; + }; + pctl_tsin_p5: pctl-tsin-p5 { /* Parallel TS-in 5 */ + pingrp = "mip5_pins"; + }; + /* Port 4 */ + pctl_tsin_s6: pctl-tsin-s6 { /* Serial TS-in 6 */ + pingrp = "mis6_pins"; + }; + pctl_tsin_s7: pctl-tsin-s7 { /* Serial TS-in 7 */ + pingrp = "mis7_pins"; + }; + pctl_gpio_g: pctl-gpio-g { /* GPIO bank G */ + pingrp = "gpiog_pins"; + }; + pctl_tsin_p7: pctl-tsin-p7 { /* Parallel TS-in 7 */ + pingrp = "mip7_pins"; + }; + /* Port 5 */ + pctl_gpio_j: pctl-gpio-j { /* GPIO bank J */ + pingrp = "gpioj_pins"; + }; + pctl_gpio_k: pctl-gpio-k { /* GPIO bank K */ + pingrp = "gpiok_pins"; + }; + pctl_ciplus: pctl-ciplus { /* CI+ interface */ + pingrp = "ciplus_pins"; + }; + pctl_mcard: pctl-mcard { /* M-Card interface */ + pingrp = "mcard_pins"; + }; + pctl_stc0: pctl-stc0 { /* Smart card I/F 0 */ + pingrp = "stc0_pins"; + }; + pctl_stc1: pctl-stc1 { /* Smart card I/F 1 */ + pingrp = "stc1_pins"; + }; + /* Port 6 */ + pctl_tsout_p: pctl-tsout-p { /* Parallel TS-out */ + pingrp 
= "mop_pins"; + }; + pctl_tsout_s0: pctl-tsout-s0 { /* Serial TS-out 0 */ + pingrp = "mos0_pins"; + }; + pctl_tsout_s1: pctl-tsout-s1 { /* Serial TS-out 1 */ + pingrp = "mos1_pins"; + }; + pctl_tsout_s2: pctl-tsout-s2 { /* Serial TS-out 2 */ + pingrp = "mos2_pins"; + }; + pctl_tsout_s3: pctl-tsout-s3 { /* Serial TS-out 3 */ + pingrp = "mos3_pins"; + }; + /* Port 7 */ + pctl_uart0: pctl-uart0 { /* UART 0 */ + pingrp = "uart0_pins"; + }; + pctl_uart1: pctl-uart1 { /* UART 1 */ + pingrp = "uart1_pins"; + }; + pctl_gpio_l: pctl-gpio-l { /* GPIO bank L */ + pingrp = "gpiol_pins"; + }; + pctl_gpio_m: pctl-gpio-m { /* GPIO bank M */ + pingrp = "gpiom_pins"; + }; + /* Port 8 */ + pctl_spi3: pctl-spi3 { + pingrp = "spi3_pins"; + }; + pctl_jtag: pctl-jtag { + pingrp = "jtag_pins"; + }; + /* Port 9 */ + pctl_spi1: pctl-spi1 { + pingrp = "spi1_pins"; + }; + pctl_gpio_n: pctl-gpio-n { + pingrp = "gpion_pins"; + }; + /* Unmuxed GPIOs */ + pctl_gpio_b: pctl-gpio-b { + pingrp = "gpiob_pins"; + }; + pctl_gpio_d: pctl-gpio-d { + pingrp = "gpiod_pins"; + }; + pctl_gpio_f: pctl-gpio-f { + pingrp = "gpiof_pins"; + }; + pctl_gpio_h: pctl-gpio-h { + pingrp = "gpioh_pins"; + }; + pctl_gpio_i: pctl-gpio-i { + pingrp = "gpioi_pins"; + }; + }; + + gpioa: gpio@FF140000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF140000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <0>; + gpio-pins = <&pctl_gpio_a>; + }; + gpiob: gpio@FF141000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF141000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <3>; + gpio-pins = <&pctl_gpio_b>; + }; + gpioc: gpio@FF142000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF142000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <5>; + gpio-pins = <&pctl_gpio_c>; + }; + gpiod: gpio@FF143000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF143000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <8>; + gpio-pins = <&pctl_gpio_d>; + }; + gpioe: gpio@FF144000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF144000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <10>; + gpio-pins = <&pctl_gpio_e>; + }; + gpiof: gpio@FF145000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF145000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <13>; + gpio-pins = <&pctl_gpio_f>; + }; + gpiog: gpio@FF146000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF146000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <15>; + gpio-pins = <&pctl_gpio_g>; + }; + gpioh: gpio@FF147000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF147000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <18>; + 
gpio-pins = <&pctl_gpio_h>; + }; + gpioi: gpio@FF148000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF148000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <20>; + gpio-pins = <&pctl_gpio_i>; + }; + gpioj: gpio@FF149000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF149000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <32>; + gpio-pins = <&pctl_gpio_j>; + }; + gpiok: gpio@FF14a000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14A000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <64>; + gpio-pins = <&pctl_gpio_k>; + }; + gpiol: gpio@FF14b000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14B000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <86>; + gpio-pins = <&pctl_gpio_l>; + }; + gpiom: gpio@FF14c000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14C000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <90>; + gpio-pins = <&pctl_gpio_m>; + }; + gpion: gpio@FF14d000 { + compatible = "abilis,tb10x-gpio"; + interrupt-controller; + #interrupt-cells = <1>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <27 1>; + reg = <0xFF14D000 0x1000>; + gpio-controller; + #gpio-cells = <1>; + gpio-base = <94>; + gpio-pins = <&pctl_gpio_n>; + }; + }; +}; diff --git a/trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts b/trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts new file mode 100644 index 000000000000..6f8c381f6268 --- /dev/null +++ b/trunk/arch/arc/boot/dts/abilis_tb101_dvk.dts @@ -0,0 +1,127 @@ +/* + * Abilis Systems TB101 Development Kit PCB device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/dts-v1/; + +/include/ "abilis_tb101.dtsi" + +/ { + chosen { + bootargs = "earlycon=uart8250,mmio32,0xff100000,9600n8 console=ttyS0,9600n8"; + }; + + aliases { }; + + memory { + device_type = "memory"; + reg = <0x80000000 0x08000000>; /* 128M */ + }; + + soc100 { + uart@FF100000 { + pinctrl-names = "abilis,simple-default"; + pinctrl-0 = <&pctl_uart0>; + }; + ethernet@FE100000 { + phy-mode = "rgmii"; + }; + + i2c0: i2c@FF120000 { + sda-hold-time = <432>; + }; + i2c1: i2c@FF121000 { + sda-hold-time = <432>; + }; + i2c2: i2c@FF122000 { + sda-hold-time = <432>; + }; + i2c3: i2c@FF123000 { + sda-hold-time = <432>; + }; + i2c4: i2c@FF124000 { + sda-hold-time = <432>; + }; + + leds { + compatible = "gpio-leds"; + power { + label = "Power"; + gpios = <&gpioi 0>; + linux,default-trigger = "default-on"; + }; + heartbeat { + label = "Heartbeat"; + gpios = <&gpioi 1>; + linux,default-trigger = "heartbeat"; + }; + led2 { + label = "LED2"; + gpios = <&gpioi 2>; + default-state = "off"; + }; + led3 { + label = "LED3"; + gpios = <&gpioi 3>; + default-state = "off"; + }; + led4 { + label = "LED4"; + gpios = <&gpioi 4>; + default-state = "off"; + }; + led5 { + label = "LED5"; + gpios = <&gpioi 5>; + default-state = "off"; + }; + led6 { + label = "LED6"; + gpios = <&gpioi 6>; + default-state = "off"; + }; + led7 { + label = "LED7"; + gpios = <&gpioi 7>; + default-state = "off"; + }; + led8 { + label = "LED8"; + gpios = <&gpioi 8>; + default-state = "off"; + }; + led9 { + label = "LED9"; + gpios = <&gpioi 9>; + default-state = "off"; + }; + led10 { + label = "LED10"; + gpios = <&gpioi 10>; + default-state = "off"; + }; + led11 { + label = "LED11"; + gpios = <&gpioi 11>; + default-state = "off"; + }; + }; + }; +}; diff --git a/trunk/arch/arc/boot/dts/abilis_tb10x.dtsi b/trunk/arch/arc/boot/dts/abilis_tb10x.dtsi new file mode 100644 index 000000000000..a6139fc5aaa3 --- /dev/null +++ b/trunk/arch/arc/boot/dts/abilis_tb10x.dtsi @@ -0,0 +1,247 @@ +/* + * Abilis Systems TB10X SOC device tree + * + * Copyright (C) Abilis Systems 2013 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +/* interrupt specifiers + * -------------------- + * 0: rising, 1: low, 2: high, 3: falling, + */ + +/ { + compatible = "abilis,arc-tb10x"; + #address-cells = <1>; + #size-cells = <1>; + + cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu@0 { + device_type = "cpu"; + compatible = "snps,arc770d"; + reg = <0>; + }; + }; + + soc100 { + #address-cells = <1>; + #size-cells = <1>; + device_type = "soc"; + ranges = <0xfe000000 0xfe000000 0x02000000 + 0x000F0000 0x000F0000 0x00010000>; + compatible = "abilis,tb10x", "simple-bus"; + + pll0: oscillator { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-output-names = "pll0"; + }; + cpu_clk: clkdiv_cpu { + compatible = "fixed-factor-clock"; + #clock-cells = <0>; + clocks = <&pll0>; + clock-output-names = "cpu_clk"; + }; + ahb_clk: clkdiv_ahb { + compatible = "fixed-factor-clock"; + #clock-cells = <0>; + clocks = <&pll0>; + clock-output-names = "ahb_clk"; + }; + + iomux: iomux@FF10601c { + #address-cells = <1>; + #size-cells = <1>; + compatible = "abilis,tb10x-iomux"; + reg = <0xFF10601c 0x4>; + }; + + intc: interrupt-controller { + compatible = "snps,arc700-intc"; + interrupt-controller; + #interrupt-cells = <1>; + }; + tb10x_ictl: pic@fe002000 { + compatible = "abilis,tb10x_ictl"; + reg = <0xFE002000 0x20>; + interrupt-controller; + #interrupt-cells = <2>; + interrupt-parent = <&intc>; + interrupts = <5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 + 20 21 22 23 24 25 26 27 28 29 30 31>; + }; + + uart@FF100000 { + compatible = "snps,dw-apb-uart", + "abilis,simple-pinctrl"; + reg = <0xFF100000 0x100>; + clock-frequency = <166666666>; + interrupts = <25 1>; + reg-shift = <2>; + reg-io-width = <4>; + interrupt-parent = <&tb10x_ictl>; + }; + ethernet@FE100000 { + compatible = "snps,dwmac-3.70a","snps,dwmac"; + reg = <0xFE100000 0x1058>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <6 1>; + interrupt-names = "macirq"; + clocks = <&ahb_clk>; + clock-names = "stmmaceth"; + }; + dma@FE000000 { + compatible = "snps,dma-spear1340"; + reg = <0xFE000000 0x400>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <14 1>; + dma-channels = <6>; + dma-requests = <0>; + dma-masters = <1>; + #dma-cells = <3>; + chan_allocation_order = <0>; + chan_priority = <1>; + block_size = <0x7ff>; + data_width = <2 0 0 0>; + clocks = <&ahb_clk>; + clock-names = "hclk"; + }; + + i2c0: i2c@FF120000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xFF120000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + i2c1: i2c@FF121000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xFF121000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + i2c2: i2c@FF122000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xFF122000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + i2c3: i2c@FF123000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "snps,designware-i2c"; + reg = <0xFF123000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + i2c4: i2c@FF124000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = 
"snps,designware-i2c"; + reg = <0xFF124000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <12 1>; + clocks = <&ahb_clk>; + }; + + spi0: spi@0xFE010000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <0>; + compatible = "abilis,tb100-spi"; + num-cs = <1>; + reg = <0xFE010000 0x20>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <26 1>; + clocks = <&ahb_clk>; + }; + spi1: spi@0xFE011000 { + #address-cells = <1>; + #size-cells = <0>; + cell-index = <1>; + compatible = "abilis,tb100-spi", + "abilis,simple-pinctrl"; + num-cs = <2>; + reg = <0xFE011000 0x20>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <10 1>; + clocks = <&ahb_clk>; + }; + + tb10x_tsm: tb10x-tsm@ff316000 { + compatible = "abilis,tb100-tsm"; + reg = <0xff316000 0x400>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <17 1>; + output-clkdiv = <4>; + global-packet-delay = <0x21>; + port-packet-delay = <0>; + }; + tb10x_stream_proc: tb10x-stream-proc { + compatible = "abilis,tb100-streamproc"; + reg = <0xfff00000 0x200>, + <0x000f0000 0x10000>, + <0xfff00200 0x105>, + <0xff10600c 0x1>, + <0xfe001018 0x1>; + reg-names = "mbox", + "sp_iccm", + "mbox_irq", + "cpuctrl", + "a6it_int_force"; + interrupt-parent = <&tb10x_ictl>; + interrupts = <20 1>, <19 1>; + interrupt-names = "cmd_irq", "event_irq"; + }; + tb10x_mdsc0: tb10x-mdscr@FF300000 { + compatible = "abilis,tb100-mdscr"; + reg = <0xFF300000 0x7000>; + tb100-mdscr-manage-tsin; + }; + tb10x_mscr0: tb10x-mdscr@FF307000 { + compatible = "abilis,tb100-mdscr"; + reg = <0xFF307000 0x7000>; + }; + tb10x_scr0: tb10x-mdscr@ff30e000 { + compatible = "abilis,tb100-mdscr"; + reg = <0xFF30e000 0x4000>; + tb100-mdscr-manage-tsin; + }; + tb10x_scr1: tb10x-mdscr@ff312000 { + compatible = "abilis,tb100-mdscr"; + reg = <0xFF312000 0x4000>; + tb100-mdscr-manage-tsin; + }; + tb10x_wfb: tb10x-wfb@ff319000 { + compatible = "abilis,tb100-wfb"; + reg = <0xff319000 0x1000>; + interrupt-parent = <&tb10x_ictl>; + interrupts = <16 1>; + }; + }; +}; diff --git a/trunk/arch/arc/boot/dts/nsimosci.dts b/trunk/arch/arc/boot/dts/nsimosci.dts new file mode 100644 index 000000000000..ea16d782af58 --- /dev/null +++ b/trunk/arch/arc/boot/dts/nsimosci.dts @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +/dts-v1/; + +/include/ "skeleton.dtsi" + +/ { + compatible = "snps,nsimosci"; + clock-frequency = <80000000>; /* 80 MHZ */ + #address-cells = <1>; + #size-cells = <1>; + interrupt-parent = <&intc>; + + chosen { + bootargs = "console=tty0 consoleblank=0"; + }; + + aliases { + serial0 = &uart0; + }; + + memory { + device_type = "memory"; + reg = <0x80000000 0x10000000>; /* 256M */ + }; + + fpga { + compatible = "simple-bus"; + #address-cells = <1>; + #size-cells = <1>; + + /* child and parent address space 1:1 mapped */ + ranges; + + intc: interrupt-controller { + compatible = "snps,arc700-intc"; + interrupt-controller; + #interrupt-cells = <1>; + }; + + uart0: serial@c0000000 { + compatible = "snps,dw-apb-uart"; + reg = <0xc0000000 0x2000>; + interrupts = <11>; + #clock-frequency = <80000000>; + clock-frequency = <3686400>; + baud = <115200>; + reg-shift = <2>; + reg-io-width = <4>; + status = "okay"; + }; + + pgu0: pgu@c9000000 { + compatible = "snps,arcpgufb"; + reg = <0xc9000000 0x400>; + }; + + ps2: ps2@c9001000 { + compatible = "snps,arc_ps2"; + reg = <0xc9000400 0x14>; + interrupts = <13>; + interrupt-names = "arc_ps2_irq"; + }; + + eth0: ethernet@c0003000 { + compatible = "snps,oscilan"; + reg = <0xc0003000 0x44>; + interrupts = <7>, <8>; + interrupt-names = "rx", "tx"; + }; + }; +}; diff --git a/trunk/arch/arc/configs/fpga_defconfig b/trunk/arch/arc/configs/fpga_defconfig index b8698067ebbe..95350be6ef6f 100644 --- a/trunk/arch/arc/configs/fpga_defconfig +++ b/trunk/arch/arc/configs/fpga_defconfig @@ -9,7 +9,7 @@ CONFIG_NAMESPACES=y # CONFIG_UTS_NS is not set # CONFIG_PID_NS is not set CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="../arc_initramfs" +CONFIG_INITRAMFS_SOURCE="../arc_initramfs/" CONFIG_KALLSYMS_ALL=y CONFIG_EMBEDDED=y # CONFIG_SLUB_DEBUG is not set @@ -24,6 +24,7 @@ CONFIG_ARC_PLAT_FPGA_LEGACY=y CONFIG_ARC_BOARD_ML509=y # CONFIG_ARC_HAS_RTSC is not set CONFIG_ARC_BUILTIN_DTB_NAME="angel4" +CONFIG_PREEMPT=y # CONFIG_COMPACTION is not set # CONFIG_CROSS_MEMORY_ATTACH is not set CONFIG_NET=y diff --git a/trunk/arch/arc/configs/nsimosci_defconfig b/trunk/arch/arc/configs/nsimosci_defconfig new file mode 100644 index 000000000000..446c96c24eff --- /dev/null +++ b/trunk/arch/arc/configs/nsimosci_defconfig @@ -0,0 +1,75 @@ +CONFIG_CROSS_COMPILE="arc-elf32-" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="ARCLinux" +# CONFIG_SWAP is not set +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_NAMESPACES=y +# CONFIG_UTS_NS is not set +# CONFIG_PID_NS is not set +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="../arc_initramfs" +CONFIG_KALLSYMS_ALL=y +CONFIG_EMBEDDED=y +# CONFIG_SLUB_DEBUG is not set +# CONFIG_COMPAT_BRK is not set +CONFIG_KPROBES=y +CONFIG_MODULES=y +# CONFIG_LBDAF is not set +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set +CONFIG_ARC_PLAT_FPGA_LEGACY=y +CONFIG_ARC_BOARD_ML509=y +# CONFIG_ARC_IDE is not set +# CONFIG_ARCTANGENT_EMAC is not set +# CONFIG_ARC_HAS_RTSC is not set +CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci" +# CONFIG_COMPACTION is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_UNIX_DIAG=y +CONFIG_NET_KEY=y +CONFIG_INET=y +# CONFIG_IPV6 is not set +# CONFIG_STANDALONE is not set +# CONFIG_PREVENT_FIRMWARE_BUILD is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +# CONFIG_BLK_DEV is not set +CONFIG_NETDEVICES=y +# CONFIG_INPUT_MOUSEDEV_PSAUX is not set +# CONFIG_MOUSE_PS2_ALPS is not set +# 
CONFIG_MOUSE_PS2_LOGIPS2PP is not set +# CONFIG_MOUSE_PS2_SYNAPTICS is not set +# CONFIG_MOUSE_PS2_TRACKPOINT is not set +CONFIG_MOUSE_PS2_TOUCHKIT=y +# CONFIG_SERIO_I8042 is not set +# CONFIG_SERIO_SERPORT is not set +CONFIG_SERIO_ARC_PS2=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_ARC=y +CONFIG_SERIAL_ARC_CONSOLE=y +# CONFIG_HW_RANDOM is not set +# CONFIG_HWMON is not set +CONFIG_FB=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_LOGO=y +# CONFIG_HID is not set +# CONFIG_USB_SUPPORT is not set +# CONFIG_IOMMU_SUPPORT is not set +CONFIG_EXT2_FS=y +CONFIG_EXT2_FS_XATTR=y +CONFIG_TMPFS=y +# CONFIG_MISC_FILESYSTEMS is not set +CONFIG_NFS_FS=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_XZ_DEC=y diff --git a/trunk/arch/arc/configs/tb10x_defconfig b/trunk/arch/arc/configs/tb10x_defconfig new file mode 100644 index 000000000000..4fa5cd9f2202 --- /dev/null +++ b/trunk/arch/arc/configs/tb10x_defconfig @@ -0,0 +1,117 @@ +CONFIG_CROSS_COMPILE="arc-elf32-" +# CONFIG_LOCALVERSION_AUTO is not set +CONFIG_DEFAULT_HOSTNAME="tb10x" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_BSD_PROCESS_ACCT=y +CONFIG_BSD_PROCESS_ACCT_V3=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=16 +CONFIG_BLK_DEV_INITRD=y +CONFIG_INITRAMFS_SOURCE="../tb10x-rootfs.cpio" +CONFIG_INITRAMFS_ROOT_UID=2100 +CONFIG_INITRAMFS_ROOT_GID=501 +# CONFIG_RD_GZIP is not set +CONFIG_SYSCTL_SYSCALL=y +CONFIG_KALLSYMS_ALL=y +# CONFIG_AIO is not set +CONFIG_EMBEDDED=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_FORCE_LOAD=y +CONFIG_MODULE_UNLOAD=y +# CONFIG_BLOCK is not set +CONFIG_ARC_PLAT_TB10X=y +CONFIG_ARC_CACHE_LINE_SHIFT=5 +# CONFIG_ARC_HAS_RTSC is not set +CONFIG_ARC_STACK_NONEXEC=y +CONFIG_HZ=250 +CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk" +CONFIG_PREEMPT_VOLUNTARY=y +# CONFIG_COMPACTION is not set +# CONFIG_CROSS_MEMORY_ATTACH is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +# CONFIG_FIRMWARE_IN_KERNEL is not set +CONFIG_PROC_DEVICETREE=y +CONFIG_NETDEVICES=y +# CONFIG_NET_CADENCE is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_SEEQ is not set +CONFIG_STMMAC_ETH=y +CONFIG_STMMAC_DEBUG_FS=y +CONFIG_STMMAC_DA=y +CONFIG_STMMAC_CHAINED=y +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +# CONFIG_INPUT is not set +# CONFIG_SERIO is not set +# CONFIG_VT is not set +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set +# CONFIG_DEVKMEM is not set +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=1 +CONFIG_SERIAL_8250_RUNTIME_UARTS=1 +CONFIG_SERIAL_8250_DW=y +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_GPIO_SYSFS=y +# CONFIG_HWMON is not set +# CONFIG_USB_SUPPORT is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y 
+CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_ONESHOT=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_CPU=y +CONFIG_LEDS_TRIGGER_GPIO=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_LEDS_TRIGGER_TRANSIENT=y +CONFIG_DMADEVICES=y +CONFIG_DW_DMAC=y +CONFIG_NET_DMA=y +CONFIG_ASYNC_TX_DMA=y +# CONFIG_IOMMU_SUPPORT is not set +# CONFIG_DNOTIFY is not set +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +# CONFIG_MISC_FILESYSTEMS is not set +# CONFIG_NETWORK_FILESYSTEMS is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +CONFIG_MAGIC_SYSRQ=y +CONFIG_STRIP_ASM_SYMS=y +CONFIG_DEBUG_FS=y +CONFIG_HEADERS_CHECK=y +CONFIG_DEBUG_SECTION_MISMATCH=y +CONFIG_DETECT_HUNG_TASK=y +CONFIG_SCHEDSTATS=y +CONFIG_TIMER_STATS=y +CONFIG_DEBUG_INFO=y +CONFIG_DEBUG_MEMORY_INIT=y +CONFIG_DEBUG_STACKOVERFLOW=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/trunk/arch/arc/include/asm/Kbuild b/trunk/arch/arc/include/asm/Kbuild index 48af742f8b5a..d8dd660898b9 100644 --- a/trunk/arch/arc/include/asm/Kbuild +++ b/trunk/arch/arc/include/asm/Kbuild @@ -32,7 +32,6 @@ generic-y += resource.h generic-y += scatterlist.h generic-y += sembuf.h generic-y += shmbuf.h -generic-y += shmparam.h generic-y += siginfo.h generic-y += socket.h generic-y += sockios.h diff --git a/trunk/arch/arc/include/asm/cache.h b/trunk/arch/arc/include/asm/cache.h index 6632273861fd..d5555fe4742a 100644 --- a/trunk/arch/arc/include/asm/cache.h +++ b/trunk/arch/arc/include/asm/cache.h @@ -55,9 +55,6 @@ : "r"(data), "r"(ptr)); \ }) -/* used to give SHMLBA a value to avoid Cache Aliasing */ -extern unsigned int ARC_shmlba; - #define ARCH_DMA_MINALIGN L1_CACHE_BYTES /* diff --git a/trunk/arch/arc/include/asm/cacheflush.h b/trunk/arch/arc/include/asm/cacheflush.h index 97ee96f26505..9f841af41092 100644 --- a/trunk/arch/arc/include/asm/cacheflush.h +++ b/trunk/arch/arc/include/asm/cacheflush.h @@ -19,13 +19,24 @@ #define _ASM_CACHEFLUSH_H #include +#include + +/* + * Semantically we need this because icache doesn't snoop dcache/dma. + * However ARC Cache flush requires paddr as well as vaddr, latter not available + * in the flush_icache_page() API. 
So we no-op it but do the equivalent work + * in update_mmu_cache() + */ +#define flush_icache_page(vma, page) void flush_cache_all(void); void flush_icache_range(unsigned long start, unsigned long end); -void flush_icache_page(struct vm_area_struct *vma, struct page *page); -void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr, - int len); +void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len); +void __inv_icache_page(unsigned long paddr, unsigned long vaddr); +void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr); +#define __flush_dcache_page(p, v) \ + ___flush_dcache_page((unsigned long)p, (unsigned long)v) #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 @@ -42,23 +53,60 @@ void dma_cache_wback(unsigned long start, unsigned long sz); #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() -/* - * VM callbacks when entire/range of user-space V-P mappings are - * torn-down/get-invalidated - * - * Currently we don't support D$ aliasing configs for our VIPT caches - * NOPS for VIPT Cache with non-aliasing D$ configurations only - */ -#define flush_cache_dup_mm(mm) /* called on fork */ +#define flush_cache_dup_mm(mm) /* called on fork (VIVT only) */ + +#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING + #define flush_cache_mm(mm) /* called on munmap/exit */ #define flush_cache_range(mm, u_vstart, u_vend) #define flush_cache_page(vma, u_vaddr, pfn) /* PF handling/COW-break */ +#else /* VIPT aliasing dcache */ + +/* To clear out stale userspace mappings */ +void flush_cache_mm(struct mm_struct *mm); +void flush_cache_range(struct vm_area_struct *vma, + unsigned long start,unsigned long end); +void flush_cache_page(struct vm_area_struct *vma, + unsigned long user_addr, unsigned long page); + +/* + * To make sure that userspace mapping is flushed to memory before + * get_user_pages() uses a kernel mapping to access the page + */ +#define ARCH_HAS_FLUSH_ANON_PAGE +void flush_anon_page(struct vm_area_struct *vma, + struct page *page, unsigned long u_vaddr); + +#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ + +/* + * Simple wrapper over config option + * Bootup code ensures that hardware matches kernel configuration + */ +static inline int cache_is_vipt_aliasing(void) +{ +#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING + return 1; +#else + return 0; +#endif +} + +#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3) + +/* + * checks if two addresses (after page aligning) index into same cache set + */ +#define addr_not_cache_congruent(addr1, addr2) \ + cache_is_vipt_aliasing() ? 
\ + (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0 \ + #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ do { \ memcpy(dst, src, len); \ if (vma->vm_flags & VM_EXEC) \ - flush_icache_range_vaddr((unsigned long)(dst), vaddr, len);\ + __sync_icache_dcache((unsigned long)(dst), vaddr, len); \ } while (0) #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ diff --git a/trunk/arch/arc/include/asm/irq.h b/trunk/arch/arc/include/asm/irq.h index 4c588f9820cf..57898a17eb82 100644 --- a/trunk/arch/arc/include/asm/irq.h +++ b/trunk/arch/arc/include/asm/irq.h @@ -9,7 +9,8 @@ #ifndef __ASM_ARC_IRQ_H #define __ASM_ARC_IRQ_H -#define NR_IRQS 32 +#define NR_CPU_IRQS 32 /* number of interrupt lines of ARC770 CPU */ +#define NR_IRQS 128 /* allow some CPU external IRQ handling */ /* Platform Independent IRQs */ #define TIMER0_IRQ 3 diff --git a/trunk/arch/arc/include/asm/page.h b/trunk/arch/arc/include/asm/page.h index bdf546104551..374a35514116 100644 --- a/trunk/arch/arc/include/asm/page.h +++ b/trunk/arch/arc/include/asm/page.h @@ -16,13 +16,27 @@ #define get_user_page(vaddr) __get_free_page(GFP_KERNEL) #define free_user_page(page, addr) free_page(addr) -/* TBD: for now don't worry about VIPT D$ aliasing */ #define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) #define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) +#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING + #define clear_user_page(addr, vaddr, pg) clear_page(addr) #define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom) +#else /* VIPT aliasing dcache */ + +struct vm_area_struct; +struct page; + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long u_vaddr, struct vm_area_struct *vma); +void clear_user_page(void *to, unsigned long u_vaddr, struct page *page); + +#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */ + #undef STRICT_MM_TYPECHECKS #ifdef STRICT_MM_TYPECHECKS diff --git a/trunk/arch/arc/include/asm/pgtable.h b/trunk/arch/arc/include/asm/pgtable.h index b7e36684c091..1cc4720faccb 100644 --- a/trunk/arch/arc/include/asm/pgtable.h +++ b/trunk/arch/arc/include/asm/pgtable.h @@ -395,6 +395,9 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, #include +/* to cope with aliasing VIPT cache */ +#define HAVE_ARCH_UNMAPPED_AREA + /* * No page table caches to initialise */ diff --git a/trunk/arch/arc/include/asm/serial.h b/trunk/arch/arc/include/asm/serial.h index 4dff5a1e4128..602b0970a764 100644 --- a/trunk/arch/arc/include/asm/serial.h +++ b/trunk/arch/arc/include/asm/serial.h @@ -22,4 +22,14 @@ #define BASE_BAUD (arc_get_core_freq() / 16) +/* + * This is definitely going to break early 8250 consoles on multi-platform + * images but hey, it won't add any code complexity for a debug feature of + * one broken driver. + */ +#ifdef CONFIG_ARC_PLAT_TB10X +#undef BASE_BAUD +#define BASE_BAUD (arc_get_core_freq() / 16 / 3) +#endif + #endif /* _ASM_ARC_SERIAL_H */ diff --git a/trunk/arch/arc/include/asm/shmparam.h b/trunk/arch/arc/include/asm/shmparam.h new file mode 100644 index 000000000000..fffeecc04270 --- /dev/null +++ b/trunk/arch/arc/include/asm/shmparam.h @@ -0,0 +1,18 @@ +/* + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef __ARC_ASM_SHMPARAM_H +#define __ARC_ASM_SHMPARAM_H + +/* Handle upto 2 cache bins */ +#define SHMLBA (2 * PAGE_SIZE) + +/* Enforce SHMLBA in shmat */ +#define __ARCH_FORCE_SHMLBA + +#endif diff --git a/trunk/arch/arc/include/asm/tlb.h b/trunk/arch/arc/include/asm/tlb.h index 3eb2ce0bdfa3..85b6df839bd7 100644 --- a/trunk/arch/arc/include/asm/tlb.h +++ b/trunk/arch/arc/include/asm/tlb.h @@ -21,20 +21,35 @@ #ifndef __ASSEMBLY__ -#define tlb_flush(tlb) local_flush_tlb_mm((tlb)->mm) +#define tlb_flush(tlb) \ +do { \ + if (tlb->fullmm) \ + flush_tlb_mm((tlb)->mm); \ +} while (0) /* * This pair is called at time of munmap/exit to flush cache and TLB entries * for mappings being torn down. - * 1) cache-flush part -implemented via tlb_start_vma( ) can be NOP (for now) - * as we don't support aliasing configs in our VIPT D$. - * 2) tlb-flush part - implemted via tlb_end_vma( ) can be NOP as well- - * albiet for difft reasons - its better handled by moving to new ASID + * 1) cache-flush part -implemented via tlb_start_vma( ) for VIPT aliasing D$ + * 2) tlb-flush part - implemted via tlb_end_vma( ) flushes the TLB range * * Note, read http://lkml.org/lkml/2004/1/15/6 */ +#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING #define tlb_start_vma(tlb, vma) -#define tlb_end_vma(tlb, vma) +#else +#define tlb_start_vma(tlb, vma) \ +do { \ + if (!tlb->fullmm) \ + flush_cache_range(vma, vma->vm_start, vma->vm_end); \ +} while(0) +#endif + +#define tlb_end_vma(tlb, vma) \ +do { \ + if (!tlb->fullmm) \ + flush_tlb_range(vma, vma->vm_start, vma->vm_end); \ +} while (0) #define __tlb_remove_tlb_entry(tlb, ptep, address) diff --git a/trunk/arch/arc/kernel/asm-offsets.c b/trunk/arch/arc/kernel/asm-offsets.c index 0dc148ebce74..7dcda7025241 100644 --- a/trunk/arch/arc/kernel/asm-offsets.c +++ b/trunk/arch/arc/kernel/asm-offsets.c @@ -11,9 +11,9 @@ #include #include #include +#include #include #include -#include int main(void) { diff --git a/trunk/arch/arc/kernel/clk.c b/trunk/arch/arc/kernel/clk.c index 66ce0dc917fb..10c7b0b5a079 100644 --- a/trunk/arch/arc/kernel/clk.c +++ b/trunk/arch/arc/kernel/clk.c @@ -8,7 +8,7 @@ #include -unsigned long core_freq = 800000000; +unsigned long core_freq = 80000000; /* * As of now we default to device-tree provided clock diff --git a/trunk/arch/arc/kernel/disasm.c b/trunk/arch/arc/kernel/disasm.c index d14764ae2c60..b8a549c4f540 100644 --- a/trunk/arch/arc/kernel/disasm.c +++ b/trunk/arch/arc/kernel/disasm.c @@ -12,8 +12,8 @@ #include #include #include +#include #include -#include #if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \ defined(CONFIG_KPROBES) diff --git a/trunk/arch/arc/kernel/entry.S b/trunk/arch/arc/kernel/entry.S index 91eeab81f52d..0c6d664d4a83 100644 --- a/trunk/arch/arc/kernel/entry.S +++ b/trunk/arch/arc/kernel/entry.S @@ -393,12 +393,14 @@ ARC_ENTRY EV_TLBProtV #ifdef CONFIG_ARC_MISALIGN_ACCESS SAVE_CALLEE_SAVED_USER mov r3, sp ; callee_regs -#endif bl do_misaligned_access -#ifdef CONFIG_ARC_MISALIGN_ACCESS - DISCARD_CALLEE_SAVED_USER + ; TBD: optimize - do this only if a callee reg was involved + ; either a dst of emulated LD/ST or src with address-writeback + RESTORE_CALLEE_SAVED_USER +#else + bl do_misaligned_error #endif b ret_from_exception diff --git a/trunk/arch/arc/kernel/irq.c b/trunk/arch/arc/kernel/irq.c index 551c10dff481..8115fa531575 100644 --- a/trunk/arch/arc/kernel/irq.c +++ b/trunk/arch/arc/kernel/irq.c @@ -11,6 +11,8 @@ #include #include #include +#include +#include "../../drivers/irqchip/irqchip.h" #include #include 
#include @@ -26,7 +28,7 @@ * -Disable all IRQs (on CPU side) * -Optionally, setup the High priority Interrupts as Level 2 IRQs */ -void __init arc_init_IRQ(void) +void __cpuinit arc_init_IRQ(void) { int level_mask = 0; @@ -97,15 +99,13 @@ static const struct irq_domain_ops arc_intc_domain_ops = { static struct irq_domain *root_domain; -void __init init_onchip_IRQ(void) +static int __init +init_onchip_IRQ(struct device_node *intc, struct device_node *parent) { - struct device_node *intc = NULL; + if (parent) + panic("DeviceTree incore intc not a root irq controller\n"); - intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc"); - if(!intc) - panic("DeviceTree Missing incore intc\n"); - - root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0, + root_domain = irq_domain_add_legacy(intc, NR_CPU_IRQS, 0, 0, &arc_intc_domain_ops, NULL); if (!root_domain) @@ -113,8 +113,12 @@ void __init init_onchip_IRQ(void) /* with this we don't need to export root_domain */ irq_set_default_host(root_domain); + + return 0; } +IRQCHIP_DECLARE(arc_intc, "snps,arc700-intc", init_onchip_IRQ); + /* * Late Interrupt system init called from start_kernel for Boot CPU only * @@ -123,12 +127,13 @@ void __init init_onchip_IRQ(void) */ void __init init_IRQ(void) { - init_onchip_IRQ(); - /* Any external intc can be setup here */ if (machine_desc->init_irq) machine_desc->init_irq(); + /* process the entire interrupt tree in one go */ + irqchip_init(); + #ifdef CONFIG_SMP /* Master CPU can initialize it's side of IPI */ if (machine_desc->init_smp) diff --git a/trunk/arch/arc/kernel/kprobes.c b/trunk/arch/arc/kernel/kprobes.c index 3bfeacb674de..5a7b80e2d883 100644 --- a/trunk/arch/arc/kernel/kprobes.c +++ b/trunk/arch/arc/kernel/kprobes.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/arc/kernel/module.c b/trunk/arch/arc/kernel/module.c index cdd359352c0a..376e04622962 100644 --- a/trunk/arch/arc/kernel/module.c +++ b/trunk/arch/arc/kernel/module.c @@ -47,7 +47,7 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, } } #endif - return 0; + return 0; } void module_arch_cleanup(struct module *mod) @@ -141,5 +141,5 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs, mod->arch.unw_info = unw; } #endif - return 0; + return 0; } diff --git a/trunk/arch/arc/kernel/setup.c b/trunk/arch/arc/kernel/setup.c index 2d95ac07df7b..b2b3731dd1e9 100644 --- a/trunk/arch/arc/kernel/setup.c +++ b/trunk/arch/arc/kernel/setup.c @@ -14,14 +14,13 @@ #include #include #include +#include #include #include #include -#include #include #include #include -#include #include #include #include @@ -32,14 +31,14 @@ int running_on_hw = 1; /* vs. 
on ISS */ char __initdata command_line[COMMAND_LINE_SIZE]; -struct machine_desc *machine_desc __initdata; +struct machine_desc *machine_desc __cpuinitdata; struct task_struct *_current_task[NR_CPUS]; /* For stack switching */ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS]; -void __init read_arc_build_cfg_regs(void) +void __cpuinit read_arc_build_cfg_regs(void) { struct bcr_perip uncached_space; struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -238,7 +237,7 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) return buf; } -void __init arc_chk_ccms(void) +void __cpuinit arc_chk_ccms(void) { #if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM) struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -273,7 +272,7 @@ void __init arc_chk_ccms(void) * hardware has dedicated regs which need to be saved/restored on ctx-sw * (Single Precision uses core regs), thus kernel is kind of oblivious to it */ -void __init arc_chk_fpu(void) +void __cpuinit arc_chk_fpu(void) { struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; @@ -294,7 +293,7 @@ void __init arc_chk_fpu(void) * such as only for boot CPU etc */ -void __init setup_processor(void) +void __cpuinit setup_processor(void) { char str[512]; int cpu_id = smp_processor_id(); @@ -319,23 +318,20 @@ void __init setup_processor(void) void __init setup_arch(char **cmdline_p) { + /* This also populates @boot_command_line from /bootargs */ + machine_desc = setup_machine_fdt(__dtb_start); + if (!machine_desc) + panic("Embedded DT invalid\n"); + + /* Append any u-boot provided cmdline */ #ifdef CONFIG_CMDLINE_UBOOT - /* Make sure that a whitespace is inserted before */ - strlcat(command_line, " ", sizeof(command_line)); + /* Add a whitespace seperator between the 2 cmdlines */ + strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); + strlcat(boot_command_line, command_line, COMMAND_LINE_SIZE); #endif - /* - * Append .config cmdline to base command line, which might already - * contain u-boot "bootargs" (handled by head.S, if so configured) - */ - strlcat(command_line, CONFIG_CMDLINE, sizeof(command_line)); /* Save unparsed command line copy for /proc/cmdline */ - strlcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); - *cmdline_p = command_line; - - machine_desc = setup_machine_fdt(__dtb_start); - if (!machine_desc) - panic("Embedded DT invalid\n"); + *cmdline_p = boot_command_line; /* To force early parsing of things like mem=xxx */ parse_early_param(); diff --git a/trunk/arch/arc/kernel/time.c b/trunk/arch/arc/kernel/time.c index f13f72807aa5..09f4309aa2c0 100644 --- a/trunk/arch/arc/kernel/time.c +++ b/trunk/arch/arc/kernel/time.c @@ -33,7 +33,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/arc/kernel/traps.c b/trunk/arch/arc/kernel/traps.c index 7496995371e8..0471d9c9dd54 100644 --- a/trunk/arch/arc/kernel/traps.c +++ b/trunk/arch/arc/kernel/traps.c @@ -16,11 +16,12 @@ #include #include #include -#include +#include +#include +#include #include -#include #include -#include +#include void __init trap_init(void) { @@ -83,6 +84,7 @@ DO_ERROR_INFO(SIGILL, "Invalid Extn Insn", do_extension_fault, ILL_ILLOPC) DO_ERROR_INFO(SIGILL, "Illegal Insn (or Seq)", insterror_is_error, ILL_ILLOPC) DO_ERROR_INFO(SIGBUS, "Invalid Mem Access", do_memory_error, BUS_ADRERR) DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) +DO_ERROR_INFO(SIGBUS, "Misaligned Access", do_misaligned_error, BUS_ADRALN) #ifdef CONFIG_ARC_MISALIGN_ACCESS /* @@ -91,21 +93,11 @@ 
DO_ERROR_INFO(SIGTRAP, "Breakpoint Set", trap_is_brkpt, TRAP_BRKPT) int do_misaligned_access(unsigned long cause, unsigned long address, struct pt_regs *regs, struct callee_regs *cregs) { - if (misaligned_fixup(address, regs, cause, cregs) != 0) { - siginfo_t info; - - info.si_signo = SIGBUS; - info.si_errno = 0; - info.si_code = BUS_ADRALN; - info.si_addr = (void __user *)address; - return handle_exception(cause, "Misaligned Access", regs, - &info); - } + if (misaligned_fixup(address, regs, cause, cregs) != 0) + return do_misaligned_error(cause, address, regs); + return 0; } - -#else -DO_ERROR_INFO(SIGSEGV, "Misaligned Access", do_misaligned_access, SEGV_ACCERR) #endif /* diff --git a/trunk/arch/arc/kernel/troubleshoot.c b/trunk/arch/arc/kernel/troubleshoot.c index 0aec01985bf9..11c301b81c92 100644 --- a/trunk/arch/arc/kernel/troubleshoot.c +++ b/trunk/arch/arc/kernel/troubleshoot.c @@ -26,7 +26,6 @@ static noinline void print_reg_file(long *reg_rev, int start_num) char buf[512]; int n = 0, len = sizeof(buf); - /* weird loop because pt_regs regs rev r12..r0, r25..r13 */ for (i = start_num; i < start_num + 13; i++) { n += scnprintf(buf + n, len - n, "r%02u: 0x%08lx\t", i, (unsigned long)*reg_rev); @@ -34,13 +33,18 @@ static noinline void print_reg_file(long *reg_rev, int start_num) if (((i + 1) % 3) == 0) n += scnprintf(buf + n, len - n, "\n"); + /* because pt_regs has regs reversed: r12..r0, r25..r13 */ reg_rev--; } if (start_num != 0) n += scnprintf(buf + n, len - n, "\n\n"); - pr_info("%s", buf); + /* To continue printing callee regs on same line as scratch regs */ + if (start_num == 0) + pr_info("%s", buf); + else + pr_cont("%s\n", buf); } static void show_callee_regs(struct callee_regs *cregs) @@ -83,6 +87,10 @@ static void show_faulting_vma(unsigned long address, char *buf) dev_t dev = 0; char *nm = buf; + /* can't use print_vma_addr() yet as it doesn't check for + * non-inclusive vma + */ + vma = find_vma(current->active_mm, address); /* check against the find_vma( ) behaviour which returns the next VMA @@ -98,10 +106,13 @@ static void show_faulting_vma(unsigned long address, char *buf) ino = inode->i_ino; } pr_info(" @off 0x%lx in [%s]\n" - " VMA: 0x%08lx to 0x%08lx\n\n", - address - vma->vm_start, nm, vma->vm_start, vma->vm_end); - } else + " VMA: 0x%08lx to 0x%08lx\n", + vma->vm_start < TASK_UNMAPPED_BASE ? + address : address - vma->vm_start, + nm, vma->vm_start, vma->vm_end); + } else { pr_info(" @No matching VMA found\n"); + } } static void show_ecr_verbose(struct pt_regs *regs) @@ -110,7 +121,7 @@ static void show_ecr_verbose(struct pt_regs *regs) unsigned long address; cause_reg = current->thread.cause_code; - pr_info("\n[ECR]: 0x%08x => ", cause_reg); + pr_info("\n[ECR ]: 0x%08x => ", cause_reg); /* For Data fault, this is data address not instruction addr */ address = current->thread.fault_address; @@ -120,7 +131,7 @@ static void show_ecr_verbose(struct pt_regs *regs) /* For DTLB Miss or ProtV, display the memory involved too */ if (vec == ECR_V_DTLB_MISS) { - pr_cont("Invalid (%s) @ 0x%08lx by insn @ 0x%08lx\n", + pr_cont("Invalid %s 0x%08lx by insn @ 0x%08lx\n", (cause_code == 0x01) ? "Read From" : ((cause_code == 0x02) ? 
"Write to" : "EX"), address, regs->ret); @@ -168,20 +179,23 @@ void show_regs(struct pt_regs *regs) if (current->thread.cause_code) show_ecr_verbose(regs); - pr_info("[EFA]: 0x%08lx\n", current->thread.fault_address); - pr_info("[ERET]: 0x%08lx (PC of Faulting Instr)\n", regs->ret); + pr_info("[EFA ]: 0x%08lx\n[BLINK ]: %pS\n[ERET ]: %pS\n", + current->thread.fault_address, + (void *)regs->blink, (void *)regs->ret); - show_faulting_vma(regs->ret, buf); /* faulting code, not data */ + if (user_mode(regs)) + show_faulting_vma(regs->ret, buf); /* faulting code, not data */ - /* can't use print_vma_addr() yet as it doesn't check for - * non-inclusive vma - */ + pr_info("[STAT32]: 0x%08lx", regs->status32); + +#define STS_BIT(r, bit) r->status32 & STATUS_##bit##_MASK ? #bit : "" + if (!user_mode(regs)) + pr_cont(" : %2s %2s %2s %2s %2s\n", + STS_BIT(regs, AE), STS_BIT(regs, A2), STS_BIT(regs, A1), + STS_BIT(regs, E2), STS_BIT(regs, E1)); - /* print special regs */ - pr_info("status32: 0x%08lx\n", regs->status32); - pr_info(" SP: 0x%08lx\tFP: 0x%08lx\n", regs->sp, regs->fp); - pr_info("BTA: 0x%08lx\tBLINK: 0x%08lx\n", - regs->bta, regs->blink); + pr_info("BTA: 0x%08lx\t SP: 0x%08lx\t FP: 0x%08lx\n", + regs->bta, regs->sp, regs->fp); pr_info("LPS: 0x%08lx\tLPE: 0x%08lx\tLPC: 0x%08lx\n", regs->lp_start, regs->lp_end, regs->lp_count); diff --git a/trunk/arch/arc/mm/Makefile b/trunk/arch/arc/mm/Makefile index 168dc146a8f6..ac95cc239c1e 100644 --- a/trunk/arch/arc/mm/Makefile +++ b/trunk/arch/arc/mm/Makefile @@ -7,4 +7,4 @@ # obj-y := extable.o ioremap.o dma.o fault.o init.o -obj-y += tlb.o tlbex.o cache_arc700.o +obj-y += tlb.o tlbex.o cache_arc700.o mmap.o diff --git a/trunk/arch/arc/mm/cache_arc700.c b/trunk/arch/arc/mm/cache_arc700.c index 88d617d84234..2f12bca8aef3 100644 --- a/trunk/arch/arc/mm/cache_arc700.c +++ b/trunk/arch/arc/mm/cache_arc700.c @@ -68,20 +68,11 @@ #include #include #include +#include #include #include #include - -#ifdef CONFIG_ARC_HAS_ICACHE -static void __ic_line_inv_no_alias(unsigned long, int); -static void __ic_line_inv_2_alias(unsigned long, int); -static void __ic_line_inv_4_alias(unsigned long, int); - -/* Holds the ptr to flush routine, dependign on size due to aliasing issues */ -static void (*___flush_icache_rtn) (unsigned long, int); -#endif - char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) { int n = 0; @@ -109,7 +100,7 @@ char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len) * the cpuinfo structure for later use. * No Validation done here, simply read/convert the BCRs */ -void __init read_decode_cache_bcr(void) +void __cpuinit read_decode_cache_bcr(void) { struct bcr_cache ibcr, dbcr; struct cpuinfo_arc_cache *p_ic, *p_dc; @@ -141,13 +132,14 @@ void __init read_decode_cache_bcr(void) * 3. Enable the Caches, setup default flush mode for D-Cache * 3. Calculate the SHMLBA used by user space */ -void __init arc_cache_init(void) +void __cpuinit arc_cache_init(void) { unsigned int temp; unsigned int cpu = smp_processor_id(); struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache; struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache; int way_pg_ratio = way_pg_ratio; + int dcache_does_alias; char str[256]; printk(arc_cache_mumbojumbo(0, str, sizeof(str))); @@ -171,30 +163,6 @@ void __init arc_cache_init(void) } #endif - - /* - * if Cache way size is <= page size then no aliasing exhibited - * otherwise ratio determines num of aliases. - * e.g. 
32K I$, 2 way set assoc, 8k pg size - * way-sz = 32k/2 = 16k - * way-pg-ratio = 16k/8k = 2, so 2 aliases possible - * (meaning 1 line could be in 2 possible locations). - */ - way_pg_ratio = ic->sz / ARC_ICACHE_WAYS / PAGE_SIZE; - switch (way_pg_ratio) { - case 0: - case 1: - ___flush_icache_rtn = __ic_line_inv_no_alias; - break; - case 2: - ___flush_icache_rtn = __ic_line_inv_2_alias; - break; - case 4: - ___flush_icache_rtn = __ic_line_inv_4_alias; - break; - default: - panic("Unsupported I-Cache Sz\n"); - } #endif /* Enable/disable I-Cache */ @@ -218,9 +186,13 @@ void __init arc_cache_init(void) panic("Cache H/W doesn't match kernel Config"); } + dcache_does_alias = (dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE; + /* check for D-Cache aliasing */ - if ((dc->sz / ARC_DCACHE_WAYS) > PAGE_SIZE) - panic("D$ aliasing not handled right now\n"); + if (dcache_does_alias && !cache_is_vipt_aliasing()) + panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n"); + else if (!dcache_does_alias && cache_is_vipt_aliasing()) + panic("Don't need CONFIG_ARC_CACHE_VIPT_ALIASING\n"); #endif /* Set the default Invalidate Mode to "simpy discard dirty lines" @@ -303,47 +275,57 @@ static inline void __dc_entire_op(const int cacheop) * Per Line Operation on D-Cache * Doesn't deal with type-of-op/IRQ-disabling/waiting-for-flush-to-complete * It's sole purpose is to help gcc generate ZOL + * (aliasing VIPT dcache flushing needs both vaddr and paddr) */ -static inline void __dc_line_loop(unsigned long start, unsigned long sz, - int aux_reg) +static inline void __dc_line_loop(unsigned long paddr, unsigned long vaddr, + unsigned long sz, const int aux_reg) { - int num_lines, slack; + int num_lines; /* Ensure we properly floor/ceil the non-line aligned/sized requests - * and have @start - aligned to cache line and integral @num_lines. + * and have @paddr - aligned to cache line and integral @num_lines. * This however can be avoided for page sized since: - * -@start will be cache-line aligned already (being page aligned) + * -@paddr will be cache-line aligned already (being page aligned) * -@sz will be integral multiple of line size (being page sized). */ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { - slack = start & ~DCACHE_LINE_MASK; - sz += slack; - start -= slack; + sz += paddr & ~DCACHE_LINE_MASK; + paddr &= DCACHE_LINE_MASK; + vaddr &= DCACHE_LINE_MASK; } num_lines = DIV_ROUND_UP(sz, ARC_DCACHE_LINE_LEN); +#if (CONFIG_ARC_MMU_VER <= 2) + paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; +#endif + while (num_lines-- > 0) { #if (CONFIG_ARC_MMU_VER > 2) /* * Just as for I$, in MMU v3, D$ ops also require * "tag" bits in DC_PTAG, "index" bits in FLDL,IVDL ops - * But we pass phy addr for both. This works since Linux - * doesn't support aliasing configs for D$, yet. - * Thus paddr is enough to provide both tag and index. 
*/ - write_aux_reg(ARC_REG_DC_PTAG, start); + write_aux_reg(ARC_REG_DC_PTAG, paddr); + + write_aux_reg(aux_reg, vaddr); + vaddr += ARC_DCACHE_LINE_LEN; +#else + /* paddr contains stuffed vaddrs bits */ + write_aux_reg(aux_reg, paddr); #endif - write_aux_reg(aux_reg, start); - start += ARC_DCACHE_LINE_LEN; + paddr += ARC_DCACHE_LINE_LEN; } } +/* For kernel mappings cache operation: index is same as paddr */ +#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op) + /* * D-Cache : Per Line INV (discard or wback+discard) or FLUSH (wback) */ -static inline void __dc_line_op(unsigned long start, unsigned long sz, - const int cacheop) +static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr, + unsigned long sz, const int cacheop) { unsigned long flags, tmp = tmp; int aux; @@ -366,7 +348,7 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, else aux = ARC_REG_DC_FLDL; - __dc_line_loop(start, sz, aux); + __dc_line_loop(paddr, vaddr, sz, aux); if (cacheop & OP_FLUSH) /* flush / flush-n-inv both wait */ wait_for_flush(); @@ -381,7 +363,8 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, #else #define __dc_entire_op(cacheop) -#define __dc_line_op(start, sz, cacheop) +#define __dc_line_op(paddr, vaddr, sz, cacheop) +#define __dc_line_op_k(paddr, sz, cacheop) #endif /* CONFIG_ARC_HAS_DCACHE */ @@ -391,75 +374,38 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, /* * I-Cache Aliasing in ARC700 VIPT caches * - * For fetching code from I$, ARC700 uses vaddr (embedded in program code) - * to "index" into SET of cache-line and paddr from MMU to match the TAG - * in the WAYS of SET. - * - * However the CDU iterface (to flush/inv) lines from software, only takes - * paddr (to have simpler hardware interface). For simpler cases, using paddr - * alone suffices. - * e.g. 2-way-set-assoc, 16K I$ (8k MMU pg sz, 32b cache line size): - * way_sz = cache_sz / num_ways = 16k/2 = 8k - * num_sets = way_sz / line_sz = 8k/32 = 256 => 8 bits - * Ignoring the bottom 5 bits corresp to the off within a 32b cacheline, - * bits req for calc set-index = bits 12:5 (0 based). Since this range fits - * inside the bottom 13 bits of paddr, which are same for vaddr and paddr - * (with 8k pg sz), paddr alone can be safely used by CDU to unambigously - * locate a cache-line. + * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag. + * The orig Cache Management Module "CDU" only required paddr to invalidate a + * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry. + * Infact for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up fetching + * the exact same line. * - * However for a difft sized cache, say 32k I$, above math yields need - * for 14 bits of vaddr to locate a cache line, which can't be provided by - * paddr, since the bit 13 (0 based) might differ between the two. - * - * This lack of extra bits needed for correct line addressing, defines the - * classical problem of Cache aliasing with VIPT architectures - * num_aliases = 1 << extra_bits - * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz => 2 aliases - * 2-way-set-assoc, 64K I$ with 8k MMU pg sz => 4 aliases - * 2-way-set-assoc, 16K I$ with 8k MMU pg sz => NO aliases + * However for larger Caches (way-size > page-size) - i.e. in Aliasing config, + * paddr alone could not be used to correctly index the cache. 
* * ------------------ * MMU v1/v2 (Fixed Page Size 8k) * ------------------ * The solution was to provide CDU with these additonal vaddr bits. These - * would be bits [x:13], x would depend on cache-geom. + * would be bits [x:13], x would depend on cache-geometry, 13 comes from + * standard page size of 8k. * H/w folks chose [17:13] to be a future safe range, and moreso these 5 bits * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the * orig 5 bits of paddr were anyways ignored by CDU line ops, as they * represent the offset within cache-line. The adv of using this "clumsy" - * interface for additional info was no new reg was needed in CDU. + * interface for additional info was no new reg was needed in CDU programming + * model. * * 17:13 represented the max num of bits passable, actual bits needed were * fewer, based on the num-of-aliases possible. * -for 2 alias possibility, only bit 13 needed (32K cache) * -for 4 alias possibility, bits 14:13 needed (64K cache) * - * Since vaddr was not available for all instances of I$ flush req by core - * kernel, the only safe way (non-optimal though) was to kill all possible - * lines which could represent an alias (even if they didnt represent one - * in execution). - * e.g. for 64K I$, 4 aliases possible, so we did - * flush start - * flush start | 0x01 - * flush start | 0x2 - * flush start | 0x3 - * - * The penalty was invoking the operation itself, since tag match is anyways - * paddr based, a line which didn't represent an alias would not match the - * paddr, hence wont be killed - * - * Note that aliasing concerns are independent of line-sz for a given cache - * geometry (size + set_assoc) because the extra bits required by line-sz are - * reduced from the set calc. - * e.g. 2-way-set-assoc, 32K I$ with 8k MMU pg sz and using math above - * 32b line-sz: 9 bits set-index-calc, 5 bits offset-in-line => 1 extra bit - * 64b line-sz: 8 bits set-index-calc, 6 bits offset-in-line => 1 extra bit - * * ------------------ * MMU v3 * ------------------ - * This ver of MMU supports var page sizes (1k-16k) - Linux will support - * 8k (default), 16k and 4k. + * This ver of MMU supports variable page sizes (1k-16k): although Linux will + * only support 8k (default), 16k and 4k. * However from hardware perspective, smaller page sizes aggrevate aliasing * meaning more vaddr bits needed to disambiguate the cache-line-op ; * the existing scheme of piggybacking won't work for certain configurations. @@ -468,144 +414,53 @@ static inline void __dc_line_op(unsigned long start, unsigned long sz, */ /*********************************************************** - * Machine specific helpers for per line I-Cache invalidate. - * 3 routines to accpunt for 1, 2, 4 aliases possible + * Machine specific helper for per line I-Cache invalidate. */ - -static void __ic_line_inv_no_alias(unsigned long start, int num_lines) -{ - while (num_lines-- > 0) { -#if (CONFIG_ARC_MMU_VER > 2) - write_aux_reg(ARC_REG_IC_PTAG, start); -#endif - write_aux_reg(ARC_REG_IC_IVIL, start); - start += ARC_ICACHE_LINE_LEN; - } -} - -static void __ic_line_inv_2_alias(unsigned long start, int num_lines) -{ - while (num_lines-- > 0) { - -#if (CONFIG_ARC_MMU_VER > 2) - /* - * MMU v3, CDU prog model (for line ops) now uses a new IC_PTAG - * reg to pass the "tag" bits and existing IVIL reg only looks - * at bits relevant for "index" (details above) - * Programming Notes: - * -when writing tag to PTAG reg, bit chopping can be avoided, - * CDU ignores non-tag bits. 
- * -Ideally "index" must be computed from vaddr, but it is not - * avail in these rtns. So to be safe, we kill the lines in all - * possible indexes corresp to num of aliases possible for - * given cache config. - */ - write_aux_reg(ARC_REG_IC_PTAG, start); - write_aux_reg(ARC_REG_IC_IVIL, - start & ~(0x1 << PAGE_SHIFT)); - write_aux_reg(ARC_REG_IC_IVIL, start | (0x1 << PAGE_SHIFT)); -#else - write_aux_reg(ARC_REG_IC_IVIL, start); - write_aux_reg(ARC_REG_IC_IVIL, start | 0x01); -#endif - start += ARC_ICACHE_LINE_LEN; - } -} - -static void __ic_line_inv_4_alias(unsigned long start, int num_lines) -{ - while (num_lines-- > 0) { - -#if (CONFIG_ARC_MMU_VER > 2) - write_aux_reg(ARC_REG_IC_PTAG, start); - - write_aux_reg(ARC_REG_IC_IVIL, - start & ~(0x3 << PAGE_SHIFT)); - write_aux_reg(ARC_REG_IC_IVIL, - start & ~(0x2 << PAGE_SHIFT)); - write_aux_reg(ARC_REG_IC_IVIL, - start & ~(0x1 << PAGE_SHIFT)); - write_aux_reg(ARC_REG_IC_IVIL, start | (0x3 << PAGE_SHIFT)); -#else - write_aux_reg(ARC_REG_IC_IVIL, start); - write_aux_reg(ARC_REG_IC_IVIL, start | 0x01); - write_aux_reg(ARC_REG_IC_IVIL, start | 0x02); - write_aux_reg(ARC_REG_IC_IVIL, start | 0x03); -#endif - start += ARC_ICACHE_LINE_LEN; - } -} - -static void __ic_line_inv(unsigned long start, unsigned long sz) +static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr, + unsigned long sz) { unsigned long flags; - int num_lines, slack; + int num_lines; /* - * Ensure we properly floor/ceil the non-line aligned/sized requests - * and have @start - aligned to cache line, and integral @num_lines + * Ensure we properly floor/ceil the non-line aligned/sized requests: * However page sized flushes can be compile time optimised. - * -@start will be cache-line aligned already (being page aligned) + * -@paddr will be cache-line aligned already (being page aligned) * -@sz will be integral multiple of line size (being page sized). 
*/ if (!(__builtin_constant_p(sz) && sz == PAGE_SIZE)) { - slack = start & ~ICACHE_LINE_MASK; - sz += slack; - start -= slack; + sz += paddr & ~ICACHE_LINE_MASK; + paddr &= ICACHE_LINE_MASK; + vaddr &= ICACHE_LINE_MASK; } num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN); - local_irq_save(flags); - (*___flush_icache_rtn) (start, num_lines); - local_irq_restore(flags); -} - -/* Unlike routines above, having vaddr for flush op (along with paddr), - * prevents the need to speculatively kill the lines in multiple sets - * based on ratio of way_sz : pg_sz - */ -static void __ic_line_inv_vaddr(unsigned long phy_start, - unsigned long vaddr, unsigned long sz) -{ - unsigned long flags; - int num_lines, slack; - unsigned int addr; - - slack = phy_start & ~ICACHE_LINE_MASK; - sz += slack; - phy_start -= slack; - num_lines = DIV_ROUND_UP(sz, ARC_ICACHE_LINE_LEN); - -#if (CONFIG_ARC_MMU_VER > 2) - vaddr &= ~ICACHE_LINE_MASK; - addr = phy_start; -#else +#if (CONFIG_ARC_MMU_VER <= 2) /* bits 17:13 of vaddr go as bits 4:0 of paddr */ - addr = phy_start | ((vaddr >> 13) & 0x1F); + paddr |= (vaddr >> PAGE_SHIFT) & 0x1F; #endif local_irq_save(flags); while (num_lines-- > 0) { #if (CONFIG_ARC_MMU_VER > 2) /* tag comes from phy addr */ - write_aux_reg(ARC_REG_IC_PTAG, addr); + write_aux_reg(ARC_REG_IC_PTAG, paddr); /* index bits come from vaddr */ write_aux_reg(ARC_REG_IC_IVIL, vaddr); vaddr += ARC_ICACHE_LINE_LEN; #else - /* this paddr contains vaddrs bits as needed */ - write_aux_reg(ARC_REG_IC_IVIL, addr); + /* paddr contains stuffed vaddrs bits */ + write_aux_reg(ARC_REG_IC_IVIL, paddr); #endif - addr += ARC_ICACHE_LINE_LEN; + paddr += ARC_ICACHE_LINE_LEN; } local_irq_restore(flags); } #else -#define __ic_line_inv(start, sz) #define __ic_line_inv_vaddr(pstart, vstart, sz) #endif /* CONFIG_ARC_HAS_ICACHE */ @@ -615,35 +470,72 @@ static void __ic_line_inv_vaddr(unsigned long phy_start, * Exported APIs */ -/* TBD: use pg_arch_1 to optimize this */ +/* + * Handle cache congruency of kernel and userspace mappings of page when kernel + * writes-to/reads-from + * + * The idea is to defer flushing of kernel mapping after a WRITE, possible if: + * -dcache is NOT aliasing, hence any U/K-mappings of page are congruent + * -U-mapping doesn't exist yet for page (finalised in update_mmu_cache) + * -In SMP, if hardware caches are coherent + * + * There's a corollary case, where kernel READs from a userspace mapped page. + * If the U-mapping is not congruent to to K-mapping, former needs flushing. 
+ */ void flush_dcache_page(struct page *page) { - __dc_line_op((unsigned long)page_address(page), PAGE_SIZE, OP_FLUSH); + struct address_space *mapping; + + if (!cache_is_vipt_aliasing()) { + set_bit(PG_arch_1, &page->flags); + return; + } + + /* don't handle anon pages here */ + mapping = page_mapping(page); + if (!mapping) + return; + + /* + * pagecache page, file not yet mapped to userspace + * Make a note that K-mapping is dirty + */ + if (!mapping_mapped(mapping)) { + set_bit(PG_arch_1, &page->flags); + } else if (page_mapped(page)) { + + /* kernel reading from page with U-mapping */ + void *paddr = page_address(page); + unsigned long vaddr = page->index << PAGE_CACHE_SHIFT; + + if (addr_not_cache_congruent(paddr, vaddr)) + __flush_dcache_page(paddr, vaddr); + } } EXPORT_SYMBOL(flush_dcache_page); void dma_cache_wback_inv(unsigned long start, unsigned long sz) { - __dc_line_op(start, sz, OP_FLUSH_N_INV); + __dc_line_op_k(start, sz, OP_FLUSH_N_INV); } EXPORT_SYMBOL(dma_cache_wback_inv); void dma_cache_inv(unsigned long start, unsigned long sz) { - __dc_line_op(start, sz, OP_INV); + __dc_line_op_k(start, sz, OP_INV); } EXPORT_SYMBOL(dma_cache_inv); void dma_cache_wback(unsigned long start, unsigned long sz) { - __dc_line_op(start, sz, OP_FLUSH); + __dc_line_op_k(start, sz, OP_FLUSH); } EXPORT_SYMBOL(dma_cache_wback); /* - * This is API for making I/D Caches consistent when modifying code - * (loadable modules, kprobes, etc) + * This is API for making I/D Caches consistent when modifying + * kernel code (loadable modules, kprobes, kgdb...) * This is called on insmod, with kernel virtual address for CODE of * the module. ARC cache maintenance ops require PHY address thus we * need to convert vmalloc addr to PHY addr @@ -652,7 +544,6 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) { unsigned int tot_sz, off, sz; unsigned long phy, pfn; - unsigned long flags; /* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */ @@ -673,8 +564,13 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) /* Case: Kernel Phy addr (0x8000_0000 onwards) */ if (likely(kstart > PAGE_OFFSET)) { - __ic_line_inv(kstart, kend - kstart); - __dc_line_op(kstart, kend - kstart, OP_FLUSH); + /* + * The 2nd arg despite being paddr will be used to index icache + * This is OK since no alternate virtual mappings will exist + * given the callers for this case: kprobe/kgdb in built-in + * kernel code only. + */ + __sync_icache_dcache(kstart, kstart, kend - kstart); return; } @@ -692,42 +588,45 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) pfn = vmalloc_to_pfn((void *)kstart); phy = (pfn << PAGE_SHIFT) + off; sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off); - local_irq_save(flags); - __dc_line_op(phy, sz, OP_FLUSH); - __ic_line_inv(phy, sz); - local_irq_restore(flags); + __sync_icache_dcache(phy, kstart, sz); kstart += sz; tot_sz -= sz; } } /* - * Optimised ver of flush_icache_range() with spec callers: ptrace/signals - * where vaddr is also available. This allows passing both vaddr and paddr - * bits to CDU for cache flush, short-circuting the current pessimistic algo - * which kills all possible aliases. - * An added adv of knowing that vaddr is user-vaddr avoids various checks - * and handling for k-vaddr, k-paddr as done in orig ver above + * General purpose helper to make I and D cache lines consistent. 
+ * @paddr is phy addr of region + * @vaddr is typically user or kernel vaddr (vmalloc) + * Howver in one instance, flush_icache_range() by kprobe (for a breakpt in + * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will + * use a paddr to index the cache (despite VIPT). This is fine since since a + * built-in kernel page will not have any virtual mappings (not even kernel) + * kprobe on loadable module is different as it will have kvaddr. */ -void flush_icache_range_vaddr(unsigned long paddr, unsigned long u_vaddr, - int len) +void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len) { - __ic_line_inv_vaddr(paddr, u_vaddr, len); - __dc_line_op(paddr, len, OP_FLUSH); + unsigned long flags; + + local_irq_save(flags); + __ic_line_inv_vaddr(paddr, vaddr, len); + __dc_line_op(paddr, vaddr, len, OP_FLUSH); + local_irq_restore(flags); +} + +/* wrapper to compile time eliminate alignment checks in flush loop */ +void __inv_icache_page(unsigned long paddr, unsigned long vaddr) +{ + __ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE); } /* - * XXX: This also needs to be optim using pg_arch_1 - * This is called when a page-cache page is about to be mapped into a - * user process' address space. It offers an opportunity for a - * port to ensure d-cache/i-cache coherency if necessary. + * wrapper to clearout kernel or userspace mappings of a page + * For kernel mappings @vaddr == @paddr */ -void flush_icache_page(struct vm_area_struct *vma, struct page *page) +void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr) { - if (!(vma->vm_flags & VM_EXEC)) - return; - - __ic_line_inv((unsigned long)page_address(page), PAGE_SIZE); + __dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV); } void flush_icache_all(void) @@ -756,6 +655,87 @@ noinline void flush_cache_all(void) } +#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING + +void flush_cache_mm(struct mm_struct *mm) +{ + flush_cache_all(); +} + +void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr, + unsigned long pfn) +{ + unsigned int paddr = pfn << PAGE_SHIFT; + + __sync_icache_dcache(paddr, u_vaddr, PAGE_SIZE); +} + +void flush_cache_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end) +{ + flush_cache_all(); +} + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long u_vaddr, struct vm_area_struct *vma) +{ + void *kfrom = page_address(from); + void *kto = page_address(to); + int clean_src_k_mappings = 0; + + /* + * If SRC page was already mapped in userspace AND it's U-mapping is + * not congruent with K-mapping, sync former to physical page so that + * K-mapping in memcpy below, sees the right data + * + * Note that while @u_vaddr refers to DST page's userspace vaddr, it is + * equally valid for SRC page as well + */ + if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) { + __flush_dcache_page(kfrom, u_vaddr); + clean_src_k_mappings = 1; + } + + copy_page(kto, kfrom); + + /* + * Mark DST page K-mapping as dirty for a later finalization by + * update_mmu_cache(). Although the finalization could have been done + * here as well (given that both vaddr/paddr are available). + * But update_mmu_cache() already has code to do that for other + * non copied user pages (e.g. read faults which wire in pagecache page + * directly). 
+ */ + set_bit(PG_arch_1, &to->flags); + + /* + * if SRC was already usermapped and non-congruent to kernel mapping + * sync the kernel mapping back to physical page + */ + if (clean_src_k_mappings) { + __flush_dcache_page(kfrom, kfrom); + } else { + set_bit(PG_arch_1, &from->flags); + } +} + +void clear_user_page(void *to, unsigned long u_vaddr, struct page *page) +{ + clear_page(to); + set_bit(PG_arch_1, &page->flags); +} + +void flush_anon_page(struct vm_area_struct *vma, struct page *page, + unsigned long u_vaddr) +{ + /* TBD: do we really need to clear the kernel mapping */ + __flush_dcache_page(page_address(page), u_vaddr); + __flush_dcache_page(page_address(page), page_address(page)); + +} + +#endif + /********************************************************************** * Explicit Cache flush request from user space via syscall * Needed for JITs which generate code on the fly diff --git a/trunk/arch/arc/mm/extable.c b/trunk/arch/arc/mm/extable.c index 014172ba8432..aa652e281324 100644 --- a/trunk/arch/arc/mm/extable.c +++ b/trunk/arch/arc/mm/extable.c @@ -27,7 +27,7 @@ int fixup_exception(struct pt_regs *regs) #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -long arc_copy_from_user_noinline(void *to, const void __user * from, +long arc_copy_from_user_noinline(void *to, const void __user *from, unsigned long n) { return __arc_copy_from_user(to, from, n); @@ -48,7 +48,7 @@ unsigned long arc_clear_user_noinline(void __user *to, } EXPORT_SYMBOL(arc_clear_user_noinline); -long arc_strncpy_from_user_noinline (char *dst, const char __user *src, +long arc_strncpy_from_user_noinline(char *dst, const char __user *src, long count) { return __arc_strncpy_from_user(dst, src, count); diff --git a/trunk/arch/arc/mm/fault.c b/trunk/arch/arc/mm/fault.c index af55aab803d2..689ffd86d5e9 100644 --- a/trunk/arch/arc/mm/fault.c +++ b/trunk/arch/arc/mm/fault.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/arc/mm/init.c b/trunk/arch/arc/mm/init.c index 727d4794ea0f..4a177365b2c4 100644 --- a/trunk/arch/arc/mm/init.c +++ b/trunk/arch/arc/mm/init.c @@ -10,9 +10,6 @@ #include #include #include -#ifdef CONFIG_BLOCK_DEV_RAM -#include -#endif #include #include #include diff --git a/trunk/arch/arc/mm/ioremap.c b/trunk/arch/arc/mm/ioremap.c index 3e5c92c79936..739e65f355de 100644 --- a/trunk/arch/arc/mm/ioremap.c +++ b/trunk/arch/arc/mm/ioremap.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include void __iomem *ioremap(unsigned long paddr, unsigned long size) { diff --git a/trunk/arch/arc/mm/mmap.c b/trunk/arch/arc/mm/mmap.c new file mode 100644 index 000000000000..2e06d56e987b --- /dev/null +++ b/trunk/arch/arc/mm/mmap.c @@ -0,0 +1,78 @@ +/* + * ARC700 mmap + * + * (started from arm version - for VIPT alias handling) + * + * Copyright (C) 2013 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include + +#define COLOUR_ALIGN(addr, pgoff) \ + ((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \ + (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1))) + +/* + * Ensure that shared mappings are correctly aligned to + * avoid aliasing issues with VIPT caches. + * We need to ensure that + * a specific page of an object is always mapped at a multiple of + * SHMLBA bytes. 
+ */ +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, unsigned long flags) +{ + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; + int do_align = 0; + int aliasing = cache_is_vipt_aliasing(); + struct vm_unmapped_area_info info; + + /* + * We only need to do colour alignment if D cache aliases. + */ + if (aliasing) + do_align = filp || (flags & MAP_SHARED); + + /* + * We enforce the MAP_FIXED case. + */ + if (flags & MAP_FIXED) { + if (aliasing && flags & MAP_SHARED && + (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)) + return -EINVAL; + return addr; + } + + if (len > TASK_SIZE) + return -ENOMEM; + + if (addr) { + if (do_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); + if (TASK_SIZE - len >= addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } + + info.flags = 0; + info.length = len; + info.low_limit = mm->mmap_base; + info.high_limit = TASK_SIZE; + info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + return vm_unmapped_area(&info); +} diff --git a/trunk/arch/arc/mm/tlb.c b/trunk/arch/arc/mm/tlb.c index 9b9ce23f4ec3..066145b5f348 100644 --- a/trunk/arch/arc/mm/tlb.c +++ b/trunk/arch/arc/mm/tlb.c @@ -418,23 +418,52 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) local_irq_restore(flags); } -/* arch hook called by core VM at the end of handle_mm_fault( ), - * when a new PTE is entered in Page Tables or an existing one - * is modified. We aggresively pre-install a TLB entry +/* + * Called at the end of pagefault, for a userspace mapped page + * -pre-install the corresponding TLB entry into MMU + * -Finalize the delayed D-cache flush of kernel mapping of page due to + * flush_dcache_page(), copy_user_page() + * + * Note that flush (when done) involves both WBACK - so physical page is + * in sync as well as INV - so any non-congruent aliases don't remain */ - -void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddress, +void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned, pte_t *ptep) { + unsigned long vaddr = vaddr_unaligned & PAGE_MASK; + unsigned long paddr = pte_val(*ptep) & PAGE_MASK; + + create_tlb(vma, vaddr, ptep); + + /* + * Exec page : Independent of aliasing/page-color considerations, + * since icache doesn't snoop dcache on ARC, any dirty + * K-mapping of a code page needs to be wback+inv so that + * icache fetch by userspace sees code correctly. + * !EXEC page: If K-mapping is NOT congruent to U-mapping, flush it + * so userspace sees the right data. + * (Avoids the flush for Non-exec + congruent mapping case) + */ + if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) { + struct page *page = pfn_to_page(pte_pfn(*ptep)); + + int dirty = test_and_clear_bit(PG_arch_1, &page->flags); + if (dirty) { + /* wback + inv dcache lines */ + __flush_dcache_page(paddr, paddr); - create_tlb(vma, vaddress, ptep); + /* invalidate any existing icache lines */ + if (vma->vm_flags & VM_EXEC) + __inv_icache_page(paddr, vaddr); + } + } } /* Read the Cache Build Confuration Registers, Decode them and save into * the cpuinfo structure for later use. 
* No Validation is done here, simply read/convert the BCRs */ -void __init read_decode_mmu_bcr(void) +void __cpuinit read_decode_mmu_bcr(void) { unsigned int tmp; struct bcr_mmu_1_2 *mmu2; /* encoded MMU2 attr */ @@ -466,7 +495,7 @@ void __init read_decode_mmu_bcr(void) char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) { int n = 0; - struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[smp_processor_id()].mmu; + struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu; n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ", p_mmu->ver, TO_KB(p_mmu->pg_sz)); @@ -480,7 +509,7 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len) return buf; } -void __init arc_mmu_init(void) +void __cpuinit arc_mmu_init(void) { char str[256]; struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu; diff --git a/trunk/arch/arc/plat-arcfpga/platform.c b/trunk/arch/arc/plat-arcfpga/platform.c index 4e20a1a5104d..b3700c064c06 100644 --- a/trunk/arch/arc/plat-arcfpga/platform.c +++ b/trunk/arch/arc/plat-arcfpga/platform.c @@ -224,3 +224,15 @@ MACHINE_START(ML509, "ml509") .init_smp = iss_model_init_smp, #endif MACHINE_END + +static const char *nsimosci_compat[] __initdata = { + "snps,nsimosci", + NULL, +}; + +MACHINE_START(NSIMOSCI, "nsimosci") + .dt_compat = nsimosci_compat, + .init_early = NULL, + .init_machine = plat_fpga_populate_dev, + .init_irq = NULL, +MACHINE_END diff --git a/trunk/arch/arc/plat-tb10x/Kconfig b/trunk/arch/arc/plat-tb10x/Kconfig new file mode 100644 index 000000000000..1d3452100f1f --- /dev/null +++ b/trunk/arch/arc/plat-tb10x/Kconfig @@ -0,0 +1,29 @@ +# Abilis Systems TB10x platform kernel configuration file +# +# Author: Christian Ruppert +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +menuconfig ARC_PLAT_TB10X + bool "Abilis TB10x" + select COMMON_CLK + select PINCTRL + select PINMUX + select ARCH_REQUIRE_GPIOLIB + help + Support for platforms based on the TB10x home media gateway SOC by + Abilis Systems. TB10x is based on the ARC700 CPU architecture. + Say Y if you are building a kernel for one of the SOCs in this + series (e.g. TB100 or TB101). If in doubt say N. diff --git a/trunk/arch/arc/plat-tb10x/Makefile b/trunk/arch/arc/plat-tb10x/Makefile new file mode 100644 index 000000000000..89611d25ef35 --- /dev/null +++ b/trunk/arch/arc/plat-tb10x/Makefile @@ -0,0 +1,21 @@ +# Abilis Systems TB10x platform Makefile +# +# Author: Christian Ruppert +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + + +KBUILD_CFLAGS += -Iarch/arc/plat-tb10x/include + +obj-y += tb10x.o diff --git a/trunk/arch/arc/plat-tb10x/tb10x.c b/trunk/arch/arc/plat-tb10x/tb10x.c new file mode 100644 index 000000000000..d3567691c7e1 --- /dev/null +++ b/trunk/arch/arc/plat-tb10x/tb10x.c @@ -0,0 +1,71 @@ +/* + * Abilis Systems TB10x platform initialisation + * + * Copyright (C) Abilis Systems 2012 + * + * Author: Christian Ruppert + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#include +#include +#include +#include + +#include + + +static void __init tb10x_platform_init(void) +{ + of_clk_init(NULL); + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); +} + +static void __init tb10x_platform_late_init(void) +{ + struct device_node *dn; + + /* + * Pinctrl documentation recommends setting up the iomux here for + * all modules which don't require control over the pins themselves. + * Modules which need this kind of assistance are compatible with + * "abilis,simple-pinctrl", i.e. we can easily iterate over them. + * TODO: Does this recommended method work cleanly with pins required + * by modules? 
+ */ + for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") { + struct platform_device *pd = of_find_device_by_node(dn); + struct pinctrl *pctl; + + pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default"); + if (IS_ERR(pctl)) { + int ret = PTR_ERR(pctl); + dev_err(&pd->dev, "Could not set up pinctrl: %d\n", + ret); + } + } +} + +static const char *tb10x_compat[] __initdata = { + "abilis,arc-tb10x", + NULL, +}; + +MACHINE_START(TB10x, "tb10x") + .dt_compat = tb10x_compat, + .init_machine = tb10x_platform_init, + .init_late = tb10x_platform_late_init, +MACHINE_END diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index aa71a2321040..49d993cee512 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -38,6 +38,7 @@ config ARM select HAVE_GENERIC_HARDIRQS select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) select HAVE_IDE if PCI || ISA || PCMCIA + select HAVE_IRQ_TIME_ACCOUNTING select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA select HAVE_KERNEL_LZO @@ -109,9 +110,6 @@ config MIGHT_HAVE_PCI config SYS_SUPPORTS_APM_EMULATION bool -config GENERIC_GPIO - bool - config HAVE_TCM bool select GENERIC_ALLOCATOR @@ -491,7 +489,7 @@ config ARCH_IXP4XX config ARCH_DOVE bool "Marvell Dove" select ARCH_REQUIRE_GPIOLIB - select CPU_V7 + select CPU_PJ4 select GENERIC_CLOCKEVENTS select MIGHT_HAVE_PCI select PINCTRL @@ -900,7 +898,6 @@ config ARCH_MULTI_V7 bool "ARMv7 based platforms (Cortex-A, PJ4, Scorpion, Krait)" default y select ARCH_MULTI_V6_V7 - select ARCH_VEXPRESS select CPU_V7 config ARCH_MULTI_V6_V7 diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile index 47374085befd..1ba358ba16b8 100644 --- a/trunk/arch/arm/Makefile +++ b/trunk/arch/arm/Makefile @@ -309,7 +309,7 @@ define archhelp echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)' echo ' uImage - U-Boot wrapped zImage' - echo ' bootpImage - Combined zImage and initial RAM disk' + echo ' bootpImage - Combined zImage and initial RAM disk' echo ' (supply initrd image via make variable INITRD=)' echo '* dtbs - Build device tree blobs for enabled boards' echo ' install - Install uncompressed kernel' diff --git a/trunk/arch/arm/boot/dts/am33xx.dtsi b/trunk/arch/arm/boot/dts/am33xx.dtsi index d1101103aa51..1460d9b88adf 100644 --- a/trunk/arch/arm/boot/dts/am33xx.dtsi +++ b/trunk/arch/arm/boot/dts/am33xx.dtsi @@ -403,5 +403,17 @@ 0x44d80000 0x2000>; /* M3 DMEM */ ti,hwmods = "wkup_m3"; }; + + gpmc: gpmc@50000000 { + compatible = "ti,am3352-gpmc"; + ti,hwmods = "gpmc"; + reg = <0x50000000 0x2000>; + interrupts = <100>; + num-cs = <7>; + num-waitpins = <2>; + #address-cells = <2>; + #size-cells = <1>; + status = "disabled"; + }; }; }; diff --git a/trunk/arch/arm/boot/dts/cros5250-common.dtsi b/trunk/arch/arm/boot/dts/cros5250-common.dtsi index 0a61bbb9102f..3f0239ec1bc5 100644 --- a/trunk/arch/arm/boot/dts/cros5250-common.dtsi +++ b/trunk/arch/arm/boot/dts/cros5250-common.dtsi @@ -175,6 +175,14 @@ i2c@12C70000 { samsung,i2c-sda-delay = <100>; samsung,i2c-max-bus-freq = <378000>; + + trackpad { + reg = <0x67>; + compatible = "cypress,cyapa"; + interrupts = <2 0>; + interrupt-parent = <&gpx1>; + wakeup-source; + }; }; i2c@12C80000 { diff --git a/trunk/arch/arm/boot/dts/exynos5250-smdk5250.dts b/trunk/arch/arm/boot/dts/exynos5250-smdk5250.dts index 26d856ba50a1..3e0c792e2767 100644 --- a/trunk/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/trunk/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ 
-214,7 +214,7 @@ }; usb@12110000 { - samsung,vbus-gpio = <&gpx2 6 1 3 3>; + samsung,vbus-gpio = <&gpx2 6 0>; }; dp-controller { diff --git a/trunk/arch/arm/boot/dts/exynos5250-snow.dts b/trunk/arch/arm/boot/dts/exynos5250-snow.dts index bf4744bab445..d449feb7e143 100644 --- a/trunk/arch/arm/boot/dts/exynos5250-snow.dts +++ b/trunk/arch/arm/boot/dts/exynos5250-snow.dts @@ -183,7 +183,7 @@ }; usb@12110000 { - samsung,vbus-gpio = <&gpx1 1 1 3 3>; + samsung,vbus-gpio = <&gpx1 1 0>; }; fixed-rate-clocks { diff --git a/trunk/arch/arm/boot/dts/omap3-beagle-xm.dts b/trunk/arch/arm/boot/dts/omap3-beagle-xm.dts index 5a31964ae339..3046d1f81be0 100644 --- a/trunk/arch/arm/boot/dts/omap3-beagle-xm.dts +++ b/trunk/arch/arm/boot/dts/omap3-beagle-xm.dts @@ -122,6 +122,7 @@ &usb_otg_hs { interface-type = <0>; + usb-phy = <&usb2_phy>; mode = <3>; power = <50>; }; diff --git a/trunk/arch/arm/boot/dts/omap3-evm.dts b/trunk/arch/arm/boot/dts/omap3-evm.dts index 05f51e10ddd6..96d1c206a57b 100644 --- a/trunk/arch/arm/boot/dts/omap3-evm.dts +++ b/trunk/arch/arm/boot/dts/omap3-evm.dts @@ -68,6 +68,7 @@ &usb_otg_hs { interface-type = <0>; + usb-phy = <&usb2_phy>; mode = <3>; power = <50>; }; diff --git a/trunk/arch/arm/boot/dts/omap3-overo.dtsi b/trunk/arch/arm/boot/dts/omap3-overo.dtsi index d4a7280d18b7..a626c50041f6 100644 --- a/trunk/arch/arm/boot/dts/omap3-overo.dtsi +++ b/trunk/arch/arm/boot/dts/omap3-overo.dtsi @@ -73,6 +73,7 @@ &usb_otg_hs { interface-type = <0>; + usb-phy = <&usb2_phy>; mode = <3>; power = <50>; }; diff --git a/trunk/arch/arm/boot/dts/omap3.dtsi b/trunk/arch/arm/boot/dts/omap3.dtsi index 4ad03d9dbf0c..82a404da1c0d 100644 --- a/trunk/arch/arm/boot/dts/omap3.dtsi +++ b/trunk/arch/arm/boot/dts/omap3.dtsi @@ -519,7 +519,6 @@ interrupts = <0 92 0x4>, <0 93 0x4>; interrupt-names = "mc", "dma"; ti,hwmods = "usb_otg_hs"; - usb-phy = <&usb2_phy>; multipoint = <1>; num-eps = <16>; ram-bits = <12>; diff --git a/trunk/arch/arm/boot/dts/omap36xx.dtsi b/trunk/arch/arm/boot/dts/omap36xx.dtsi index b89233e43b0f..f3447bc1b032 100644 --- a/trunk/arch/arm/boot/dts/omap36xx.dtsi +++ b/trunk/arch/arm/boot/dts/omap36xx.dtsi @@ -20,9 +20,9 @@ cpu@0 { operating-points = < /* kHz uV */ - 300000 975000 - 600000 1075000 - 800000 1200000 + 300000 1012500 + 600000 1200000 + 800000 1325000 >; clock-latency = <300000>; /* From legacy driver */ }; diff --git a/trunk/arch/arm/boot/dts/omap4-sdp.dts b/trunk/arch/arm/boot/dts/omap4-sdp.dts index c387bdc1b1d1..a35d9cd58063 100644 --- a/trunk/arch/arm/boot/dts/omap4-sdp.dts +++ b/trunk/arch/arm/boot/dts/omap4-sdp.dts @@ -223,6 +223,15 @@ >; }; + mcspi1_pins: pinmux_mcspi1_pins { + pinctrl-single,pins = < + 0xf2 0x100 /* mcspi1_clk.mcspi1_clk INPUT | MODE0 */ + 0xf4 0x100 /* mcspi1_somi.mcspi1_somi INPUT | MODE0 */ + 0xf6 0x100 /* mcspi1_simo.mcspi1_simo INPUT | MODE0 */ + 0xf8 0x100 /* mcspi1_cs0.mcspi1_cs0 INPUT | MODE0*/ + >; + }; + dss_hdmi_pins: pinmux_dss_hdmi_pins { pinctrl-single,pins = < 0x5a 0x118 /* hdmi_cec.hdmi_cec INPUT PULLUP | MODE 0 */ @@ -358,12 +367,15 @@ }; &mcspi1 { + pinctrl-names = "default"; + pinctrl-0 = <&mcspi1_pins>; + eth@0 { compatible = "ks8851"; spi-max-frequency = <24000000>; reg = <0>; interrupt-parent = <&gpio2>; - interrupts = <2>; /* gpio line 34 */ + interrupts = <2 8>; /* gpio line 34, low triggered */ vdd-supply = <&vdd_eth>; }; }; diff --git a/trunk/arch/arm/boot/dts/omap4-var-som.dts b/trunk/arch/arm/boot/dts/omap4-var-som.dts index 222a413c2c51..7e04103779c4 100644 --- a/trunk/arch/arm/boot/dts/omap4-var-som.dts +++ 
b/trunk/arch/arm/boot/dts/omap4-var-som.dts @@ -68,7 +68,7 @@ spi-max-frequency = <24000000>; reg = <0>; interrupt-parent = <&gpio6>; - interrupts = <11>; /* gpio line 171 */ + interrupts = <11 8>; /* gpio line 171, low triggered */ vdd-supply = <&vdd_eth>; }; }; diff --git a/trunk/arch/arm/boot/dts/omap4460.dtsi b/trunk/arch/arm/boot/dts/omap4460.dtsi index 7c2c23cc17ef..2cf227c86099 100644 --- a/trunk/arch/arm/boot/dts/omap4460.dtsi +++ b/trunk/arch/arm/boot/dts/omap4460.dtsi @@ -15,9 +15,9 @@ cpu@0 { operating-points = < /* kHz uV */ - 350000 975000 - 700000 1075000 - 920000 1200000 + 350000 1025000 + 700000 1200000 + 920000 1313000 >; clock-latency = <300000>; /* From legacy driver */ }; diff --git a/trunk/arch/arm/common/mcpm_platsmp.c b/trunk/arch/arm/common/mcpm_platsmp.c index 52b88d81b7bb..3caed0db6986 100644 --- a/trunk/arch/arm/common/mcpm_platsmp.c +++ b/trunk/arch/arm/common/mcpm_platsmp.c @@ -15,8 +15,6 @@ #include #include -#include - #include #include #include @@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i static void __cpuinit mcpm_secondary_init(unsigned int cpu) { mcpm_cpu_powered_up(); - gic_secondary_init(0); } #ifdef CONFIG_HOTPLUG_CPU diff --git a/trunk/arch/arm/configs/omap1_defconfig b/trunk/arch/arm/configs/omap1_defconfig index 7e0ebb64a7f9..9940f7b4e438 100644 --- a/trunk/arch/arm/configs/omap1_defconfig +++ b/trunk/arch/arm/configs/omap1_defconfig @@ -199,7 +199,6 @@ CONFIG_USB_PHY=y CONFIG_USB_DEBUG=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set -CONFIG_USB_SUSPEND=y CONFIG_USB_MON=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_STORAGE=y diff --git a/trunk/arch/arm/configs/omap2plus_defconfig b/trunk/arch/arm/configs/omap2plus_defconfig index 33903ca0d879..435d69b83e32 100644 --- a/trunk/arch/arm/configs/omap2plus_defconfig +++ b/trunk/arch/arm/configs/omap2plus_defconfig @@ -137,6 +137,8 @@ CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +CONFIG_SERIAL_OMAP=y +CONFIG_SERIAL_OMAP_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_I2C_CHARDEV=y CONFIG_SPI=y @@ -153,6 +155,7 @@ CONFIG_OMAP_WATCHDOG=y CONFIG_TWL4030_WATCHDOG=y CONFIG_MFD_TPS65217=y CONFIG_MFD_TPS65910=y +CONFIG_TWL6040_CORE=y CONFIG_REGULATOR_TWL4030=y CONFIG_REGULATOR_TPS65023=y CONFIG_REGULATOR_TPS6507X=y @@ -195,12 +198,12 @@ CONFIG_SND_USB_AUDIO=m CONFIG_SND_SOC=m CONFIG_SND_OMAP_SOC=m CONFIG_SND_OMAP_SOC_OMAP_TWL4030=m +CONFIG_SND_OMAP_SOC_OMAP_ABE_TWL6040=m CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m CONFIG_USB=y CONFIG_USB_DEBUG=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y CONFIG_USB_DEVICEFS=y -CONFIG_USB_SUSPEND=y CONFIG_USB_MON=y CONFIG_USB_WDM=y CONFIG_USB_STORAGE=y diff --git a/trunk/arch/arm/include/asm/cmpxchg.h b/trunk/arch/arm/include/asm/cmpxchg.h index 7eb18c1d8d6c..4f009c10540d 100644 --- a/trunk/arch/arm/include/asm/cmpxchg.h +++ b/trunk/arch/arm/include/asm/cmpxchg.h @@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ atomic64_t, \ counter), \ - (unsigned long)(o), \ - (unsigned long)(n))) + (unsigned long long)(o), \ + (unsigned long long)(n))) #define cmpxchg64_local(ptr, o, n) \ ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ local64_t, \ a), \ - (unsigned long)(o), \ - (unsigned long)(n))) + (unsigned long long)(o), \ + (unsigned long long)(n))) #endif /* __LINUX_ARM_ARCH__ >= 6 */ diff --git a/trunk/arch/arm/kernel/devtree.c b/trunk/arch/arm/kernel/devtree.c index 
70f1bdeb241b..5af04f6daa33 100644 --- a/trunk/arch/arm/kernel/devtree.c +++ b/trunk/arch/arm/kernel/devtree.c @@ -180,6 +180,13 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys) unsigned long dt_root; const char *model; +#ifdef CONFIG_ARCH_MULTIPLATFORM + DT_MACHINE_START(GENERIC_DT, "Generic DT based system") + MACHINE_END + + mdesc_best = (struct machine_desc *)&__mach_desc_GENERIC_DT; +#endif + if (!dt_phys) return NULL; diff --git a/trunk/arch/arm/kernel/setup.c b/trunk/arch/arm/kernel/setup.c index 728007c4a2b7..1522c7ae31b0 100644 --- a/trunk/arch/arm/kernel/setup.c +++ b/trunk/arch/arm/kernel/setup.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -659,9 +660,19 @@ struct screen_info screen_info = { static int __init customize_machine(void) { - /* customizes platform devices, or adds new ones */ + /* + * customizes platform devices, or adds new ones + * On DT based machines, we fall back to populating the + * machine from the device tree, if no callback is provided, + * otherwise we would always need an init_machine callback. + */ if (machine_desc->init_machine) machine_desc->init_machine(); +#ifdef CONFIG_OF + else + of_platform_populate(NULL, of_default_bus_match_table, + NULL, NULL); +#endif return 0; } arch_initcall(customize_machine); diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c index 47ab90563bf4..550d63cef68e 100644 --- a/trunk/arch/arm/kernel/smp.c +++ b/trunk/arch/arm/kernel/smp.c @@ -251,7 +251,7 @@ void __ref cpu_die(void) * this returns, power and/or clocks can be removed at any point * from this CPU and its cache by platform_cpu_kill(). */ - RCU_NONIDLE(complete(&cpu_died)); + complete(&cpu_died); /* * Ensure that the cache lines associated with that completion are diff --git a/trunk/arch/arm/mach-exynos/include/mach/regs-pmu.h b/trunk/arch/arm/mach-exynos/include/mach/regs-pmu.h index 3f30aa1ae354..57344b7e98ce 100644 --- a/trunk/arch/arm/mach-exynos/include/mach/regs-pmu.h +++ b/trunk/arch/arm/mach-exynos/include/mach/regs-pmu.h @@ -344,6 +344,7 @@ #define EXYNOS5_FSYS_ARM_OPTION S5P_PMUREG(0x2208) #define EXYNOS5_ISP_ARM_OPTION S5P_PMUREG(0x2288) #define EXYNOS5_ARM_COMMON_OPTION S5P_PMUREG(0x2408) +#define EXYNOS5_ARM_L2_OPTION S5P_PMUREG(0x2608) #define EXYNOS5_TOP_PWR_OPTION S5P_PMUREG(0x2C48) #define EXYNOS5_TOP_PWR_SYSMEM_OPTION S5P_PMUREG(0x2CC8) #define EXYNOS5_JPEG_MEM_OPTION S5P_PMUREG(0x2F48) diff --git a/trunk/arch/arm/mach-exynos/pmu.c b/trunk/arch/arm/mach-exynos/pmu.c index daebc1abc966..97d688526258 100644 --- a/trunk/arch/arm/mach-exynos/pmu.c +++ b/trunk/arch/arm/mach-exynos/pmu.c @@ -228,6 +228,7 @@ static struct exynos_pmu_conf exynos5250_pmu_config[] = { { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} }, { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} }, { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} }, + { EXYNOS5_ARM_L2_OPTION, { 0x10, 0x10, 0x0 } }, { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} }, { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} }, { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} }, @@ -353,11 +354,9 @@ static void exynos5_init_pmu(void) /* * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable - * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable */ tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION); - tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL | - EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN); + tmp |= EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN; __raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION); /* diff --git a/trunk/arch/arm/mach-imx/Kconfig 
b/trunk/arch/arm/mach-imx/Kconfig index 78f795d73cb6..ba44328464f3 100644 --- a/trunk/arch/arm/mach-imx/Kconfig +++ b/trunk/arch/arm/mach-imx/Kconfig @@ -5,6 +5,7 @@ config ARCH_MXC select AUTO_ZRELADDR if !ZBOOT_ROM select CLKDEV_LOOKUP select CLKSRC_MMIO + select GENERIC_ALLOCATOR select GENERIC_CLOCKEVENTS select GENERIC_IRQ_CHIP select MULTI_IRQ_HANDLER @@ -61,10 +62,6 @@ config MXC_ULPI config ARCH_HAS_RNGA bool -config IRAM_ALLOC - bool - select GENERIC_ALLOCATOR - config HAVE_IMX_ANATOP bool diff --git a/trunk/arch/arm/mach-imx/Makefile b/trunk/arch/arm/mach-imx/Makefile index 930958973f81..70ae7c490ac0 100644 --- a/trunk/arch/arm/mach-imx/Makefile +++ b/trunk/arch/arm/mach-imx/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_ARCH_MXC_IOMUX_V3) += iomux-v3.o obj-$(CONFIG_MXC_TZIC) += tzic.o obj-$(CONFIG_MXC_AVIC) += avic.o -obj-$(CONFIG_IRAM_ALLOC) += iram_alloc.o obj-$(CONFIG_MXC_ULPI) += ulpi.o obj-$(CONFIG_MXC_USE_EPIT) += epit.o obj-$(CONFIG_MXC_DEBUG_BOARD) += 3ds_debugboard.o diff --git a/trunk/arch/arm/mach-imx/common.h b/trunk/arch/arm/mach-imx/common.h index 4cba7dbb079f..c08ae3f99cee 100644 --- a/trunk/arch/arm/mach-imx/common.h +++ b/trunk/arch/arm/mach-imx/common.h @@ -12,6 +12,7 @@ #define __ASM_ARCH_MXC_COMMON_H__ struct platform_device; +struct pt_regs; struct clk; enum mxc_cpu_pwr_mode; diff --git a/trunk/arch/arm/mach-imx/headsmp.S b/trunk/arch/arm/mach-imx/headsmp.S index a58c8b0527cc..67b9c48dcafe 100644 --- a/trunk/arch/arm/mach-imx/headsmp.S +++ b/trunk/arch/arm/mach-imx/headsmp.S @@ -24,7 +24,7 @@ ENTRY(v7_secondary_startup) ENDPROC(v7_secondary_startup) #endif -#ifdef CONFIG_PM +#ifdef CONFIG_ARM_CPU_SUSPEND /* * The following code must assume it is running from physical address * where absolute virtual addresses to the data section have to be diff --git a/trunk/arch/arm/mach-imx/hotplug.c b/trunk/arch/arm/mach-imx/hotplug.c index 5e91112dcbee..3daf1ed90579 100644 --- a/trunk/arch/arm/mach-imx/hotplug.c +++ b/trunk/arch/arm/mach-imx/hotplug.c @@ -11,7 +11,9 @@ */ #include +#include #include +#include #include "common.h" diff --git a/trunk/arch/arm/mach-imx/iram_alloc.c b/trunk/arch/arm/mach-imx/iram_alloc.c deleted file mode 100644 index e05cf407db65..000000000000 --- a/trunk/arch/arm/mach-imx/iram_alloc.c +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2010 Freescale Semiconductor, Inc. All Rights Reserved. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version 2 - * of the License, or (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301, USA. 
- */ - -#include -#include -#include -#include -#include -#include "linux/platform_data/imx-iram.h" - -static unsigned long iram_phys_base; -static void __iomem *iram_virt_base; -static struct gen_pool *iram_pool; - -static inline void __iomem *iram_phys_to_virt(unsigned long p) -{ - return iram_virt_base + (p - iram_phys_base); -} - -void __iomem *iram_alloc(unsigned int size, unsigned long *dma_addr) -{ - if (!iram_pool) - return NULL; - - *dma_addr = gen_pool_alloc(iram_pool, size); - pr_debug("iram alloc - %dB@0x%lX\n", size, *dma_addr); - if (!*dma_addr) - return NULL; - return iram_phys_to_virt(*dma_addr); -} -EXPORT_SYMBOL(iram_alloc); - -void iram_free(unsigned long addr, unsigned int size) -{ - if (!iram_pool) - return; - - gen_pool_free(iram_pool, addr, size); -} -EXPORT_SYMBOL(iram_free); - -int __init iram_init(unsigned long base, unsigned long size) -{ - iram_phys_base = base; - - iram_pool = gen_pool_create(PAGE_SHIFT, -1); - if (!iram_pool) - return -ENOMEM; - - gen_pool_add(iram_pool, base, size, -1); - iram_virt_base = ioremap(iram_phys_base, size); - if (!iram_virt_base) - return -EIO; - - pr_debug("i.MX IRAM pool: %ld KB@0x%p\n", size / 1024, iram_virt_base); - return 0; -} diff --git a/trunk/arch/arm/mach-omap1/dma.c b/trunk/arch/arm/mach-omap1/dma.c index 1a4e887f028d..68ab858e27b7 100644 --- a/trunk/arch/arm/mach-omap1/dma.c +++ b/trunk/arch/arm/mach-omap1/dma.c @@ -301,7 +301,7 @@ static int __init omap1_system_dma_init(void) if (ret) { dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", __func__, pdev->name, pdev->id); - goto exit_device_put; + goto exit_iounmap; } p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); @@ -309,7 +309,7 @@ static int __init omap1_system_dma_init(void) dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", __func__, pdev->name); ret = -ENOMEM; - goto exit_device_del; + goto exit_iounmap; } d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); @@ -402,8 +402,8 @@ static int __init omap1_system_dma_init(void) kfree(d); exit_release_p: kfree(p); -exit_device_del: - platform_device_del(pdev); +exit_iounmap: + iounmap(dma_base); exit_device_put: platform_device_put(pdev); diff --git a/trunk/arch/arm/mach-omap2/Kconfig b/trunk/arch/arm/mach-omap2/Kconfig index 857b1f097fd8..f49cd51e162a 100644 --- a/trunk/arch/arm/mach-omap2/Kconfig +++ b/trunk/arch/arm/mach-omap2/Kconfig @@ -37,8 +37,6 @@ config ARCH_OMAP2PLUS_TYPICAL select NEON if ARCH_OMAP3 || ARCH_OMAP4 || SOC_OMAP5 select PM_RUNTIME select REGULATOR - select SERIAL_OMAP - select SERIAL_OMAP_CONSOLE select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4 select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4 select VFP diff --git a/trunk/arch/arm/mach-omap2/Makefile b/trunk/arch/arm/mach-omap2/Makefile index 62bb352c2d37..55a9d6777683 100644 --- a/trunk/arch/arm/mach-omap2/Makefile +++ b/trunk/arch/arm/mach-omap2/Makefile @@ -32,12 +32,12 @@ obj-$(CONFIG_SOC_HAS_OMAP2_SDRC) += sdrc.o # SMP support ONLY available for OMAP4 -obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o -obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o +smp-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o +smp-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o omap-4-5-common = omap4-common.o omap-wakeupgen.o \ sleep44xx.o -obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common) -obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common) +obj-$(CONFIG_ARCH_OMAP4) += $(omap-4-5-common) $(smp-y) +obj-$(CONFIG_SOC_OMAP5) += $(omap-4-5-common) $(smp-y) plus_sec := $(call as-instr,.arch_extension sec,+sec) AFLAGS_omap-headsmp.o 
:=-Wa,-march=armv7-a$(plus_sec) diff --git a/trunk/arch/arm/mach-omap2/board-omap3beagle.c b/trunk/arch/arm/mach-omap2/board-omap3beagle.c index 6de78605c0af..04c116555412 100644 --- a/trunk/arch/arm/mach-omap2/board-omap3beagle.c +++ b/trunk/arch/arm/mach-omap2/board-omap3beagle.c @@ -112,13 +112,13 @@ static u8 omap3_beagle_version; */ static struct { int mmc1_gpio_wp; - int usb_pwr_level; + bool usb_pwr_level; /* 0 - Active Low, 1 - Active High */ int dvi_pd_gpio; int usr_button_gpio; int mmc_caps; } beagle_config = { .mmc1_gpio_wp = -EINVAL, - .usb_pwr_level = GPIOF_OUT_INIT_LOW, + .usb_pwr_level = 0, .dvi_pd_gpio = -EINVAL, .usr_button_gpio = 4, .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, @@ -178,7 +178,7 @@ static void __init omap3_beagle_init_rev(void) case 0: printk(KERN_INFO "OMAP3 Beagle Rev: xM Ax/Bx\n"); omap3_beagle_version = OMAP3BEAGLE_BOARD_XM; - beagle_config.usb_pwr_level = GPIOF_OUT_INIT_HIGH; + beagle_config.usb_pwr_level = 1; beagle_config.mmc_caps &= ~MMC_CAP_8_BIT_DATA; break; case 2: diff --git a/trunk/arch/arm/mach-omap2/board-rx51-peripherals.c b/trunk/arch/arm/mach-omap2/board-rx51-peripherals.c index 1a884670a6c4..18ca61e300b3 100644 --- a/trunk/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/trunk/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -73,11 +73,11 @@ #define LIS302_IRQ1_GPIO 181 #define LIS302_IRQ2_GPIO 180 /* Not yet in use */ -/* list all spi devices here */ +/* List all SPI devices here. Note that the list/probe order seems to matter! */ enum { RX51_SPI_WL1251, - RX51_SPI_MIPID, /* LCD panel */ RX51_SPI_TSC2005, /* Touch Controller */ + RX51_SPI_MIPID, /* LCD panel */ }; static struct wl12xx_platform_data wl1251_pdata; diff --git a/trunk/arch/arm/mach-omap2/dma.c b/trunk/arch/arm/mach-omap2/dma.c index dab9fc014b97..49fd0d501c9b 100644 --- a/trunk/arch/arm/mach-omap2/dma.c +++ b/trunk/arch/arm/mach-omap2/dma.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include "soc.h" @@ -304,6 +305,9 @@ static int __init omap2_system_dma_init(void) if (res) return res; + if (of_have_populated_dt()) + return res; + pdev = platform_device_register_full(&omap_dma_dev_info); if (IS_ERR(pdev)) return PTR_ERR(pdev); diff --git a/trunk/arch/arm/mach-omap2/gpmc.c b/trunk/arch/arm/mach-omap2/gpmc.c index ed946df5ad8a..6c4da1254f53 100644 --- a/trunk/arch/arm/mach-omap2/gpmc.c +++ b/trunk/arch/arm/mach-omap2/gpmc.c @@ -1520,36 +1520,22 @@ static int gpmc_probe_dt(struct platform_device *pdev) return ret; } - for_each_node_by_name(child, "nand") { - ret = gpmc_probe_nand_child(pdev, child); - if (ret < 0) { - of_node_put(child); - return ret; - } - } + for_each_child_of_node(pdev->dev.of_node, child) { - for_each_node_by_name(child, "onenand") { - ret = gpmc_probe_onenand_child(pdev, child); - if (ret < 0) { - of_node_put(child); - return ret; - } - } + if (!child->name) + continue; - for_each_node_by_name(child, "nor") { - ret = gpmc_probe_generic_child(pdev, child); - if (ret < 0) { - of_node_put(child); - return ret; - } - } + if (of_node_cmp(child->name, "nand") == 0) + ret = gpmc_probe_nand_child(pdev, child); + else if (of_node_cmp(child->name, "onenand") == 0) + ret = gpmc_probe_onenand_child(pdev, child); + else if (of_node_cmp(child->name, "ethernet") == 0 || + of_node_cmp(child->name, "nor") == 0) + ret = gpmc_probe_generic_child(pdev, child); - for_each_node_by_name(child, "ethernet") { - ret = gpmc_probe_generic_child(pdev, child); - if (ret < 0) { + if (WARN(ret < 0, "%s: probing gpmc child %s failed\n", + __func__, 
child->full_name)) of_node_put(child); - return ret; - } } return 0; diff --git a/trunk/arch/arm/mach-omap2/id.c b/trunk/arch/arm/mach-omap2/id.c index 0f4c18e6e60c..1272c41d4749 100644 --- a/trunk/arch/arm/mach-omap2/id.c +++ b/trunk/arch/arm/mach-omap2/id.c @@ -419,11 +419,15 @@ void __init omap3xxx_check_revision(void) cpu_rev = "1.0"; break; case 1: - /* FALLTHROUGH */ - default: omap_revision = AM335X_REV_ES2_0; cpu_rev = "2.0"; break; + case 2: + /* FALLTHROUGH */ + default: + omap_revision = AM335X_REV_ES2_1; + cpu_rev = "2.1"; + break; } break; case 0xb8f2: @@ -644,13 +648,12 @@ void __init omap_soc_device_init(void) soc_dev_attr->revision = soc_rev; soc_dev = soc_device_register(soc_dev_attr); - if (IS_ERR_OR_NULL(soc_dev)) { + if (IS_ERR(soc_dev)) { kfree(soc_dev_attr); return; } parent = soc_device_to_device(soc_dev); - if (!IS_ERR_OR_NULL(parent)) - device_create_file(parent, &omap_soc_attr); + device_create_file(parent, &omap_soc_attr); } #endif /* CONFIG_SOC_BUS */ diff --git a/trunk/arch/arm/mach-omap2/mux34xx.h b/trunk/arch/arm/mach-omap2/mux34xx.h index 6543ebf8ecfc..3f26d297c082 100644 --- a/trunk/arch/arm/mach-omap2/mux34xx.h +++ b/trunk/arch/arm/mach-omap2/mux34xx.h @@ -393,6 +393,10 @@ #define OMAP3_CONTROL_PADCONF_SAD2D_SWAKEUP_OFFSET 0xa1c #define OMAP3_CONTROL_PADCONF_JTAG_RTCK_OFFSET 0xa1e #define OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET 0xa20 +#define OMAP3_CONTROL_PADCONF_GPIO_127 0xa24 +#define OMAP3_CONTROL_PADCONF_GPIO_126 0xa26 +#define OMAP3_CONTROL_PADCONF_GPIO_128 0xa28 +#define OMAP3_CONTROL_PADCONF_GPIO_129 0xa2a #define OMAP3_CONTROL_PADCONF_MUX_SIZE \ - (OMAP3_CONTROL_PADCONF_JTAG_TDO_OFFSET + 0x2) + (OMAP3_CONTROL_PADCONF_GPIO_129 + 0x2) diff --git a/trunk/arch/arm/mach-omap2/omap_device.c b/trunk/arch/arm/mach-omap2/omap_device.c index eeea4fa28fbc..e6d230700b2b 100644 --- a/trunk/arch/arm/mach-omap2/omap_device.c +++ b/trunk/arch/arm/mach-omap2/omap_device.c @@ -876,4 +876,4 @@ static int __init omap_device_late_init(void) bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle); return 0; } -omap_late_initcall(omap_device_late_init); +omap_late_initcall_sync(omap_device_late_init); diff --git a/trunk/arch/arm/mach-omap2/soc.h b/trunk/arch/arm/mach-omap2/soc.h index 18fdeeb3a44a..197cc16870d9 100644 --- a/trunk/arch/arm/mach-omap2/soc.h +++ b/trunk/arch/arm/mach-omap2/soc.h @@ -396,6 +396,7 @@ IS_OMAP_TYPE(3430, 0x3430) #define AM335X_CLASS 0x33500033 #define AM335X_REV_ES1_0 AM335X_CLASS #define AM335X_REV_ES2_0 (AM335X_CLASS | (0x1 << 8)) +#define AM335X_REV_ES2_1 (AM335X_CLASS | (0x2 << 8)) #define OMAP443X_CLASS 0x44300044 #define OMAP4430_REV_ES1_0 (OMAP443X_CLASS | (0x10 << 8)) @@ -496,6 +497,7 @@ level(__##fn); #define omap_subsys_initcall(fn) omap_initcall(subsys_initcall, fn) #define omap_device_initcall(fn) omap_initcall(device_initcall, fn) #define omap_late_initcall(fn) omap_initcall(late_initcall, fn) +#define omap_late_initcall_sync(fn) omap_initcall(late_initcall_sync, fn) #endif /* __ASSEMBLY__ */ diff --git a/trunk/arch/arm/mach-prima2/Kconfig b/trunk/arch/arm/mach-prima2/Kconfig index 80ca974b2f82..6988b117fc17 100644 --- a/trunk/arch/arm/mach-prima2/Kconfig +++ b/trunk/arch/arm/mach-prima2/Kconfig @@ -38,7 +38,7 @@ config ARCH_MARCO select CPU_V7 select HAVE_ARM_SCU if SMP select HAVE_SMP - select SMP_ON_UP + select SMP_ON_UP if SMP help Support for CSR SiRFSoC ARM Cortex A9 Platform diff --git a/trunk/arch/arm/mach-pxa/Kconfig b/trunk/arch/arm/mach-pxa/Kconfig index 9075461999c1..96100dbf5a2e 100644 --- 
a/trunk/arch/arm/mach-pxa/Kconfig +++ b/trunk/arch/arm/mach-pxa/Kconfig @@ -162,7 +162,6 @@ config MACH_XCEP select MTD select MTD_CFI select MTD_CFI_INTELEXT - select MTD_CHAR select MTD_PHYSMAP select PXA25x select SMC91X diff --git a/trunk/arch/arm/mach-spear/spear13xx.c b/trunk/arch/arm/mach-spear/spear13xx.c index 3621599c38ad..7aa6e8cf830f 100644 --- a/trunk/arch/arm/mach-spear/spear13xx.c +++ b/trunk/arch/arm/mach-spear/spear13xx.c @@ -35,6 +35,8 @@ void __init spear13xx_l2x0_init(void) * write alloc and 'Full line of zero' options * */ + if (!IS_ENABLED(CONFIG_CACHE_L2X0)) + return; writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL); diff --git a/trunk/arch/arm/mach-tegra/Kconfig b/trunk/arch/arm/mach-tegra/Kconfig index 20c3b372cdf5..84d72fc36dfe 100644 --- a/trunk/arch/arm/mach-tegra/Kconfig +++ b/trunk/arch/arm/mach-tegra/Kconfig @@ -63,6 +63,7 @@ config ARCH_TEGRA_114_SOC select ARM_ARCH_TIMER select ARM_GIC select ARM_L1_CACHE_SHIFT_6 + select CPU_FREQ_TABLE if CPU_FREQ select CPU_V7 select PINCTRL select PINCTRL_TEGRA114 diff --git a/trunk/arch/arm/mach-tegra/tegra2_emc.c b/trunk/arch/arm/mach-tegra/tegra2_emc.c index 9e8bdfa2b369..31e69a019bdd 100644 --- a/trunk/arch/arm/mach-tegra/tegra2_emc.c +++ b/trunk/arch/arm/mach-tegra/tegra2_emc.c @@ -307,11 +307,6 @@ static int tegra_emc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "missing register base\n"); - return -ENOMEM; - } - emc_regbase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(emc_regbase)) return PTR_ERR(emc_regbase); diff --git a/trunk/arch/arm/mach-ux500/Kconfig b/trunk/arch/arm/mach-ux500/Kconfig index f66d7deae46d..6a4387e39df8 100644 --- a/trunk/arch/arm/mach-ux500/Kconfig +++ b/trunk/arch/arm/mach-ux500/Kconfig @@ -19,6 +19,8 @@ if ARCH_U8500 config UX500_SOC_COMMON bool default y + select ABX500_CORE + select AB8500_CORE select ARM_ERRATA_754322 select ARM_ERRATA_764369 if SMP select ARM_GIC diff --git a/trunk/arch/arm/mach-ux500/board-mop500.c b/trunk/arch/arm/mach-ux500/board-mop500.c index a15dd6b63a8f..3cd555ac6d0a 100644 --- a/trunk/arch/arm/mach-ux500/board-mop500.c +++ b/trunk/arch/arm/mach-ux500/board-mop500.c @@ -403,8 +403,8 @@ static int mop500_prox_activate(struct device *dev) "no regulator\n"); return PTR_ERR(prox_regulator); } - regulator_enable(prox_regulator); - return 0; + + return regulator_enable(prox_regulator); } static void mop500_prox_deactivate(struct device *dev) diff --git a/trunk/arch/arm/mach-ux500/cpu-db8500.c b/trunk/arch/arm/mach-ux500/cpu-db8500.c index 995928ba22fd..e90b5ab23b6d 100644 --- a/trunk/arch/arm/mach-ux500/cpu-db8500.c +++ b/trunk/arch/arm/mach-ux500/cpu-db8500.c @@ -191,7 +191,7 @@ static const char *db8500_read_soc_id(void) /* Throw these device-specific numbers into the entropy pool */ add_device_randomness(uid, 0x14); return kasprintf(GFP_KERNEL, "%08x%08x%08x%08x%08x", - readl((u32 *)uid+1), + readl((u32 *)uid+0), readl((u32 *)uid+1), readl((u32 *)uid+2), readl((u32 *)uid+3), readl((u32 *)uid+4)); } diff --git a/trunk/arch/arm/plat-orion/Makefile b/trunk/arch/arm/plat-orion/Makefile index 2eca54b65906..9433605cd290 100644 --- a/trunk/arch/arm/plat-orion/Makefile +++ b/trunk/arch/arm/plat-orion/Makefile @@ -3,6 +3,6 @@ # ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include -orion-gpio-$(CONFIG_GENERIC_GPIO) += gpio.o +orion-gpio-$(CONFIG_GPIOLIB) += gpio.o obj-$(CONFIG_PLAT_ORION_LEGACY) += irq.o pcie.o time.o common.o mpp.o obj-$(CONFIG_PLAT_ORION_LEGACY) 
+= $(orion-gpio-y) diff --git a/trunk/arch/arm/plat-orion/gpio.c b/trunk/arch/arm/plat-orion/gpio.c index e39c2ba6e2fb..249fe6333e18 100644 --- a/trunk/arch/arm/plat-orion/gpio.c +++ b/trunk/arch/arm/plat-orion/gpio.c @@ -150,7 +150,7 @@ orion_gpio_is_valid(struct orion_gpio_chip *ochip, unsigned pin, int mode) } /* - * GENERIC_GPIO primitives. + * GPIO primitives. */ static int orion_gpio_request(struct gpio_chip *chip, unsigned pin) { diff --git a/trunk/arch/arm/plat-samsung/adc.c b/trunk/arch/arm/plat-samsung/adc.c index ca07cb1b155a..79690f2f6d3f 100644 --- a/trunk/arch/arm/plat-samsung/adc.c +++ b/trunk/arch/arm/plat-samsung/adc.c @@ -381,11 +381,6 @@ static int s3c_adc_probe(struct platform_device *pdev) } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(dev, "failed to find registers\n"); - return -ENXIO; - } - adc->regs = devm_ioremap_resource(dev, regs); if (IS_ERR(adc->regs)) return PTR_ERR(adc->regs); diff --git a/trunk/arch/arm/xen/enlighten.c b/trunk/arch/arm/xen/enlighten.c index d30042e39974..13609e01f4b7 100644 --- a/trunk/arch/arm/xen/enlighten.c +++ b/trunk/arch/arm/xen/enlighten.c @@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma, } EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range); -static int __init xen_secondary_init(unsigned int cpu) +static void __init xen_percpu_init(void *unused) { struct vcpu_register_vcpu_info info; struct vcpu_info *vcpup; int err; + int cpu = get_cpu(); pr_info("Xen: initializing cpu%d\n", cpu); vcpup = per_cpu_ptr(xen_vcpu_info, cpu); @@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu) info.offset = offset_in_page(vcpup); err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); - if (err) { - pr_debug("register_vcpu_info failed: err=%d\n", err); - } else { - /* This cpu is using the registered vcpu info, even if - later ones fail to. 
*/ - per_cpu(xen_vcpu, cpu) = vcpup; - } - return 0; + BUG_ON(err); + per_cpu(xen_vcpu, cpu) = vcpup; + + enable_percpu_irq(xen_events_irq, 0); } static void xen_restart(char str, const char *cmd) @@ -208,7 +205,6 @@ static int __init xen_guest_init(void) const char *version = NULL; const char *xen_prefix = "xen,xen-"; struct resource res; - int i; node = of_find_compatible_node(NULL, NULL, "xen,xen"); if (!node) { @@ -265,19 +261,23 @@ static int __init xen_guest_init(void) sizeof(struct vcpu_info)); if (xen_vcpu_info == NULL) return -ENOMEM; - for_each_online_cpu(i) - xen_secondary_init(i); gnttab_init(); if (!xen_initial_domain()) xenbus_probe(NULL); + return 0; +} +core_initcall(xen_guest_init); + +static int __init xen_pm_init(void) +{ pm_power_off = xen_power_off; arm_pm_restart = xen_restart; return 0; } -core_initcall(xen_guest_init); +subsys_initcall(xen_pm_init); static irqreturn_t xen_arm_callback(int irq, void *arg) { @@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg) return IRQ_HANDLED; } -static __init void xen_percpu_enable_events(void *unused) -{ - enable_percpu_irq(xen_events_irq, 0); -} - static int __init xen_init_events(void) { if (!xen_domain() || xen_events_irq < 0) @@ -303,7 +298,7 @@ static int __init xen_init_events(void) return -EINVAL; } - on_each_cpu(xen_percpu_enable_events, NULL, 0); + on_each_cpu(xen_percpu_init, NULL, 0); return 0; } diff --git a/trunk/arch/arm64/Kconfig b/trunk/arch/arm64/Kconfig index 73b6e764034c..56b3f6d447ae 100644 --- a/trunk/arch/arm64/Kconfig +++ b/trunk/arch/arm64/Kconfig @@ -6,6 +6,7 @@ config ARM64 select ARCH_WANT_FRAME_POINTERS select ARM_AMBA select ARM_ARCH_TIMER + select ARM_GIC select CLONE_BACKWARDS select COMMON_CLK select GENERIC_CLOCKEVENTS @@ -31,6 +32,8 @@ config ARM64 select OF select OF_EARLY_FLATTREE select PERF_USE_VMALLOC + select POWER_RESET + select POWER_SUPPLY select RTC_LIB select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE @@ -92,9 +95,6 @@ config SWIOTLB config IOMMU_HELPER def_bool SWIOTLB -config GENERIC_GPIO - bool - source "init/Kconfig" source "kernel/Kconfig.freezer" @@ -105,6 +105,7 @@ config ARCH_VEXPRESS bool "ARMv8 software model (Versatile Express)" select ARCH_REQUIRE_GPIOLIB select COMMON_CLK_VERSATILE + select POWER_RESET_VEXPRESS select VEXPRESS_CONFIG help This enables support for the ARMv8 software model (Versatile @@ -121,8 +122,6 @@ endmenu menu "Kernel Features" -source "kernel/time/Kconfig" - config ARM64_64K_PAGES bool "Enable 64KB pages support" help diff --git a/trunk/arch/arm64/boot/dts/foundation-v8.dts b/trunk/arch/arm64/boot/dts/foundation-v8.dts index 198682b6de31..84fcc5018284 100644 --- a/trunk/arch/arm64/boot/dts/foundation-v8.dts +++ b/trunk/arch/arm64/boot/dts/foundation-v8.dts @@ -23,7 +23,7 @@ }; cpus { - #address-cells = <1>; + #address-cells = <2>; #size-cells = <0>; cpu@0 { diff --git a/trunk/arch/arm64/include/asm/assembler.h b/trunk/arch/arm64/include/asm/assembler.h index c8eedc604984..5aceb83b3f5c 100644 --- a/trunk/arch/arm64/include/asm/assembler.h +++ b/trunk/arch/arm64/include/asm/assembler.h @@ -82,7 +82,7 @@ .macro enable_dbg_if_not_stepping, tmp mrs \tmp, mdscr_el1 - tbnz \tmp, #1, 9990f + tbnz \tmp, #0, 9990f enable_dbg 9990: .endm diff --git a/trunk/arch/arm64/include/asm/system_misc.h b/trunk/arch/arm64/include/asm/system_misc.h index 95e407255347..a6e1750369ef 100644 --- a/trunk/arch/arm64/include/asm/system_misc.h +++ b/trunk/arch/arm64/include/asm/system_misc.h @@ -41,7 +41,7 @@ extern void show_pte(struct mm_struct *mm, unsigned 
long addr); extern void __show_regs(struct pt_regs *); void soft_restart(unsigned long); -extern void (*pm_restart)(const char *cmd); +extern void (*arm_pm_restart)(char str, const char *cmd); #define UDBG_UNDEFINED (1 << 0) #define UDBG_SYSCALL (1 << 1) diff --git a/trunk/arch/arm64/include/asm/unistd32.h b/trunk/arch/arm64/include/asm/unistd32.h index 12f22492df4c..58125bf008d3 100644 --- a/trunk/arch/arm64/include/asm/unistd32.h +++ b/trunk/arch/arm64/include/asm/unistd32.h @@ -389,7 +389,7 @@ __SYSCALL(364, sys_perf_event_open) __SYSCALL(365, compat_sys_recvmmsg) __SYSCALL(366, sys_accept4) __SYSCALL(367, sys_fanotify_init) -__SYSCALL(368, compat_sys_fanotify_mark_wrapper) +__SYSCALL(368, compat_sys_fanotify_mark) __SYSCALL(369, sys_prlimit64) __SYSCALL(370, sys_name_to_handle_at) __SYSCALL(371, compat_sys_open_by_handle_at) diff --git a/trunk/arch/arm64/kernel/debug-monitors.c b/trunk/arch/arm64/kernel/debug-monitors.c index 0c3ba9f51376..f4726dc054b3 100644 --- a/trunk/arch/arm64/kernel/debug-monitors.c +++ b/trunk/arch/arm64/kernel/debug-monitors.c @@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el) */ static void clear_os_lock(void *unused) { - asm volatile("msr mdscr_el1, %0" : : "r" (0)); - isb(); asm volatile("msr oslar_el1, %0" : : "r" (0)); isb(); } diff --git a/trunk/arch/arm64/kernel/early_printk.c b/trunk/arch/arm64/kernel/early_printk.c index ac974f48a7a2..fbb6e1843659 100644 --- a/trunk/arch/arm64/kernel/early_printk.c +++ b/trunk/arch/arm64/kernel/early_printk.c @@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n) } } -static struct console early_console = { +static struct console early_console_dev = { .name = "earlycon", .write = early_write, .flags = CON_PRINTBUFFER | CON_BOOT, @@ -145,7 +145,8 @@ static int __init setup_early_printk(char *buf) early_base = early_io_map(paddr, EARLYCON_IOBASE); printch = match->printch; - register_console(&early_console); + early_console = &early_console_dev; + register_console(&early_console_dev); return 0; } diff --git a/trunk/arch/arm64/kernel/process.c b/trunk/arch/arm64/kernel/process.c index f4919721f7dd..46f02c3b5015 100644 --- a/trunk/arch/arm64/kernel/process.c +++ b/trunk/arch/arm64/kernel/process.c @@ -81,8 +81,8 @@ void soft_restart(unsigned long addr) void (*pm_power_off)(void); EXPORT_SYMBOL_GPL(pm_power_off); -void (*pm_restart)(const char *cmd); -EXPORT_SYMBOL_GPL(pm_restart); +void (*arm_pm_restart)(char str, const char *cmd); +EXPORT_SYMBOL_GPL(arm_pm_restart); void arch_cpu_idle_prepare(void) { @@ -131,8 +131,8 @@ void machine_restart(char *cmd) local_fiq_disable(); /* Now call the architecture specific reboot code. */ - if (pm_restart) - pm_restart(cmd); + if (arm_pm_restart) + arm_pm_restart('h', cmd); /* * Whoops - the architecture was unable to reboot. 
diff --git a/trunk/arch/arm64/kernel/setup.c b/trunk/arch/arm64/kernel/setup.c index 6a9a53292590..add6ea616843 100644 --- a/trunk/arch/arm64/kernel/setup.c +++ b/trunk/arch/arm64/kernel/setup.c @@ -282,12 +282,13 @@ void __init setup_arch(char **cmdline_p) #endif } -static int __init arm64_of_clk_init(void) +static int __init arm64_device_init(void) { of_clk_init(NULL); + of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); return 0; } -arch_initcall(arm64_of_clk_init); +arch_initcall(arm64_device_init); static DEFINE_PER_CPU(struct cpu, cpu_data); @@ -305,13 +306,6 @@ static int __init topology_init(void) } subsys_initcall(topology_init); -static int __init arm64_device_probe(void) -{ - of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); - return 0; -} -device_initcall(arm64_device_probe); - static const char *hwcap_str[] = { "fp", "asimd", diff --git a/trunk/arch/arm64/kernel/sys32.S b/trunk/arch/arm64/kernel/sys32.S index db01aa978c41..a1b19ed7467c 100644 --- a/trunk/arch/arm64/kernel/sys32.S +++ b/trunk/arch/arm64/kernel/sys32.S @@ -104,13 +104,6 @@ compat_sys_fallocate_wrapper: b sys_fallocate ENDPROC(compat_sys_fallocate_wrapper) -compat_sys_fanotify_mark_wrapper: - orr x2, x2, x3, lsl #32 - mov w3, w4 - mov w4, w5 - b sys_fanotify_mark -ENDPROC(compat_sys_fanotify_mark_wrapper) - #undef __SYSCALL #define __SYSCALL(x, y) .quad y // x diff --git a/trunk/arch/arm64/lib/bitops.S b/trunk/arch/arm64/lib/bitops.S index 36216d30cb9a..e5db797790d3 100644 --- a/trunk/arch/arm64/lib/bitops.S +++ b/trunk/arch/arm64/lib/bitops.S @@ -21,13 +21,13 @@ /* * x0: bits 5:0 bit offset - * bits 63:6 word offset + * bits 31:6 word offset * x1: address */ .macro bitop, name, instr ENTRY( \name ) - and x3, x0, #63 // Get bit offset - eor x0, x0, x3 // Clear low bits + and w3, w0, #63 // Get bit offset + eor w0, w0, w3 // Clear low bits mov x2, #1 add x1, x1, x0, lsr #3 // Get word offset lsl x3, x2, x3 // Create mask @@ -41,8 +41,8 @@ ENDPROC(\name ) .macro testop, name, instr ENTRY( \name ) - and x3, x0, #63 // Get bit offset - eor x0, x0, x3 // Clear low bits + and w3, w0, #63 // Get bit offset + eor w0, w0, w3 // Clear low bits mov x2, #1 add x1, x1, x0, lsr #3 // Get word offset lsl x4, x2, x3 // Create mask diff --git a/trunk/arch/arm64/mm/cache.S b/trunk/arch/arm64/mm/cache.S index abe69b80cf7f..48a386094fa3 100644 --- a/trunk/arch/arm64/mm/cache.S +++ b/trunk/arch/arm64/mm/cache.S @@ -52,7 +52,7 @@ loop1: add x2, x2, #4 // add 4 (line length offset) mov x4, #0x3ff and x4, x4, x1, lsr #3 // find maximum number on the way size - clz x5, x4 // find bit position of way size increment + clz w5, w4 // find bit position of way size increment mov x7, #0x7fff and x7, x7, x1, lsr #13 // extract max number of the index size loop2: diff --git a/trunk/arch/arm64/mm/fault.c b/trunk/arch/arm64/mm/fault.c index 52638171d6fd..98af6e760cce 100644 --- a/trunk/arch/arm64/mm/fault.c +++ b/trunk/arch/arm64/mm/fault.c @@ -148,6 +148,7 @@ void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) #define VM_FAULT_BADACCESS 0x020000 #define ESR_WRITE (1 << 6) +#define ESR_CM (1 << 8) #define ESR_LNX_EXEC (1 << 24) /* @@ -206,7 +207,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, struct task_struct *tsk; struct mm_struct *mm; int fault, sig, code; - int write = esr & ESR_WRITE; + bool write = (esr & ESR_WRITE) && !(esr & ESR_CM); unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | (write ? 
FAULT_FLAG_WRITE : 0); diff --git a/trunk/arch/arm64/mm/proc.S b/trunk/arch/arm64/mm/proc.S index f1d8b9bbfdad..a82ae8868077 100644 --- a/trunk/arch/arm64/mm/proc.S +++ b/trunk/arch/arm64/mm/proc.S @@ -119,8 +119,7 @@ ENTRY(__cpu_setup) mov x0, #3 << 20 msr cpacr_el1, x0 // Enable FP/ASIMD - mov x0, #1 - msr oslar_el1, x0 // Set the debug OS lock + msr mdscr_el1, xzr // Reset mdscr_el1 tlbi vmalle1is // invalidate I + D TLBs /* * Memory region attributes for LPAE: diff --git a/trunk/arch/avr32/Kconfig b/trunk/arch/avr32/Kconfig index 22c40308360b..549903cfc2cb 100644 --- a/trunk/arch/avr32/Kconfig +++ b/trunk/arch/avr32/Kconfig @@ -26,9 +26,6 @@ config AVR32 There is an AVR32 Linux project with a web page at http://avr32linux.org/. -config GENERIC_GPIO - def_bool y - config STACKTRACE_SUPPORT def_bool y @@ -208,6 +205,11 @@ config ARCH_DISCONTIGMEM_ENABLE config ARCH_SPARSEMEM_ENABLE def_bool n +config NODES_SHIFT + int + default "2" + depends on NEED_MULTIPLE_NODES + source "mm/Kconfig" config OWNERSHIP_TRACE diff --git a/trunk/arch/avr32/include/asm/Kbuild b/trunk/arch/avr32/include/asm/Kbuild index 4dd4f78d3dcc..d22af851f3f6 100644 --- a/trunk/arch/avr32/include/asm/Kbuild +++ b/trunk/arch/avr32/include/asm/Kbuild @@ -2,3 +2,4 @@ generic-y += clkdev.h generic-y += exec.h generic-y += trace_clock.h +generic-y += param.h diff --git a/trunk/arch/avr32/include/asm/numnodes.h b/trunk/arch/avr32/include/asm/numnodes.h deleted file mode 100644 index 0b864d7ce330..000000000000 --- a/trunk/arch/avr32/include/asm/numnodes.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_AVR32_NUMNODES_H -#define __ASM_AVR32_NUMNODES_H - -/* Max 4 nodes */ -#define NODES_SHIFT 2 - -#endif /* __ASM_AVR32_NUMNODES_H */ diff --git a/trunk/arch/avr32/include/asm/param.h b/trunk/arch/avr32/include/asm/param.h deleted file mode 100644 index 009a167aea1f..000000000000 --- a/trunk/arch/avr32/include/asm/param.h +++ /dev/null @@ -1,9 +0,0 @@ -#ifndef __ASM_AVR32_PARAM_H -#define __ASM_AVR32_PARAM_H - -#include - -# define HZ CONFIG_HZ -# define USER_HZ 100 /* User interfaces are in "ticks" */ -# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */ -#endif /* __ASM_AVR32_PARAM_H */ diff --git a/trunk/arch/avr32/include/uapi/asm/Kbuild b/trunk/arch/avr32/include/uapi/asm/Kbuild index df53e7a46774..3b85eaddf525 100644 --- a/trunk/arch/avr32/include/uapi/asm/Kbuild +++ b/trunk/arch/avr32/include/uapi/asm/Kbuild @@ -33,3 +33,4 @@ header-y += termbits.h header-y += termios.h header-y += types.h header-y += unistd.h +generic-y += param.h diff --git a/trunk/arch/avr32/include/uapi/asm/param.h b/trunk/arch/avr32/include/uapi/asm/param.h deleted file mode 100644 index d28aa5ee6d37..000000000000 --- a/trunk/arch/avr32/include/uapi/asm/param.h +++ /dev/null @@ -1,18 +0,0 @@ -#ifndef _UAPI__ASM_AVR32_PARAM_H -#define _UAPI__ASM_AVR32_PARAM_H - - -#ifndef HZ -# define HZ 100 -#endif - -/* TODO: Should be configurable */ -#define EXEC_PAGESIZE 4096 - -#ifndef NOGROUP -# define NOGROUP (-1) -#endif - -#define MAXHOSTNAMELEN 64 - -#endif /* _UAPI__ASM_AVR32_PARAM_H */ diff --git a/trunk/arch/avr32/kernel/module.c b/trunk/arch/avr32/kernel/module.c index 596f7305d93f..2c9412908024 100644 --- a/trunk/arch/avr32/kernel/module.c +++ b/trunk/arch/avr32/kernel/module.c @@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, break; case R_AVR32_GOT18SW: if ((relocation & 0xfffe0003) != 0 - && (relocation & 0xfffc0003) != 0xffff0000) + && (relocation & 0xfffc0000) != 0xfffc0000) return 
reloc_overflow(module, "R_AVR32_GOT18SW", relocation); relocation >>= 2; diff --git a/trunk/arch/blackfin/Kconfig b/trunk/arch/blackfin/Kconfig index 453ebe46b065..a117652b5fea 100644 --- a/trunk/arch/blackfin/Kconfig +++ b/trunk/arch/blackfin/Kconfig @@ -27,7 +27,7 @@ config BLACKFIN select HAVE_OPROFILE select HAVE_PERF_EVENTS select ARCH_HAVE_CUSTOM_GPIO_H - select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_REQUIRE_GPIOLIB select HAVE_UID16 select HAVE_UNDERSCORE_SYMBOL_PREFIX select VIRT_TO_BUS @@ -52,9 +52,6 @@ config GENERIC_BUG config ZONE_DMA def_bool y -config GENERIC_GPIO - def_bool y - config FORCE_MAX_ZONEORDER int default "14" diff --git a/trunk/arch/blackfin/Makefile b/trunk/arch/blackfin/Makefile index 66cf00095b84..1fce08632ad7 100644 --- a/trunk/arch/blackfin/Makefile +++ b/trunk/arch/blackfin/Makefile @@ -141,11 +141,11 @@ archclean: INSTALL_PATH ?= /tftpboot boot := arch/$(ARCH)/boot -BOOT_TARGETS = vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip +BOOT_TARGETS = uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip PHONY += $(BOOT_TARGETS) install -KBUILD_IMAGE := $(boot)/vmImage +KBUILD_IMAGE := $(boot)/uImage -all: vmImage +all: uImage $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ diff --git a/trunk/arch/blackfin/boot/Makefile b/trunk/arch/blackfin/boot/Makefile index f7d27d50d02c..3efaa094fb90 100644 --- a/trunk/arch/blackfin/boot/Makefile +++ b/trunk/arch/blackfin/boot/Makefile @@ -6,7 +6,7 @@ # for more details. # -targets := vmImage vmImage.bin vmImage.bz2 vmImage.gz vmImage.lzma vmImage.lzo vmImage.xip +targets := uImage uImage.bin uImage.bz2 uImage.gz uImage.lzma uImage.lzo uImage.xip extra-y += vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.xip ifeq ($(CONFIG_RAMKERNEL),y) @@ -39,22 +39,22 @@ quiet_cmd_mk_bin_xip = BIN $@ $(obj)/vmlinux.bin.xip: $(obj)/vmlinux.bin FORCE $(call if_changed,mk_bin_xip) -$(obj)/vmImage.bin: $(obj)/vmlinux.bin +$(obj)/uImage.bin: $(obj)/vmlinux.bin $(call if_changed,uimage,none) -$(obj)/vmImage.bz2: $(obj)/vmlinux.bin.bz2 +$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 $(call if_changed,uimage,bzip2) -$(obj)/vmImage.gz: $(obj)/vmlinux.bin.gz +$(obj)/uImage.gz: $(obj)/vmlinux.bin.gz $(call if_changed,uimage,gzip) -$(obj)/vmImage.lzma: $(obj)/vmlinux.bin.lzma +$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma $(call if_changed,uimage,lzma) -$(obj)/vmImage.lzo: $(obj)/vmlinux.bin.lzo +$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo $(call if_changed,uimage,lzo) -$(obj)/vmImage.xip: $(obj)/vmlinux.bin.xip +$(obj)/uImage.xip: $(obj)/vmlinux.bin.xip $(call if_changed,uimage,none) suffix-y := bin @@ -64,7 +64,7 @@ suffix-$(CONFIG_KERNEL_LZMA) := lzma suffix-$(CONFIG_KERNEL_LZO) := lzo suffix-$(CONFIG_ROMKERNEL) := xip -$(obj)/vmImage: $(obj)/vmImage.$(suffix-y) +$(obj)/uImage: $(obj)/uImage.$(suffix-y) @ln -sf $(notdir $<) $@ install: diff --git a/trunk/arch/blackfin/include/asm/atomic.h b/trunk/arch/blackfin/include/asm/atomic.h index c8db653c72d2..a107a98e9978 100644 --- a/trunk/arch/blackfin/include/asm/atomic.h +++ b/trunk/arch/blackfin/include/asm/atomic.h @@ -11,7 +11,9 @@ #ifdef CONFIG_SMP +#include #include +#include asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr); asmlinkage int __raw_atomic_update_asm(volatile int *ptr, int value); diff --git a/trunk/arch/blackfin/include/asm/bfin_sdh.h b/trunk/arch/blackfin/include/asm/bfin_sdh.h index 6a4cfe2d3367..a99957ea9e9b 100644 --- a/trunk/arch/blackfin/include/asm/bfin_sdh.h +++ 
b/trunk/arch/blackfin/include/asm/bfin_sdh.h @@ -24,18 +24,27 @@ struct bfin_sd_host { #define CMD_INT_E (1 << 8) /* Command Interrupt */ #define CMD_PEND_E (1 << 9) /* Command Pending */ #define CMD_E (1 << 10) /* Command Enable */ +#ifdef RSI_BLKSZ +#define CMD_CRC_CHECK_D (1 << 11) /* CRC Check is disabled */ +#define CMD_DATA0_BUSY (1 << 12) /* Check for Busy State on the DATA0 pin */ +#endif /* SDH_PWR_CTL bitmasks */ +#ifndef RSI_BLKSZ #define PWR_ON 0x3 /* Power On */ #define SD_CMD_OD (1 << 6) /* Open Drain Output */ #define ROD_CTL (1 << 7) /* Rod Control */ +#endif /* SDH_CLK_CTL bitmasks */ #define CLKDIV 0xff /* MC_CLK Divisor */ #define CLK_E (1 << 8) /* MC_CLK Bus Clock Enable */ #define PWR_SV_E (1 << 9) /* Power Save Enable */ #define CLKDIV_BYPASS (1 << 10) /* Bypass Divisor */ -#define WIDE_BUS (1 << 11) /* Wide Bus Mode Enable */ +#define BUS_MODE_MASK 0x1800 /* Bus Mode Mask */ +#define STD_BUS_1 0x000 /* Standard Bus 1 bit mode */ +#define WIDE_BUS_4 0x800 /* Wide Bus 4 bit mode */ +#define BYTE_BUS_8 0x1000 /* Byte Bus 8 bit mode */ /* SDH_RESP_CMD bitmasks */ #define RESP_CMD 0x3f /* Response Command */ @@ -45,7 +54,13 @@ struct bfin_sd_host { #define DTX_DIR (1 << 1) /* Data Transfer Direction */ #define DTX_MODE (1 << 2) /* Data Transfer Mode */ #define DTX_DMA_E (1 << 3) /* Data Transfer DMA Enable */ +#ifndef RSI_BLKSZ #define DTX_BLK_LGTH (0xf << 4) /* Data Transfer Block Length */ +#else + +/* Bit masks for SDH_BLK_SIZE */ +#define DTX_BLK_LGTH 0x1fff /* Data Transfer Block Length */ +#endif /* SDH_STATUS bitmasks */ #define CMD_CRC_FAIL (1 << 0) /* CMD CRC Fail */ @@ -114,10 +129,14 @@ struct bfin_sd_host { /* SDH_E_STATUS bitmasks */ #define SDIO_INT_DET (1 << 1) /* SDIO Int Detected */ #define SD_CARD_DET (1 << 4) /* SD Card Detect */ +#define SD_CARD_BUSYMODE (1 << 31) /* Card is in Busy mode */ +#define SD_CARD_SLPMODE (1 << 30) /* Card in Sleep Mode */ +#define SD_CARD_READY (1 << 17) /* Card Ready */ /* SDH_E_MASK bitmasks */ #define SDIO_MSK (1 << 1) /* Mask SDIO Int Detected */ -#define SCD_MSK (1 << 6) /* Mask Card Detect */ +#define SCD_MSK (1 << 4) /* Mask Card Detect */ +#define CARD_READY_MSK (1 << 16) /* Mask Card Ready */ /* SDH_CFG bitmasks */ #define CLKS_EN (1 << 0) /* Clocks Enable */ @@ -126,7 +145,15 @@ struct bfin_sd_host { #define SD_RST (1 << 4) /* SDMMC Reset */ #define PUP_SDDAT (1 << 5) /* Pull-up SD_DAT */ #define PUP_SDDAT3 (1 << 6) /* Pull-up SD_DAT3 */ +#ifndef RSI_BLKSZ #define PD_SDDAT3 (1 << 7) /* Pull-down SD_DAT3 */ +#else +#define PWR_ON 0x600 /* Power On */ +#define SD_CMD_OD (1 << 11) /* Open Drain Output */ +#define BOOT_EN (1 << 12) /* Boot Enable */ +#define BOOT_MODE (1 << 13) /* Alternate Boot Mode */ +#define BOOT_ACK_EN (1 << 14) /* Boot ACK is expected */ +#endif /* SDH_RD_WAIT_EN bitmasks */ #define RWR (1 << 0) /* Read Wait Request */ diff --git a/trunk/arch/blackfin/include/asm/bitops.h b/trunk/arch/blackfin/include/asm/bitops.h index 8a0fed16058f..0ca40dd44724 100644 --- a/trunk/arch/blackfin/include/asm/bitops.h +++ b/trunk/arch/blackfin/include/asm/bitops.h @@ -41,6 +41,7 @@ #include #else +#include #include /* swab32 */ #include diff --git a/trunk/arch/blackfin/include/asm/def_LPBlackfin.h b/trunk/arch/blackfin/include/asm/def_LPBlackfin.h index fe0ca03a1cb2..ca67145c6a45 100644 --- a/trunk/arch/blackfin/include/asm/def_LPBlackfin.h +++ b/trunk/arch/blackfin/include/asm/def_LPBlackfin.h @@ -622,10 +622,12 @@ do { \ #define PAGE_SIZE_4KB 0x00010000 /* 4 KB page size */ #define PAGE_SIZE_1MB 0x00020000 /* 1 
MB page size */ #define PAGE_SIZE_4MB 0x00030000 /* 4 MB page size */ +#ifdef CONFIG_BF60x #define PAGE_SIZE_16KB 0x00040000 /* 16 KB page size */ #define PAGE_SIZE_64KB 0x00050000 /* 64 KB page size */ #define PAGE_SIZE_16MB 0x00060000 /* 16 MB page size */ #define PAGE_SIZE_64MB 0x00070000 /* 64 MB page size */ +#endif #define CPLB_L1SRAM 0x00000020 /* 0=SRAM mapped in L1, 0=SRAM not * mapped to L1 */ diff --git a/trunk/arch/blackfin/include/asm/mem_init.h b/trunk/arch/blackfin/include/asm/mem_init.h index 9b33e7247864..c865b33eeb68 100644 --- a/trunk/arch/blackfin/include/asm/mem_init.h +++ b/trunk/arch/blackfin/include/asm/mem_init.h @@ -335,6 +335,7 @@ struct ddr_config { u32 ddr_clk; u32 dmc_ddrctl; + u32 dmc_effctl; u32 dmc_ddrcfg; u32 dmc_ddrtr0; u32 dmc_ddrtr1; @@ -348,6 +349,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [0] = { .ddr_clk = 125, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20705212, .dmc_ddrtr1 = 0x201003CF, @@ -358,6 +360,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [1] = { .ddr_clk = 133, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20806313, .dmc_ddrtr1 = 0x2013040D, @@ -368,6 +371,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [2] = { .ddr_clk = 150, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20A07323, .dmc_ddrtr1 = 0x20160492, @@ -378,6 +382,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [3] = { .ddr_clk = 166, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20A07323, .dmc_ddrtr1 = 0x2016050E, @@ -388,6 +393,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [4] = { .ddr_clk = 200, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20a07323, .dmc_ddrtr1 = 0x2016050f, @@ -398,6 +404,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [5] = { .ddr_clk = 225, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20E0A424, .dmc_ddrtr1 = 0x302006DB, @@ -408,6 +415,7 @@ static struct ddr_config ddr_config_table[] __attribute__((section(".data_l1"))) [6] = { .ddr_clk = 250, .dmc_ddrctl = 0x00000904, + .dmc_effctl = 0x004400C0, .dmc_ddrcfg = 0x00000422, .dmc_ddrtr0 = 0x20E0A424, .dmc_ddrtr1 = 0x3020079E, @@ -469,6 +477,7 @@ static inline void init_dmc(u32 dmc_clk) bfin_write_DMC0_TR2(ddr_config_table[i].dmc_ddrtr2); bfin_write_DMC0_MR(ddr_config_table[i].dmc_ddrmr); bfin_write_DMC0_EMR1(ddr_config_table[i].dmc_ddrmr1); + bfin_write_DMC0_EFFCTL(ddr_config_table[i].dmc_effctl); bfin_write_DMC0_CTL(ddr_config_table[i].dmc_ddrctl); break; } diff --git a/trunk/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/trunk/arch/blackfin/kernel/cplb-nompu/cplbinit.c index 34e96ce02aa9..b49a53b583d5 100644 --- a/trunk/arch/blackfin/kernel/cplb-nompu/cplbinit.c +++ b/trunk/arch/blackfin/kernel/cplb-nompu/cplbinit.c @@ -30,6 +30,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) { int i_d, i_i; unsigned long addr; + unsigned long cplb_pageflags, cplb_pagesize; struct cplb_entry *d_tbl = dcplb_tbl[cpu]; struct cplb_entry *i_tbl = icplb_tbl[cpu]; @@ -49,11 +50,20 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) /* Cover kernel memory with 4M pages. 
*/ addr = 0; - for (; addr < memory_start; addr += 4 * 1024 * 1024) { +#ifdef PAGE_SIZE_16MB + cplb_pageflags = PAGE_SIZE_16MB; + cplb_pagesize = SIZE_16M; +#else + cplb_pageflags = PAGE_SIZE_4MB; + cplb_pagesize = SIZE_4M; +#endif + + + for (; addr < memory_start; addr += cplb_pagesize) { d_tbl[i_d].addr = addr; - d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; + d_tbl[i_d++].data = SDRAM_DGENERIC | cplb_pageflags; i_tbl[i_i].addr = addr; - i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; + i_tbl[i_i++].data = SDRAM_IGENERIC | cplb_pageflags; } #ifdef CONFIG_ROMKERNEL diff --git a/trunk/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/trunk/arch/blackfin/kernel/cplb-nompu/cplbmgr.c index e854f9066cbd..79cc0f6dcdd5 100644 --- a/trunk/arch/blackfin/kernel/cplb-nompu/cplbmgr.c +++ b/trunk/arch/blackfin/kernel/cplb-nompu/cplbmgr.c @@ -145,7 +145,7 @@ MGR_ATTR static int dcplb_miss(int cpu) unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); int status = bfin_read_DCPLB_STATUS(); int idx; - unsigned long d_data, base, addr1, eaddr; + unsigned long d_data, base, addr1, eaddr, cplb_pagesize, cplb_pageflags; nr_dcplb_miss[cpu]++; if (unlikely(status & FAULT_USERSUPV)) @@ -167,18 +167,37 @@ MGR_ATTR static int dcplb_miss(int cpu) if (unlikely(d_data == 0)) return CPLB_NO_ADDR_MATCH; - addr1 = addr & ~(SIZE_4M - 1); addr &= ~(SIZE_1M - 1); d_data |= PAGE_SIZE_1MB; - if (addr1 >= base && (addr1 + SIZE_4M) <= eaddr) { + + /* BF60x support large than 4M CPLB page size */ +#ifdef PAGE_SIZE_16MB + cplb_pageflags = PAGE_SIZE_16MB; + cplb_pagesize = SIZE_16M; +#else + cplb_pageflags = PAGE_SIZE_4MB; + cplb_pagesize = SIZE_4M; +#endif + +find_pagesize: + addr1 = addr & ~(cplb_pagesize - 1); + if (addr1 >= base && (addr1 + cplb_pagesize) <= eaddr) { /* * This works because * (PAGE_SIZE_4MB & PAGE_SIZE_1MB) == PAGE_SIZE_1MB. 
*/ - d_data |= PAGE_SIZE_4MB; + d_data |= cplb_pageflags; addr = addr1; + goto found_pagesize; + } else { + if (cplb_pagesize > SIZE_4M) { + cplb_pageflags = PAGE_SIZE_4MB; + cplb_pagesize = SIZE_4M; + goto find_pagesize; + } } +found_pagesize: #ifdef CONFIG_BF60x if ((addr >= ASYNC_BANK0_BASE) && (addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)) diff --git a/trunk/arch/blackfin/kernel/cplbinfo.c b/trunk/arch/blackfin/kernel/cplbinfo.c index 404045dcc5e4..5b80d59e66e5 100644 --- a/trunk/arch/blackfin/kernel/cplbinfo.c +++ b/trunk/arch/blackfin/kernel/cplbinfo.c @@ -17,8 +17,13 @@ #include #include -static char const page_strtbl[][3] = { "1K", "4K", "1M", "4M" }; -#define page(flags) (((flags) & 0x30000) >> 16) +static char const page_strtbl[][4] = { + "1K", "4K", "1M", "4M", +#ifdef CONFIG_BF60x + "16K", "64K", "16M", "64M", +#endif +}; +#define page(flags) (((flags) & 0x70000) >> 16) #define strpage(flags) page_strtbl[page(flags)] struct cplbinfo_data { diff --git a/trunk/arch/blackfin/kernel/setup.c b/trunk/arch/blackfin/kernel/setup.c index fb96e607adcf..107b306b06f1 100644 --- a/trunk/arch/blackfin/kernel/setup.c +++ b/trunk/arch/blackfin/kernel/setup.c @@ -1314,7 +1314,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid()); } - seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", + seq_printf(m, "\ncpu MHz\t\t: %lu.%06lu/%lu.%06lu\n", cclk/1000000, cclk%1000000, sclk/1000000, sclk%1000000); seq_printf(m, "bogomips\t: %lu.%02lu\n" diff --git a/trunk/arch/blackfin/mach-bf537/boards/stamp.c b/trunk/arch/blackfin/mach-bf537/boards/stamp.c index 95114ed395ac..6a3a14bcd3a1 100644 --- a/trunk/arch/blackfin/mach-bf537/boards/stamp.c +++ b/trunk/arch/blackfin/mach-bf537/boards/stamp.c @@ -455,6 +455,7 @@ static struct platform_device bfin_async_nand_device = { static void bfin_plat_nand_init(void) { gpio_request(BFIN_NAND_PLAT_READY, "bfin_nand_plat"); + gpio_direction_input(BFIN_NAND_PLAT_READY); } #else static void bfin_plat_nand_init(void) {} diff --git a/trunk/arch/blackfin/mach-bf538/boards/ezkit.c b/trunk/arch/blackfin/mach-bf538/boards/ezkit.c index a4fce0370c1d..755f0dc12010 100644 --- a/trunk/arch/blackfin/mach-bf538/boards/ezkit.c +++ b/trunk/arch/blackfin/mach-bf538/boards/ezkit.c @@ -764,7 +764,6 @@ static struct platform_device i2c_bfin_twi1_device = { .num_resources = ARRAY_SIZE(bfin_twi1_resource), .resource = bfin_twi1_resource, }; -#endif /* CONFIG_BF542 */ #endif /* CONFIG_I2C_BLACKFIN_TWI */ #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) diff --git a/trunk/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h b/trunk/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h index 4954cf3f7e16..102ee4025ac9 100644 --- a/trunk/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h +++ b/trunk/arch/blackfin/mach-bf609/include/mach/cdefBF60x_base.h @@ -312,6 +312,8 @@ #define bfin_write_DMC0_EMR1(val) bfin_write32(DMC0_EMR1, val) #define bfin_read_DMC0_CTL() bfin_read32(DMC0_CTL) #define bfin_write_DMC0_CTL(val) bfin_write32(DMC0_CTL, val) +#define bfin_read_DMC0_EFFCTL() bfin_read32(DMC0_EFFCTL) +#define bfin_write_DMC0_EFFCTL(val) bfin_write32(DMC0_EFFCTL, val) #define bfin_read_DMC0_STAT() bfin_read32(DMC0_STAT) #define bfin_write_DMC0_STAT(val) bfin_write32(DMC0_STAT, val) #define bfin_read_DMC0_DLLCTL() bfin_read32(DMC0_DLLCTL) diff --git a/trunk/arch/cris/Kconfig b/trunk/arch/cris/Kconfig index 06dd026533e3..8769a9045a54 100644 --- a/trunk/arch/cris/Kconfig +++ b/trunk/arch/cris/Kconfig 
@@ -264,7 +264,6 @@ config ETRAX_AXISFLASHMAP select MTD_CFI select MTD_CFI_AMDSTD select MTD_JEDECPROBE if ETRAX_ARCH_V32 - select MTD_CHAR select MTD_BLOCK select MTD_COMPLEX_MAPPINGS help diff --git a/trunk/arch/cris/arch-v32/drivers/Kconfig b/trunk/arch/cris/arch-v32/drivers/Kconfig index af4a486dadcd..c55971a40c34 100644 --- a/trunk/arch/cris/arch-v32/drivers/Kconfig +++ b/trunk/arch/cris/arch-v32/drivers/Kconfig @@ -404,7 +404,6 @@ config ETRAX_AXISFLASHMAP select MTD_CFI select MTD_CFI_AMDSTD select MTD_JEDECPROBE - select MTD_CHAR select MTD_BLOCK select MTD_COMPLEX_MAPPINGS help diff --git a/trunk/arch/hexagon/Kconfig b/trunk/arch/hexagon/Kconfig index 04dff5bdcbf7..33a97929d055 100644 --- a/trunk/arch/hexagon/Kconfig +++ b/trunk/arch/hexagon/Kconfig @@ -30,8 +30,6 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES - select GENERIC_KERNEL_THREAD - select GENERIC_KERNEL_EXECVE ---help--- Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. @@ -157,9 +155,6 @@ source "mm/Kconfig" source "kernel/Kconfig.hz" -config GENERIC_GPIO - def_bool n - endmenu source "init/Kconfig" diff --git a/trunk/arch/hexagon/kernel/vm_entry.S b/trunk/arch/hexagon/kernel/vm_entry.S index e3086185fc9f..67c6ccc14770 100644 --- a/trunk/arch/hexagon/kernel/vm_entry.S +++ b/trunk/arch/hexagon/kernel/vm_entry.S @@ -291,12 +291,12 @@ event_dispatch: /* "Nested control path" -- if the previous mode was kernel */ { R0 = memw(R29 + #_PT_ER_VMEST); - R16.L = #LO(do_work_pending); + R26.L = #LO(do_work_pending); } { P0 = tstbit(R0, #HVM_VMEST_UM_SFT); if (!P0.new) jump:nt restore_all; - R16.H = #HI(do_work_pending); + R26.H = #HI(do_work_pending); R0 = #VM_INT_DISABLE; } @@ -304,7 +304,7 @@ event_dispatch: * Check also the return from fork/system call, normally coming back from * user mode * - * R16 needs to have do_work_pending, and R0 should have VM_INT_DISABLE + * R26 needs to have do_work_pending, and R0 should have VM_INT_DISABLE */ check_work_pending: @@ -313,7 +313,7 @@ check_work_pending: { R0 = R29; /* regs should still be at top of stack */ R1 = memw(THREADINFO_REG + #_THREAD_INFO_FLAGS); - callr R16; + callr R26; } { @@ -375,11 +375,11 @@ _K_enter_debug: ret_from_fork: { call schedule_tail - R16.H = #HI(do_work_pending); + R26.H = #HI(do_work_pending); } { P0 = cmp.eq(R24, #0); - R16.L = #LO(do_work_pending); + R26.L = #LO(do_work_pending); R0 = #VM_INT_DISABLE; } if P0 jump check_work_pending diff --git a/trunk/arch/ia64/Kconfig b/trunk/arch/ia64/Kconfig index d393f841ff5a..1a2b7749b047 100644 --- a/trunk/arch/ia64/Kconfig +++ b/trunk/arch/ia64/Kconfig @@ -101,9 +101,6 @@ config GENERIC_CALIBRATE_DELAY config HAVE_SETUP_PER_CPU_AREA def_bool y -config GENERIC_GPIO - bool - config DMI bool default y diff --git a/trunk/arch/m68k/Kconfig b/trunk/arch/m68k/Kconfig index 6de813370b8c..821170e5f6ed 100644 --- a/trunk/arch/m68k/Kconfig +++ b/trunk/arch/m68k/Kconfig @@ -35,9 +35,6 @@ config ARCH_HAS_ILOG2_U32 config ARCH_HAS_ILOG2_U64 bool -config GENERIC_GPIO - bool - config GENERIC_HWEIGHT bool default y diff --git a/trunk/arch/m68k/Kconfig.cpu b/trunk/arch/m68k/Kconfig.cpu index b1cfff832fb5..33013dfcd3e1 100644 --- a/trunk/arch/m68k/Kconfig.cpu +++ b/trunk/arch/m68k/Kconfig.cpu @@ -22,8 +22,7 @@ config M68KCLASSIC config COLDFIRE bool "Coldfire CPU family support" - select GENERIC_GPIO - select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_REQUIRE_GPIOLIB select 
ARCH_HAVE_CUSTOM_GPIO_H select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 @@ -224,13 +223,25 @@ config M5307 help Motorola ColdFire 5307 processor support. +config M53xx + bool + config M532x bool "MCF532x" depends on !MMU + select M53xx select HAVE_CACHE_CB help Freescale (Motorola) ColdFire 532x processor support. +config M537x + bool "MCF537x" + depends on !MMU + select M53xx + select HAVE_CACHE_CB + help + Freescale ColdFire 537x processor support. + config M5407 bool "MCF5407" depends on !MMU diff --git a/trunk/arch/m68k/Kconfig.machine b/trunk/arch/m68k/Kconfig.machine index 7240584d3439..b9ab0a69561c 100644 --- a/trunk/arch/m68k/Kconfig.machine +++ b/trunk/arch/m68k/Kconfig.machine @@ -358,6 +358,13 @@ config COBRA5329 help Support for the senTec COBRA5329 board. +config M5373EVB + bool "Freescale M5373EVB board support" + depends on M537x + select FREESCALE + help + Support for the Freescale M5373EVB board. + config M5407C3 bool "Motorola M5407C3 board support" depends on M5407 @@ -539,15 +546,6 @@ config ROMVEC 68000 type variants the vectors are at the base of the boot device on system startup. -config ROMVECSIZE - hex "Size of ROM vector region (in bytes)" - default "0x400" - depends on ROM - help - Define the size of the vector region in ROM. For most 68000 - variants this would be 0x400 bytes in size. Set to 0 if you do - not want a vector region at the start of the ROM. - config ROMSTART hex "Address of the base of system image in ROM" default "0x400" diff --git a/trunk/arch/m68k/Makefile b/trunk/arch/m68k/Makefile index 2f02acfb8edf..7f7830f2c5bc 100644 --- a/trunk/arch/m68k/Makefile +++ b/trunk/arch/m68k/Makefile @@ -45,6 +45,7 @@ cpuflags-$(CONFIG_M5441x) := $(call cc-option,-mcpu=54455,-mcfv4e) cpuflags-$(CONFIG_M54xx) := $(call cc-option,-mcpu=5475,-m5200) cpuflags-$(CONFIG_M5407) := $(call cc-option,-mcpu=5407,-m5200) cpuflags-$(CONFIG_M532x) := $(call cc-option,-mcpu=532x,-m5307) +cpuflags-$(CONFIG_M537x) := $(call cc-option,-mcpu=537x,-m5307) cpuflags-$(CONFIG_M5307) := $(call cc-option,-mcpu=5307,-m5200) cpuflags-$(CONFIG_M528x) := $(call cc-option,-mcpu=528x,-m5307) cpuflags-$(CONFIG_M5275) := $(call cc-option,-mcpu=5275,-m5307) diff --git a/trunk/arch/m68k/include/asm/commproc.h b/trunk/arch/m68k/include/asm/commproc.h index a73998528d26..66a36bd51aa1 100644 --- a/trunk/arch/m68k/include/asm/commproc.h +++ b/trunk/arch/m68k/include/asm/commproc.h @@ -480,23 +480,6 @@ typedef struct scc_enet { #define SICR_ENET_CLKRT ((uint)0x0000003d) #endif -#ifdef CONFIG_RPXLITE -/* This ENET stuff is for the MPC850 with ethernet on SCC2. Some of - * this may be unique to the RPX-Lite configuration. - * Note TENA is on Port B. - */ -#define PA_ENET_RXD ((ushort)0x0004) -#define PA_ENET_TXD ((ushort)0x0008) -#define PA_ENET_TCLK ((ushort)0x0200) -#define PA_ENET_RCLK ((ushort)0x0800) -#define PB_ENET_TENA ((uint)0x00002000) -#define PC_ENET_CLSN ((ushort)0x0040) -#define PC_ENET_RENA ((ushort)0x0080) - -#define SICR_ENET_MASK ((uint)0x0000ff00) -#define SICR_ENET_CLKRT ((uint)0x00003d00) -#endif - #ifdef CONFIG_BSEIP /* This ENET stuff is for the MPC823 with ethernet on SCC2. * This is unique to the BSE ip-Engine board. 
diff --git a/trunk/arch/m68k/include/asm/dbg.h b/trunk/arch/m68k/include/asm/dbg.h deleted file mode 100644 index 27af3270f671..000000000000 --- a/trunk/arch/m68k/include/asm/dbg.h +++ /dev/null @@ -1,6 +0,0 @@ -#define DEBUG 1 -#ifdef CONFIG_COLDFIRE -#define BREAK asm volatile ("halt") -#else -#define BREAK *(volatile unsigned char *)0xdeadbee0 = 0 -#endif diff --git a/trunk/arch/m68k/include/asm/dma.h b/trunk/arch/m68k/include/asm/dma.h index 0ff3fc6a6d9a..429fe26e320c 100644 --- a/trunk/arch/m68k/include/asm/dma.h +++ b/trunk/arch/m68k/include/asm/dma.h @@ -39,7 +39,7 @@ #define MAX_M68K_DMA_CHANNELS 4 #elif defined(CONFIG_M5272) #define MAX_M68K_DMA_CHANNELS 1 -#elif defined(CONFIG_M532x) +#elif defined(CONFIG_M53xx) #define MAX_M68K_DMA_CHANNELS 0 #else #define MAX_M68K_DMA_CHANNELS 2 diff --git a/trunk/arch/m68k/include/asm/m53xxacr.h b/trunk/arch/m68k/include/asm/m53xxacr.h index cd952b0a8bd3..3177ce8331d6 100644 --- a/trunk/arch/m68k/include/asm/m53xxacr.h +++ b/trunk/arch/m68k/include/asm/m53xxacr.h @@ -55,8 +55,8 @@ #define CACHE_SIZE 0x2000 /* 8k of unified cache */ #define ICACHE_SIZE CACHE_SIZE #define DCACHE_SIZE CACHE_SIZE -#elif defined(CONFIG_M532x) -#define CACHE_SIZE 0x4000 /* 32k of unified cache */ +#elif defined(CONFIG_M53xx) +#define CACHE_SIZE 0x4000 /* 16k of unified cache */ #define ICACHE_SIZE CACHE_SIZE #define DCACHE_SIZE CACHE_SIZE #endif diff --git a/trunk/arch/m68k/include/asm/m532xsim.h b/trunk/arch/m68k/include/asm/m53xxsim.h similarity index 99% rename from trunk/arch/m68k/include/asm/m532xsim.h rename to trunk/arch/m68k/include/asm/m53xxsim.h index 8668e47ced0e..faa1a2133bfd 100644 --- a/trunk/arch/m68k/include/asm/m532xsim.h +++ b/trunk/arch/m68k/include/asm/m53xxsim.h @@ -1,15 +1,15 @@ /****************************************************************************/ /* - * m532xsim.h -- ColdFire 5329 registers + * m53xxsim.h -- ColdFire 5329 registers */ /****************************************************************************/ -#ifndef m532xsim_h -#define m532xsim_h +#ifndef m53xxsim_h +#define m53xxsim_h /****************************************************************************/ -#define CPU_NAME "COLDFIRE(m532x)" +#define CPU_NAME "COLDFIRE(m53xx)" #define CPU_INSTR_PER_JIFFY 3 #define MCF_BUSCLK (MCF_CLK / 3) @@ -107,7 +107,7 @@ /* * QSPI module. 
*/ -#define MCFQSPI_BASE 0xFC058000 /* Base address of QSPI */ +#define MCFQSPI_BASE 0xFC05C000 /* Base address of QSPI */ #define MCFQSPI_SIZE 0x40 /* Size of QSPI region */ #define MCFQSPI_CS0 84 @@ -1238,4 +1238,4 @@ #define MCFEPORT_EPFR (0xFC094006) /********************************************************************/ -#endif /* m532xsim_h */ +#endif /* m53xxsim_h */ diff --git a/trunk/arch/m68k/include/asm/m54xxacr.h b/trunk/arch/m68k/include/asm/m54xxacr.h index 192bbfeabf70..6d13cae44af5 100644 --- a/trunk/arch/m68k/include/asm/m54xxacr.h +++ b/trunk/arch/m68k/include/asm/m54xxacr.h @@ -96,8 +96,13 @@ */ #define ACR0_MODE (ACR_BA(CONFIG_MBAR)+ACR_ADMSK(0x1000000)+ \ ACR_ENABLE+ACR_SUPER+ACR_CM_OFF_PRE+ACR_SP) +#if defined(CONFIG_CACHE_COPYBACK) #define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ - ACR_ENABLE+ACR_SUPER+ACR_SP) + ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_CP) +#else +#define ACR1_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ + ACR_ENABLE+ACR_SUPER+ACR_SP+ACR_CM_WT) +#endif #define ACR2_MODE 0 #define ACR3_MODE (ACR_BA(CONFIG_RAMBASE)+ACR_ADMSK(CONFIG_RAMSIZE)+ \ ACR_ENABLE+ACR_SUPER+ACR_SP) diff --git a/trunk/arch/m68k/include/asm/mcfgpio.h b/trunk/arch/m68k/include/asm/mcfgpio.h index fa1059f50dfc..c41ebf45f1d0 100644 --- a/trunk/arch/m68k/include/asm/mcfgpio.h +++ b/trunk/arch/m68k/include/asm/mcfgpio.h @@ -104,7 +104,7 @@ static inline void gpio_free(unsigned gpio) #if defined(CONFIG_M5206) || defined(CONFIG_M5206e) || \ defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M54xx) || \ + defined(CONFIG_M53xx) || defined(CONFIG_M54xx) || \ defined(CONFIG_M5441x) /* These parts have GPIO organized by 8 bit ports */ @@ -139,7 +139,7 @@ static inline void gpio_free(unsigned gpio) #if defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M5441x) + defined(CONFIG_M53xx) || defined(CONFIG_M5441x) /* * These parts have an 'Edge' Port module (external interrupt/GPIO) which uses * read-modify-write to change an output and a GPIO module which has separate @@ -195,7 +195,7 @@ static inline u32 __mcfgpio_ppdr(unsigned gpio) return MCFSIM2_GPIO1READ; #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M5441x) + defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #if !defined(CONFIG_M5441x) if (gpio < 8) return MCFEPORT_EPPDR; @@ -237,7 +237,7 @@ static inline u32 __mcfgpio_podr(unsigned gpio) return MCFSIM2_GPIO1WRITE; #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M5441x) + defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #if !defined(CONFIG_M5441x) if (gpio < 8) return MCFEPORT_EPDR; @@ -279,7 +279,7 @@ static inline u32 __mcfgpio_pddr(unsigned gpio) return MCFSIM2_GPIO1ENABLE; #elif defined(CONFIG_M520x) || defined(CONFIG_M523x) || \ defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ - defined(CONFIG_M532x) || defined(CONFIG_M5441x) + defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #if !defined(CONFIG_M5441x) if (gpio < 8) return MCFEPORT_EPDDR; diff --git a/trunk/arch/m68k/include/asm/mcfsim.h b/trunk/arch/m68k/include/asm/mcfsim.h index a04fd9b2714c..bc867de8a1e9 100644 --- a/trunk/arch/m68k/include/asm/mcfsim.h +++ b/trunk/arch/m68k/include/asm/mcfsim.h @@ -36,8 +36,8 @@ 
#elif defined(CONFIG_M5307) #include #include -#elif defined(CONFIG_M532x) -#include +#elif defined(CONFIG_M53xx) +#include #elif defined(CONFIG_M5407) #include #include diff --git a/trunk/arch/m68k/include/asm/mcftimer.h b/trunk/arch/m68k/include/asm/mcftimer.h index da2fa43c2e45..089f0f150bbf 100644 --- a/trunk/arch/m68k/include/asm/mcftimer.h +++ b/trunk/arch/m68k/include/asm/mcftimer.h @@ -19,7 +19,7 @@ #define MCFTIMER_TRR 0x04 /* Timer Reference (r/w) */ #define MCFTIMER_TCR 0x08 /* Timer Capture reg (r/w) */ #define MCFTIMER_TCN 0x0C /* Timer Counter reg (r/w) */ -#if defined(CONFIG_M532x) || defined(CONFIG_M5441x) +#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #define MCFTIMER_TER 0x03 /* Timer Event reg (r/w) */ #else #define MCFTIMER_TER 0x11 /* Timer Event reg (r/w) */ diff --git a/trunk/arch/m68k/platform/coldfire/Makefile b/trunk/arch/m68k/platform/coldfire/Makefile index 02591a109f8c..68f0fac60099 100644 --- a/trunk/arch/m68k/platform/coldfire/Makefile +++ b/trunk/arch/m68k/platform/coldfire/Makefile @@ -25,7 +25,7 @@ obj-$(CONFIG_M527x) += m527x.o pit.o intc-2.o reset.o obj-$(CONFIG_M5272) += m5272.o intc-5272.o timers.o obj-$(CONFIG_M528x) += m528x.o pit.o intc-2.o reset.o obj-$(CONFIG_M5307) += m5307.o timers.o intc.o reset.o -obj-$(CONFIG_M532x) += m532x.o timers.o intc-simr.o reset.o +obj-$(CONFIG_M53xx) += m53xx.o timers.o intc-simr.o reset.o obj-$(CONFIG_M5407) += m5407.o timers.o intc.o reset.o obj-$(CONFIG_M54xx) += m54xx.o sltimers.o intc-2.o obj-$(CONFIG_M5441x) += m5441x.o pit.o intc-simr.o reset.o diff --git a/trunk/arch/m68k/platform/coldfire/m532x.c b/trunk/arch/m68k/platform/coldfire/m53xx.c similarity index 98% rename from trunk/arch/m68k/platform/coldfire/m532x.c rename to trunk/arch/m68k/platform/coldfire/m53xx.c index 7951d1d43357..5286f98fbed0 100644 --- a/trunk/arch/m68k/platform/coldfire/m532x.c +++ b/trunk/arch/m68k/platform/coldfire/m53xx.c @@ -1,7 +1,7 @@ /***************************************************************************/ /* - * linux/arch/m68knommu/platform/532x/config.c + * m53xx.c -- platform support for ColdFire 53xx based boards * * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com) * Copyright (C) 2000, Lineo (www.lineo.com) @@ -118,7 +118,8 @@ static struct clk * const enable_clks[] __initconst = { &__clk_0_24, /* mcfuart.0 */ &__clk_0_25, /* mcfuart.1 */ &__clk_0_26, /* mcfuart.2 */ - + &__clk_0_28, /* mcftmr.0 */ + &__clk_0_29, /* mcftmr.1 */ &__clk_0_32, /* mcfpit.0 */ &__clk_0_33, /* mcfpit.1 */ &__clk_0_37, /* mcfeport.0 */ @@ -134,8 +135,6 @@ static struct clk * const disable_clks[] __initconst = { &__clk_0_17, /* edma */ &__clk_0_22, /* mcfi2c.0 */ &__clk_0_23, /* mcfqspi.0 */ - &__clk_0_28, /* mcftmr.0 */ - &__clk_0_29, /* mcftmr.1 */ &__clk_0_30, /* mcftmr.2 */ &__clk_0_31, /* mcftmr.3 */ &__clk_0_34, /* mcfpit.2 */ @@ -153,7 +152,7 @@ static struct clk * const disable_clks[] __initconst = { }; -static void __init m532x_clk_init(void) +static void __init m53xx_clk_init(void) { unsigned i; @@ -169,7 +168,7 @@ static void __init m532x_clk_init(void) #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) -static void __init m532x_qspi_init(void) +static void __init m53xx_qspi_init(void) { /* setup QSPS pins for QSPI with gpio CS control */ writew(0x01f0, MCFGPIO_PAR_QSPI); @@ -179,7 +178,7 @@ static void __init m532x_qspi_init(void) /***************************************************************************/ -static void __init m532x_uarts_init(void) +static void __init m53xx_uarts_init(void) { /* UART GPIO initialization */ 
writew(readw(MCFGPIO_PAR_UART) | 0x0FFF, MCFGPIO_PAR_UART); @@ -187,7 +186,7 @@ static void __init m532x_uarts_init(void) /***************************************************************************/ -static void __init m532x_fec_init(void) +static void __init m53xx_fec_init(void) { u8 v; @@ -217,11 +216,11 @@ void __init config_BSP(char *commandp, int size) } #endif mach_sched_init = hw_timer_init; - m532x_clk_init(); - m532x_uarts_init(); - m532x_fec_init(); + m53xx_clk_init(); + m53xx_uarts_init(); + m53xx_fec_init(); #if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI) - m532x_qspi_init(); + m53xx_qspi_init(); #endif #ifdef CONFIG_BDM_DISABLE diff --git a/trunk/arch/m68k/platform/coldfire/timers.c b/trunk/arch/m68k/platform/coldfire/timers.c index 51f6d2af807f..d06068e45764 100644 --- a/trunk/arch/m68k/platform/coldfire/timers.c +++ b/trunk/arch/m68k/platform/coldfire/timers.c @@ -36,7 +36,7 @@ */ void coldfire_profile_init(void); -#if defined(CONFIG_M532x) || defined(CONFIG_M5441x) +#if defined(CONFIG_M53xx) || defined(CONFIG_M5441x) #define __raw_readtrr __raw_readl #define __raw_writetrr __raw_writel #else diff --git a/trunk/arch/metag/Kconfig b/trunk/arch/metag/Kconfig index 6f16c1469327..dcd94406030e 100644 --- a/trunk/arch/metag/Kconfig +++ b/trunk/arch/metag/Kconfig @@ -52,9 +52,6 @@ config GENERIC_HWEIGHT config GENERIC_CALIBRATE_DELAY def_bool y -config GENERIC_GPIO - def_bool n - config NO_IOPORT def_bool y diff --git a/trunk/arch/microblaze/Kconfig b/trunk/arch/microblaze/Kconfig index 54237af0b07c..d22a4ecffff4 100644 --- a/trunk/arch/microblaze/Kconfig +++ b/trunk/arch/microblaze/Kconfig @@ -54,9 +54,6 @@ config GENERIC_HWEIGHT config GENERIC_CALIBRATE_DELAY def_bool y -config GENERIC_GPIO - bool - config GENERIC_CSUM def_bool y diff --git a/trunk/arch/microblaze/configs/mmu_defconfig b/trunk/arch/microblaze/configs/mmu_defconfig index d2b097a652d9..3649a8b150c0 100644 --- a/trunk/arch/microblaze/configs/mmu_defconfig +++ b/trunk/arch/microblaze/configs/mmu_defconfig @@ -17,7 +17,6 @@ CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set CONFIG_PARTITION_ADVANCED=y # CONFIG_EFI_PARTITION is not set -CONFIG_OPT_LIB_ASM=y CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR=1 CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR=1 CONFIG_XILINX_MICROBLAZE0_USE_BARREL=1 diff --git a/trunk/arch/microblaze/include/asm/pci.h b/trunk/arch/microblaze/include/asm/pci.h index 41cc841091b0..d52abb6812fa 100644 --- a/trunk/arch/microblaze/include/asm/pci.h +++ b/trunk/arch/microblaze/include/asm/pci.h @@ -153,7 +153,5 @@ extern void __init xilinx_pci_init(void); static inline void __init xilinx_pci_init(void) { return; } #endif -#include - #endif /* __KERNEL__ */ #endif /* __ASM_MICROBLAZE_PCI_H */ diff --git a/trunk/arch/microblaze/include/asm/uaccess.h b/trunk/arch/microblaze/include/asm/uaccess.h index a1ab5f0009ef..efe59d881789 100644 --- a/trunk/arch/microblaze/include/asm/uaccess.h +++ b/trunk/arch/microblaze/include/asm/uaccess.h @@ -90,17 +90,25 @@ static inline int ___range_ok(unsigned long addr, unsigned long size) #else -/* - * Address is valid if: - * - "addr", "addr + size" and "size" are all below the limit - */ -#define access_ok(type, addr, size) \ - (get_fs().seg >= (((unsigned long)(addr)) | \ - (size) | ((unsigned long)(addr) + (size)))) - -/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n", - type?"WRITE":"READ",addr,size,get_fs().seg)) */ - +static inline int access_ok(int type, const void __user *addr, + unsigned long size) +{ + if (!size) + goto ok; + + if ((get_fs().seg < 
((unsigned long)addr)) || + (get_fs().seg < ((unsigned long)addr + size - 1))) { + pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", + type ? "WRITE" : "READ ", (u32)addr, (u32)size, + (u32)get_fs().seg); + return 0; + } +ok: + pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", + type ? "WRITE" : "READ ", (u32)addr, (u32)size, + (u32)get_fs().seg); + return 1; +} #endif #ifdef CONFIG_MMU diff --git a/trunk/arch/microblaze/kernel/cpu/cpuinfo.c b/trunk/arch/microblaze/kernel/cpu/cpuinfo.c index 0b2299bcb948..410398f6db55 100644 --- a/trunk/arch/microblaze/kernel/cpu/cpuinfo.c +++ b/trunk/arch/microblaze/kernel/cpu/cpuinfo.c @@ -37,6 +37,8 @@ const struct cpu_ver_key cpu_ver_lookup[] = { {"8.20.a", 0x15}, {"8.20.b", 0x16}, {"8.30.a", 0x17}, + {"8.40.a", 0x18}, + {"8.40.b", 0x19}, {NULL, 0}, }; @@ -57,6 +59,9 @@ const struct family_string_key family_string_lookup[] = { {"virtex6", 0xe}, /* FIXME There is no key code defined for spartan2 */ {"spartan2", 0xf0}, + {"kintex7", 0x10}, + {"artix7", 0x11}, + {"zynq7000", 0x12}, {NULL, 0}, }; diff --git a/trunk/arch/microblaze/kernel/head.S b/trunk/arch/microblaze/kernel/head.S index eef84de5e8c8..fcc797feb9db 100644 --- a/trunk/arch/microblaze/kernel/head.S +++ b/trunk/arch/microblaze/kernel/head.S @@ -112,16 +112,16 @@ no_fdt_arg: * copy command line directly to cmd_line placed in data section. */ beqid r5, skip /* Skip if NULL pointer */ - or r6, r0, r0 /* incremment */ + or r11, r0, r0 /* incremment */ ori r4, r0, cmd_line /* load address of command line */ tophys(r4,r4) /* convert to phys address */ ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */ _copy_command_line: /* r2=r5+r6 - r5 contain pointer to command line */ - lbu r2, r5, r6 + lbu r2, r5, r11 beqid r2, skip /* Skip if no data */ - sb r2, r4, r6 /* addr[r4+r6]= r2*/ - addik r6, r6, 1 /* increment counting */ + sb r2, r4, r11 /* addr[r4+r6]= r2 */ + addik r11, r11, 1 /* increment counting */ bgtid r3, _copy_command_line /* loop for all entries */ addik r3, r3, -1 /* decrement loop */ addik r5, r4, 0 /* add new space for command line */ @@ -131,13 +131,13 @@ skip: #ifdef NOT_COMPILE /* save bram context */ - or r6, r0, r0 /* incremment */ + or r11, r0, r0 /* incremment */ ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */ ori r3, r0, (LMB_SIZE - 4) _copy_bram: - lw r7, r0, r6 /* r7 = r0 + r6 */ - sw r7, r4, r6 /* addr[r4 + r6] = r7*/ - addik r6, r6, 4 /* increment counting */ + lw r7, r0, r11 /* r7 = r0 + r6 */ + sw r7, r4, r11 /* addr[r4 + r6] = r7 */ + addik r11, r11, 4 /* increment counting */ bgtid r3, _copy_bram /* loop for all entries */ addik r3, r3, -4 /* descrement loop */ #endif @@ -303,8 +303,8 @@ jump_over2: * the exception vectors, using a 4k real==virtual mapping. */ /* Use temporary TLB_ID for LMB - clear this temporary mapping later */ - ori r6, r0, MICROBLAZE_LMB_TLB_ID - mts rtlbx,r6 + ori r11, r0, MICROBLAZE_LMB_TLB_ID + mts rtlbx,r11 ori r4,r0,(TLB_WR | TLB_EX) ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) diff --git a/trunk/arch/microblaze/kernel/intc.c b/trunk/arch/microblaze/kernel/intc.c index 8778adf72bd3..d85fa3a2b0f8 100644 --- a/trunk/arch/microblaze/kernel/intc.c +++ b/trunk/arch/microblaze/kernel/intc.c @@ -172,4 +172,6 @@ void __init init_IRQ(void) * and commits this patch. 
~~gcl */ root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops, (void *)intr_mask); + + irq_set_default_host(root_domain); } diff --git a/trunk/arch/microblaze/kernel/process.c b/trunk/arch/microblaze/kernel/process.c index a55893807274..7d1a9c8b1f3d 100644 --- a/trunk/arch/microblaze/kernel/process.c +++ b/trunk/arch/microblaze/kernel/process.c @@ -160,3 +160,8 @@ int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs) return 0; /* MicroBlaze has no separate FPU registers */ } #endif /* CONFIG_MMU */ + +void arch_cpu_idle(void) +{ + local_irq_enable(); +} diff --git a/trunk/arch/microblaze/mm/init.c b/trunk/arch/microblaze/mm/init.c index 4ec137d13ad7..b38ae3acfeb4 100644 --- a/trunk/arch/microblaze/mm/init.c +++ b/trunk/arch/microblaze/mm/init.c @@ -404,10 +404,11 @@ asmlinkage void __init mmu_init(void) #if defined(CONFIG_BLK_DEV_INITRD) /* Remove the init RAM disk from the available memory. */ -/* if (initrd_start) { - mem_pieces_remove(&phys_avail, __pa(initrd_start), - initrd_end - initrd_start, 1); - }*/ + if (initrd_start) { + unsigned long size; + size = initrd_end - initrd_start; + memblock_reserve(virt_to_phys(initrd_start), size); + } #endif /* CONFIG_BLK_DEV_INITRD */ /* Initialize the MMU hardware */ diff --git a/trunk/arch/microblaze/pci/pci-common.c b/trunk/arch/microblaze/pci/pci-common.c index 9ea521e4959e..bdb8ea100e73 100644 --- a/trunk/arch/microblaze/pci/pci-common.c +++ b/trunk/arch/microblaze/pci/pci-common.c @@ -30,7 +30,6 @@ #include #include #include -#include #include #include diff --git a/trunk/arch/mips/Kbuild b/trunk/arch/mips/Kbuild index 7dd65cfae837..d2cfe45f332b 100644 --- a/trunk/arch/mips/Kbuild +++ b/trunk/arch/mips/Kbuild @@ -17,3 +17,7 @@ obj- := $(platform-) obj-y += kernel/ obj-y += mm/ obj-y += math-emu/ + +ifdef CONFIG_KVM +obj-y += kvm/ +endif diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index e5f3794744f1..7a58ab933b20 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -61,8 +61,7 @@ config MIPS_ALCHEMY select SYS_HAS_CPU_MIPS32_R1 select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_APM_EMULATION - select GENERIC_GPIO - select ARCH_WANT_OPTIONAL_GPIOLIB + select ARCH_REQUIRE_GPIOLIB select SYS_SUPPORTS_ZBOOT select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_EHCI @@ -225,7 +224,6 @@ config MACH_JZ4740 select SYS_SUPPORTS_ZBOOT_UART16550 select DMA_NONCOHERENT select IRQ_CPU - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select SYS_HAS_EARLY_PRINTK select HAVE_PWM @@ -306,7 +304,6 @@ config MIPS_MALTA select HW_HAS_PCI select I8253 select I8259 - select MIPS_BOARDS_GEN select MIPS_BONITO64 select MIPS_CPU_SCACHE select PCI_GT64XXX_PCI0 @@ -337,12 +334,12 @@ config MIPS_SEAD3 select BOOT_RAW select CEVT_R4K select CSRC_R4K + select CSRC_GIC select CPU_MIPSR2_IRQ_VI select CPU_MIPSR2_IRQ_EI select DMA_NONCOHERENT select IRQ_CPU select IRQ_GIC - select MIPS_BOARDS_GEN select MIPS_CPU_SCACHE select MIPS_MSC select SYS_HAS_CPU_MIPS32_R1 @@ -354,6 +351,7 @@ config MIPS_SEAD3 select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_SMARTMIPS + select SYS_SUPPORTS_MICROMIPS select USB_ARCH_HAS_EHCI select USB_EHCI_BIG_ENDIAN_DESC select USB_EHCI_BIG_ENDIAN_MMIO @@ -912,6 +910,9 @@ config CEVT_GT641XX config CEVT_R4K bool +config CEVT_GIC + bool + config CEVT_SB1250 bool @@ -937,7 +938,6 @@ config CSRC_SB1250 bool config GPIO_TXX9 - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB bool @@ -985,9 +985,6 @@ config MIPS_MSC config MIPS_NILE4 bool -config 
MIPS_DISABLE_OBSOLETE_IDE - bool - config SYNC_R4K bool @@ -1009,9 +1006,6 @@ config GENERIC_ISA_DMA_SUPPORT_BROKEN config ISA_DMA_API bool -config GENERIC_GPIO - bool - config HOLES_IN_ZONE bool @@ -1081,9 +1075,6 @@ config IRQ_GT641XX config IRQ_GIC bool -config MIPS_BOARDS_GEN - bool - config PCI_GT64XXX_PCI0 bool @@ -1112,7 +1103,6 @@ config SOC_PNX833X select SYS_SUPPORTS_32BIT_KERNEL select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_SUPPORTS_BIG_ENDIAN - select GENERIC_GPIO select CPU_MIPSR2_IRQ_VI config SOC_PNX8335 @@ -1154,7 +1144,7 @@ config BOOT_ELF32 config MIPS_L1_CACHE_SHIFT int - default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL + default "4" if MACH_DECSTATION || MIKROTIK_RB532 || PMC_MSP4200_EVAL || SOC_RT288X default "6" if MIPS_CPU_SCACHE default "7" if SGI_IP22 || SGI_IP27 || SGI_IP28 || SNI_RM || CPU_CAVIUM_OCTEON default "5" @@ -1203,7 +1193,6 @@ config CPU_LOONGSON2F bool "Loongson 2F" depends on SYS_HAS_CPU_LOONGSON2F select CPU_LOONGSON2 - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help The Loongson 2F processor implements the MIPS III instruction set @@ -1244,6 +1233,7 @@ config CPU_MIPS32_R2 select CPU_HAS_PREFETCH select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM + select HAVE_KVM help Choose this option to build a kernel for release 2 or later of the MIPS32 architecture. Most modern embedded systems with a 32-bit @@ -1744,6 +1734,20 @@ config 64BIT endchoice +config KVM_GUEST + bool "KVM Guest Kernel" + help + Select this option if building a guest kernel for KVM (Trap & Emulate) mode + +config KVM_HOST_FREQ + int "KVM Host Processor Frequency (MHz)" + depends on KVM_GUEST + default 500 + help + Select this option if building a guest kernel for KVM to skip + RTC emulation when determining guest CPU Frequency. Instead, the guest + processor frequency is automatically derived from the host frequency. + choice prompt "Kernel page size" default PAGE_SIZE_4KB @@ -1819,6 +1823,15 @@ config FORCE_MAX_ZONEORDER The page size is not necessarily 4KB. Keep this in mind when choosing a value for this option. +config CEVT_GIC + bool "Use GIC global counter for clock events" + depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC) + help + Use the GIC global counter for the clock events. The R4K clock + event driver is always present, so if the platform ends up not + detecting a GIC, it will fall back to the R4K timer for the + generation of clock events. + config BOARD_SCACHE bool @@ -2024,6 +2037,7 @@ config SB1_PASS_2_1_WORKAROUNDS depends on CPU_SB1 && CPU_SB1_PASS_2 default y + config 64BIT_PHYS_ADDR bool @@ -2042,6 +2056,13 @@ config CPU_HAS_SMARTMIPS you don't know you probably don't have SmartMIPS and should say N here. 
+config CPU_MICROMIPS + depends on SYS_SUPPORTS_MICROMIPS + bool "Build kernel using microMIPS ISA" + help + When this option is enabled the kernel will be built using the + microMIPS ISA + config CPU_HAS_WB bool @@ -2104,6 +2125,9 @@ config SYS_SUPPORTS_HIGHMEM config SYS_SUPPORTS_SMARTMIPS bool +config SYS_SUPPORTS_MICROMIPS + bool + config ARCH_FLATMEM_ENABLE def_bool y depends on !NUMA && !CPU_LOONGSON2 @@ -2564,3 +2588,5 @@ source "security/Kconfig" source "crypto/Kconfig" source "lib/Kconfig" + +source "arch/mips/kvm/Kconfig" diff --git a/trunk/arch/mips/Makefile b/trunk/arch/mips/Makefile index 6f7978f95090..dd58a04ef4bc 100644 --- a/trunk/arch/mips/Makefile +++ b/trunk/arch/mips/Makefile @@ -114,6 +114,7 @@ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*e cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) +cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips -mno-jals) cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ -fno-omit-frame-pointer diff --git a/trunk/arch/mips/alchemy/Kconfig b/trunk/arch/mips/alchemy/Kconfig index c8862bdc2ff2..7032ac7ecd1b 100644 --- a/trunk/arch/mips/alchemy/Kconfig +++ b/trunk/arch/mips/alchemy/Kconfig @@ -31,7 +31,6 @@ config MIPS_DB1000 select ALCHEMY_GPIOINT_AU1000 select DMA_NONCOHERENT select HW_HAS_PCI - select MIPS_DISABLE_OBSOLETE_IDE select SYS_SUPPORTS_BIG_ENDIAN select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK @@ -41,7 +40,6 @@ config MIPS_DB1235 select ARCH_REQUIRE_GPIOLIB select HW_HAS_PCI select DMA_COHERENT - select MIPS_DISABLE_OBSOLETE_IDE select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK @@ -57,7 +55,6 @@ config MIPS_GPR select ALCHEMY_GPIOINT_AU1000 select HW_HAS_PCI select DMA_NONCOHERENT - select MIPS_DISABLE_OBSOLETE_IDE select SYS_SUPPORTS_LITTLE_ENDIAN select SYS_HAS_EARLY_PRINTK diff --git a/trunk/arch/mips/alchemy/Platform b/trunk/arch/mips/alchemy/Platform index fa1bdd1aea15..b3afcdd8d77a 100644 --- a/trunk/arch/mips/alchemy/Platform +++ b/trunk/arch/mips/alchemy/Platform @@ -5,32 +5,14 @@ platform-$(CONFIG_MIPS_ALCHEMY) += alchemy/common/ # -# AMD Alchemy Pb1100 eval board -# -platform-$(CONFIG_MIPS_PB1100) += alchemy/devboards/ -load-$(CONFIG_MIPS_PB1100) += 0xffffffff80100000 - -# -# AMD Alchemy Pb1500 eval board -# -platform-$(CONFIG_MIPS_PB1500) += alchemy/devboards/ -load-$(CONFIG_MIPS_PB1500) += 0xffffffff80100000 - -# -# AMD Alchemy Pb1550 eval board -# -platform-$(CONFIG_MIPS_PB1550) += alchemy/devboards/ -load-$(CONFIG_MIPS_PB1550) += 0xffffffff80100000 - -# -# AMD Alchemy Db1000/Db1500/Db1100 eval boards +# AMD Alchemy Db1000/Db1500/Pb1500/Db1100/Pb1100 eval boards # platform-$(CONFIG_MIPS_DB1000) += alchemy/devboards/ cflags-$(CONFIG_MIPS_DB1000) += -I$(srctree)/arch/mips/include/asm/mach-db1x00 load-$(CONFIG_MIPS_DB1000) += 0xffffffff80100000 # -# AMD Alchemy Db1200/Pb1200/Db1550/Db1300 eval boards +# AMD Alchemy Db1200/Pb1200/Db1550/Pb1550/Db1300 eval boards # platform-$(CONFIG_MIPS_DB1235) += alchemy/devboards/ cflags-$(CONFIG_MIPS_DB1235) += -I$(srctree)/arch/mips/include/asm/mach-db1x00 diff --git a/trunk/arch/mips/alchemy/board-gpr.c b/trunk/arch/mips/alchemy/board-gpr.c index cb0f6afb7389..9edc35ff8cf1 100644 --- a/trunk/arch/mips/alchemy/board-gpr.c +++ b/trunk/arch/mips/alchemy/board-gpr.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include diff 
--git a/trunk/arch/mips/alchemy/common/time.c b/trunk/arch/mips/alchemy/common/time.c index 38afb11ba2c4..93fa586d52e2 100644 --- a/trunk/arch/mips/alchemy/common/time.c +++ b/trunk/arch/mips/alchemy/common/time.c @@ -36,6 +36,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/mips/ar7/memory.c b/trunk/arch/mips/ar7/memory.c index 28abfeef09d6..92dfa481205b 100644 --- a/trunk/arch/mips/ar7/memory.c +++ b/trunk/arch/mips/ar7/memory.c @@ -30,7 +30,6 @@ #include #include -#include static int __init memsize(void) { diff --git a/trunk/arch/mips/ath79/setup.c b/trunk/arch/mips/ath79/setup.c index d5b3c9057018..8be4e856b8b8 100644 --- a/trunk/arch/mips/ath79/setup.c +++ b/trunk/arch/mips/ath79/setup.c @@ -19,6 +19,7 @@ #include #include +#include #include /* for mips_hpt_frequency */ #include /* for _machine_{restart,halt} */ #include @@ -51,20 +52,6 @@ static void ath79_halt(void) cpu_wait(); } -static void __init ath79_detect_mem_size(void) -{ - unsigned long size; - - for (size = ATH79_MEM_SIZE_MIN; size < ATH79_MEM_SIZE_MAX; - size <<= 1) { - if (!memcmp(ath79_detect_mem_size, - ath79_detect_mem_size + size, 1024)) - break; - } - - add_memory_region(0, size, BOOT_MEM_RAM); -} - static void __init ath79_detect_sys_type(void) { char *chip = "????"; @@ -212,7 +199,7 @@ void __init plat_mem_setup(void) AR71XX_DDR_CTRL_SIZE); ath79_detect_sys_type(); - ath79_detect_mem_size(); + detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX); ath79_clocks_init(); _machine_restart = ath79_restart; diff --git a/trunk/arch/mips/bcm63xx/Kconfig b/trunk/arch/mips/bcm63xx/Kconfig index d03e8799d1cf..5639662fd503 100644 --- a/trunk/arch/mips/bcm63xx/Kconfig +++ b/trunk/arch/mips/bcm63xx/Kconfig @@ -25,6 +25,10 @@ config BCM63XX_CPU_6358 bool "support 6358 CPU" select HW_HAS_PCI +config BCM63XX_CPU_6362 + bool "support 6362 CPU" + select HW_HAS_PCI + config BCM63XX_CPU_6368 bool "support 6368 CPU" select HW_HAS_PCI diff --git a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c index 9aa7d44898ed..a9505c4867e8 100644 --- a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c +++ b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c @@ -726,11 +726,11 @@ void __init board_prom_init(void) u32 val; /* read base address of boot chip select (0) - * 6328 does not have MPI but boots from a fixed address + * 6328/6362 do not have MPI but boot from a fixed address */ - if (BCMCPU_IS_6328()) + if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) { val = 0x18000000; - else { + } else { val = bcm_mpi_readl(MPI_CSBASE_REG(0)); val &= MPI_CSBASE_BASE_MASK; } diff --git a/trunk/arch/mips/bcm63xx/clk.c b/trunk/arch/mips/bcm63xx/clk.c index b9e948d59430..c726a97fc798 100644 --- a/trunk/arch/mips/bcm63xx/clk.c +++ b/trunk/arch/mips/bcm63xx/clk.c @@ -15,7 +15,13 @@ #include #include #include -#include + +struct clk { + void (*set)(struct clk *, int); + unsigned int rate; + unsigned int usage; + int id; +}; static DEFINE_MUTEX(clocks_mutex); @@ -119,11 +125,18 @@ static struct clk clk_ephy = { */ static void enetsw_set(struct clk *clk, int enable) { - if (!BCMCPU_IS_6368()) + if (BCMCPU_IS_6328()) + bcm_hwclock_set(CKCTL_6328_ROBOSW_EN, enable); + else if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_ROBOSW_EN, enable); + else if (BCMCPU_IS_6368()) + bcm_hwclock_set(CKCTL_6368_ROBOSW_EN | + CKCTL_6368_SWPKT_USB_EN | + CKCTL_6368_SWPKT_SAR_EN, + enable); + else return; - bcm_hwclock_set(CKCTL_6368_ROBOSW_EN | - CKCTL_6368_SWPKT_USB_EN | - CKCTL_6368_SWPKT_SAR_EN, 
enable); + if (enable) { /* reset switch core afer clock change */ bcm63xx_core_set_reset(BCM63XX_RESET_ENETSW, 1); @@ -160,6 +173,8 @@ static void usbh_set(struct clk *clk, int enable) bcm_hwclock_set(CKCTL_6328_USBH_EN, enable); else if (BCMCPU_IS_6348()) bcm_hwclock_set(CKCTL_6348_USBH_EN, enable); + else if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_USBH_EN, enable); else if (BCMCPU_IS_6368()) bcm_hwclock_set(CKCTL_6368_USBH_EN, enable); } @@ -175,6 +190,8 @@ static void usbd_set(struct clk *clk, int enable) { if (BCMCPU_IS_6328()) bcm_hwclock_set(CKCTL_6328_USBD_EN, enable); + else if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_USBD_EN, enable); else if (BCMCPU_IS_6368()) bcm_hwclock_set(CKCTL_6368_USBD_EN, enable); } @@ -196,6 +213,8 @@ static void spi_set(struct clk *clk, int enable) mask = CKCTL_6348_SPI_EN; else if (BCMCPU_IS_6358()) mask = CKCTL_6358_SPI_EN; + else if (BCMCPU_IS_6362()) + mask = CKCTL_6362_SPI_EN; else /* BCMCPU_IS_6368 */ mask = CKCTL_6368_SPI_EN; @@ -236,7 +255,10 @@ static struct clk clk_xtm = { */ static void ipsec_set(struct clk *clk, int enable) { - bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable); + if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_IPSEC_EN, enable); + else if (BCMCPU_IS_6368()) + bcm_hwclock_set(CKCTL_6368_IPSEC_EN, enable); } static struct clk clk_ipsec = { @@ -249,7 +271,10 @@ static struct clk clk_ipsec = { static void pcie_set(struct clk *clk, int enable) { - bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable); + if (BCMCPU_IS_6328()) + bcm_hwclock_set(CKCTL_6328_PCIE_EN, enable); + else if (BCMCPU_IS_6362()) + bcm_hwclock_set(CKCTL_6362_PCIE_EN, enable); } static struct clk clk_pcie = { @@ -315,9 +340,9 @@ struct clk *clk_get(struct device *dev, const char *id) return &clk_periph; if (BCMCPU_IS_6358() && !strcmp(id, "pcm")) return &clk_pcm; - if (BCMCPU_IS_6368() && !strcmp(id, "ipsec")) + if ((BCMCPU_IS_6362() || BCMCPU_IS_6368()) && !strcmp(id, "ipsec")) return &clk_ipsec; - if (BCMCPU_IS_6328() && !strcmp(id, "pcie")) + if ((BCMCPU_IS_6328() || BCMCPU_IS_6362()) && !strcmp(id, "pcie")) return &clk_pcie; return ERR_PTR(-ENOENT); } diff --git a/trunk/arch/mips/bcm63xx/cpu.c b/trunk/arch/mips/bcm63xx/cpu.c index a7afb289b15a..79fe32df5e96 100644 --- a/trunk/arch/mips/bcm63xx/cpu.c +++ b/trunk/arch/mips/bcm63xx/cpu.c @@ -25,7 +25,7 @@ const int *bcm63xx_irqs; EXPORT_SYMBOL(bcm63xx_irqs); static u16 bcm63xx_cpu_id; -static u16 bcm63xx_cpu_rev; +static u8 bcm63xx_cpu_rev; static unsigned int bcm63xx_cpu_freq; static unsigned int bcm63xx_memory_size; @@ -71,6 +71,15 @@ static const int bcm6358_irqs[] = { }; +static const unsigned long bcm6362_regs_base[] = { + __GEN_CPU_REGS_TABLE(6362) +}; + +static const int bcm6362_irqs[] = { + __GEN_CPU_IRQ_TABLE(6362) + +}; + static const unsigned long bcm6368_regs_base[] = { __GEN_CPU_REGS_TABLE(6368) }; @@ -87,7 +96,7 @@ u16 __bcm63xx_get_cpu_id(void) EXPORT_SYMBOL(__bcm63xx_get_cpu_id); -u16 bcm63xx_get_cpu_rev(void) +u8 bcm63xx_get_cpu_rev(void) { return bcm63xx_cpu_rev; } @@ -169,6 +178,42 @@ static unsigned int detect_cpu_clock(void) return (16 * 1000000 * n1 * n2) / m1; } + case BCM6362_CPU_ID: + { + unsigned int tmp, mips_pll_fcvo; + + tmp = bcm_misc_readl(MISC_STRAPBUS_6362_REG); + mips_pll_fcvo = (tmp & STRAPBUS_6362_FCVO_MASK) + >> STRAPBUS_6362_FCVO_SHIFT; + switch (mips_pll_fcvo) { + case 0x03: + case 0x0b: + case 0x13: + case 0x1b: + return 240000000; + case 0x04: + case 0x0c: + case 0x14: + case 0x1c: + return 160000000; + case 0x05: + case 0x0e: + case 0x16: + case 0x1e: + case 0x1f: + 
return 400000000; + case 0x06: + return 440000000; + case 0x07: + case 0x17: + return 384000000; + case 0x15: + case 0x1d: + return 200000000; + default: + return 320000000; + } + } case BCM6368_CPU_ID: { unsigned int tmp, p1, p2, ndiv, m1; @@ -205,7 +250,7 @@ static unsigned int detect_memory_size(void) unsigned int cols = 0, rows = 0, is_32bits = 0, banks = 0; u32 val; - if (BCMCPU_IS_6328()) + if (BCMCPU_IS_6328() || BCMCPU_IS_6362()) return bcm_ddr_readl(DDR_CSEND_REG) << 24; if (BCMCPU_IS_6345()) { @@ -240,53 +285,27 @@ static unsigned int detect_memory_size(void) void __init bcm63xx_cpu_init(void) { - unsigned int tmp, expected_cpu_id; + unsigned int tmp; struct cpuinfo_mips *c = ¤t_cpu_data; unsigned int cpu = smp_processor_id(); + u32 chipid_reg; /* soc registers location depends on cpu type */ - expected_cpu_id = 0; + chipid_reg = 0; switch (c->cputype) { case CPU_BMIPS3300: - if ((read_c0_prid() & 0xff00) == PRID_IMP_BMIPS3300_ALT) { - expected_cpu_id = BCM6348_CPU_ID; - bcm63xx_regs_base = bcm6348_regs_base; - bcm63xx_irqs = bcm6348_irqs; - } else { + if ((read_c0_prid() & 0xff00) != PRID_IMP_BMIPS3300_ALT) __cpu_name[cpu] = "Broadcom BCM6338"; - expected_cpu_id = BCM6338_CPU_ID; - bcm63xx_regs_base = bcm6338_regs_base; - bcm63xx_irqs = bcm6338_irqs; - } - break; + /* fall-through */ case CPU_BMIPS32: - expected_cpu_id = BCM6345_CPU_ID; - bcm63xx_regs_base = bcm6345_regs_base; - bcm63xx_irqs = bcm6345_irqs; + chipid_reg = BCM_6345_PERF_BASE; break; case CPU_BMIPS4350: - if ((read_c0_prid() & 0xf0) == 0x10) { - expected_cpu_id = BCM6358_CPU_ID; - bcm63xx_regs_base = bcm6358_regs_base; - bcm63xx_irqs = bcm6358_irqs; - } else { - /* all newer chips have the same chip id location */ - u16 chip_id = bcm_readw(BCM_6368_PERF_BASE); - - switch (chip_id) { - case BCM6328_CPU_ID: - expected_cpu_id = BCM6328_CPU_ID; - bcm63xx_regs_base = bcm6328_regs_base; - bcm63xx_irqs = bcm6328_irqs; - break; - case BCM6368_CPU_ID: - expected_cpu_id = BCM6368_CPU_ID; - bcm63xx_regs_base = bcm6368_regs_base; - bcm63xx_irqs = bcm6368_irqs; - break; - } - } + if ((read_c0_prid() & 0xf0) == 0x10) + chipid_reg = BCM_6345_PERF_BASE; + else + chipid_reg = BCM_6368_PERF_BASE; break; } @@ -294,20 +313,47 @@ void __init bcm63xx_cpu_init(void) * really early to panic, but delaying panic would not help since we * will never get any working console */ - if (!expected_cpu_id) + if (!chipid_reg) panic("unsupported Broadcom CPU"); - /* - * bcm63xx_regs_base is set, we can access soc registers - */ - - /* double check CPU type */ - tmp = bcm_perf_readl(PERF_REV_REG); + /* read out CPU type */ + tmp = bcm_readl(chipid_reg); bcm63xx_cpu_id = (tmp & REV_CHIPID_MASK) >> REV_CHIPID_SHIFT; bcm63xx_cpu_rev = (tmp & REV_REVID_MASK) >> REV_REVID_SHIFT; - if (bcm63xx_cpu_id != expected_cpu_id) - panic("bcm63xx CPU id mismatch"); + switch (bcm63xx_cpu_id) { + case BCM6328_CPU_ID: + bcm63xx_regs_base = bcm6328_regs_base; + bcm63xx_irqs = bcm6328_irqs; + break; + case BCM6338_CPU_ID: + bcm63xx_regs_base = bcm6338_regs_base; + bcm63xx_irqs = bcm6338_irqs; + break; + case BCM6345_CPU_ID: + bcm63xx_regs_base = bcm6345_regs_base; + bcm63xx_irqs = bcm6345_irqs; + break; + case BCM6348_CPU_ID: + bcm63xx_regs_base = bcm6348_regs_base; + bcm63xx_irqs = bcm6348_irqs; + break; + case BCM6358_CPU_ID: + bcm63xx_regs_base = bcm6358_regs_base; + bcm63xx_irqs = bcm6358_irqs; + break; + case BCM6362_CPU_ID: + bcm63xx_regs_base = bcm6362_regs_base; + bcm63xx_irqs = bcm6362_irqs; + break; + case BCM6368_CPU_ID: + bcm63xx_regs_base = 
bcm6368_regs_base; + bcm63xx_irqs = bcm6368_irqs; + break; + default: + panic("unsupported broadcom CPU %x", bcm63xx_cpu_id); + break; + } bcm63xx_cpu_freq = detect_cpu_clock(); bcm63xx_memory_size = detect_memory_size(); diff --git a/trunk/arch/mips/bcm63xx/dev-flash.c b/trunk/arch/mips/bcm63xx/dev-flash.c index 58371c7deac2..588d1ec622e4 100644 --- a/trunk/arch/mips/bcm63xx/dev-flash.c +++ b/trunk/arch/mips/bcm63xx/dev-flash.c @@ -77,6 +77,12 @@ static int __init bcm63xx_detect_flash_type(void) return BCM63XX_FLASH_TYPE_PARALLEL; else return BCM63XX_FLASH_TYPE_SERIAL; + case BCM6362_CPU_ID: + val = bcm_misc_readl(MISC_STRAPBUS_6362_REG); + if (val & STRAPBUS_6362_BOOT_SEL_SERIAL) + return BCM63XX_FLASH_TYPE_SERIAL; + else + return BCM63XX_FLASH_TYPE_NAND; case BCM6368_CPU_ID: val = bcm_gpio_readl(GPIO_STRAPBUS_REG); switch (val & STRAPBUS_6368_BOOT_SEL_MASK) { diff --git a/trunk/arch/mips/bcm63xx/dev-spi.c b/trunk/arch/mips/bcm63xx/dev-spi.c index e97fd60e92ef..3065bb61820d 100644 --- a/trunk/arch/mips/bcm63xx/dev-spi.c +++ b/trunk/arch/mips/bcm63xx/dev-spi.c @@ -22,10 +22,6 @@ /* * register offsets */ -static const unsigned long bcm6338_regs_spi[] = { - __GEN_SPI_REGS_TABLE(6338) -}; - static const unsigned long bcm6348_regs_spi[] = { __GEN_SPI_REGS_TABLE(6348) }; @@ -34,23 +30,15 @@ static const unsigned long bcm6358_regs_spi[] = { __GEN_SPI_REGS_TABLE(6358) }; -static const unsigned long bcm6368_regs_spi[] = { - __GEN_SPI_REGS_TABLE(6368) -}; - const unsigned long *bcm63xx_regs_spi; EXPORT_SYMBOL(bcm63xx_regs_spi); static __init void bcm63xx_spi_regs_init(void) { - if (BCMCPU_IS_6338()) - bcm63xx_regs_spi = bcm6338_regs_spi; - if (BCMCPU_IS_6348()) + if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) bcm63xx_regs_spi = bcm6348_regs_spi; - if (BCMCPU_IS_6358()) + if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) bcm63xx_regs_spi = bcm6358_regs_spi; - if (BCMCPU_IS_6368()) - bcm63xx_regs_spi = bcm6368_regs_spi; } #else static __init void bcm63xx_spi_regs_init(void) { } @@ -93,13 +81,13 @@ int __init bcm63xx_spi_register(void) spi_resources[1].start = bcm63xx_get_irq_number(IRQ_SPI); if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { - spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1; - spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE; - spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT; - spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH; + spi_resources[0].end += BCM_6348_RSET_SPI_SIZE - 1; + spi_pdata.fifo_size = SPI_6348_MSG_DATA_SIZE; + spi_pdata.msg_type_shift = SPI_6348_MSG_TYPE_SHIFT; + spi_pdata.msg_ctl_width = SPI_6348_MSG_CTL_WIDTH; } - if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { + if (BCMCPU_IS_6358() || BCMCPU_IS_6362() || BCMCPU_IS_6368()) { spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT; diff --git a/trunk/arch/mips/bcm63xx/irq.c b/trunk/arch/mips/bcm63xx/irq.c index da24c2bd9b7c..c0ab3887f42e 100644 --- a/trunk/arch/mips/bcm63xx/irq.c +++ b/trunk/arch/mips/bcm63xx/irq.c @@ -82,6 +82,17 @@ static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused; #define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6358 #define ext_irq_cfg_reg2 0 #endif +#ifdef CONFIG_BCM63XX_CPU_6362 +#define irq_stat_reg PERF_IRQSTAT_6362_REG +#define irq_mask_reg PERF_IRQMASK_6362_REG +#define irq_bits 64 +#define is_ext_irq_cascaded 1 +#define ext_irq_start (BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE) +#define ext_irq_end (BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE) +#define ext_irq_count 4 +#define 
ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6362 +#define ext_irq_cfg_reg2 0 +#endif #ifdef CONFIG_BCM63XX_CPU_6368 #define irq_stat_reg PERF_IRQSTAT_6368_REG #define irq_mask_reg PERF_IRQMASK_6368_REG @@ -170,6 +181,16 @@ static void bcm63xx_init_irq(void) ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE; ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358; break; + case BCM6362_CPU_ID: + irq_stat_addr += PERF_IRQSTAT_6362_REG; + irq_mask_addr += PERF_IRQMASK_6362_REG; + irq_bits = 64; + ext_irq_count = 4; + is_ext_irq_cascaded = 1; + ext_irq_start = BCM_6362_EXT_IRQ0 - IRQ_INTERNAL_BASE; + ext_irq_end = BCM_6362_EXT_IRQ3 - IRQ_INTERNAL_BASE; + ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6362; + break; case BCM6368_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6368_REG; irq_mask_addr += PERF_IRQMASK_6368_REG; @@ -458,6 +479,7 @@ static int bcm63xx_external_irq_set_type(struct irq_data *d, case BCM6338_CPU_ID: case BCM6345_CPU_ID: case BCM6358_CPU_ID: + case BCM6362_CPU_ID: case BCM6368_CPU_ID: if (levelsense) reg |= EXTIRQ_CFG_LEVELSENSE(irq); diff --git a/trunk/arch/mips/bcm63xx/prom.c b/trunk/arch/mips/bcm63xx/prom.c index 10eaff458071..fd698087fbfd 100644 --- a/trunk/arch/mips/bcm63xx/prom.c +++ b/trunk/arch/mips/bcm63xx/prom.c @@ -36,6 +36,8 @@ void __init prom_init(void) mask = CKCTL_6348_ALL_SAFE_EN; else if (BCMCPU_IS_6358()) mask = CKCTL_6358_ALL_SAFE_EN; + else if (BCMCPU_IS_6362()) + mask = CKCTL_6362_ALL_SAFE_EN; else if (BCMCPU_IS_6368()) mask = CKCTL_6368_ALL_SAFE_EN; else diff --git a/trunk/arch/mips/bcm63xx/reset.c b/trunk/arch/mips/bcm63xx/reset.c index 68a31bb90cbf..317931c6cf58 100644 --- a/trunk/arch/mips/bcm63xx/reset.c +++ b/trunk/arch/mips/bcm63xx/reset.c @@ -85,6 +85,20 @@ #define BCM6358_RESET_PCIE 0 #define BCM6358_RESET_PCIE_EXT 0 +#define BCM6362_RESET_SPI SOFTRESET_6362_SPI_MASK +#define BCM6362_RESET_ENET 0 +#define BCM6362_RESET_USBH SOFTRESET_6362_USBH_MASK +#define BCM6362_RESET_USBD SOFTRESET_6362_USBS_MASK +#define BCM6362_RESET_DSL 0 +#define BCM6362_RESET_SAR SOFTRESET_6362_SAR_MASK +#define BCM6362_RESET_EPHY SOFTRESET_6362_EPHY_MASK +#define BCM6362_RESET_ENETSW SOFTRESET_6362_ENETSW_MASK +#define BCM6362_RESET_PCM SOFTRESET_6362_PCM_MASK +#define BCM6362_RESET_MPI 0 +#define BCM6362_RESET_PCIE (SOFTRESET_6362_PCIE_MASK | \ + SOFTRESET_6362_PCIE_CORE_MASK) +#define BCM6362_RESET_PCIE_EXT SOFTRESET_6362_PCIE_EXT_MASK + #define BCM6368_RESET_SPI SOFTRESET_6368_SPI_MASK #define BCM6368_RESET_ENET 0 #define BCM6368_RESET_USBH SOFTRESET_6368_USBH_MASK @@ -119,6 +133,10 @@ static const u32 bcm6358_reset_bits[] = { __GEN_RESET_BITS_TABLE(6358) }; +static const u32 bcm6362_reset_bits[] = { + __GEN_RESET_BITS_TABLE(6362) +}; + static const u32 bcm6368_reset_bits[] = { __GEN_RESET_BITS_TABLE(6368) }; @@ -140,6 +158,9 @@ static int __init bcm63xx_reset_bits_init(void) } else if (BCMCPU_IS_6358()) { reset_reg = PERF_SOFTRESET_6358_REG; bcm63xx_reset_bits = bcm6358_reset_bits; + } else if (BCMCPU_IS_6362()) { + reset_reg = PERF_SOFTRESET_6362_REG; + bcm63xx_reset_bits = bcm6362_reset_bits; } else if (BCMCPU_IS_6368()) { reset_reg = PERF_SOFTRESET_6368_REG; bcm63xx_reset_bits = bcm6368_reset_bits; @@ -182,6 +203,13 @@ static const u32 bcm63xx_reset_bits[] = { #define reset_reg PERF_SOFTRESET_6358_REG #endif +#ifdef CONFIG_BCM63XX_CPU_6362 +static const u32 bcm63xx_reset_bits[] = { + __GEN_RESET_BITS_TABLE(6362) +}; +#define reset_reg PERF_SOFTRESET_6362_REG +#endif + #ifdef CONFIG_BCM63XX_CPU_6368 static const u32 bcm63xx_reset_bits[] = { __GEN_RESET_BITS_TABLE(6368) diff --git 
a/trunk/arch/mips/bcm63xx/setup.c b/trunk/arch/mips/bcm63xx/setup.c index 35e18e98beb9..24a24445db64 100644 --- a/trunk/arch/mips/bcm63xx/setup.c +++ b/trunk/arch/mips/bcm63xx/setup.c @@ -83,6 +83,9 @@ void bcm63xx_machine_reboot(void) case BCM6358_CPU_ID: perf_regs[0] = PERF_EXTIRQ_CFG_REG_6358; break; + case BCM6362_CPU_ID: + perf_regs[0] = PERF_EXTIRQ_CFG_REG_6362; + break; } for (i = 0; i < 2; i++) { @@ -126,7 +129,7 @@ static void __bcm63xx_machine_reboot(char *p) const char *get_system_type(void) { static char buf[128]; - snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%04X)", + snprintf(buf, sizeof(buf), "bcm63xx/%s (0x%04x/0x%02X)", board_get_name(), bcm63xx_get_cpu_id(), bcm63xx_get_cpu_rev()); return buf; diff --git a/trunk/arch/mips/cavium-octeon/octeon-irq.c b/trunk/arch/mips/cavium-octeon/octeon-irq.c index 156aa6143e11..a22f06a6f7ca 100644 --- a/trunk/arch/mips/cavium-octeon/octeon-irq.c +++ b/trunk/arch/mips/cavium-octeon/octeon-irq.c @@ -1032,9 +1032,8 @@ static int octeon_irq_gpio_map_common(struct irq_domain *d, if (!octeon_irq_virq_in_range(virq)) return -EINVAL; - hw += gpiod->base_hwirq; - line = hw >> 6; - bit = hw & 63; + line = (hw + gpiod->base_hwirq) >> 6; + bit = (hw + gpiod->base_hwirq) & 63; if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; diff --git a/trunk/arch/mips/cobalt/reset.c b/trunk/arch/mips/cobalt/reset.c index 516b4428df4e..4eedd481dd00 100644 --- a/trunk/arch/mips/cobalt/reset.c +++ b/trunk/arch/mips/cobalt/reset.c @@ -12,6 +12,7 @@ #include #include +#include #include #include diff --git a/trunk/arch/mips/configs/db1000_defconfig b/trunk/arch/mips/configs/db1000_defconfig index face9d26e6d5..bac26b971c5e 100644 --- a/trunk/arch/mips/configs/db1000_defconfig +++ b/trunk/arch/mips/configs/db1000_defconfig @@ -228,7 +228,6 @@ CONFIG_HIDRAW=y CONFIG_USB_HID=y CONFIG_USB_SUPPORT=y CONFIG_USB=y -CONFIG_USB_SUSPEND=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y diff --git a/trunk/arch/mips/configs/db1235_defconfig b/trunk/arch/mips/configs/db1235_defconfig index 14752dde7540..e2b4ad55462f 100644 --- a/trunk/arch/mips/configs/db1235_defconfig +++ b/trunk/arch/mips/configs/db1235_defconfig @@ -344,7 +344,6 @@ CONFIG_UHID=y CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_DYNAMIC_MINORS=y -CONFIG_USB_SUSPEND=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_HCD_PLATFORM=y CONFIG_USB_EHCI_ROOT_HUB_TT=y diff --git a/trunk/arch/mips/configs/lemote2f_defconfig b/trunk/arch/mips/configs/lemote2f_defconfig index b6acd2f256b6..343bebc4b63b 100644 --- a/trunk/arch/mips/configs/lemote2f_defconfig +++ b/trunk/arch/mips/configs/lemote2f_defconfig @@ -300,7 +300,6 @@ CONFIG_USB=y CONFIG_USB_DEVICEFS=y # CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_DYNAMIC_MINORS=y -CONFIG_USB_SUSPEND=y CONFIG_USB_OTG_WHITELIST=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y diff --git a/trunk/arch/mips/configs/malta_defconfig b/trunk/arch/mips/configs/malta_defconfig index cd732e5b4fd5..ce1d3eeeb737 100644 --- a/trunk/arch/mips/configs/malta_defconfig +++ b/trunk/arch/mips/configs/malta_defconfig @@ -2,30 +2,21 @@ CONFIG_MIPS_MALTA=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_MIPS_MT_SMP=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_HZ_100=y -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=15 -CONFIG_SYSFS_DEPRECATED_V2=y -CONFIG_RELAY=y CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_PID_NS=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set +CONFIG_RELAY=y 
CONFIG_EXPERT=y -# CONFIG_SYSCTL_SYSCALL is not set # CONFIG_COMPAT_BRK is not set CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -# CONFIG_BLK_DEV_BSG is not set CONFIG_PCI=y -CONFIG_PM=y CONFIG_PACKET=y CONFIG_UNIX=y CONFIG_XFRM_USER=m @@ -41,8 +32,6 @@ CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y CONFIG_NET_IPIP=m -CONFIG_NET_IPGRE=m -CONFIG_NET_IPGRE_BROADCAST=y CONFIG_IP_MROUTE=y CONFIG_IP_PIMSM_V1=y CONFIG_IP_PIMSM_V2=y @@ -65,7 +54,6 @@ CONFIG_IPV6_MROUTE=y CONFIG_IPV6_PIMSM_V2=y CONFIG_NETWORK_SECMARK=y CONFIG_NETFILTER=y -CONFIG_NETFILTER_NETLINK_QUEUE=m CONFIG_NF_CONNTRACK=m CONFIG_NF_CONNTRACK_SECMARK=y CONFIG_NF_CONNTRACK_EVENTS=y @@ -136,23 +124,15 @@ CONFIG_IP_VS_DH=m CONFIG_IP_VS_SH=m CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m -CONFIG_IP_VS_FTP=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_QUEUE=m CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_ADDRTYPE=m CONFIG_IP_NF_MATCH_AH=m CONFIG_IP_NF_MATCH_ECN=m CONFIG_IP_NF_MATCH_TTL=m CONFIG_IP_NF_FILTER=m CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NF_NAT_SNMP_BASIC=m CONFIG_IP_NF_MANGLE=m CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m @@ -162,8 +142,6 @@ CONFIG_IP_NF_ARPTABLES=m CONFIG_IP_NF_ARPFILTER=m CONFIG_IP_NF_ARP_MANGLE=m CONFIG_NF_CONNTRACK_IPV6=m -CONFIG_IP6_NF_QUEUE=m -CONFIG_IP6_NF_IPTABLES=m CONFIG_IP6_NF_MATCH_AH=m CONFIG_IP6_NF_MATCH_EUI64=m CONFIG_IP6_NF_MATCH_FRAG=m @@ -173,7 +151,6 @@ CONFIG_IP6_NF_MATCH_IPV6HEADER=m CONFIG_IP6_NF_MATCH_MH=m CONFIG_IP6_NF_MATCH_RT=m CONFIG_IP6_NF_TARGET_HL=m -CONFIG_IP6_NF_TARGET_LOG=m CONFIG_IP6_NF_FILTER=m CONFIG_IP6_NF_TARGET_REJECT=m CONFIG_IP6_NF_MANGLE=m @@ -247,12 +224,10 @@ CONFIG_MAC80211=m CONFIG_MAC80211_RC_PID=y CONFIG_MAC80211_RC_DEFAULT_PID=y CONFIG_MAC80211_MESH=y -CONFIG_MAC80211_LEDS=y CONFIG_RFKILL=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_CONNECTOR=m CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_OOPS=m @@ -271,7 +246,6 @@ CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y CONFIG_CDROM_PKTCDVD=m CONFIG_ATA_OVER_ETH=m -# CONFIG_MISC_DEVICES is not set CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y CONFIG_IDE_GENERIC=y @@ -317,13 +291,19 @@ CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m CONFIG_DM_MULTIPATH=m CONFIG_NETDEVICES=y -CONFIG_IFB=m -CONFIG_DUMMY=m CONFIG_BONDING=m -CONFIG_MACVLAN=m +CONFIG_DUMMY=m CONFIG_EQUALIZER=m +CONFIG_IFB=m +CONFIG_MACVLAN=m CONFIG_TUN=m CONFIG_VETH=m +# CONFIG_NET_VENDOR_3COM is not set +CONFIG_PCNET32=y +CONFIG_CHELSIO_T3=m +CONFIG_AX88796=m +CONFIG_NETXEN_NIC=m +CONFIG_TC35815=m CONFIG_MARVELL_PHY=m CONFIG_DAVICOM_PHY=m CONFIG_QSEMI_PHY=m @@ -334,14 +314,6 @@ CONFIG_SMSC_PHY=m CONFIG_BROADCOM_PHY=m CONFIG_ICPLUS_PHY=m CONFIG_REALTEK_PHY=m -CONFIG_MDIO_BITBANG=m -CONFIG_NET_ETHERNET=y -CONFIG_AX88796=m -CONFIG_NET_PCI=y -CONFIG_PCNET32=y -CONFIG_TC35815=m -CONFIG_CHELSIO_T3=m -CONFIG_NETXEN_NIC=m CONFIG_ATMEL=m CONFIG_PCI_ATMEL=m CONFIG_PRISM54=m @@ -352,15 +324,7 @@ CONFIG_HOSTAP_PLX=m CONFIG_HOSTAP_PCI=m CONFIG_IPW2100=m CONFIG_IPW2100_MONITOR=y -CONFIG_IPW2200=m -CONFIG_IPW2200_MONITOR=y -CONFIG_IPW2200_PROMISCUOUS=y -CONFIG_IPW2200_QOS=y CONFIG_LIBERTAS=m -CONFIG_HERMES=m -CONFIG_PLX_HERMES=m -CONFIG_TMD_HERMES=m -CONFIG_NORTEL_HERMES=m # CONFIG_INPUT_KEYBOARD is not set # CONFIG_INPUT_MOUSE is not set # CONFIG_SERIO_I8042 is not set @@ -373,12 +337,6 @@ CONFIG_FB_CIRRUS=y # 
CONFIG_VGA_CONSOLE is not set CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_HID=m -CONFIG_LEDS_CLASS=y -CONFIG_LEDS_TRIGGER_TIMER=m -CONFIG_LEDS_TRIGGER_IDE_DISK=y -CONFIG_LEDS_TRIGGER_HEARTBEAT=m -CONFIG_LEDS_TRIGGER_BACKLIGHT=m -CONFIG_LEDS_TRIGGER_DEFAULT_ON=m CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_CMOS=y CONFIG_UIO=m @@ -398,7 +356,6 @@ CONFIG_XFS_QUOTA=y CONFIG_XFS_POSIX_ACL=y CONFIG_QUOTA=y CONFIG_QFMT_V2=y -CONFIG_AUTOFS_FS=y CONFIG_FUSE_FS=m CONFIG_ISO9660_FS=m CONFIG_JOLIET=y @@ -425,7 +382,6 @@ CONFIG_ROMFS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y CONFIG_NFSD_V3=y @@ -466,7 +422,6 @@ CONFIG_NLS_ISO8859_14=m CONFIG_NLS_ISO8859_15=m CONFIG_NLS_KOI8_R=m CONFIG_NLS_KOI8_U=m -# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_CRYPTD=m CONFIG_CRYPTO_LRW=m diff --git a/trunk/arch/mips/configs/malta_kvm_defconfig b/trunk/arch/mips/configs/malta_kvm_defconfig new file mode 100644 index 000000000000..341bb47204d6 --- /dev/null +++ b/trunk/arch/mips/configs/malta_kvm_defconfig @@ -0,0 +1,456 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_MIPS_MT_SMP=y +CONFIG_HZ_100=y +CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_NAMESPACES=y +CONFIG_RELAY=y +CONFIG_EXPERT=y +CONFIG_PERF_EVENTS=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PCI=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m 
+CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_ULOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_SCTP=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_PHONET=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_CLS_IND=y +CONFIG_CFG80211=m +CONFIG_MAC80211=m +CONFIG_MAC80211_RC_PID=y +CONFIG_MAC80211_RC_DEFAULT_PID=y +CONFIG_MAC80211_MESH=y +CONFIG_RFKILL=m +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 
+CONFIG_CONNECTOR=m +CONFIG_MTD=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_OOPS=m +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_BLK_DEV_FD=m +CONFIG_BLK_DEV_UMEM=m +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_ATA_OVER_ETH=m +CONFIG_IDE=y +CONFIG_BLK_DEV_IDECD=y +CONFIG_IDE_GENERIC=y +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_IT8213=m +CONFIG_BLK_DEV_TC86C001=m +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=m +CONFIG_SCSI_TGT=m +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_MIRROR=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_EQUALIZER=m +CONFIG_IFB=m +CONFIG_MACVLAN=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_PCNET32=y +CONFIG_CHELSIO_T3=m +CONFIG_AX88796=m +CONFIG_NETXEN_NIC=m +CONFIG_TC35815=m +CONFIG_MARVELL_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_LXT_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PRISM54=m +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +CONFIG_LIBERTAS=m +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_I8042 is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FB_CIRRUS=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_HID=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_REISERFS_FS=m +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m +CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_CONFIGFS_FS=y +CONFIG_AFFS_FS=m +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_RUBIN=y +CONFIG_CRAMFS=m +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m 
+CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_RCU_CPU_STALL_TIMEOUT=60 +CONFIG_ENABLE_DEFAULT_TRACERS=y +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRC16=m +CONFIG_VIRTUALIZATION=y +CONFIG_KVM=m +CONFIG_KVM_MIPS_DYN_TRANS=y +CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS=y +CONFIG_VHOST_NET=m diff --git a/trunk/arch/mips/configs/malta_kvm_guest_defconfig b/trunk/arch/mips/configs/malta_kvm_guest_defconfig new file mode 100644 index 000000000000..2b8558b71080 --- /dev/null +++ b/trunk/arch/mips/configs/malta_kvm_guest_defconfig @@ -0,0 +1,453 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_KVM_GUEST=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_HZ_100=y +CONFIG_SYSVIPC=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_NAMESPACES=y +CONFIG_RELAY=y +CONFIG_BLK_DEV_INITRD=y +CONFIG_EXPERT=y +# CONFIG_COMPAT_BRK is not set +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +CONFIG_PCI=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_NET_KEY_MIGRATE=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +CONFIG_INET_XFRM_MODE_TRANSPORT=m +CONFIG_INET_XFRM_MODE_TUNNEL=m +CONFIG_TCP_MD5SIG=y +CONFIG_IPV6_PRIVACY=y +CONFIG_IPV6_ROUTER_PREF=y +CONFIG_IPV6_ROUTE_INFO=y +CONFIG_IPV6_OPTIMISTIC_DAD=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_IPV6_MROUTE=y +CONFIG_IPV6_PIMSM_V2=y +CONFIG_NETWORK_SECMARK=y +CONFIG_NETFILTER=y +CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_SECMARK=y +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CT_PROTO_DCCP=m +CONFIG_NF_CT_PROTO_UDPLITE=m +CONFIG_NF_CONNTRACK_AMANDA=m +CONFIG_NF_CONNTRACK_FTP=m +CONFIG_NF_CONNTRACK_H323=m +CONFIG_NF_CONNTRACK_IRC=m +CONFIG_NF_CONNTRACK_PPTP=m +CONFIG_NF_CONNTRACK_SANE=m +CONFIG_NF_CONNTRACK_SIP=m +CONFIG_NF_CONNTRACK_TFTP=m +CONFIG_NF_CT_NETLINK=m +CONFIG_NETFILTER_TPROXY=m 
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m +CONFIG_NETFILTER_XT_TARGET_CONNMARK=m +CONFIG_NETFILTER_XT_TARGET_MARK=m +CONFIG_NETFILTER_XT_TARGET_NFLOG=m +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m +CONFIG_NETFILTER_XT_TARGET_TPROXY=m +CONFIG_NETFILTER_XT_TARGET_TRACE=m +CONFIG_NETFILTER_XT_TARGET_SECMARK=m +CONFIG_NETFILTER_XT_TARGET_TCPMSS=m +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m +CONFIG_NETFILTER_XT_MATCH_COMMENT=m +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m +CONFIG_NETFILTER_XT_MATCH_CONNMARK=m +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m +CONFIG_NETFILTER_XT_MATCH_DCCP=m +CONFIG_NETFILTER_XT_MATCH_ESP=m +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m +CONFIG_NETFILTER_XT_MATCH_HELPER=m +CONFIG_NETFILTER_XT_MATCH_IPRANGE=m +CONFIG_NETFILTER_XT_MATCH_LENGTH=m +CONFIG_NETFILTER_XT_MATCH_LIMIT=m +CONFIG_NETFILTER_XT_MATCH_MAC=m +CONFIG_NETFILTER_XT_MATCH_MARK=m +CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m +CONFIG_NETFILTER_XT_MATCH_OWNER=m +CONFIG_NETFILTER_XT_MATCH_POLICY=m +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m +CONFIG_NETFILTER_XT_MATCH_QUOTA=m +CONFIG_NETFILTER_XT_MATCH_RATEEST=m +CONFIG_NETFILTER_XT_MATCH_REALM=m +CONFIG_NETFILTER_XT_MATCH_RECENT=m +CONFIG_NETFILTER_XT_MATCH_SOCKET=m +CONFIG_NETFILTER_XT_MATCH_STATE=m +CONFIG_NETFILTER_XT_MATCH_STATISTIC=m +CONFIG_NETFILTER_XT_MATCH_STRING=m +CONFIG_NETFILTER_XT_MATCH_TCPMSS=m +CONFIG_NETFILTER_XT_MATCH_TIME=m +CONFIG_NETFILTER_XT_MATCH_U32=m +CONFIG_IP_VS=m +CONFIG_IP_VS_IPV6=y +CONFIG_IP_VS_PROTO_TCP=y +CONFIG_IP_VS_PROTO_UDP=y +CONFIG_IP_VS_PROTO_ESP=y +CONFIG_IP_VS_PROTO_AH=y +CONFIG_IP_VS_RR=m +CONFIG_IP_VS_WRR=m +CONFIG_IP_VS_LC=m +CONFIG_IP_VS_WLC=m +CONFIG_IP_VS_LBLC=m +CONFIG_IP_VS_LBLCR=m +CONFIG_IP_VS_DH=m +CONFIG_IP_VS_SH=m +CONFIG_IP_VS_SED=m +CONFIG_IP_VS_NQ=m +CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_IP_NF_QUEUE=m +CONFIG_IP_NF_IPTABLES=m +CONFIG_IP_NF_MATCH_AH=m +CONFIG_IP_NF_MATCH_ECN=m +CONFIG_IP_NF_MATCH_TTL=m +CONFIG_IP_NF_FILTER=m +CONFIG_IP_NF_TARGET_REJECT=m +CONFIG_IP_NF_TARGET_ULOG=m +CONFIG_IP_NF_MANGLE=m +CONFIG_IP_NF_TARGET_CLUSTERIP=m +CONFIG_IP_NF_TARGET_ECN=m +CONFIG_IP_NF_TARGET_TTL=m +CONFIG_IP_NF_RAW=m +CONFIG_IP_NF_ARPTABLES=m +CONFIG_IP_NF_ARPFILTER=m +CONFIG_IP_NF_ARP_MANGLE=m +CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_IP6_NF_MATCH_AH=m +CONFIG_IP6_NF_MATCH_EUI64=m +CONFIG_IP6_NF_MATCH_FRAG=m +CONFIG_IP6_NF_MATCH_OPTS=m +CONFIG_IP6_NF_MATCH_HL=m +CONFIG_IP6_NF_MATCH_IPV6HEADER=m +CONFIG_IP6_NF_MATCH_MH=m +CONFIG_IP6_NF_MATCH_RT=m +CONFIG_IP6_NF_TARGET_HL=m +CONFIG_IP6_NF_FILTER=m +CONFIG_IP6_NF_TARGET_REJECT=m +CONFIG_IP6_NF_MANGLE=m +CONFIG_IP6_NF_RAW=m +CONFIG_BRIDGE_NF_EBTABLES=m +CONFIG_BRIDGE_EBT_BROUTE=m +CONFIG_BRIDGE_EBT_T_FILTER=m +CONFIG_BRIDGE_EBT_T_NAT=m +CONFIG_BRIDGE_EBT_802_3=m +CONFIG_BRIDGE_EBT_AMONG=m +CONFIG_BRIDGE_EBT_ARP=m +CONFIG_BRIDGE_EBT_IP=m +CONFIG_BRIDGE_EBT_IP6=m +CONFIG_BRIDGE_EBT_LIMIT=m +CONFIG_BRIDGE_EBT_MARK=m +CONFIG_BRIDGE_EBT_PKTTYPE=m +CONFIG_BRIDGE_EBT_STP=m +CONFIG_BRIDGE_EBT_VLAN=m +CONFIG_BRIDGE_EBT_ARPREPLY=m +CONFIG_BRIDGE_EBT_DNAT=m +CONFIG_BRIDGE_EBT_MARK_T=m +CONFIG_BRIDGE_EBT_REDIRECT=m +CONFIG_BRIDGE_EBT_SNAT=m +CONFIG_BRIDGE_EBT_LOG=m +CONFIG_BRIDGE_EBT_ULOG=m +CONFIG_BRIDGE_EBT_NFLOG=m +CONFIG_IP_SCTP=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_VLAN_8021Q_GVRP=y +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_PHONET=m +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m 
+CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_FLOW=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_ACT_GACT=m +CONFIG_GACT_PROB=y +CONFIG_NET_ACT_MIRRED=m +CONFIG_NET_ACT_IPT=m +CONFIG_NET_ACT_NAT=m +CONFIG_NET_ACT_PEDIT=m +CONFIG_NET_ACT_SIMP=m +CONFIG_NET_ACT_SKBEDIT=m +CONFIG_NET_CLS_IND=y +CONFIG_CFG80211=m +CONFIG_MAC80211=m +CONFIG_MAC80211_RC_PID=y +CONFIG_MAC80211_RC_DEFAULT_PID=y +CONFIG_MAC80211_MESH=y +CONFIG_RFKILL=m +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_CONNECTOR=m +CONFIG_MTD=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_OOPS=m +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_CFI_AMDSTD=y +CONFIG_MTD_CFI_STAA=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_UBI=m +CONFIG_MTD_UBI_GLUEBI=m +CONFIG_BLK_DEV_FD=m +CONFIG_BLK_DEV_UMEM=m +CONFIG_BLK_DEV_LOOP=m +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_BLK_DEV_NBD=m +CONFIG_BLK_DEV_RAM=y +CONFIG_CDROM_PKTCDVD=m +CONFIG_ATA_OVER_ETH=m +CONFIG_VIRTIO_BLK=y +CONFIG_IDE=y +CONFIG_BLK_DEV_IDECD=y +CONFIG_IDE_GENERIC=y +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_BLK_DEV_IT8213=m +CONFIG_BLK_DEV_TC86C001=m +CONFIG_RAID_ATTRS=m +CONFIG_SCSI=m +CONFIG_SCSI_TGT=m +CONFIG_BLK_DEV_SD=m +CONFIG_CHR_DEV_ST=m +CONFIG_CHR_DEV_OSST=m +CONFIG_BLK_DEV_SR=m +CONFIG_BLK_DEV_SR_VENDOR=y +CONFIG_CHR_DEV_SG=m +CONFIG_SCSI_MULTI_LUN=y +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y +CONFIG_SCSI_SCAN_ASYNC=y +CONFIG_SCSI_FC_ATTRS=m +CONFIG_ISCSI_TCP=m +CONFIG_BLK_DEV_3W_XXXX_RAID=m +CONFIG_SCSI_3W_9XXX=m +CONFIG_SCSI_ACARD=m +CONFIG_SCSI_AACRAID=m +CONFIG_SCSI_AIC7XXX=m +CONFIG_AIC7XXX_RESET_DELAY_MS=15000 +# CONFIG_AIC7XXX_DEBUG_ENABLE is not set +CONFIG_MD=y +CONFIG_BLK_DEV_MD=m +CONFIG_MD_LINEAR=m +CONFIG_MD_RAID0=m +CONFIG_MD_RAID1=m +CONFIG_MD_RAID10=m +CONFIG_MD_RAID456=m +CONFIG_MD_MULTIPATH=m +CONFIG_MD_FAULTY=m +CONFIG_BLK_DEV_DM=m +CONFIG_DM_CRYPT=m +CONFIG_DM_SNAPSHOT=m +CONFIG_DM_MIRROR=m +CONFIG_DM_ZERO=m +CONFIG_DM_MULTIPATH=m +CONFIG_NETDEVICES=y +CONFIG_BONDING=m +CONFIG_DUMMY=m +CONFIG_EQUALIZER=m +CONFIG_IFB=m +CONFIG_MACVLAN=m +CONFIG_TUN=m +CONFIG_VETH=m +CONFIG_VIRTIO_NET=y +CONFIG_PCNET32=y +CONFIG_CHELSIO_T3=m +CONFIG_AX88796=m +CONFIG_NETXEN_NIC=m +CONFIG_TC35815=m +CONFIG_MARVELL_PHY=m +CONFIG_DAVICOM_PHY=m +CONFIG_QSEMI_PHY=m +CONFIG_LXT_PHY=m +CONFIG_CICADA_PHY=m +CONFIG_VITESSE_PHY=m +CONFIG_SMSC_PHY=m +CONFIG_BROADCOM_PHY=m +CONFIG_ICPLUS_PHY=m +CONFIG_REALTEK_PHY=m +CONFIG_ATMEL=m +CONFIG_PCI_ATMEL=m +CONFIG_PRISM54=m +CONFIG_HOSTAP=m +CONFIG_HOSTAP_FIRMWARE=y +CONFIG_HOSTAP_FIRMWARE_NVRAM=y +CONFIG_HOSTAP_PLX=m +CONFIG_HOSTAP_PCI=m +CONFIG_IPW2100=m +CONFIG_IPW2100_MONITOR=y +CONFIG_LIBERTAS=m +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO_I8042 is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FB_CIRRUS=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y +CONFIG_HID=m +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_UIO=m +CONFIG_UIO_CIF=m +CONFIG_VIRTIO_PCI=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_MMIO=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +CONFIG_REISERFS_FS=m +CONFIG_REISERFS_PROC_INFO=y +CONFIG_REISERFS_FS_XATTR=y +CONFIG_REISERFS_FS_POSIX_ACL=y +CONFIG_REISERFS_FS_SECURITY=y +CONFIG_JFS_FS=m 
+CONFIG_JFS_POSIX_ACL=y +CONFIG_JFS_SECURITY=y +CONFIG_XFS_FS=m +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_FUSE_FS=m +CONFIG_ISO9660_FS=m +CONFIG_JOLIET=y +CONFIG_ZISOFS=y +CONFIG_UDF_FS=m +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_AFFS_FS=m +CONFIG_HFS_FS=m +CONFIG_HFSPLUS_FS=m +CONFIG_BEFS_FS=m +CONFIG_BFS_FS=m +CONFIG_EFS_FS=m +CONFIG_JFFS2_FS=m +CONFIG_JFFS2_FS_XATTR=y +CONFIG_JFFS2_COMPRESSION_OPTIONS=y +CONFIG_JFFS2_RUBIN=y +CONFIG_CRAMFS=m +CONFIG_VXFS_FS=m +CONFIG_MINIX_FS=m +CONFIG_ROMFS_FS=m +CONFIG_SYSV_FS=m +CONFIG_UFS_FS=m +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NFSD=y +CONFIG_NFSD_V3=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_CODEPAGE_737=m +CONFIG_NLS_CODEPAGE_775=m +CONFIG_NLS_CODEPAGE_850=m +CONFIG_NLS_CODEPAGE_852=m +CONFIG_NLS_CODEPAGE_855=m +CONFIG_NLS_CODEPAGE_857=m +CONFIG_NLS_CODEPAGE_860=m +CONFIG_NLS_CODEPAGE_861=m +CONFIG_NLS_CODEPAGE_862=m +CONFIG_NLS_CODEPAGE_863=m +CONFIG_NLS_CODEPAGE_864=m +CONFIG_NLS_CODEPAGE_865=m +CONFIG_NLS_CODEPAGE_866=m +CONFIG_NLS_CODEPAGE_869=m +CONFIG_NLS_CODEPAGE_936=m +CONFIG_NLS_CODEPAGE_950=m +CONFIG_NLS_CODEPAGE_932=m +CONFIG_NLS_CODEPAGE_949=m +CONFIG_NLS_CODEPAGE_874=m +CONFIG_NLS_ISO8859_8=m +CONFIG_NLS_CODEPAGE_1250=m +CONFIG_NLS_CODEPAGE_1251=m +CONFIG_NLS_ASCII=m +CONFIG_NLS_ISO8859_1=m +CONFIG_NLS_ISO8859_2=m +CONFIG_NLS_ISO8859_3=m +CONFIG_NLS_ISO8859_4=m +CONFIG_NLS_ISO8859_5=m +CONFIG_NLS_ISO8859_6=m +CONFIG_NLS_ISO8859_7=m +CONFIG_NLS_ISO8859_9=m +CONFIG_NLS_ISO8859_13=m +CONFIG_NLS_ISO8859_14=m +CONFIG_NLS_ISO8859_15=m +CONFIG_NLS_KOI8_R=m +CONFIG_NLS_KOI8_U=m +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_CRYPTD=m +CONFIG_CRYPTO_LRW=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_XCBC=m +CONFIG_CRYPTO_MD4=m +CONFIG_CRYPTO_SHA256=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAMELLIA=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_FCRYPT=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +CONFIG_CRC16=m diff --git a/trunk/arch/mips/configs/maltaaprp_defconfig b/trunk/arch/mips/configs/maltaaprp_defconfig new file mode 100644 index 000000000000..93057a760dfa --- /dev/null +++ b/trunk/arch/mips/configs/maltaaprp_defconfig @@ -0,0 +1,195 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_MIPS_VPE_LOADER=y +CONFIG_MIPS_VPE_APSP_API=y +CONFIG_HZ_100=y +CONFIG_LOCALVERSION="aprp" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m 
+CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m 
+CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/trunk/arch/mips/configs/maltasmtc_defconfig b/trunk/arch/mips/configs/maltasmtc_defconfig new file mode 100644 index 000000000000..4e54b75d89be --- /dev/null +++ b/trunk/arch/mips/configs/maltasmtc_defconfig @@ -0,0 +1,196 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_MIPS_MT_SMTC=y +# CONFIG_MIPS_MT_FPAFF is not set +CONFIG_NR_CPUS=9 +CONFIG_HZ_48=y +CONFIG_LOCALVERSION="smtc" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# 
CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/trunk/arch/mips/configs/maltasmvp_defconfig b/trunk/arch/mips/configs/maltasmvp_defconfig new file mode 100644 index 000000000000..8a666021b870 --- /dev/null +++ b/trunk/arch/mips/configs/maltasmvp_defconfig @@ -0,0 +1,199 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_MIPS_MT_SMP=y +CONFIG_SCHED_SMT=y +CONFIG_MIPS_CMP=y +CONFIG_NR_CPUS=8 +CONFIG_HZ_100=y +CONFIG_LOCALVERSION="cmp" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m 
+CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=4 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# 
CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/trunk/arch/mips/configs/maltaup_defconfig b/trunk/arch/mips/configs/maltaup_defconfig new file mode 100644 index 000000000000..9868fc9c1133 --- /dev/null +++ b/trunk/arch/mips/configs/maltaup_defconfig @@ -0,0 +1,194 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_HZ_100=y +CONFIG_LOCALVERSION="up" +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_IPV6_PRIVACY=y +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_IPDDP_DECAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# 
CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/trunk/arch/mips/configs/sead3_defconfig b/trunk/arch/mips/configs/sead3_defconfig index e3eec68d9132..0abe681c11a0 100644 --- a/trunk/arch/mips/configs/sead3_defconfig +++ b/trunk/arch/mips/configs/sead3_defconfig @@ -2,7 +2,6 @@ CONFIG_MIPS_SEAD3=y CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_CPU_MIPS32_R2=y CONFIG_HZ_100=y -CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_NO_HZ=y @@ -115,10 +114,8 @@ CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y # CONFIG_FTRACE is not set -CONFIG_CRYPTO=y CONFIG_CRYPTO_CBC=y CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_AES=y CONFIG_CRYPTO_ARC4=y # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set diff --git a/trunk/arch/mips/configs/sead3micro_defconfig b/trunk/arch/mips/configs/sead3micro_defconfig new file mode 100644 index 000000000000..2a0da5bf4b64 --- /dev/null +++ b/trunk/arch/mips/configs/sead3micro_defconfig @@ -0,0 +1,122 @@ +CONFIG_MIPS_SEAD3=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R2=y +CONFIG_CPU_MICROMIPS=y +CONFIG_HZ_100=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_PROFILING=y +CONFIG_OPROFILE=y +CONFIG_MODULES=y +# CONFIG_BLK_DEV_BSG is not set +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y 
+CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_INET=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set +# CONFIG_INET_LRO is not set +# CONFIG_INET_DIAG is not set +# CONFIG_IPV6 is not set +# CONFIG_WIRELESS is not set +CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_MTD=y +CONFIG_MTD_CHAR=y +CONFIG_MTD_BLOCK=y +CONFIG_MTD_CFI=y +CONFIG_MTD_CFI_INTELEXT=y +CONFIG_MTD_PHYSMAP=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_GLUEBI=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_SCSI=y +# CONFIG_SCSI_PROC_FS is not set +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +CONFIG_SMSC911X=y +# CONFIG_NET_VENDOR_WIZNET is not set +CONFIG_MARVELL_PHY=y +CONFIG_DAVICOM_PHY=y +CONFIG_QSEMI_PHY=y +CONFIG_LXT_PHY=y +CONFIG_CICADA_PHY=y +CONFIG_VITESSE_PHY=y +CONFIG_SMSC_PHY=y +CONFIG_BROADCOM_PHY=y +CONFIG_ICPLUS_PHY=y +# CONFIG_WLAN is not set +# CONFIG_INPUT_MOUSEDEV is not set +# CONFIG_INPUT_KEYBOARD is not set +# CONFIG_INPUT_MOUSE is not set +# CONFIG_SERIO is not set +# CONFIG_CONSOLE_TRANSLATIONS is not set +CONFIG_VT_HW_CONSOLE_BINDING=y +CONFIG_LEGACY_PTY_COUNT=32 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_NR_UARTS=2 +CONFIG_SERIAL_8250_RUNTIME_UARTS=2 +# CONFIG_HW_RANDOM is not set +CONFIG_I2C=y +# CONFIG_I2C_COMPAT is not set +CONFIG_I2C_CHARDEV=y +# CONFIG_I2C_HELPER_AUTO is not set +CONFIG_SPI=y +CONFIG_SENSORS_ADT7475=y +CONFIG_BACKLIGHT_LCD_SUPPORT=y +CONFIG_LCD_CLASS_DEVICE=y +CONFIG_BACKLIGHT_CLASS_DEVICE=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_USB=y +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_ROOT_HUB_TT=y +CONFIG_USB_STORAGE=y +CONFIG_MMC=y +CONFIG_MMC_DEBUG=y +CONFIG_MMC_SPI=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_M41T80=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +# CONFIG_PRINT_QUOTA_WARNING is not set +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_TMPFS=y +CONFIG_JFFS2_FS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_NLS_CODEPAGE_437=y +CONFIG_NLS_ASCII=y +CONFIG_NLS_ISO8859_1=y +CONFIG_NLS_ISO8859_15=y +CONFIG_NLS_UTF8=y +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_CBC=y +CONFIG_CRYPTO_ECB=y +CONFIG_CRYPTO_ARC4=y +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set diff --git a/trunk/arch/mips/fw/lib/Makefile b/trunk/arch/mips/fw/lib/Makefile index 84befc968fc4..529150516777 100644 --- a/trunk/arch/mips/fw/lib/Makefile +++ b/trunk/arch/mips/fw/lib/Makefile @@ -2,4 +2,6 @@ # Makefile for generic prom monitor library routines under Linux. # +lib-y += cmdline.o + lib-$(CONFIG_64BIT) += call_o32.o diff --git a/trunk/arch/mips/fw/lib/cmdline.c b/trunk/arch/mips/fw/lib/cmdline.c new file mode 100644 index 000000000000..ffd0345780ae --- /dev/null +++ b/trunk/arch/mips/fw/lib/cmdline.c @@ -0,0 +1,101 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+ */ +#include +#include +#include + +#include +#include + +int fw_argc; +int *_fw_argv; +int *_fw_envp; + +void __init fw_init_cmdline(void) +{ + int i; + + /* Validate command line parameters. */ + if ((fw_arg0 >= CKSEG0) || (fw_arg1 < CKSEG0)) { + fw_argc = 0; + _fw_argv = NULL; + } else { + fw_argc = (fw_arg0 & 0x0000ffff); + _fw_argv = (int *)fw_arg1; + } + + /* Validate environment pointer. */ + if (fw_arg2 < CKSEG0) + _fw_envp = NULL; + else + _fw_envp = (int *)fw_arg2; + + for (i = 1; i < fw_argc; i++) { + strlcat(arcs_cmdline, fw_argv(i), COMMAND_LINE_SIZE); + if (i < (fw_argc - 1)) + strlcat(arcs_cmdline, " ", COMMAND_LINE_SIZE); + } +} + +char * __init fw_getcmdline(void) +{ + return &(arcs_cmdline[0]); +} + +char *fw_getenv(char *envname) +{ + char *result = NULL; + + if (_fw_envp != NULL) { + /* + * Return a pointer to the given environment variable. + * YAMON uses "name", "value" pairs, while U-Boot uses + * "name=value". + */ + int i, yamon, index = 0; + + yamon = (strchr(fw_envp(index), '=') == NULL); + i = strlen(envname); + + while (fw_envp(index)) { + if (strncmp(envname, fw_envp(index), i) == 0) { + if (yamon) { + result = fw_envp(index + 1); + break; + } else if (fw_envp(index)[i] == '=') { + result = (fw_envp(index + 1) + i); + break; + } + } + + /* Increment array index. */ + if (yamon) + index += 2; + else + index += 1; + } + } + + return result; +} + +unsigned long fw_getenvl(char *envname) +{ + unsigned long envl = 0UL; + char *str; + long val; + int tmp; + + str = fw_getenv(envname); + if (str) { + tmp = kstrtol(str, 0, &val); + envl = (unsigned long)val; + } + + return envl; +} diff --git a/trunk/arch/mips/include/asm/asm.h b/trunk/arch/mips/include/asm/asm.h index 164a21e65b42..879691d194af 100644 --- a/trunk/arch/mips/include/asm/asm.h +++ b/trunk/arch/mips/include/asm/asm.h @@ -296,6 +296,7 @@ symbol = value #define LONG_SUBU subu #define LONG_L lw #define LONG_S sw +#define LONG_SP swp #define LONG_SLL sll #define LONG_SLLV sllv #define LONG_SRL srl @@ -318,6 +319,7 @@ symbol = value #define LONG_SUBU dsubu #define LONG_L ld #define LONG_S sd +#define LONG_SP sdp #define LONG_SLL dsll #define LONG_SLLV dsllv #define LONG_SRL dsrl diff --git a/trunk/arch/mips/include/asm/bootinfo.h b/trunk/arch/mips/include/asm/bootinfo.h index b71dd5b16085..4d2cdea5aa37 100644 --- a/trunk/arch/mips/include/asm/bootinfo.h +++ b/trunk/arch/mips/include/asm/bootinfo.h @@ -104,6 +104,7 @@ struct boot_mem_map { extern struct boot_mem_map boot_mem_map; extern void add_memory_region(phys_t start, phys_t size, long type); +extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max); extern void prom_init(void); extern void prom_free_prom_memory(void); diff --git a/trunk/arch/mips/include/asm/branch.h b/trunk/arch/mips/include/asm/branch.h index 888766ae1f85..e28a3e0eb3cb 100644 --- a/trunk/arch/mips/include/asm/branch.h +++ b/trunk/arch/mips/include/asm/branch.h @@ -11,6 +11,14 @@ #include #include +extern int __isa_exception_epc(struct pt_regs *regs); +extern int __compute_return_epc(struct pt_regs *regs); +extern int __compute_return_epc_for_insn(struct pt_regs *regs, + union mips_instruction insn); +extern int __microMIPS_compute_return_epc(struct pt_regs *regs); +extern int __MIPS16e_compute_return_epc(struct pt_regs *regs); + + static inline int delay_slot(struct pt_regs *regs) { return regs->cp0_cause & CAUSEF_BD; @@ -18,20 +26,27 @@ static inline int delay_slot(struct pt_regs *regs) static inline unsigned long exception_epc(struct pt_regs *regs) { - if 
(!delay_slot(regs)) + if (likely(!delay_slot(regs))) return regs->cp0_epc; + if (get_isa16_mode(regs->cp0_epc)) + return __isa_exception_epc(regs); + return regs->cp0_epc + 4; } #define BRANCH_LIKELY_TAKEN 0x0001 -extern int __compute_return_epc(struct pt_regs *regs); -extern int __compute_return_epc_for_insn(struct pt_regs *regs, - union mips_instruction insn); - static inline int compute_return_epc(struct pt_regs *regs) { + if (get_isa16_mode(regs->cp0_epc)) { + if (cpu_has_mmips) + return __microMIPS_compute_return_epc(regs); + if (cpu_has_mips16) + return __MIPS16e_compute_return_epc(regs); + return regs->cp0_epc; + } + if (!delay_slot(regs)) { regs->cp0_epc += 4; return 0; @@ -40,4 +55,19 @@ static inline int compute_return_epc(struct pt_regs *regs) return __compute_return_epc(regs); } +static inline int MIPS16e_compute_return_epc(struct pt_regs *regs, + union mips16e_instruction *inst) +{ + if (likely(!delay_slot(regs))) { + if (inst->ri.opcode == MIPS16e_extend_op) { + regs->cp0_epc += 4; + return 0; + } + regs->cp0_epc += 2; + return 0; + } + + return __MIPS16e_compute_return_epc(regs); +} + #endif /* _ASM_BRANCH_H */ diff --git a/trunk/arch/mips/include/asm/clock.h b/trunk/arch/mips/include/asm/clock.h index c9456e7a7283..778e32d817bc 100644 --- a/trunk/arch/mips/include/asm/clock.h +++ b/trunk/arch/mips/include/asm/clock.h @@ -6,8 +6,6 @@ #include #include -extern void (*cpu_wait) (void); - struct clk; struct clk_ops { diff --git a/trunk/arch/mips/include/asm/cpu-features.h b/trunk/arch/mips/include/asm/cpu-features.h index 1a57e8b4d092..e5ec8fcd8afa 100644 --- a/trunk/arch/mips/include/asm/cpu-features.h +++ b/trunk/arch/mips/include/asm/cpu-features.h @@ -113,6 +113,9 @@ #ifndef cpu_has_pindexed_dcache #define cpu_has_pindexed_dcache (cpu_data[0].dcache.flags & MIPS_CACHE_PINDEX) #endif +#ifndef cpu_has_local_ebase +#define cpu_has_local_ebase 1 +#endif /* * I-Cache snoops remote store. This only matters on SMP. Some multiprocessors diff --git a/trunk/arch/mips/include/asm/dma-coherence.h b/trunk/arch/mips/include/asm/dma-coherence.h new file mode 100644 index 000000000000..242cbb3ca582 --- /dev/null +++ b/trunk/arch/mips/include/asm/dma-coherence.h @@ -0,0 +1,15 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2006 Ralf Baechle + * + */ +#ifndef __ASM_DMA_COHERENCE_H +#define __ASM_DMA_COHERENCE_H + +extern int coherentio; +extern int hw_coherentio; + +#endif diff --git a/trunk/arch/mips/include/asm/dma-mapping.h b/trunk/arch/mips/include/asm/dma-mapping.h index f8fc74b6cb47..84238c574d5e 100644 --- a/trunk/arch/mips/include/asm/dma-mapping.h +++ b/trunk/arch/mips/include/asm/dma-mapping.h @@ -2,6 +2,7 @@ #define _ASM_DMA_MAPPING_H #include +#include #include #include diff --git a/trunk/arch/mips/include/asm/fpu_emulator.h b/trunk/arch/mips/include/asm/fpu_emulator.h index 3b4092705567..2abb587d5ab4 100644 --- a/trunk/arch/mips/include/asm/fpu_emulator.h +++ b/trunk/arch/mips/include/asm/fpu_emulator.h @@ -54,6 +54,12 @@ do { \ extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc); extern int do_dsemulret(struct pt_regs *xcp); +extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, + struct mips_fpu_struct *ctx, int has_fpu, + void *__user *fault_addr); +int process_fpemu_return(int sig, void __user *fault_addr); +int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, + unsigned long *contpc); /* * Instruction inserted following the badinst to further tag the sequence diff --git a/trunk/arch/mips/include/asm/fw/fw.h b/trunk/arch/mips/include/asm/fw/fw.h new file mode 100644 index 000000000000..d6c50a7e9ede --- /dev/null +++ b/trunk/arch/mips/include/asm/fw/fw.h @@ -0,0 +1,47 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. + */ +#ifndef __ASM_FW_H_ +#define __ASM_FW_H_ + +#include /* For cleaner code... */ + +enum fw_memtypes { + fw_dontuse, + fw_code, + fw_free, +}; + +typedef struct { + unsigned long base; /* Within KSEG0 */ + unsigned int size; /* bytes */ + enum fw_memtypes type; /* fw_memtypes */ +} fw_memblock_t; + +/* Maximum number of memory block descriptors. */ +#define FW_MAX_MEMBLOCKS 32 + +extern int fw_argc; +extern int *_fw_argv; +extern int *_fw_envp; + +/* + * Most firmware like YAMON, PMON, etc. pass arguments and environment + * variables as 32-bit pointers. These take care of sign extension. + */ +#define fw_argv(index) ((char *)(long)_fw_argv[(index)]) +#define fw_envp(index) ((char *)(long)_fw_envp[(index)]) + +extern void fw_init_cmdline(void); +extern char *fw_getcmdline(void); +extern fw_memblock_t *fw_getmdesc(void); +extern void fw_meminit(void); +extern char *fw_getenv(char *name); +extern unsigned long fw_getenvl(char *name); +extern void fw_init_early_console(char port); + +#endif /* __ASM_FW_H_ */ diff --git a/trunk/arch/mips/include/asm/gic.h b/trunk/arch/mips/include/asm/gic.h index bdc9786ab5a7..7153b32de18e 100644 --- a/trunk/arch/mips/include/asm/gic.h +++ b/trunk/arch/mips/include/asm/gic.h @@ -202,7 +202,7 @@ #define GIC_VPE_WD_COUNT0_OFS 0x0094 #define GIC_VPE_WD_INITIAL0_OFS 0x0098 #define GIC_VPE_COMPARE_LO_OFS 0x00a0 -#define GIC_VPE_COMPARE_HI 0x00a4 +#define GIC_VPE_COMPARE_HI_OFS 0x00a4 #define GIC_VPE_EIC_SHADOW_SET_BASE 0x0100 #define GIC_VPE_EIC_SS(intr) \ @@ -359,7 +359,11 @@ struct gic_shared_intr_map { /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). 
*/ #define GIC_PIN_TO_VEC_OFFSET (1) -extern int gic_present; +#include +#include + +extern unsigned int gic_present; +extern unsigned int gic_frequency; extern unsigned long _gic_base; extern unsigned int gic_irq_base; extern unsigned int gic_irq_flags[]; @@ -368,18 +372,20 @@ extern struct gic_shared_intr_map gic_shared_intr_map[]; extern void gic_init(unsigned long gic_base_addr, unsigned long gic_addrspace_size, struct gic_intr_map *intrmap, unsigned int intrmap_size, unsigned int irqbase); - extern void gic_clocksource_init(unsigned int); -extern unsigned int gic_get_int(void); +extern unsigned int gic_compare_int (void); +extern cycle_t gic_read_count(void); +extern cycle_t gic_read_compare(void); +extern void gic_write_compare(cycle_t cnt); extern void gic_send_ipi(unsigned int intr); extern unsigned int plat_ipi_call_int_xlate(unsigned int); extern unsigned int plat_ipi_resched_int_xlate(unsigned int); extern void gic_bind_eic_interrupt(int irq, int set); extern unsigned int gic_get_timer_pending(void); +extern unsigned int gic_get_int(void); extern void gic_enable_interrupt(int irq_vec); extern void gic_disable_interrupt(int irq_vec); extern void gic_irq_ack(struct irq_data *d); extern void gic_finish_irq(struct irq_data *d); extern void gic_platform_init(int irqs, struct irq_chip *irq_controller); - #endif /* _ASM_GICREGS_H */ diff --git a/trunk/arch/mips/include/asm/hazards.h b/trunk/arch/mips/include/asm/hazards.h index 44d6a5bde4a1..e3ee92d4dbe7 100644 --- a/trunk/arch/mips/include/asm/hazards.h +++ b/trunk/arch/mips/include/asm/hazards.h @@ -10,34 +10,13 @@ #ifndef _ASM_HAZARDS_H #define _ASM_HAZARDS_H -#ifdef __ASSEMBLY__ -#define ASMMACRO(name, code...) .macro name; code; .endm -#else - -#include - -#define ASMMACRO(name, code...) \ -__asm__(".macro " #name "; " #code "; .endm"); \ - \ -static inline void name(void) \ -{ \ - __asm__ __volatile__ (#name); \ -} - -/* - * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine. - */ -extern void mips_ihb(void); - -#endif +#include -ASMMACRO(_ssnop, - sll $0, $0, 1 - ) +#define ___ssnop \ + sll $0, $0, 1 -ASMMACRO(_ehb, - sll $0, $0, 3 - ) +#define ___ehb \ + sll $0, $0, 3 /* * TLB hazards @@ -48,24 +27,24 @@ ASMMACRO(_ehb, * MIPSR2 defines ehb for hazard avoidance */ -ASMMACRO(mtc0_tlbw_hazard, - _ehb - ) -ASMMACRO(tlbw_use_hazard, - _ehb - ) -ASMMACRO(tlb_probe_hazard, - _ehb - ) -ASMMACRO(irq_enable_hazard, - _ehb - ) -ASMMACRO(irq_disable_hazard, - _ehb - ) -ASMMACRO(back_to_back_c0_hazard, - _ehb - ) +#define __mtc0_tlbw_hazard \ + ___ehb + +#define __tlbw_use_hazard \ + ___ehb + +#define __tlb_probe_hazard \ + ___ehb + +#define __irq_enable_hazard \ + ___ehb + +#define __irq_disable_hazard \ + ___ehb + +#define __back_to_back_c0_hazard \ + ___ehb + /* * gcc has a tradition of misscompiling the previous construct using the * address of a label as argument to inline assembler. Gas otoh has the @@ -94,24 +73,42 @@ do { \ * These are slightly complicated by the fact that we guarantee R1 kernels to * run fine on R2 processors. 
*/ -ASMMACRO(mtc0_tlbw_hazard, - _ssnop; _ssnop; _ehb - ) -ASMMACRO(tlbw_use_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) -ASMMACRO(tlb_probe_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) -ASMMACRO(irq_enable_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) -ASMMACRO(irq_disable_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) -ASMMACRO(back_to_back_c0_hazard, - _ssnop; _ssnop; _ssnop; _ehb - ) + +#define __mtc0_tlbw_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __tlbw_use_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __tlb_probe_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __irq_enable_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __irq_disable_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + +#define __back_to_back_c0_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop; \ + ___ehb + /* * gcc has a tradition of misscompiling the previous construct using the * address of a label as argument to inline assembler. Gas otoh has the @@ -147,18 +144,18 @@ do { \ * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer. */ -ASMMACRO(mtc0_tlbw_hazard, - ) -ASMMACRO(tlbw_use_hazard, - ) -ASMMACRO(tlb_probe_hazard, - ) -ASMMACRO(irq_enable_hazard, - ) -ASMMACRO(irq_disable_hazard, - ) -ASMMACRO(back_to_back_c0_hazard, - ) +#define __mtc0_tlbw_hazard + +#define __tlbw_use_hazard + +#define __tlb_probe_hazard + +#define __irq_enable_hazard + +#define __irq_disable_hazard + +#define __back_to_back_c0_hazard + #define instruction_hazard() do { } while (0) #elif defined(CONFIG_CPU_SB1) @@ -166,19 +163,21 @@ ASMMACRO(back_to_back_c0_hazard, /* * Mostly like R4000 for historic reasons */ -ASMMACRO(mtc0_tlbw_hazard, - ) -ASMMACRO(tlbw_use_hazard, - ) -ASMMACRO(tlb_probe_hazard, - ) -ASMMACRO(irq_enable_hazard, - ) -ASMMACRO(irq_disable_hazard, - _ssnop; _ssnop; _ssnop - ) -ASMMACRO(back_to_back_c0_hazard, - ) +#define __mtc0_tlbw_hazard + +#define __tlbw_use_hazard + +#define __tlb_probe_hazard + +#define __irq_enable_hazard + +#define __irq_disable_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop + +#define __back_to_back_c0_hazard + #define instruction_hazard() do { } while (0) #else @@ -192,24 +191,35 @@ ASMMACRO(back_to_back_c0_hazard, * hazard so this is nice trick to have an optimal code for a range of * processors. 
*/ -ASMMACRO(mtc0_tlbw_hazard, - nop; nop - ) -ASMMACRO(tlbw_use_hazard, - nop; nop; nop - ) -ASMMACRO(tlb_probe_hazard, - nop; nop; nop - ) -ASMMACRO(irq_enable_hazard, - _ssnop; _ssnop; _ssnop; - ) -ASMMACRO(irq_disable_hazard, - nop; nop; nop - ) -ASMMACRO(back_to_back_c0_hazard, - _ssnop; _ssnop; _ssnop; - ) +#define __mtc0_tlbw_hazard \ + nop; \ + nop + +#define __tlbw_use_hazard \ + nop; \ + nop; \ + nop + +#define __tlb_probe_hazard \ + nop; \ + nop; \ + nop + +#define __irq_enable_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop + +#define __irq_disable_hazard \ + nop; \ + nop; \ + nop + +#define __back_to_back_c0_hazard \ + ___ssnop; \ + ___ssnop; \ + ___ssnop + #define instruction_hazard() do { } while (0) #endif @@ -218,32 +228,137 @@ ASMMACRO(back_to_back_c0_hazard, /* FPU hazards */ #if defined(CONFIG_CPU_SB1) -ASMMACRO(enable_fpu_hazard, - .set push; - .set mips64; - .set noreorder; - _ssnop; - bnezl $0, .+4; - _ssnop; - .set pop -) -ASMMACRO(disable_fpu_hazard, -) + +#define __enable_fpu_hazard \ + .set push; \ + .set mips64; \ + .set noreorder; \ + ___ssnop; \ + bnezl $0, .+4; \ + ___ssnop; \ + .set pop + +#define __disable_fpu_hazard #elif defined(CONFIG_CPU_MIPSR2) -ASMMACRO(enable_fpu_hazard, - _ehb -) -ASMMACRO(disable_fpu_hazard, - _ehb -) + +#define __enable_fpu_hazard \ + ___ehb + +#define __disable_fpu_hazard \ + ___ehb + #else -ASMMACRO(enable_fpu_hazard, - nop; nop; nop; nop -) -ASMMACRO(disable_fpu_hazard, - _ehb -) + +#define __enable_fpu_hazard \ + nop; \ + nop; \ + nop; \ + nop + +#define __disable_fpu_hazard \ + ___ehb + #endif +#ifdef __ASSEMBLY__ + +#define _ssnop ___ssnop +#define _ehb ___ehb +#define mtc0_tlbw_hazard __mtc0_tlbw_hazard +#define tlbw_use_hazard __tlbw_use_hazard +#define tlb_probe_hazard __tlb_probe_hazard +#define irq_enable_hazard __irq_enable_hazard +#define irq_disable_hazard __irq_disable_hazard +#define back_to_back_c0_hazard __back_to_back_c0_hazard +#define enable_fpu_hazard __enable_fpu_hazard +#define disable_fpu_hazard __disable_fpu_hazard + +#else + +#define _ssnop() \ +do { \ + __asm__ __volatile__( \ + __stringify(___ssnop) \ + ); \ +} while (0) + +#define _ehb() \ +do { \ + __asm__ __volatile__( \ + __stringify(___ehb) \ + ); \ +} while (0) + + +#define mtc0_tlbw_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__mtc0_tlbw_hazard) \ + ); \ +} while (0) + + +#define tlbw_use_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__tlbw_use_hazard) \ + ); \ +} while (0) + + +#define tlb_probe_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__tlb_probe_hazard) \ + ); \ +} while (0) + + +#define irq_enable_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__irq_enable_hazard) \ + ); \ +} while (0) + + +#define irq_disable_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__irq_disable_hazard) \ + ); \ +} while (0) + + +#define back_to_back_c0_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__back_to_back_c0_hazard) \ + ); \ +} while (0) + + +#define enable_fpu_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__enable_fpu_hazard) \ + ); \ +} while (0) + + +#define disable_fpu_hazard() \ +do { \ + __asm__ __volatile__( \ + __stringify(__disable_fpu_hazard) \ + ); \ +} while (0) + +/* + * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine. 
+ */ +extern void mips_ihb(void); + +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_HAZARDS_H */ diff --git a/trunk/arch/mips/include/asm/idle.h b/trunk/arch/mips/include/asm/idle.h new file mode 100644 index 000000000000..d192158886b1 --- /dev/null +++ b/trunk/arch/mips/include/asm/idle.h @@ -0,0 +1,23 @@ +#ifndef __ASM_IDLE_H +#define __ASM_IDLE_H + +#include + +extern void (*cpu_wait)(void); +extern void r4k_wait(void); +extern asmlinkage void __r4k_wait(void); +extern void r4k_wait_irqoff(void); +extern void __pastwait(void); + +static inline int using_rollback_handler(void) +{ + return cpu_wait == r4k_wait; +} + +static inline int address_is_in_r4k_wait_irqoff(unsigned long addr) +{ + return addr >= (unsigned long)r4k_wait_irqoff && + addr < (unsigned long)__pastwait; +} + +#endif /* __ASM_IDLE_H */ diff --git a/trunk/arch/mips/include/asm/inst.h b/trunk/arch/mips/include/asm/inst.h index f1eadf764071..22912f78401c 100644 --- a/trunk/arch/mips/include/asm/inst.h +++ b/trunk/arch/mips/include/asm/inst.h @@ -73,4 +73,16 @@ typedef unsigned int mips_instruction; +/* microMIPS instruction decode structure. Do NOT export!!! */ +struct mm_decoded_insn { + mips_instruction insn; + mips_instruction next_insn; + int pc_inc; + int next_pc_inc; + int micro_mips_mode; +}; + +/* Recode table from 16-bit register notation to 32-bit GPR. Do NOT export!!! */ +extern const int reg16to32[]; + #endif /* _ASM_INST_H */ diff --git a/trunk/arch/mips/include/asm/io.h b/trunk/arch/mips/include/asm/io.h index 1be13727323f..b7e59853fd33 100644 --- a/trunk/arch/mips/include/asm/io.h +++ b/trunk/arch/mips/include/asm/io.h @@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base) */ static inline unsigned long virt_to_phys(volatile const void *address) { - return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET; + return __pa(address); } /* diff --git a/trunk/arch/mips/include/asm/irqflags.h b/trunk/arch/mips/include/asm/irqflags.h index 9f3384c789d7..45c00951888b 100644 --- a/trunk/arch/mips/include/asm/irqflags.h +++ b/trunk/arch/mips/include/asm/irqflags.h @@ -14,53 +14,48 @@ #ifndef __ASSEMBLY__ #include +#include #include #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) -__asm__( - " .macro arch_local_irq_disable\n" +static inline void arch_local_irq_disable(void) +{ + __asm__ __volatile__( " .set push \n" " .set noat \n" " di \n" - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); - -static inline void arch_local_irq_disable(void) -{ - __asm__ __volatile__( - "arch_local_irq_disable" - : /* no outputs */ - : /* no inputs */ - : "memory"); + : /* no outputs */ + : /* no inputs */ + : "memory"); } +static inline unsigned long arch_local_irq_save(void) +{ + unsigned long flags; -__asm__( - " .macro arch_local_irq_save result \n" + asm __volatile__( " .set push \n" " .set reorder \n" " .set noat \n" - " di \\result \n" - " andi \\result, 1 \n" - " irq_disable_hazard \n" + " di %[flags] \n" + " andi %[flags], 1 \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); + : [flags] "=r" (flags) + : /* no inputs */ + : "memory"); -static inline unsigned long arch_local_irq_save(void) -{ - unsigned long flags; - asm volatile("arch_local_irq_save\t%0" - : "=r" (flags) - : /* no inputs */ - : "memory"); return flags; } +static inline void arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; -__asm__( - " .macro arch_local_irq_restore flags \n" + __asm__ __volatile__( " .set push \n" " .set 
noreorder \n" " .set noat \n" @@ -69,7 +64,7 @@ __asm__( * Slow, but doesn't suffer from a relatively unlikely race * condition we're having since days 1. */ - " beqz \\flags, 1f \n" + " beqz %[flags], 1f \n" " di \n" " ei \n" "1: \n" @@ -78,33 +73,44 @@ __asm__( * Fast, dangerous. Life is fun, life is good. */ " mfc0 $1, $12 \n" - " ins $1, \\flags, 0, 1 \n" + " ins $1, %[flags], 0, 1 \n" " mtc0 $1, $12 \n" #endif - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); - -static inline void arch_local_irq_restore(unsigned long flags) -{ - unsigned long __tmp1; - - __asm__ __volatile__( - "arch_local_irq_restore\t%0" - : "=r" (__tmp1) - : "0" (flags) - : "memory"); + : [flags] "=r" (__tmp1) + : "0" (flags) + : "memory"); } static inline void __arch_local_irq_restore(unsigned long flags) { - unsigned long __tmp1; - __asm__ __volatile__( - "arch_local_irq_restore\t%0" - : "=r" (__tmp1) - : "0" (flags) - : "memory"); + " .set push \n" + " .set noreorder \n" + " .set noat \n" +#if defined(CONFIG_IRQ_CPU) + /* + * Slow, but doesn't suffer from a relatively unlikely race + * condition we're having since days 1. + */ + " beqz %[flags], 1f \n" + " di \n" + " ei \n" + "1: \n" +#else + /* + * Fast, dangerous. Life is fun, life is good. + */ + " mfc0 $1, $12 \n" + " ins $1, %[flags], 0, 1 \n" + " mtc0 $1, $12 \n" +#endif + " " __stringify(__irq_disable_hazard) " \n" + " .set pop \n" + : [flags] "=r" (flags) + : "0" (flags) + : "memory"); } #else /* Functions that require preempt_{dis,en}able() are in mips-atomic.c */ @@ -115,8 +121,18 @@ void __arch_local_irq_restore(unsigned long flags); #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */ -__asm__( - " .macro arch_local_irq_enable \n" +extern void smtc_ipi_replay(void); + +static inline void arch_local_irq_enable(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of call overhead on each local_irq_enable() + */ + smtc_ipi_replay(); +#endif + __asm__ __volatile__( " .set push \n" " .set reorder \n" " .set noat \n" @@ -133,45 +149,28 @@ __asm__( " xori $1,0x1e \n" " mtc0 $1,$12 \n" #endif - " irq_enable_hazard \n" + " " __stringify(__irq_enable_hazard) " \n" " .set pop \n" - " .endm"); - -extern void smtc_ipi_replay(void); - -static inline void arch_local_irq_enable(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of call overhead on each local_irq_enable() - */ - smtc_ipi_replay(); -#endif - __asm__ __volatile__( - "arch_local_irq_enable" - : /* no outputs */ - : /* no inputs */ - : "memory"); + : /* no outputs */ + : /* no inputs */ + : "memory"); } +static inline unsigned long arch_local_save_flags(void) +{ + unsigned long flags; -__asm__( - " .macro arch_local_save_flags flags \n" + asm __volatile__( " .set push \n" " .set reorder \n" #ifdef CONFIG_MIPS_MT_SMTC - " mfc0 \\flags, $2, 1 \n" + " mfc0 %[flags], $2, 1 \n" #else - " mfc0 \\flags, $12 \n" + " mfc0 %[flags], $12 \n" #endif " .set pop \n" - " .endm \n"); + : [flags] "=r" (flags)); -static inline unsigned long arch_local_save_flags(void) -{ - unsigned long flags; - asm volatile("arch_local_save_flags %0" : "=r" (flags)); return flags; } diff --git a/trunk/arch/mips/include/asm/kvm_host.h b/trunk/arch/mips/include/asm/kvm_host.h new file mode 100644 index 000000000000..143875c6c95a --- /dev/null +++ b/trunk/arch/mips/include/asm/kvm_host.h @@ -0,0 +1,667 @@ +/* +* This file is 
subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +#ifndef __MIPS_KVM_HOST_H__ +#define __MIPS_KVM_HOST_H__ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#define KVM_MAX_VCPUS 1 +#define KVM_USER_MEM_SLOTS 8 +/* memory slots that does not exposed to userspace */ +#define KVM_PRIVATE_MEM_SLOTS 0 + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +/* Don't support huge pages */ +#define KVM_HPAGE_GFN_SHIFT(x) 0 + +/* We don't currently support large pages. */ +#define KVM_NR_PAGE_SIZES 1 +#define KVM_PAGES_PER_HPAGE(x) 1 + + + +/* Special address that contains the comm page, used for reducing # of traps */ +#define KVM_GUEST_COMMPAGE_ADDR 0x0 + +#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \ + ((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0)) + +#define KVM_GUEST_KUSEG 0x00000000UL +#define KVM_GUEST_KSEG0 0x40000000UL +#define KVM_GUEST_KSEG23 0x60000000UL +#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000) +#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff) + +#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) +#define KVM_GUEST_CKSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) +#define KVM_GUEST_CKSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) + +/* + * Map an address to a certain kernel segment + */ +#define KVM_GUEST_KSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0) +#define KVM_GUEST_KSEG1ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG1) +#define KVM_GUEST_KSEG23ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG23) + +#define KVM_INVALID_PAGE 0xdeadbeef +#define KVM_INVALID_INST 0xdeadbeef +#define KVM_INVALID_ADDR 0xdeadbeef + +#define KVM_MALTA_GUEST_RTC_ADDR 0xb8000070UL + +#define GUEST_TICKS_PER_JIFFY (40000000/HZ) +#define MS_TO_NS(x) (x * 1E6L) + +#define CAUSEB_DC 27 +#define CAUSEF_DC (_ULCAST_(1) << 27) + +struct kvm; +struct kvm_run; +struct kvm_vcpu; +struct kvm_interrupt; + +extern atomic_t kvm_mips_instance; +extern pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); +extern void (*kvm_mips_release_pfn_clean) (pfn_t pfn); +extern bool(*kvm_mips_is_error_pfn) (pfn_t pfn); + +struct kvm_vm_stat { + u32 remote_tlb_flush; +}; + +struct kvm_vcpu_stat { + u32 wait_exits; + u32 cache_exits; + u32 signal_exits; + u32 int_exits; + u32 cop_unusable_exits; + u32 tlbmod_exits; + u32 tlbmiss_ld_exits; + u32 tlbmiss_st_exits; + u32 addrerr_st_exits; + u32 addrerr_ld_exits; + u32 syscall_exits; + u32 resvd_inst_exits; + u32 break_inst_exits; + u32 flush_dcache_exits; + u32 halt_wakeup; +}; + +enum kvm_mips_exit_types { + WAIT_EXITS, + CACHE_EXITS, + SIGNAL_EXITS, + INT_EXITS, + COP_UNUSABLE_EXITS, + TLBMOD_EXITS, + TLBMISS_LD_EXITS, + TLBMISS_ST_EXITS, + ADDRERR_ST_EXITS, + ADDRERR_LD_EXITS, + SYSCALL_EXITS, + RESVD_INST_EXITS, + BREAK_INST_EXITS, + FLUSH_DCACHE_EXITS, + MAX_KVM_MIPS_EXIT_TYPES +}; + +struct kvm_arch_memory_slot { +}; + +struct kvm_arch { + /* Guest GVA->HPA page table */ + unsigned long *guest_pmap; + unsigned long guest_pmap_npages; + + /* Wired host TLB used for the commpage */ + int commpage_tlb; +}; + +#define N_MIPS_COPROC_REGS 32 +#define N_MIPS_COPROC_SEL 8 + +struct mips_coproc { + unsigned long reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS + 
unsigned long stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; +#endif +}; + +/* + * Coprocessor 0 register names + */ +#define MIPS_CP0_TLB_INDEX 0 +#define MIPS_CP0_TLB_RANDOM 1 +#define MIPS_CP0_TLB_LOW 2 +#define MIPS_CP0_TLB_LO0 2 +#define MIPS_CP0_TLB_LO1 3 +#define MIPS_CP0_TLB_CONTEXT 4 +#define MIPS_CP0_TLB_PG_MASK 5 +#define MIPS_CP0_TLB_WIRED 6 +#define MIPS_CP0_HWRENA 7 +#define MIPS_CP0_BAD_VADDR 8 +#define MIPS_CP0_COUNT 9 +#define MIPS_CP0_TLB_HI 10 +#define MIPS_CP0_COMPARE 11 +#define MIPS_CP0_STATUS 12 +#define MIPS_CP0_CAUSE 13 +#define MIPS_CP0_EXC_PC 14 +#define MIPS_CP0_PRID 15 +#define MIPS_CP0_CONFIG 16 +#define MIPS_CP0_LLADDR 17 +#define MIPS_CP0_WATCH_LO 18 +#define MIPS_CP0_WATCH_HI 19 +#define MIPS_CP0_TLB_XCONTEXT 20 +#define MIPS_CP0_ECC 26 +#define MIPS_CP0_CACHE_ERR 27 +#define MIPS_CP0_TAG_LO 28 +#define MIPS_CP0_TAG_HI 29 +#define MIPS_CP0_ERROR_PC 30 +#define MIPS_CP0_DEBUG 23 +#define MIPS_CP0_DEPC 24 +#define MIPS_CP0_PERFCNT 25 +#define MIPS_CP0_ERRCTL 26 +#define MIPS_CP0_DATA_LO 28 +#define MIPS_CP0_DATA_HI 29 +#define MIPS_CP0_DESAVE 31 + +#define MIPS_CP0_CONFIG_SEL 0 +#define MIPS_CP0_CONFIG1_SEL 1 +#define MIPS_CP0_CONFIG2_SEL 2 +#define MIPS_CP0_CONFIG3_SEL 3 + +/* Config0 register bits */ +#define CP0C0_M 31 +#define CP0C0_K23 28 +#define CP0C0_KU 25 +#define CP0C0_MDU 20 +#define CP0C0_MM 17 +#define CP0C0_BM 16 +#define CP0C0_BE 15 +#define CP0C0_AT 13 +#define CP0C0_AR 10 +#define CP0C0_MT 7 +#define CP0C0_VI 3 +#define CP0C0_K0 0 + +/* Config1 register bits */ +#define CP0C1_M 31 +#define CP0C1_MMU 25 +#define CP0C1_IS 22 +#define CP0C1_IL 19 +#define CP0C1_IA 16 +#define CP0C1_DS 13 +#define CP0C1_DL 10 +#define CP0C1_DA 7 +#define CP0C1_C2 6 +#define CP0C1_MD 5 +#define CP0C1_PC 4 +#define CP0C1_WR 3 +#define CP0C1_CA 2 +#define CP0C1_EP 1 +#define CP0C1_FP 0 + +/* Config2 Register bits */ +#define CP0C2_M 31 +#define CP0C2_TU 28 +#define CP0C2_TS 24 +#define CP0C2_TL 20 +#define CP0C2_TA 16 +#define CP0C2_SU 12 +#define CP0C2_SS 8 +#define CP0C2_SL 4 +#define CP0C2_SA 0 + +/* Config3 Register bits */ +#define CP0C3_M 31 +#define CP0C3_ISA_ON_EXC 16 +#define CP0C3_ULRI 13 +#define CP0C3_DSPP 10 +#define CP0C3_LPA 7 +#define CP0C3_VEIC 6 +#define CP0C3_VInt 5 +#define CP0C3_SP 4 +#define CP0C3_MT 2 +#define CP0C3_SM 1 +#define CP0C3_TL 0 + +/* Have config1, Cacheable, noncoherent, write-back, write allocate*/ +#define MIPS_CONFIG0 \ + ((1 << CP0C0_M) | (0x3 << CP0C0_K0)) + +/* Have config2, no coprocessor2 attached, no MDMX support attached, + no performance counters, watch registers present, + no code compression, EJTAG present, no FPU, no watch registers */ +#define MIPS_CONFIG1 \ +((1 << CP0C1_M) | \ + (0 << CP0C1_C2) | (0 << CP0C1_MD) | (0 << CP0C1_PC) | \ + (0 << CP0C1_WR) | (0 << CP0C1_CA) | (1 << CP0C1_EP) | \ + (0 << CP0C1_FP)) + +/* Have config3, no tertiary/secondary caches implemented */ +#define MIPS_CONFIG2 \ +((1 << CP0C2_M)) + +/* No config4, no DSP ASE, no large physaddr (PABITS), + no external interrupt controller, no vectored interrupts, + no 1kb pages, no SmartMIPS ASE, no trace logic */ +#define MIPS_CONFIG3 \ +((0 << CP0C3_M) | (0 << CP0C3_DSPP) | (0 << CP0C3_LPA) | \ + (0 << CP0C3_VEIC) | (0 << CP0C3_VInt) | (0 << CP0C3_SP) | \ + (0 << CP0C3_SM) | (0 << CP0C3_TL)) + +/* MMU types, the first four entries have the same layout as the + CP0C0_MT field. 
*/ +enum mips_mmu_types { + MMU_TYPE_NONE, + MMU_TYPE_R4000, + MMU_TYPE_RESERVED, + MMU_TYPE_FMT, + MMU_TYPE_R3000, + MMU_TYPE_R6000, + MMU_TYPE_R8000 +}; + +/* + * Trap codes + */ +#define T_INT 0 /* Interrupt pending */ +#define T_TLB_MOD 1 /* TLB modified fault */ +#define T_TLB_LD_MISS 2 /* TLB miss on load or ifetch */ +#define T_TLB_ST_MISS 3 /* TLB miss on a store */ +#define T_ADDR_ERR_LD 4 /* Address error on a load or ifetch */ +#define T_ADDR_ERR_ST 5 /* Address error on a store */ +#define T_BUS_ERR_IFETCH 6 /* Bus error on an ifetch */ +#define T_BUS_ERR_LD_ST 7 /* Bus error on a load or store */ +#define T_SYSCALL 8 /* System call */ +#define T_BREAK 9 /* Breakpoint */ +#define T_RES_INST 10 /* Reserved instruction exception */ +#define T_COP_UNUSABLE 11 /* Coprocessor unusable */ +#define T_OVFLOW 12 /* Arithmetic overflow */ + +/* + * Trap definitions added for r4000 port. + */ +#define T_TRAP 13 /* Trap instruction */ +#define T_VCEI 14 /* Virtual coherency exception */ +#define T_FPE 15 /* Floating point exception */ +#define T_WATCH 23 /* Watch address reference */ +#define T_VCED 31 /* Virtual coherency data */ + +/* Resume Flags */ +#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */ +#define RESUME_FLAG_HOST (1<<1) /* Resume host? */ + +#define RESUME_GUEST 0 +#define RESUME_GUEST_DR RESUME_FLAG_DR +#define RESUME_HOST RESUME_FLAG_HOST + +enum emulation_result { + EMULATE_DONE, /* no further processing */ + EMULATE_DO_MMIO, /* kvm_run filled with MMIO request */ + EMULATE_FAIL, /* can't emulate this instruction */ + EMULATE_WAIT, /* WAIT instruction */ + EMULATE_PRIV_FAIL, +}; + +#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */ +#define MIPS3_PG_V 0x00000002 /* Valid */ +#define MIPS3_PG_NV 0x00000000 +#define MIPS3_PG_D 0x00000004 /* Dirty */ + +#define mips3_paddr_to_tlbpfn(x) \ + (((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME) +#define mips3_tlbpfn_to_paddr(x) \ + ((unsigned long)((x) & MIPS3_PG_FRAME) << MIPS3_PG_SHIFT) + +#define MIPS3_PG_SHIFT 6 +#define MIPS3_PG_FRAME 0x3fffffc0 + +#define VPN2_MASK 0xffffe000 +#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) +#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) +#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) +#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? 
((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) + +struct kvm_mips_tlb { + long tlb_mask; + long tlb_hi; + long tlb_lo0; + long tlb_lo1; +}; + +#define KVM_MIPS_GUEST_TLB_SIZE 64 +struct kvm_vcpu_arch { + void *host_ebase, *guest_ebase; + unsigned long host_stack; + unsigned long host_gp; + + /* Host CP0 registers used when handling exits from guest */ + unsigned long host_cp0_badvaddr; + unsigned long host_cp0_cause; + unsigned long host_cp0_epc; + unsigned long host_cp0_entryhi; + uint32_t guest_inst; + + /* GPRS */ + unsigned long gprs[32]; + unsigned long hi; + unsigned long lo; + unsigned long pc; + + /* FPU State */ + struct mips_fpu_struct fpu; + + /* COP0 State */ + struct mips_coproc *cop0; + + /* Host KSEG0 address of the EI/DI offset */ + void *kseg0_commpage; + + u32 io_gpr; /* GPR used as IO source/target */ + + /* Used to calibrate the virutal count register for the guest */ + int32_t host_cp0_count; + + /* Bitmask of exceptions that are pending */ + unsigned long pending_exceptions; + + /* Bitmask of pending exceptions to be cleared */ + unsigned long pending_exceptions_clr; + + unsigned long pending_load_cause; + + /* Save/Restore the entryhi register when are are preempted/scheduled back in */ + unsigned long preempt_entryhi; + + /* S/W Based TLB for guest */ + struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE]; + + /* Cached guest kernel/user ASIDs */ + uint32_t guest_user_asid[NR_CPUS]; + uint32_t guest_kernel_asid[NR_CPUS]; + struct mm_struct guest_kernel_mm, guest_user_mm; + + struct kvm_mips_tlb shadow_tlb[NR_CPUS][KVM_MIPS_GUEST_TLB_SIZE]; + + + struct hrtimer comparecount_timer; + + int last_sched_cpu; + + /* WAIT executed */ + int wait; +}; + + +#define kvm_read_c0_guest_index(cop0) (cop0->reg[MIPS_CP0_TLB_INDEX][0]) +#define kvm_write_c0_guest_index(cop0, val) (cop0->reg[MIPS_CP0_TLB_INDEX][0] = val) +#define kvm_read_c0_guest_entrylo0(cop0) (cop0->reg[MIPS_CP0_TLB_LO0][0]) +#define kvm_read_c0_guest_entrylo1(cop0) (cop0->reg[MIPS_CP0_TLB_LO1][0]) +#define kvm_read_c0_guest_context(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0]) +#define kvm_write_c0_guest_context(cop0, val) (cop0->reg[MIPS_CP0_TLB_CONTEXT][0] = (val)) +#define kvm_read_c0_guest_userlocal(cop0) (cop0->reg[MIPS_CP0_TLB_CONTEXT][2]) +#define kvm_read_c0_guest_pagemask(cop0) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0]) +#define kvm_write_c0_guest_pagemask(cop0, val) (cop0->reg[MIPS_CP0_TLB_PG_MASK][0] = (val)) +#define kvm_read_c0_guest_wired(cop0) (cop0->reg[MIPS_CP0_TLB_WIRED][0]) +#define kvm_write_c0_guest_wired(cop0, val) (cop0->reg[MIPS_CP0_TLB_WIRED][0] = (val)) +#define kvm_read_c0_guest_badvaddr(cop0) (cop0->reg[MIPS_CP0_BAD_VADDR][0]) +#define kvm_write_c0_guest_badvaddr(cop0, val) (cop0->reg[MIPS_CP0_BAD_VADDR][0] = (val)) +#define kvm_read_c0_guest_count(cop0) (cop0->reg[MIPS_CP0_COUNT][0]) +#define kvm_write_c0_guest_count(cop0, val) (cop0->reg[MIPS_CP0_COUNT][0] = (val)) +#define kvm_read_c0_guest_entryhi(cop0) (cop0->reg[MIPS_CP0_TLB_HI][0]) +#define kvm_write_c0_guest_entryhi(cop0, val) (cop0->reg[MIPS_CP0_TLB_HI][0] = (val)) +#define kvm_read_c0_guest_compare(cop0) (cop0->reg[MIPS_CP0_COMPARE][0]) +#define kvm_write_c0_guest_compare(cop0, val) (cop0->reg[MIPS_CP0_COMPARE][0] = (val)) +#define kvm_read_c0_guest_status(cop0) (cop0->reg[MIPS_CP0_STATUS][0]) +#define kvm_write_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] = (val)) +#define kvm_read_c0_guest_intctl(cop0) (cop0->reg[MIPS_CP0_STATUS][1]) +#define kvm_write_c0_guest_intctl(cop0, val) 
(cop0->reg[MIPS_CP0_STATUS][1] = (val)) +#define kvm_read_c0_guest_cause(cop0) (cop0->reg[MIPS_CP0_CAUSE][0]) +#define kvm_write_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] = (val)) +#define kvm_read_c0_guest_epc(cop0) (cop0->reg[MIPS_CP0_EXC_PC][0]) +#define kvm_write_c0_guest_epc(cop0, val) (cop0->reg[MIPS_CP0_EXC_PC][0] = (val)) +#define kvm_read_c0_guest_prid(cop0) (cop0->reg[MIPS_CP0_PRID][0]) +#define kvm_write_c0_guest_prid(cop0, val) (cop0->reg[MIPS_CP0_PRID][0] = (val)) +#define kvm_read_c0_guest_ebase(cop0) (cop0->reg[MIPS_CP0_PRID][1]) +#define kvm_write_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] = (val)) +#define kvm_read_c0_guest_config(cop0) (cop0->reg[MIPS_CP0_CONFIG][0]) +#define kvm_read_c0_guest_config1(cop0) (cop0->reg[MIPS_CP0_CONFIG][1]) +#define kvm_read_c0_guest_config2(cop0) (cop0->reg[MIPS_CP0_CONFIG][2]) +#define kvm_read_c0_guest_config3(cop0) (cop0->reg[MIPS_CP0_CONFIG][3]) +#define kvm_read_c0_guest_config7(cop0) (cop0->reg[MIPS_CP0_CONFIG][7]) +#define kvm_write_c0_guest_config(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][0] = (val)) +#define kvm_write_c0_guest_config1(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][1] = (val)) +#define kvm_write_c0_guest_config2(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][2] = (val)) +#define kvm_write_c0_guest_config3(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][3] = (val)) +#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val)) +#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0]) +#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val)) + +#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val)) +#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val)) +#define kvm_set_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] |= (val)) +#define kvm_clear_c0_guest_cause(cop0, val) (cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val)) +#define kvm_change_c0_guest_cause(cop0, change, val) \ +{ \ + kvm_clear_c0_guest_cause(cop0, change); \ + kvm_set_c0_guest_cause(cop0, ((val) & (change))); \ +} +#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val)) +#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val)) +#define kvm_change_c0_guest_ebase(cop0, change, val) \ +{ \ + kvm_clear_c0_guest_ebase(cop0, change); \ + kvm_set_c0_guest_ebase(cop0, ((val) & (change))); \ +} + + +struct kvm_mips_callbacks { + int (*handle_cop_unusable) (struct kvm_vcpu *vcpu); + int (*handle_tlb_mod) (struct kvm_vcpu *vcpu); + int (*handle_tlb_ld_miss) (struct kvm_vcpu *vcpu); + int (*handle_tlb_st_miss) (struct kvm_vcpu *vcpu); + int (*handle_addr_err_st) (struct kvm_vcpu *vcpu); + int (*handle_addr_err_ld) (struct kvm_vcpu *vcpu); + int (*handle_syscall) (struct kvm_vcpu *vcpu); + int (*handle_res_inst) (struct kvm_vcpu *vcpu); + int (*handle_break) (struct kvm_vcpu *vcpu); + int (*vm_init) (struct kvm *kvm); + int (*vcpu_init) (struct kvm_vcpu *vcpu); + int (*vcpu_setup) (struct kvm_vcpu *vcpu); + gpa_t(*gva_to_gpa) (gva_t gva); + void (*queue_timer_int) (struct kvm_vcpu *vcpu); + void (*dequeue_timer_int) (struct kvm_vcpu *vcpu); + void (*queue_io_int) (struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); + void (*dequeue_io_int) (struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); + int (*irq_deliver) (struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause); + int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause); + int 
(*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu, + struct kvm_regs *regs); + int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu, + struct kvm_regs *regs); +}; +extern struct kvm_mips_callbacks *kvm_mips_callbacks; +int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); + +/* Debug: dump vcpu state */ +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu); + +/* Trampoline ASM routine to start running in "Guest" context */ +extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu); + +/* TLB handling */ +uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu); + +uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu); + +uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu); + +extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr, + struct kvm_vcpu *vcpu); + +extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, + struct kvm_vcpu *vcpu); + +extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, + struct kvm_mips_tlb *tlb, + unsigned long *hpa0, + unsigned long *hpa1); + +extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern void kvm_mips_dump_host_tlbs(void); +extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu); +extern void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu); +extern void kvm_mips_flush_host_tlb(int skip_kseg0); +extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi); +extern int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index); + +extern int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, + unsigned long entryhi); +extern int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr); +extern unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, + unsigned long gva); +extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, + struct kvm_vcpu *vcpu); +extern void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu); +extern void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu); +extern void kvm_local_flush_tlb_all(void); +extern void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu); +extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu); +extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu); +extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu); + +/* Emulation */ +uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu); +enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause); + +extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum 
emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_handle_ri(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, + struct kvm_run *run); + +enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu); + +enum emulation_result kvm_mips_check_privilege(unsigned long cause, + uint32_t *opc, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +enum emulation_result kvm_mips_emulate_cache(uint32_t inst, + uint32_t *opc, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu); +enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, + uint32_t *opc, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu); +enum emulation_result kvm_mips_emulate_store(uint32_t inst, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu); +enum emulation_result kvm_mips_emulate_load(uint32_t inst, + uint32_t cause, + struct kvm_run *run, + struct kvm_vcpu *vcpu); + +/* Dynamic binary translation */ +extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu); +extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu); +extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu); +extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu); + +/* Misc */ +extern void mips32_SyncICache(unsigned long addr, unsigned long size); +extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu); +extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); + + +#endif /* __MIPS_KVM_HOST_H__ */ diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h deleted file mode 100644 index 8fcf8df4418a..000000000000 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_clk.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef BCM63XX_CLK_H_ -#define BCM63XX_CLK_H_ - -struct clk { - void (*set)(struct clk *, int); - unsigned int rate; - unsigned int usage; - int id; -}; - -#endif /* ! 
BCM63XX_CLK_H_ */ diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h index cb922b9cb0e9..336228990808 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_cpu.h @@ -14,11 +14,12 @@ #define BCM6345_CPU_ID 0x6345 #define BCM6348_CPU_ID 0x6348 #define BCM6358_CPU_ID 0x6358 +#define BCM6362_CPU_ID 0x6362 #define BCM6368_CPU_ID 0x6368 void __init bcm63xx_cpu_init(void); u16 __bcm63xx_get_cpu_id(void); -u16 bcm63xx_get_cpu_rev(void); +u8 bcm63xx_get_cpu_rev(void); unsigned int bcm63xx_get_cpu_freq(void); #ifdef CONFIG_BCM63XX_CPU_6328 @@ -86,6 +87,20 @@ unsigned int bcm63xx_get_cpu_freq(void); # define BCMCPU_IS_6358() (0) #endif +#ifdef CONFIG_BCM63XX_CPU_6362 +# ifdef bcm63xx_get_cpu_id +# undef bcm63xx_get_cpu_id +# define bcm63xx_get_cpu_id() __bcm63xx_get_cpu_id() +# define BCMCPU_RUNTIME_DETECT +# else +# define bcm63xx_get_cpu_id() BCM6362_CPU_ID +# endif +# define BCMCPU_IS_6362() (bcm63xx_get_cpu_id() == BCM6362_CPU_ID) +#else +# define BCMCPU_IS_6362() (0) +#endif + + #ifdef CONFIG_BCM63XX_CPU_6368 # ifdef bcm63xx_get_cpu_id # undef bcm63xx_get_cpu_id @@ -405,6 +420,62 @@ enum bcm63xx_regs_set { #define BCM_6358_MISC_BASE (0xdeadbeef) +/* + * 6362 register sets base address + */ +#define BCM_6362_DSL_LMEM_BASE (0xdeadbeef) +#define BCM_6362_PERF_BASE (0xb0000000) +#define BCM_6362_TIMER_BASE (0xb0000040) +#define BCM_6362_WDT_BASE (0xb000005c) +#define BCM_6362_UART0_BASE (0xb0000100) +#define BCM_6362_UART1_BASE (0xb0000120) +#define BCM_6362_GPIO_BASE (0xb0000080) +#define BCM_6362_SPI_BASE (0xb0000800) +#define BCM_6362_HSSPI_BASE (0xb0001000) +#define BCM_6362_UDC0_BASE (0xdeadbeef) +#define BCM_6362_USBDMA_BASE (0xb000c000) +#define BCM_6362_OHCI0_BASE (0xb0002600) +#define BCM_6362_OHCI_PRIV_BASE (0xdeadbeef) +#define BCM_6362_USBH_PRIV_BASE (0xb0002700) +#define BCM_6362_USBD_BASE (0xb0002400) +#define BCM_6362_MPI_BASE (0xdeadbeef) +#define BCM_6362_PCMCIA_BASE (0xdeadbeef) +#define BCM_6362_PCIE_BASE (0xb0e40000) +#define BCM_6362_SDRAM_REGS_BASE (0xdeadbeef) +#define BCM_6362_DSL_BASE (0xdeadbeef) +#define BCM_6362_UBUS_BASE (0xdeadbeef) +#define BCM_6362_ENET0_BASE (0xdeadbeef) +#define BCM_6362_ENET1_BASE (0xdeadbeef) +#define BCM_6362_ENETDMA_BASE (0xb000d800) +#define BCM_6362_ENETDMAC_BASE (0xb000da00) +#define BCM_6362_ENETDMAS_BASE (0xb000dc00) +#define BCM_6362_ENETSW_BASE (0xb0e00000) +#define BCM_6362_EHCI0_BASE (0xb0002500) +#define BCM_6362_SDRAM_BASE (0xdeadbeef) +#define BCM_6362_MEMC_BASE (0xdeadbeef) +#define BCM_6362_DDR_BASE (0xb0003000) +#define BCM_6362_M2M_BASE (0xdeadbeef) +#define BCM_6362_ATM_BASE (0xdeadbeef) +#define BCM_6362_XTM_BASE (0xb0007800) +#define BCM_6362_XTMDMA_BASE (0xb000b800) +#define BCM_6362_XTMDMAC_BASE (0xdeadbeef) +#define BCM_6362_XTMDMAS_BASE (0xdeadbeef) +#define BCM_6362_PCM_BASE (0xb000a800) +#define BCM_6362_PCMDMA_BASE (0xdeadbeef) +#define BCM_6362_PCMDMAC_BASE (0xdeadbeef) +#define BCM_6362_PCMDMAS_BASE (0xdeadbeef) +#define BCM_6362_RNG_BASE (0xdeadbeef) +#define BCM_6362_MISC_BASE (0xb0001800) + +#define BCM_6362_NAND_REG_BASE (0xb0000200) +#define BCM_6362_NAND_CACHE_BASE (0xb0000600) +#define BCM_6362_LED_BASE (0xb0001900) +#define BCM_6362_IPSEC_BASE (0xb0002800) +#define BCM_6362_IPSEC_DMA_BASE (0xb000d000) +#define BCM_6362_WLAN_CHIPCOMMON_BASE (0xb0004000) +#define BCM_6362_WLAN_D11_BASE (0xb0005000) +#define BCM_6362_WLAN_SHIM_BASE (0xb0007000) + /* * 6368 register sets 
base address */ @@ -564,6 +635,9 @@ static inline unsigned long bcm63xx_regset_address(enum bcm63xx_regs_set set) #ifdef CONFIG_BCM63XX_CPU_6358 __GEN_RSET(6358) #endif +#ifdef CONFIG_BCM63XX_CPU_6362 + __GEN_RSET(6362) +#endif #ifdef CONFIG_BCM63XX_CPU_6368 __GEN_RSET(6368) #endif @@ -819,6 +893,71 @@ enum bcm63xx_irq { #define BCM_6358_EXT_IRQ2 (IRQ_INTERNAL_BASE + 27) #define BCM_6358_EXT_IRQ3 (IRQ_INTERNAL_BASE + 28) +/* + * 6362 irqs + */ +#define BCM_6362_HIGH_IRQ_BASE (IRQ_INTERNAL_BASE + 32) + +#define BCM_6362_TIMER_IRQ (IRQ_INTERNAL_BASE + 0) +#define BCM_6362_SPI_IRQ (IRQ_INTERNAL_BASE + 2) +#define BCM_6362_UART0_IRQ (IRQ_INTERNAL_BASE + 3) +#define BCM_6362_UART1_IRQ (IRQ_INTERNAL_BASE + 4) +#define BCM_6362_DSL_IRQ (IRQ_INTERNAL_BASE + 28) +#define BCM_6362_UDC0_IRQ 0 +#define BCM_6362_ENET0_IRQ 0 +#define BCM_6362_ENET1_IRQ 0 +#define BCM_6362_ENET_PHY_IRQ (IRQ_INTERNAL_BASE + 14) +#define BCM_6362_HSSPI_IRQ (IRQ_INTERNAL_BASE + 5) +#define BCM_6362_OHCI0_IRQ (IRQ_INTERNAL_BASE + 9) +#define BCM_6362_EHCI0_IRQ (IRQ_INTERNAL_BASE + 10) +#define BCM_6362_USBD_IRQ (IRQ_INTERNAL_BASE + 11) +#define BCM_6362_USBD_RXDMA0_IRQ (IRQ_INTERNAL_BASE + 20) +#define BCM_6362_USBD_TXDMA0_IRQ (IRQ_INTERNAL_BASE + 21) +#define BCM_6362_USBD_RXDMA1_IRQ (IRQ_INTERNAL_BASE + 22) +#define BCM_6362_USBD_TXDMA1_IRQ (IRQ_INTERNAL_BASE + 23) +#define BCM_6362_USBD_RXDMA2_IRQ (IRQ_INTERNAL_BASE + 24) +#define BCM_6362_USBD_TXDMA2_IRQ (IRQ_INTERNAL_BASE + 25) +#define BCM_6362_PCMCIA_IRQ 0 +#define BCM_6362_ENET0_RXDMA_IRQ 0 +#define BCM_6362_ENET0_TXDMA_IRQ 0 +#define BCM_6362_ENET1_RXDMA_IRQ 0 +#define BCM_6362_ENET1_TXDMA_IRQ 0 +#define BCM_6362_PCI_IRQ (IRQ_INTERNAL_BASE + 30) +#define BCM_6362_ATM_IRQ 0 +#define BCM_6362_ENETSW_RXDMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 0) +#define BCM_6362_ENETSW_RXDMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 1) +#define BCM_6362_ENETSW_RXDMA2_IRQ (BCM_6362_HIGH_IRQ_BASE + 2) +#define BCM_6362_ENETSW_RXDMA3_IRQ (BCM_6362_HIGH_IRQ_BASE + 3) +#define BCM_6362_ENETSW_TXDMA0_IRQ 0 +#define BCM_6362_ENETSW_TXDMA1_IRQ 0 +#define BCM_6362_ENETSW_TXDMA2_IRQ 0 +#define BCM_6362_ENETSW_TXDMA3_IRQ 0 +#define BCM_6362_XTM_IRQ 0 +#define BCM_6362_XTM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 12) + +#define BCM_6362_RING_OSC_IRQ (IRQ_INTERNAL_BASE + 1) +#define BCM_6362_WLAN_GPIO_IRQ (IRQ_INTERNAL_BASE + 6) +#define BCM_6362_WLAN_IRQ (IRQ_INTERNAL_BASE + 7) +#define BCM_6362_IPSEC_IRQ (IRQ_INTERNAL_BASE + 8) +#define BCM_6362_NAND_IRQ (IRQ_INTERNAL_BASE + 12) +#define BCM_6362_PCM_IRQ (IRQ_INTERNAL_BASE + 13) +#define BCM_6362_DG_IRQ (IRQ_INTERNAL_BASE + 15) +#define BCM_6362_EPHY_ENERGY0_IRQ (IRQ_INTERNAL_BASE + 16) +#define BCM_6362_EPHY_ENERGY1_IRQ (IRQ_INTERNAL_BASE + 17) +#define BCM_6362_EPHY_ENERGY2_IRQ (IRQ_INTERNAL_BASE + 18) +#define BCM_6362_EPHY_ENERGY3_IRQ (IRQ_INTERNAL_BASE + 19) +#define BCM_6362_IPSEC_DMA0_IRQ (IRQ_INTERNAL_BASE + 26) +#define BCM_6362_IPSEC_DMA1_IRQ (IRQ_INTERNAL_BASE + 27) +#define BCM_6362_FAP0_IRQ (IRQ_INTERNAL_BASE + 29) +#define BCM_6362_PCM_DMA0_IRQ (BCM_6362_HIGH_IRQ_BASE + 4) +#define BCM_6362_PCM_DMA1_IRQ (BCM_6362_HIGH_IRQ_BASE + 5) +#define BCM_6362_DECT0_IRQ (BCM_6362_HIGH_IRQ_BASE + 6) +#define BCM_6362_DECT1_IRQ (BCM_6362_HIGH_IRQ_BASE + 7) +#define BCM_6362_EXT_IRQ0 (BCM_6362_HIGH_IRQ_BASE + 8) +#define BCM_6362_EXT_IRQ1 (BCM_6362_HIGH_IRQ_BASE + 9) +#define BCM_6362_EXT_IRQ2 (BCM_6362_HIGH_IRQ_BASE + 10) +#define BCM_6362_EXT_IRQ3 (BCM_6362_HIGH_IRQ_BASE + 11) + /* * 6368 irqs */ diff --git 
a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h index b0184cf02575..c426cabc620a 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h @@ -71,18 +71,13 @@ static inline unsigned long bcm63xx_spireg(enum bcm63xx_regs_spi reg) return bcm63xx_regs_spi[reg]; #else -#ifdef CONFIG_BCM63XX_CPU_6338 - __GEN_SPI_RSET(6338) -#endif -#ifdef CONFIG_BCM63XX_CPU_6348 +#if defined(CONFIG_BCM63XX_CPU_6338) || defined(CONFIG_BCM63XX_CPU_6348) __GEN_SPI_RSET(6348) #endif -#ifdef CONFIG_BCM63XX_CPU_6358 +#if defined(CONFIG_BCM63XX_CPU_6358) || defined(CONFIG_BCM63XX_CPU_6362) || \ + defined(CONFIG_BCM63XX_CPU_6368) __GEN_SPI_RSET(6358) #endif -#ifdef CONFIG_BCM63XX_CPU_6368 - __GEN_SPI_RSET(6368) -#endif #endif return 0; } diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h index 0a9891f7580d..35baa1a60a64 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h @@ -17,6 +17,8 @@ static inline unsigned long bcm63xx_gpio_count(void) return 8; case BCM6345_CPU_ID: return 16; + case BCM6362_CPU_ID: + return 48; case BCM6368_CPU_ID: return 38; case BCM6348_CPU_ID: diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 81b4702f792a..3203fe49b34d 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -10,7 +10,7 @@ #define REV_CHIPID_SHIFT 16 #define REV_CHIPID_MASK (0xffff << REV_CHIPID_SHIFT) #define REV_REVID_SHIFT 0 -#define REV_REVID_MASK (0xffff << REV_REVID_SHIFT) +#define REV_REVID_MASK (0xff << REV_REVID_SHIFT) /* Clock Control register */ #define PERF_CKCTL_REG 0x4 @@ -112,6 +112,39 @@ CKCTL_6358_USBSU_EN | \ CKCTL_6358_EPHY_EN) +#define CKCTL_6362_ADSL_QPROC_EN (1 << 1) +#define CKCTL_6362_ADSL_AFE_EN (1 << 2) +#define CKCTL_6362_ADSL_EN (1 << 3) +#define CKCTL_6362_MIPS_EN (1 << 4) +#define CKCTL_6362_WLAN_OCP_EN (1 << 5) +#define CKCTL_6362_SWPKT_USB_EN (1 << 7) +#define CKCTL_6362_SWPKT_SAR_EN (1 << 8) +#define CKCTL_6362_SAR_EN (1 << 9) +#define CKCTL_6362_ROBOSW_EN (1 << 10) +#define CKCTL_6362_PCM_EN (1 << 11) +#define CKCTL_6362_USBD_EN (1 << 12) +#define CKCTL_6362_USBH_EN (1 << 13) +#define CKCTL_6362_IPSEC_EN (1 << 14) +#define CKCTL_6362_SPI_EN (1 << 15) +#define CKCTL_6362_HSSPI_EN (1 << 16) +#define CKCTL_6362_PCIE_EN (1 << 17) +#define CKCTL_6362_FAP_EN (1 << 18) +#define CKCTL_6362_PHYMIPS_EN (1 << 19) +#define CKCTL_6362_NAND_EN (1 << 20) + +#define CKCTL_6362_ALL_SAFE_EN (CKCTL_6362_PHYMIPS_EN | \ + CKCTL_6362_ADSL_QPROC_EN | \ + CKCTL_6362_ADSL_AFE_EN | \ + CKCTL_6362_ADSL_EN | \ + CKCTL_6362_SAR_EN | \ + CKCTL_6362_PCM_EN | \ + CKCTL_6362_IPSEC_EN | \ + CKCTL_6362_USBD_EN | \ + CKCTL_6362_USBH_EN | \ + CKCTL_6362_ROBOSW_EN | \ + CKCTL_6362_PCIE_EN) + + #define CKCTL_6368_VDSL_QPROC_EN (1 << 2) #define CKCTL_6368_VDSL_AFE_EN (1 << 3) #define CKCTL_6368_VDSL_BONDING_EN (1 << 4) @@ -153,6 +186,7 @@ #define PERF_IRQMASK_6345_REG 0xc #define PERF_IRQMASK_6348_REG 0xc #define PERF_IRQMASK_6358_REG 0xc +#define PERF_IRQMASK_6362_REG 0x20 #define PERF_IRQMASK_6368_REG 0x20 /* Interrupt Status register */ @@ -161,6 +195,7 @@ #define PERF_IRQSTAT_6345_REG 0x10 #define PERF_IRQSTAT_6348_REG 0x10 #define PERF_IRQSTAT_6358_REG 0x10 +#define 
PERF_IRQSTAT_6362_REG 0x28 #define PERF_IRQSTAT_6368_REG 0x28 /* External Interrupt Configuration register */ @@ -169,6 +204,7 @@ #define PERF_EXTIRQ_CFG_REG_6345 0x14 #define PERF_EXTIRQ_CFG_REG_6348 0x14 #define PERF_EXTIRQ_CFG_REG_6358 0x14 +#define PERF_EXTIRQ_CFG_REG_6362 0x18 #define PERF_EXTIRQ_CFG_REG_6368 0x18 #define PERF_EXTIRQ_CFG_REG2_6368 0x1c @@ -197,6 +233,7 @@ #define PERF_SOFTRESET_REG 0x28 #define PERF_SOFTRESET_6328_REG 0x10 #define PERF_SOFTRESET_6358_REG 0x34 +#define PERF_SOFTRESET_6362_REG 0x10 #define PERF_SOFTRESET_6368_REG 0x10 #define SOFTRESET_6328_SPI_MASK (1 << 0) @@ -259,6 +296,22 @@ #define SOFTRESET_6358_PCM_MASK (1 << 13) #define SOFTRESET_6358_ADSL_MASK (1 << 14) +#define SOFTRESET_6362_SPI_MASK (1 << 0) +#define SOFTRESET_6362_IPSEC_MASK (1 << 1) +#define SOFTRESET_6362_EPHY_MASK (1 << 2) +#define SOFTRESET_6362_SAR_MASK (1 << 3) +#define SOFTRESET_6362_ENETSW_MASK (1 << 4) +#define SOFTRESET_6362_USBS_MASK (1 << 5) +#define SOFTRESET_6362_USBH_MASK (1 << 6) +#define SOFTRESET_6362_PCM_MASK (1 << 7) +#define SOFTRESET_6362_PCIE_CORE_MASK (1 << 8) +#define SOFTRESET_6362_PCIE_MASK (1 << 9) +#define SOFTRESET_6362_PCIE_EXT_MASK (1 << 10) +#define SOFTRESET_6362_WLAN_SHIM_MASK (1 << 11) +#define SOFTRESET_6362_DDR_PHY_MASK (1 << 12) +#define SOFTRESET_6362_FAP_MASK (1 << 13) +#define SOFTRESET_6362_WLAN_UBUS_MASK (1 << 14) + #define SOFTRESET_6368_SPI_MASK (1 << 0) #define SOFTRESET_6368_MPI_MASK (1 << 3) #define SOFTRESET_6368_EPHY_MASK (1 << 6) @@ -1223,24 +1276,7 @@ * _REG relative to RSET_SPI *************************************************************************/ -/* BCM 6338 SPI core */ -#define SPI_6338_CMD 0x00 /* 16-bits register */ -#define SPI_6338_INT_STATUS 0x02 -#define SPI_6338_INT_MASK_ST 0x03 -#define SPI_6338_INT_MASK 0x04 -#define SPI_6338_ST 0x05 -#define SPI_6338_CLK_CFG 0x06 -#define SPI_6338_FILL_BYTE 0x07 -#define SPI_6338_MSG_TAIL 0x09 -#define SPI_6338_RX_TAIL 0x0b -#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */ -#define SPI_6338_MSG_CTL_WIDTH 8 -#define SPI_6338_MSG_DATA 0x41 -#define SPI_6338_MSG_DATA_SIZE 0x3f -#define SPI_6338_RX_DATA 0x80 -#define SPI_6338_RX_DATA_SIZE 0x3f - -/* BCM 6348 SPI core */ +/* BCM 6338/6348 SPI core */ #define SPI_6348_CMD 0x00 /* 16-bits register */ #define SPI_6348_INT_STATUS 0x02 #define SPI_6348_INT_MASK_ST 0x03 @@ -1257,7 +1293,7 @@ #define SPI_6348_RX_DATA 0x80 #define SPI_6348_RX_DATA_SIZE 0x3f -/* BCM 6358 SPI core */ +/* BCM 6358/6262/6368 SPI core */ #define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ #define SPI_6358_MSG_CTL_WIDTH 16 #define SPI_6358_MSG_DATA 0x02 @@ -1274,23 +1310,6 @@ #define SPI_6358_MSG_TAIL 0x709 #define SPI_6358_RX_TAIL 0x70B -/* BCM 6358 SPI core */ -#define SPI_6368_MSG_CTL 0x00 /* 16-bits register */ -#define SPI_6368_MSG_CTL_WIDTH 16 -#define SPI_6368_MSG_DATA 0x02 -#define SPI_6368_MSG_DATA_SIZE 0x21e -#define SPI_6368_RX_DATA 0x400 -#define SPI_6368_RX_DATA_SIZE 0x220 -#define SPI_6368_CMD 0x700 /* 16-bits register */ -#define SPI_6368_INT_STATUS 0x702 -#define SPI_6368_INT_MASK_ST 0x703 -#define SPI_6368_INT_MASK 0x704 -#define SPI_6368_ST 0x705 -#define SPI_6368_CLK_CFG 0x706 -#define SPI_6368_FILL_BYTE 0x707 -#define SPI_6368_MSG_TAIL 0x709 -#define SPI_6368_RX_TAIL 0x70B - /* Shared SPI definitions */ /* Message configuration */ @@ -1298,10 +1317,8 @@ #define SPI_HD_W 0x01 #define SPI_HD_R 0x02 #define SPI_BYTE_CNT_SHIFT 0 -#define SPI_6338_MSG_TYPE_SHIFT 6 #define SPI_6348_MSG_TYPE_SHIFT 6 #define SPI_6358_MSG_TYPE_SHIFT 14 -#define 
SPI_6368_MSG_TYPE_SHIFT 14 /* Command */ #define SPI_CMD_NOOP 0x00 @@ -1348,10 +1365,18 @@ /************************************************************************* * _REG relative to RSET_MISC *************************************************************************/ -#define MISC_SERDES_CTRL_REG 0x0 +#define MISC_SERDES_CTRL_6328_REG 0x0 +#define MISC_SERDES_CTRL_6362_REG 0x4 #define SERDES_PCIE_EN (1 << 0) #define SERDES_PCIE_EXD_EN (1 << 15) +#define MISC_STRAPBUS_6362_REG 0x14 +#define STRAPBUS_6362_FCVO_SHIFT 1 +#define STRAPBUS_6362_HSSPI_CLK_FAST (1 << 13) +#define STRAPBUS_6362_FCVO_MASK (0x1f << STRAPBUS_6362_FCVO_SHIFT) +#define STRAPBUS_6362_BOOT_SEL_SERIAL (1 << 15) +#define STRAPBUS_6362_BOOT_SEL_NAND (0 << 15) + #define MISC_STRAPBUS_6328_REG 0x240 #define STRAPBUS_6328_FCVO_SHIFT 7 #define STRAPBUS_6328_FCVO_MASK (0x1f << STRAPBUS_6328_FCVO_SHIFT) diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/trunk/arch/mips/include/asm/mach-bcm63xx/ioremap.h index 30931c42379d..94e3011ba7df 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/ioremap.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/ioremap.h @@ -19,6 +19,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset) return 1; break; case BCM6328_CPU_ID: + case BCM6362_CPU_ID: case BCM6368_CPU_ID: if (offset >= 0xb0000000 && offset < 0xb1000000) return 1; diff --git a/trunk/arch/mips/include/asm/mach-generic/dma-coherence.h b/trunk/arch/mips/include/asm/mach-generic/dma-coherence.h index 9c95177f7a7e..fe23034aaf72 100644 --- a/trunk/arch/mips/include/asm/mach-generic/dma-coherence.h +++ b/trunk/arch/mips/include/asm/mach-generic/dma-coherence.h @@ -61,9 +61,8 @@ static inline int plat_device_is_coherent(struct device *dev) { #ifdef CONFIG_DMA_COHERENT return 1; -#endif -#ifdef CONFIG_DMA_NONCOHERENT - return 0; +#else + return coherentio; #endif } diff --git a/trunk/arch/mips/include/asm/mach-generic/spaces.h b/trunk/arch/mips/include/asm/mach-generic/spaces.h index 73d717a75cb0..5b2f2e68e57f 100644 --- a/trunk/arch/mips/include/asm/mach-generic/spaces.h +++ b/trunk/arch/mips/include/asm/mach-generic/spaces.h @@ -20,14 +20,21 @@ #endif #ifdef CONFIG_32BIT - +#ifdef CONFIG_KVM_GUEST +#define CAC_BASE _AC(0x40000000, UL) +#else #define CAC_BASE _AC(0x80000000, UL) +#endif #define IO_BASE _AC(0xa0000000, UL) #define UNCAC_BASE _AC(0xa0000000, UL) #ifndef MAP_BASE +#ifdef CONFIG_KVM_GUEST +#define MAP_BASE _AC(0x60000000, UL) +#else #define MAP_BASE _AC(0xc0000000, UL) #endif +#endif /* * Memory above this physical address will be considered highmem. diff --git a/trunk/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h index 75fd8c0f986e..c0f3ef45c2c1 100644 --- a/trunk/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h +++ b/trunk/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h @@ -57,5 +57,6 @@ #define cpu_has_vint 0 #define cpu_has_vtag_icache 0 #define cpu_has_watch 1 +#define cpu_has_local_ebase 0 #endif /* __ASM_MACH_LOONGSON_CPU_FEATURE_OVERRIDES_H */ diff --git a/trunk/arch/mips/include/asm/mach-ralink/mt7620.h b/trunk/arch/mips/include/asm/mach-ralink/mt7620.h new file mode 100644 index 000000000000..9809972ea882 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-ralink/mt7620.h @@ -0,0 +1,84 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * Copyright (C) 2013 John Crispin + */ + +#ifndef _MT7620_REGS_H_ +#define _MT7620_REGS_H_ + +#define MT7620_SYSC_BASE 0x10000000 + +#define SYSC_REG_CHIP_NAME0 0x00 +#define SYSC_REG_CHIP_NAME1 0x04 +#define SYSC_REG_CHIP_REV 0x0c +#define SYSC_REG_SYSTEM_CONFIG0 0x10 +#define SYSC_REG_SYSTEM_CONFIG1 0x14 +#define SYSC_REG_CPLL_CONFIG0 0x54 +#define SYSC_REG_CPLL_CONFIG1 0x58 + +#define MT7620N_CHIP_NAME0 0x33365452 +#define MT7620N_CHIP_NAME1 0x20203235 + +#define MT7620A_CHIP_NAME0 0x3637544d +#define MT7620A_CHIP_NAME1 0x20203032 + +#define CHIP_REV_PKG_MASK 0x1 +#define CHIP_REV_PKG_SHIFT 16 +#define CHIP_REV_VER_MASK 0xf +#define CHIP_REV_VER_SHIFT 8 +#define CHIP_REV_ECO_MASK 0xf + +#define CPLL_SW_CONFIG_SHIFT 31 +#define CPLL_SW_CONFIG_MASK 0x1 +#define CPLL_CPU_CLK_SHIFT 24 +#define CPLL_CPU_CLK_MASK 0x1 +#define CPLL_MULT_RATIO_SHIFT 16 +#define CPLL_MULT_RATIO 0x7 +#define CPLL_DIV_RATIO_SHIFT 10 +#define CPLL_DIV_RATIO 0x3 + +#define SYSCFG0_DRAM_TYPE_MASK 0x3 +#define SYSCFG0_DRAM_TYPE_SHIFT 4 +#define SYSCFG0_DRAM_TYPE_SDRAM 0 +#define SYSCFG0_DRAM_TYPE_DDR1 1 +#define SYSCFG0_DRAM_TYPE_DDR2 2 + +#define MT7620_DRAM_BASE 0x0 +#define MT7620_SDRAM_SIZE_MIN 2 +#define MT7620_SDRAM_SIZE_MAX 64 +#define MT7620_DDR1_SIZE_MIN 32 +#define MT7620_DDR1_SIZE_MAX 128 +#define MT7620_DDR2_SIZE_MIN 32 +#define MT7620_DDR2_SIZE_MAX 256 + +#define MT7620_GPIO_MODE_I2C BIT(0) +#define MT7620_GPIO_MODE_UART0_SHIFT 2 +#define MT7620_GPIO_MODE_UART0_MASK 0x7 +#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT) +#define MT7620_GPIO_MODE_UARTF 0x0 +#define MT7620_GPIO_MODE_PCM_UARTF 0x1 +#define MT7620_GPIO_MODE_PCM_I2S 0x2 +#define MT7620_GPIO_MODE_I2S_UARTF 0x3 +#define MT7620_GPIO_MODE_PCM_GPIO 0x4 +#define MT7620_GPIO_MODE_GPIO_UARTF 0x5 +#define MT7620_GPIO_MODE_GPIO_I2S 0x6 +#define MT7620_GPIO_MODE_GPIO 0x7 +#define MT7620_GPIO_MODE_UART1 BIT(5) +#define MT7620_GPIO_MODE_MDIO BIT(8) +#define MT7620_GPIO_MODE_RGMII1 BIT(9) +#define MT7620_GPIO_MODE_RGMII2 BIT(10) +#define MT7620_GPIO_MODE_SPI BIT(11) +#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12) +#define MT7620_GPIO_MODE_WLED BIT(13) +#define MT7620_GPIO_MODE_JTAG BIT(15) +#define MT7620_GPIO_MODE_EPHY BIT(15) +#define MT7620_GPIO_MODE_WDT BIT(22) + +#endif diff --git a/trunk/arch/mips/include/asm/mach-ralink/rt288x.h b/trunk/arch/mips/include/asm/mach-ralink/rt288x.h new file mode 100644 index 000000000000..03ad716acb42 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-ralink/rt288x.h @@ -0,0 +1,53 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * Copyright (C) 2013 John Crispin + */ + +#ifndef _RT288X_REGS_H_ +#define _RT288X_REGS_H_ + +#define RT2880_SYSC_BASE 0x00300000 + +#define SYSC_REG_CHIP_NAME0 0x00 +#define SYSC_REG_CHIP_NAME1 0x04 +#define SYSC_REG_CHIP_ID 0x0c +#define SYSC_REG_SYSTEM_CONFIG 0x10 +#define SYSC_REG_CLKCFG 0x30 + +#define RT2880_CHIP_NAME0 0x38325452 +#define RT2880_CHIP_NAME1 0x20203038 + +#define CHIP_ID_ID_MASK 0xff +#define CHIP_ID_ID_SHIFT 8 +#define CHIP_ID_REV_MASK 0xff + +#define SYSTEM_CONFIG_CPUCLK_SHIFT 20 +#define SYSTEM_CONFIG_CPUCLK_MASK 0x3 +#define SYSTEM_CONFIG_CPUCLK_250 0x0 +#define SYSTEM_CONFIG_CPUCLK_266 0x1 +#define SYSTEM_CONFIG_CPUCLK_280 0x2 +#define SYSTEM_CONFIG_CPUCLK_300 0x3 + +#define RT2880_GPIO_MODE_I2C BIT(0) +#define RT2880_GPIO_MODE_UART0 BIT(1) +#define RT2880_GPIO_MODE_SPI BIT(2) +#define RT2880_GPIO_MODE_UART1 BIT(3) +#define RT2880_GPIO_MODE_JTAG BIT(4) +#define RT2880_GPIO_MODE_MDIO BIT(5) +#define RT2880_GPIO_MODE_SDRAM BIT(6) +#define RT2880_GPIO_MODE_PCI BIT(7) + +#define CLKCFG_SRAM_CS_N_WDT BIT(9) + +#define RT2880_SDRAM_BASE 0x08000000 +#define RT2880_MEM_SIZE_MIN 2 +#define RT2880_MEM_SIZE_MAX 128 + +#endif diff --git a/trunk/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h new file mode 100644 index 000000000000..72fc10669199 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-ralink/rt288x/cpu-feature-overrides.h @@ -0,0 +1,56 @@ +/* + * Ralink RT288x specific CPU feature overrides + * + * Copyright (C) 2008-2009 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * This file was derived from: include/asm-mips/cpu-features.h + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ +#ifndef _RT288X_CPU_FEATURE_OVERRIDES_H +#define _RT288X_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_sb1_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_32fpr 0 +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 + +#define cpu_has_prefetch 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 + +#define cpu_has_mips16 1 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 + +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +#define cpu_has_dsp 0 +#define cpu_has_mipsmt 0 + +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 + +#define cpu_dcache_line_size() 16 +#define cpu_icache_line_size() 16 + +#endif /* _RT288X_CPU_FEATURE_OVERRIDES_H */ diff --git a/trunk/arch/mips/include/asm/mach-ralink/rt305x.h b/trunk/arch/mips/include/asm/mach-ralink/rt305x.h index 7d344f2d7d0a..069bf37a6010 100644 --- a/trunk/arch/mips/include/asm/mach-ralink/rt305x.h +++ b/trunk/arch/mips/include/asm/mach-ralink/rt305x.h @@ -97,6 +97,14 @@ static inline int soc_is_rt5350(void) #define RT5350_SYSCFG0_CPUCLK_320 0x2 #define RT5350_SYSCFG0_CPUCLK_300 0x3 +#define RT5350_SYSCFG0_DRAM_SIZE_SHIFT 12 +#define RT5350_SYSCFG0_DRAM_SIZE_MASK 7 +#define RT5350_SYSCFG0_DRAM_SIZE_2M 0 +#define RT5350_SYSCFG0_DRAM_SIZE_8M 1 +#define RT5350_SYSCFG0_DRAM_SIZE_16M 2 +#define RT5350_SYSCFG0_DRAM_SIZE_32M 3 +#define RT5350_SYSCFG0_DRAM_SIZE_64M 4 + /* multi function gpio pins */ #define RT305X_GPIO_I2C_SD 1 #define RT305X_GPIO_I2C_SCLK 2 @@ -136,4 +144,23 @@ static inline int soc_is_rt5350(void) #define RT305X_GPIO_MODE_SDRAM BIT(8) #define RT305X_GPIO_MODE_RGMII BIT(9) +#define RT3352_SYSC_REG_SYSCFG0 0x010 +#define RT3352_SYSC_REG_SYSCFG1 0x014 +#define RT3352_SYSC_REG_CLKCFG1 0x030 +#define RT3352_SYSC_REG_RSTCTRL 0x034 +#define RT3352_SYSC_REG_USB_PS 0x05c + +#define RT3352_CLKCFG0_XTAL_SEL BIT(20) +#define RT3352_CLKCFG1_UPHY0_CLK_EN BIT(18) +#define RT3352_CLKCFG1_UPHY1_CLK_EN BIT(20) +#define RT3352_RSTCTRL_UHST BIT(22) +#define RT3352_RSTCTRL_UDEV BIT(25) +#define RT3352_SYSCFG1_USB0_HOST_MODE BIT(10) + +#define RT305X_SDRAM_BASE 0x00000000 +#define RT305X_MEM_SIZE_MIN 2 +#define RT305X_MEM_SIZE_MAX 64 +#define RT3352_MEM_SIZE_MIN 2 +#define RT3352_MEM_SIZE_MAX 256 + #endif diff --git a/trunk/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h new file mode 100644 index 000000000000..917c28654552 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-ralink/rt305x/cpu-feature-overrides.h @@ -0,0 +1,56 @@ +/* + * Ralink RT305x specific CPU feature overrides + * + * Copyright (C) 2008-2009 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * + * This file was derived from: include/asm-mips/cpu-features.h + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. 
+ * + */ +#ifndef _RT305X_CPU_FEATURE_OVERRIDES_H +#define _RT305X_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_sb1_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_32fpr 0 +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 + +#define cpu_has_prefetch 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 + +#define cpu_has_mips16 1 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 + +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +#define cpu_has_dsp 1 +#define cpu_has_mipsmt 0 + +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 + +#endif /* _RT305X_CPU_FEATURE_OVERRIDES_H */ diff --git a/trunk/arch/mips/include/asm/mach-ralink/rt3883.h b/trunk/arch/mips/include/asm/mach-ralink/rt3883.h new file mode 100644 index 000000000000..058382f37f92 --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-ralink/rt3883.h @@ -0,0 +1,252 @@ +/* + * Ralink RT3662/RT3883 SoC register definitions + * + * Copyright (C) 2011-2012 Gabor Juhos + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#ifndef _RT3883_REGS_H_ +#define _RT3883_REGS_H_ + +#include + +#define RT3883_SDRAM_BASE 0x00000000 +#define RT3883_SYSC_BASE 0x10000000 +#define RT3883_TIMER_BASE 0x10000100 +#define RT3883_INTC_BASE 0x10000200 +#define RT3883_MEMC_BASE 0x10000300 +#define RT3883_UART0_BASE 0x10000500 +#define RT3883_PIO_BASE 0x10000600 +#define RT3883_FSCC_BASE 0x10000700 +#define RT3883_NANDC_BASE 0x10000810 +#define RT3883_I2C_BASE 0x10000900 +#define RT3883_I2S_BASE 0x10000a00 +#define RT3883_SPI_BASE 0x10000b00 +#define RT3883_UART1_BASE 0x10000c00 +#define RT3883_PCM_BASE 0x10002000 +#define RT3883_GDMA_BASE 0x10002800 +#define RT3883_CODEC1_BASE 0x10003000 +#define RT3883_CODEC2_BASE 0x10003800 +#define RT3883_FE_BASE 0x10100000 +#define RT3883_ROM_BASE 0x10118000 +#define RT3883_USBDEV_BASE 0x10112000 +#define RT3883_PCI_BASE 0x10140000 +#define RT3883_WLAN_BASE 0x10180000 +#define RT3883_USBHOST_BASE 0x101c0000 +#define RT3883_BOOT_BASE 0x1c000000 +#define RT3883_SRAM_BASE 0x1e000000 +#define RT3883_PCIMEM_BASE 0x20000000 + +#define RT3883_EHCI_BASE (RT3883_USBHOST_BASE) +#define RT3883_OHCI_BASE (RT3883_USBHOST_BASE + 0x1000) + +#define RT3883_SYSC_SIZE 0x100 +#define RT3883_TIMER_SIZE 0x100 +#define RT3883_INTC_SIZE 0x100 +#define RT3883_MEMC_SIZE 0x100 +#define RT3883_UART0_SIZE 0x100 +#define RT3883_UART1_SIZE 0x100 +#define RT3883_PIO_SIZE 0x100 +#define RT3883_FSCC_SIZE 0x100 +#define RT3883_NANDC_SIZE 0x0f0 +#define RT3883_I2C_SIZE 0x100 +#define RT3883_I2S_SIZE 0x100 +#define RT3883_SPI_SIZE 0x100 +#define RT3883_PCM_SIZE 0x800 +#define RT3883_GDMA_SIZE 0x800 +#define RT3883_CODEC1_SIZE 0x800 +#define RT3883_CODEC2_SIZE 0x800 +#define RT3883_FE_SIZE 0x10000 +#define RT3883_ROM_SIZE 0x4000 +#define RT3883_USBDEV_SIZE 0x4000 +#define RT3883_PCI_SIZE 0x40000 +#define RT3883_WLAN_SIZE 0x40000 +#define RT3883_USBHOST_SIZE 0x40000 +#define RT3883_BOOT_SIZE (32 * 1024 * 1024) +#define RT3883_SRAM_SIZE (32 * 1024 * 1024) + +/* SYSC registers */ +#define RT3883_SYSC_REG_CHIPID0_3 0x00 /* 
Chip ID 0 */ +#define RT3883_SYSC_REG_CHIPID4_7 0x04 /* Chip ID 1 */ +#define RT3883_SYSC_REG_REVID 0x0c /* Chip Revision Identification */ +#define RT3883_SYSC_REG_SYSCFG0 0x10 /* System Configuration 0 */ +#define RT3883_SYSC_REG_SYSCFG1 0x14 /* System Configuration 1 */ +#define RT3883_SYSC_REG_CLKCFG0 0x2c /* Clock Configuration 0 */ +#define RT3883_SYSC_REG_CLKCFG1 0x30 /* Clock Configuration 1 */ +#define RT3883_SYSC_REG_RSTCTRL 0x34 /* Reset Control*/ +#define RT3883_SYSC_REG_RSTSTAT 0x38 /* Reset Status*/ +#define RT3883_SYSC_REG_USB_PS 0x5c /* USB Power saving control */ +#define RT3883_SYSC_REG_GPIO_MODE 0x60 /* GPIO Purpose Select */ +#define RT3883_SYSC_REG_PCIE_CLK_GEN0 0x7c +#define RT3883_SYSC_REG_PCIE_CLK_GEN1 0x80 +#define RT3883_SYSC_REG_PCIE_CLK_GEN2 0x84 +#define RT3883_SYSC_REG_PMU 0x88 +#define RT3883_SYSC_REG_PMU1 0x8c + +#define RT3883_CHIP_NAME0 0x38335452 +#define RT3883_CHIP_NAME1 0x20203338 + +#define RT3883_REVID_VER_ID_MASK 0x0f +#define RT3883_REVID_VER_ID_SHIFT 8 +#define RT3883_REVID_ECO_ID_MASK 0x0f + +#define RT3883_SYSCFG0_DRAM_TYPE_DDR2 BIT(17) +#define RT3883_SYSCFG0_CPUCLK_SHIFT 8 +#define RT3883_SYSCFG0_CPUCLK_MASK 0x3 +#define RT3883_SYSCFG0_CPUCLK_250 0x0 +#define RT3883_SYSCFG0_CPUCLK_384 0x1 +#define RT3883_SYSCFG0_CPUCLK_480 0x2 +#define RT3883_SYSCFG0_CPUCLK_500 0x3 + +#define RT3883_SYSCFG1_USB0_HOST_MODE BIT(10) +#define RT3883_SYSCFG1_PCIE_RC_MODE BIT(8) +#define RT3883_SYSCFG1_PCI_HOST_MODE BIT(7) +#define RT3883_SYSCFG1_PCI_66M_MODE BIT(6) +#define RT3883_SYSCFG1_GPIO2_AS_WDT_OUT BIT(2) + +#define RT3883_CLKCFG1_PCIE_CLK_EN BIT(21) +#define RT3883_CLKCFG1_UPHY1_CLK_EN BIT(20) +#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19) +#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18) + +#define RT3883_GPIO_MODE_I2C BIT(0) +#define RT3883_GPIO_MODE_SPI BIT(1) +#define RT3883_GPIO_MODE_UART0_SHIFT 2 +#define RT3883_GPIO_MODE_UART0_MASK 0x7 +#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT) +#define RT3883_GPIO_MODE_UARTF 0x0 +#define RT3883_GPIO_MODE_PCM_UARTF 0x1 +#define RT3883_GPIO_MODE_PCM_I2S 0x2 +#define RT3883_GPIO_MODE_I2S_UARTF 0x3 +#define RT3883_GPIO_MODE_PCM_GPIO 0x4 +#define RT3883_GPIO_MODE_GPIO_UARTF 0x5 +#define RT3883_GPIO_MODE_GPIO_I2S 0x6 +#define RT3883_GPIO_MODE_GPIO 0x7 +#define RT3883_GPIO_MODE_UART1 BIT(5) +#define RT3883_GPIO_MODE_JTAG BIT(6) +#define RT3883_GPIO_MODE_MDIO BIT(7) +#define RT3883_GPIO_MODE_GE1 BIT(9) +#define RT3883_GPIO_MODE_GE2 BIT(10) +#define RT3883_GPIO_MODE_PCI_SHIFT 11 +#define RT3883_GPIO_MODE_PCI_MASK 0x7 +#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT) +#define RT3883_GPIO_MODE_LNA_A_SHIFT 16 +#define RT3883_GPIO_MODE_LNA_A_MASK 0x3 +#define _RT3883_GPIO_MODE_LNA_A(_x) ((_x) << RT3883_GPIO_MODE_LNA_A_SHIFT) +#define RT3883_GPIO_MODE_LNA_A_GPIO 0x3 +#define RT3883_GPIO_MODE_LNA_A _RT3883_GPIO_MODE_LNA_A(RT3883_GPIO_MODE_LNA_A_MASK) +#define RT3883_GPIO_MODE_LNA_G_SHIFT 18 +#define RT3883_GPIO_MODE_LNA_G_MASK 0x3 +#define _RT3883_GPIO_MODE_LNA_G(_x) ((_x) << RT3883_GPIO_MODE_LNA_G_SHIFT) +#define RT3883_GPIO_MODE_LNA_G_GPIO 0x3 +#define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK) + +#define RT3883_GPIO_I2C_SD 1 +#define RT3883_GPIO_I2C_SCLK 2 +#define RT3883_GPIO_SPI_CS0 3 +#define RT3883_GPIO_SPI_CLK 4 +#define RT3883_GPIO_SPI_MOSI 5 +#define RT3883_GPIO_SPI_MISO 6 +#define RT3883_GPIO_7 7 +#define RT3883_GPIO_10 10 +#define RT3883_GPIO_11 11 +#define RT3883_GPIO_14 14 +#define RT3883_GPIO_UART1_TXD 15 +#define 
RT3883_GPIO_UART1_RXD 16 +#define RT3883_GPIO_JTAG_TDO 17 +#define RT3883_GPIO_JTAG_TDI 18 +#define RT3883_GPIO_JTAG_TMS 19 +#define RT3883_GPIO_JTAG_TCLK 20 +#define RT3883_GPIO_JTAG_TRST_N 21 +#define RT3883_GPIO_MDIO_MDC 22 +#define RT3883_GPIO_MDIO_MDIO 23 +#define RT3883_GPIO_LNA_PE_A0 32 +#define RT3883_GPIO_LNA_PE_A1 33 +#define RT3883_GPIO_LNA_PE_A2 34 +#define RT3883_GPIO_LNA_PE_G0 35 +#define RT3883_GPIO_LNA_PE_G1 36 +#define RT3883_GPIO_LNA_PE_G2 37 +#define RT3883_GPIO_PCI_AD0 40 +#define RT3883_GPIO_PCI_AD31 71 +#define RT3883_GPIO_GE2_TXD0 72 +#define RT3883_GPIO_GE2_TXD1 73 +#define RT3883_GPIO_GE2_TXD2 74 +#define RT3883_GPIO_GE2_TXD3 75 +#define RT3883_GPIO_GE2_TXEN 76 +#define RT3883_GPIO_GE2_TXCLK 77 +#define RT3883_GPIO_GE2_RXD0 78 +#define RT3883_GPIO_GE2_RXD1 79 +#define RT3883_GPIO_GE2_RXD2 80 +#define RT3883_GPIO_GE2_RXD3 81 +#define RT3883_GPIO_GE2_RXDV 82 +#define RT3883_GPIO_GE2_RXCLK 83 +#define RT3883_GPIO_GE1_TXD0 84 +#define RT3883_GPIO_GE1_TXD1 85 +#define RT3883_GPIO_GE1_TXD2 86 +#define RT3883_GPIO_GE1_TXD3 87 +#define RT3883_GPIO_GE1_TXEN 88 +#define RT3883_GPIO_GE1_TXCLK 89 +#define RT3883_GPIO_GE1_RXD0 90 +#define RT3883_GPIO_GE1_RXD1 91 +#define RT3883_GPIO_GE1_RXD2 92 +#define RT3883_GPIO_GE1_RXD3 93 +#define RT3883_GPIO_GE1_RXDV 94 +#define RT3883_GPIO_GE1_RXCLK 95 + +#define RT3883_RSTCTRL_PCIE_PCI_PDM BIT(27) +#define RT3883_RSTCTRL_FLASH BIT(26) +#define RT3883_RSTCTRL_UDEV BIT(25) +#define RT3883_RSTCTRL_PCI BIT(24) +#define RT3883_RSTCTRL_PCIE BIT(23) +#define RT3883_RSTCTRL_UHST BIT(22) +#define RT3883_RSTCTRL_FE BIT(21) +#define RT3883_RSTCTRL_WLAN BIT(20) +#define RT3883_RSTCTRL_UART1 BIT(29) +#define RT3883_RSTCTRL_SPI BIT(18) +#define RT3883_RSTCTRL_I2S BIT(17) +#define RT3883_RSTCTRL_I2C BIT(16) +#define RT3883_RSTCTRL_NAND BIT(15) +#define RT3883_RSTCTRL_DMA BIT(14) +#define RT3883_RSTCTRL_PIO BIT(13) +#define RT3883_RSTCTRL_UART BIT(12) +#define RT3883_RSTCTRL_PCM BIT(11) +#define RT3883_RSTCTRL_MC BIT(10) +#define RT3883_RSTCTRL_INTC BIT(9) +#define RT3883_RSTCTRL_TIMER BIT(8) +#define RT3883_RSTCTRL_SYS BIT(0) + +#define RT3883_INTC_INT_SYSCTL BIT(0) +#define RT3883_INTC_INT_TIMER0 BIT(1) +#define RT3883_INTC_INT_TIMER1 BIT(2) +#define RT3883_INTC_INT_IA BIT(3) +#define RT3883_INTC_INT_PCM BIT(4) +#define RT3883_INTC_INT_UART0 BIT(5) +#define RT3883_INTC_INT_PIO BIT(6) +#define RT3883_INTC_INT_DMA BIT(7) +#define RT3883_INTC_INT_NAND BIT(8) +#define RT3883_INTC_INT_PERFC BIT(9) +#define RT3883_INTC_INT_I2S BIT(10) +#define RT3883_INTC_INT_UART1 BIT(12) +#define RT3883_INTC_INT_UHST BIT(18) +#define RT3883_INTC_INT_UDEV BIT(19) + +/* FLASH/SRAM/Codec Controller registers */ +#define RT3883_FSCC_REG_FLASH_CFG0 0x00 +#define RT3883_FSCC_REG_FLASH_CFG1 0x04 +#define RT3883_FSCC_REG_CODEC_CFG0 0x40 +#define RT3883_FSCC_REG_CODEC_CFG1 0x44 + +#define RT3883_FLASH_CFG_WIDTH_SHIFT 26 +#define RT3883_FLASH_CFG_WIDTH_MASK 0x3 +#define RT3883_FLASH_CFG_WIDTH_8BIT 0x0 +#define RT3883_FLASH_CFG_WIDTH_16BIT 0x1 +#define RT3883_FLASH_CFG_WIDTH_32BIT 0x2 + +#define RT3883_SDRAM_BASE 0x00000000 +#define RT3883_MEM_SIZE_MIN 2 +#define RT3883_MEM_SIZE_MAX 256 + +#endif /* _RT3883_REGS_H_ */ diff --git a/trunk/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h new file mode 100644 index 000000000000..181fbf4c976f --- /dev/null +++ b/trunk/arch/mips/include/asm/mach-ralink/rt3883/cpu-feature-overrides.h @@ -0,0 +1,55 @@ +/* + * Ralink RT3662/RT3883 specific CPU feature 
overrides + * + * Copyright (C) 2011-2013 Gabor Juhos + * + * This file was derived from: include/asm-mips/cpu-features.h + * Copyright (C) 2003, 2004 Ralf Baechle + * Copyright (C) 2004 Maciej W. Rozycki + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + */ +#ifndef _RT3883_CPU_FEATURE_OVERRIDES_H +#define _RT3883_CPU_FEATURE_OVERRIDES_H + +#define cpu_has_tlb 1 +#define cpu_has_4kex 1 +#define cpu_has_3k_cache 0 +#define cpu_has_4k_cache 1 +#define cpu_has_tx39_cache 0 +#define cpu_has_sb1_cache 0 +#define cpu_has_fpu 0 +#define cpu_has_32fpr 0 +#define cpu_has_counter 1 +#define cpu_has_watch 1 +#define cpu_has_divec 1 + +#define cpu_has_prefetch 1 +#define cpu_has_ejtag 1 +#define cpu_has_llsc 1 + +#define cpu_has_mips16 1 +#define cpu_has_mdmx 0 +#define cpu_has_mips3d 0 +#define cpu_has_smartmips 0 + +#define cpu_has_mips32r1 1 +#define cpu_has_mips32r2 1 +#define cpu_has_mips64r1 0 +#define cpu_has_mips64r2 0 + +#define cpu_has_dsp 1 +#define cpu_has_mipsmt 0 + +#define cpu_has_64bits 0 +#define cpu_has_64bit_zero_reg 0 +#define cpu_has_64bit_gp_regs 0 +#define cpu_has_64bit_addresses 0 + +#define cpu_dcache_line_size() 32 +#define cpu_icache_line_size() 32 + +#endif /* _RT3883_CPU_FEATURE_OVERRIDES_H */ diff --git a/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h index 193c0912d38e..bfbd7035d4c5 100644 --- a/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h +++ b/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h @@ -28,7 +28,11 @@ /* #define cpu_has_prefetch ? */ #define cpu_has_mcheck 1 /* #define cpu_has_ejtag ? */ +#ifdef CONFIG_CPU_MICROMIPS +#define cpu_has_llsc 0 +#else #define cpu_has_llsc 1 +#endif /* #define cpu_has_vtag_icache ? */ /* #define cpu_has_dc_aliases ? */ /* #define cpu_has_ic_fills_f_dc ? */ diff --git a/trunk/arch/mips/include/asm/mips-boards/generic.h b/trunk/arch/mips/include/asm/mips-boards/generic.h index 44a09a64160a..bd9746fbe4af 100644 --- a/trunk/arch/mips/include/asm/mips-boards/generic.h +++ b/trunk/arch/mips/include/asm/mips-boards/generic.h @@ -83,4 +83,7 @@ extern void mips_pcibios_init(void); #define mips_pcibios_init() do { } while (0) #endif +extern void mips_scroll_message(void); +extern void mips_display_message(const char *str); + #endif /* __ASM_MIPS_BOARDS_GENERIC_H */ diff --git a/trunk/arch/mips/include/asm/mips-boards/prom.h b/trunk/arch/mips/include/asm/mips-boards/prom.h deleted file mode 100644 index e7aed3e4ff58..000000000000 --- a/trunk/arch/mips/include/asm/mips-boards/prom.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. - * - * ######################################################################## - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. 
- * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * ######################################################################## - * - * MIPS boards bootprom interface for the Linux kernel. - * - */ - -#ifndef _MIPS_PROM_H -#define _MIPS_PROM_H - -extern char *prom_getcmdline(void); -extern char *prom_getenv(char *name); -extern void prom_init_cmdline(void); -extern void prom_meminit(void); -extern void prom_fixup_mem_map(unsigned long start_mem, unsigned long end_mem); -extern void mips_display_message(const char *str); -extern void mips_display_word(unsigned int num); -extern void mips_scroll_message(void); -extern int get_ethernet_addr(char *ethernet_addr); - -/* Memory descriptor management. */ -#define PROM_MAX_PMEMBLOCKS 32 -struct prom_pmemblock { - unsigned long base; /* Within KSEG0. */ - unsigned int size; /* In bytes. */ - unsigned int type; /* free or prom memory */ -}; - -#endif /* !(_MIPS_PROM_H) */ diff --git a/trunk/arch/mips/include/asm/mips_machine.h b/trunk/arch/mips/include/asm/mips_machine.h index 363bb352c7f7..9d00aebe9842 100644 --- a/trunk/arch/mips/include/asm/mips_machine.h +++ b/trunk/arch/mips/include/asm/mips_machine.h @@ -42,13 +42,9 @@ extern long __mips_machines_end; #ifdef CONFIG_MIPS_MACHINE int mips_machtype_setup(char *id) __init; void mips_machine_setup(void) __init; -void mips_set_machine_name(const char *name) __init; -char *mips_get_machine_name(void); #else static inline int mips_machtype_setup(char *id) { return 1; } static inline void mips_machine_setup(void) { } -static inline void mips_set_machine_name(const char *name) { } -static inline char *mips_get_machine_name(void) { return NULL; } #endif /* CONFIG_MIPS_MACHINE */ #endif /* __ASM_MIPS_MACHINE_H */ diff --git a/trunk/arch/mips/include/asm/mipsregs.h b/trunk/arch/mips/include/asm/mipsregs.h index 0da44d422f5b..87e6207b05e4 100644 --- a/trunk/arch/mips/include/asm/mipsregs.h +++ b/trunk/arch/mips/include/asm/mipsregs.h @@ -596,6 +596,7 @@ #define MIPS_CONF3_RXI (_ULCAST_(1) << 12) #define MIPS_CONF3_ULRI (_ULCAST_(1) << 13) #define MIPS_CONF3_ISA (_ULCAST_(3) << 14) +#define MIPS_CONF3_ISA_OE (_ULCAST_(3) << 16) #define MIPS_CONF3_VZ (_ULCAST_(1) << 23) #define MIPS_CONF4_MMUSIZEEXT (_ULCAST_(255) << 0) @@ -622,6 +623,24 @@ #ifndef __ASSEMBLY__ +/* + * Macros for handling the ISA mode bit for microMIPS. + */ +#define get_isa16_mode(x) ((x) & 0x1) +#define msk_isa16_mode(x) ((x) & ~0x1) +#define set_isa16_mode(x) do { (x) |= 0x1; } while(0) + +/* + * microMIPS instructions can be 16-bit or 32-bit in length. This + * returns a 1 if the instruction is 16-bit and a 0 if 32-bit. + */ +static inline int mm_insn_16bit(u16 insn) +{ + u16 opcode = (insn >> 10) & 0x7; + + return (opcode >= 1 && opcode <= 3) ? 1 : 0; +} + /* * Functions to access the R10000 performance counters. 
These are basically * mfc0 and mtc0 instructions from and to coprocessor register with a 5-bit diff --git a/trunk/arch/mips/include/asm/mmu_context.h b/trunk/arch/mips/include/asm/mmu_context.h index e81d719efcd1..820116067c10 100644 --- a/trunk/arch/mips/include/asm/mmu_context.h +++ b/trunk/arch/mips/include/asm/mmu_context.h @@ -26,10 +26,15 @@ #ifdef CONFIG_MIPS_PGD_C0_CONTEXT -#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ - tlbmiss_handler_setup_pgd((unsigned long)(pgd)) - -extern void tlbmiss_handler_setup_pgd(unsigned long pgd); +#define TLBMISS_HANDLER_SETUP_PGD(pgd) \ +do { \ + void (*tlbmiss_handler_setup_pgd)(unsigned long); \ + extern u32 tlbmiss_handler_setup_pgd_array[16]; \ + \ + tlbmiss_handler_setup_pgd = \ + (__typeof__(tlbmiss_handler_setup_pgd)) tlbmiss_handler_setup_pgd_array; \ + tlbmiss_handler_setup_pgd((unsigned long)(pgd)); \ +} while (0) #define TLBMISS_HANDLER_SETUP() \ do { \ @@ -106,15 +111,21 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) { + extern void kvm_local_flush_tlb_all(void); unsigned long asid = asid_cache(cpu); if (! ((asid += ASID_INC) & ASID_MASK) ) { if (cpu_has_vtag_icache) flush_icache_all(); +#ifdef CONFIG_VIRTUALIZATION + kvm_local_flush_tlb_all(); /* start new asid cycle */ +#else local_flush_tlb_all(); /* start new asid cycle */ +#endif if (!asid) /* fix version if needed */ asid = ASID_FIRST_VERSION; } + cpu_context(cpu, mm) = asid_cache(cpu) = asid; } @@ -133,7 +144,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) { int i; - for_each_online_cpu(i) + for_each_possible_cpu(i) cpu_context(i, mm) = 0; return 0; diff --git a/trunk/arch/mips/include/asm/netlogic/haldefs.h b/trunk/arch/mips/include/asm/netlogic/haldefs.h index 419d8aef8569..79c7cccdc22c 100644 --- a/trunk/arch/mips/include/asm/netlogic/haldefs.h +++ b/trunk/arch/mips/include/asm/netlogic/haldefs.h @@ -35,42 +35,13 @@ #ifndef __NLM_HAL_HALDEFS_H__ #define __NLM_HAL_HALDEFS_H__ +#include /* for local_irq_disable */ + /* * This file contains platform specific memory mapped IO implementation * and will provide a way to read 32/64 bit memory mapped registers in * all ABIs */ -#if !defined(CONFIG_64BIT) && defined(CONFIG_CPU_XLP) -#error "o32 compile not supported on XLP yet" -#endif -/* - * For o32 compilation, we have to disable interrupts and enable KX bit to - * access 64 bit addresses or data. - * - * We need to disable interrupts because we save just the lower 32 bits of - * registers in interrupt handling. So if we get hit by an interrupt while - * using the upper 32 bits of a register, we lose. - */ -static inline uint32_t nlm_save_flags_kx(void) -{ - return change_c0_status(ST0_KX | ST0_IE, ST0_KX); -} - -static inline uint32_t nlm_save_flags_cop2(void) -{ - return change_c0_status(ST0_CU2 | ST0_IE, ST0_CU2); -} - -static inline void nlm_restore_flags(uint32_t sr) -{ - write_c0_status(sr); -} - -/* - * The n64 implementations are simple, the o32 implementations when they - * are added, will have to disable interrupts and enable KX before doing - * 64 bit ops. - */ static inline uint32_t nlm_read_reg(uint64_t base, uint32_t reg) { @@ -87,13 +58,40 @@ nlm_write_reg(uint64_t base, uint32_t reg, uint32_t val) *addr = val; } +/* + * For o32 compilation, we have to disable interrupts to access 64 bit + * registers + * + * We need to disable interrupts because we save just the lower 32 bits of + * registers in interrupt handling. 
So if we get hit by an interrupt while + * using the upper 32 bits of a register, we lose. + */ + static inline uint64_t nlm_read_reg64(uint64_t base, uint32_t reg) { uint64_t addr = base + (reg >> 1) * sizeof(uint64_t); volatile uint64_t *ptr = (volatile uint64_t *)(long)addr; - - return *ptr; + uint64_t val; + + if (sizeof(unsigned long) == 4) { + unsigned long flags; + + local_irq_save(flags); + __asm__ __volatile__( + ".set push" "\n\t" + ".set mips64" "\n\t" + "ld %L0, %1" "\n\t" + "dsra32 %M0, %L0, 0" "\n\t" + "sll %L0, %L0, 0" "\n\t" + ".set pop" "\n" + : "=r" (val) + : "m" (*ptr)); + local_irq_restore(flags); + } else + val = *ptr; + + return val; } static inline void @@ -102,7 +100,25 @@ nlm_write_reg64(uint64_t base, uint32_t reg, uint64_t val) uint64_t addr = base + (reg >> 1) * sizeof(uint64_t); volatile uint64_t *ptr = (volatile uint64_t *)(long)addr; - *ptr = val; + if (sizeof(unsigned long) == 4) { + unsigned long flags; + uint64_t tmp; + + local_irq_save(flags); + __asm__ __volatile__( + ".set push" "\n\t" + ".set mips64" "\n\t" + "dsll32 %L0, %L0, 0" "\n\t" + "dsrl32 %L0, %L0, 0" "\n\t" + "dsll32 %M0, %M0, 0" "\n\t" + "or %L0, %L0, %M0" "\n\t" + "sd %L0, %2" "\n\t" + ".set pop" "\n" + : "=r" (tmp) + : "0" (val), "m" (*ptr)); + local_irq_restore(flags); + } else + *ptr = val; } /* @@ -143,14 +159,6 @@ nlm_pcicfg_base(uint32_t devoffset) return nlm_io_base + devoffset; } -static inline uint64_t -nlm_xkphys_map_pcibar0(uint64_t pcibase) -{ - uint64_t paddr; - - paddr = nlm_read_reg(pcibase, 0x4) & ~0xfu; - return (uint64_t)0x9000000000000000 | paddr; -} #elif defined(CONFIG_CPU_XLR) static inline uint64_t diff --git a/trunk/arch/mips/include/asm/netlogic/mips-extns.h b/trunk/arch/mips/include/asm/netlogic/mips-extns.h index 8ad2e0f81719..f299d31d7c1a 100644 --- a/trunk/arch/mips/include/asm/netlogic/mips-extns.h +++ b/trunk/arch/mips/include/asm/netlogic/mips-extns.h @@ -38,21 +38,16 @@ /* * XLR and XLP interrupt request and interrupt mask registers */ -#define read_c0_eirr() __read_64bit_c0_register($9, 6) -#define read_c0_eimr() __read_64bit_c0_register($9, 7) -#define write_c0_eirr(val) __write_64bit_c0_register($9, 6, val) - /* - * Writing EIMR in 32 bit is a special case, the lower 8 bit of the - * EIMR is shadowed in the status register, so we cannot save and - * restore status register for split read. + * NOTE: Do not save/restore flags around write_c0_eimr(). + * On non-R2 platforms the flags has part of EIMR that is shadowed in STATUS + * register. Restoring flags will overwrite the lower 8 bits of EIMR. + * + * Call with interrupts disabled. 
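+ *
+ * (Editor's note, illustrative only; not part of the original patch.)
+ * The intended calling pattern is therefore along the lines of:
+ *
+ *	local_irq_disable();
+ *	write_c0_eimr(new_mask);
+ *	local_irq_enable();
+ *
+ * i.e. re-enable with local_irq_enable() instead of restoring a STATUS
+ * value that was saved before the write, which would bring back the
+ * stale shadow of EIMR[7:0].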
*/ #define write_c0_eimr(val) \ do { \ if (sizeof(unsigned long) == 4) { \ - unsigned long __flags; \ - \ - local_irq_save(__flags); \ __asm__ __volatile__( \ ".set\tmips64\n\t" \ "dsll\t%L0, %L0, 32\n\t" \ @@ -62,8 +57,6 @@ do { \ "dmtc0\t%L0, $9, 7\n\t" \ ".set\tmips0" \ : : "r" (val)); \ - __flags = (__flags & 0xffff00ff) | (((val) & 0xff) << 8);\ - local_irq_restore(__flags); \ } else \ __write_64bit_c0_register($9, 7, (val)); \ } while (0) @@ -128,7 +121,7 @@ static inline uint64_t read_c0_eirr_and_eimr(void) uint64_t val; #ifdef CONFIG_64BIT - val = read_c0_eimr() & read_c0_eirr(); + val = __read_64bit_c0_register($9, 6) & __read_64bit_c0_register($9, 7); #else __asm__ __volatile__( ".set push\n\t" @@ -143,7 +136,6 @@ static inline uint64_t read_c0_eirr_and_eimr(void) ".set pop" : "=r" (val)); #endif - return val; } diff --git a/trunk/arch/mips/include/asm/netlogic/xlp-hal/pic.h b/trunk/arch/mips/include/asm/netlogic/xlp-hal/pic.h index 3df53017fe51..a981f4681a15 100644 --- a/trunk/arch/mips/include/asm/netlogic/xlp-hal/pic.h +++ b/trunk/arch/mips/include/asm/netlogic/xlp-hal/pic.h @@ -191,59 +191,6 @@ #define PIC_IRT_PCIE_LINK_2_INDEX 80 #define PIC_IRT_PCIE_LINK_3_INDEX 81 #define PIC_IRT_PCIE_LINK_INDEX(num) ((num) + PIC_IRT_PCIE_LINK_0_INDEX) -/* 78 to 81 */ -#define PIC_NUM_NA_IRTS 32 -/* 82 to 113 */ -#define PIC_IRT_NA_0_INDEX 82 -#define PIC_IRT_NA_INDEX(num) ((num) + PIC_IRT_NA_0_INDEX) -#define PIC_IRT_POE_INDEX 114 - -#define PIC_NUM_USB_IRTS 6 -#define PIC_IRT_USB_0_INDEX 115 -#define PIC_IRT_EHCI_0_INDEX 115 -#define PIC_IRT_OHCI_0_INDEX 116 -#define PIC_IRT_OHCI_1_INDEX 117 -#define PIC_IRT_EHCI_1_INDEX 118 -#define PIC_IRT_OHCI_2_INDEX 119 -#define PIC_IRT_OHCI_3_INDEX 120 -#define PIC_IRT_USB_INDEX(num) ((num) + PIC_IRT_USB_0_INDEX) -/* 115 to 120 */ -#define PIC_IRT_GDX_INDEX 121 -#define PIC_IRT_SEC_INDEX 122 -#define PIC_IRT_RSA_INDEX 123 - -#define PIC_NUM_COMP_IRTS 4 -#define PIC_IRT_COMP_0_INDEX 124 -#define PIC_IRT_COMP_INDEX(num) ((num) + PIC_IRT_COMP_0_INDEX) -/* 124 to 127 */ -#define PIC_IRT_GBU_INDEX 128 -#define PIC_IRT_ICC_0_INDEX 129 /* ICC - Inter Chip Coherency */ -#define PIC_IRT_ICC_1_INDEX 130 -#define PIC_IRT_ICC_2_INDEX 131 -#define PIC_IRT_CAM_INDEX 132 -#define PIC_IRT_UART_0_INDEX 133 -#define PIC_IRT_UART_1_INDEX 134 -#define PIC_IRT_I2C_0_INDEX 135 -#define PIC_IRT_I2C_1_INDEX 136 -#define PIC_IRT_SYS_0_INDEX 137 -#define PIC_IRT_SYS_1_INDEX 138 -#define PIC_IRT_JTAG_INDEX 139 -#define PIC_IRT_PIC_INDEX 140 -#define PIC_IRT_NBU_INDEX 141 -#define PIC_IRT_TCU_INDEX 142 -#define PIC_IRT_GCU_INDEX 143 /* GBC - Global Coherency */ -#define PIC_IRT_DMC_0_INDEX 144 -#define PIC_IRT_DMC_1_INDEX 145 - -#define PIC_NUM_GPIO_IRTS 4 -#define PIC_IRT_GPIO_0_INDEX 146 -#define PIC_IRT_GPIO_INDEX(num) ((num) + PIC_IRT_GPIO_0_INDEX) - -/* 146 to 149 */ -#define PIC_IRT_NOR_INDEX 150 -#define PIC_IRT_NAND_INDEX 151 -#define PIC_IRT_SPI_INDEX 152 -#define PIC_IRT_MMC_INDEX 153 #define PIC_CLOCK_TIMER 7 #define PIC_IRQ_BASE 8 diff --git a/trunk/arch/mips/include/asm/netlogic/xlp-hal/usb.h b/trunk/arch/mips/include/asm/netlogic/xlp-hal/usb.h deleted file mode 100644 index a9cd350dfb6c..000000000000 --- a/trunk/arch/mips/include/asm/netlogic/xlp-hal/usb.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Copyright (c) 2003-2012 Broadcom Corporation - * All Rights Reserved - * - * This software is available to you under a choice of one of two - * licenses. 
You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the Broadcom - * license below: - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR - * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, - * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN - * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __NLM_HAL_USB_H__ -#define __NLM_HAL_USB_H__ - -#define USB_CTL_0 0x01 -#define USB_PHY_0 0x0A -#define USB_PHY_RESET 0x01 -#define USB_PHY_PORT_RESET_0 0x10 -#define USB_PHY_PORT_RESET_1 0x20 -#define USB_CONTROLLER_RESET 0x01 -#define USB_INT_STATUS 0x0E -#define USB_INT_EN 0x0F -#define USB_PHY_INTERRUPT_EN 0x01 -#define USB_OHCI_INTERRUPT_EN 0x02 -#define USB_OHCI_INTERRUPT1_EN 0x04 -#define USB_OHCI_INTERRUPT2_EN 0x08 -#define USB_CTRL_INTERRUPT_EN 0x10 - -#ifndef __ASSEMBLY__ - -#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r) -#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v) -#define nlm_get_usb_pcibase(node, inst) \ - nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst)) -#define nlm_get_usb_hcd_base(node, inst) \ - nlm_xkphys_map_pcibar0(nlm_get_usb_pcibase(node, inst)) -#define nlm_get_usb_regbase(node, inst) \ - (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ) - -#endif -#endif /* __NLM_HAL_USB_H__ */ diff --git a/trunk/arch/mips/include/asm/page.h b/trunk/arch/mips/include/asm/page.h index eab99e536b5c..f59552fae917 100644 --- a/trunk/arch/mips/include/asm/page.h +++ b/trunk/arch/mips/include/asm/page.h @@ -46,7 +46,6 @@ #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ #include -#include extern void build_clear_page(void); extern void build_copy_page(void); @@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t; ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) #endif #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) +#include /* * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad @@ -171,14 +171,13 @@ typedef struct { unsigned long pgprot; } pgprot_t; #ifdef CONFIG_FLATMEM -#define pfn_valid(pfn) \ -({ \ - unsigned long __pfn = (pfn); \ - /* avoid include hell */ \ - extern unsigned long min_low_pfn; \ - \ - __pfn >= min_low_pfn && __pfn < max_mapnr; \ -}) +static inline int pfn_valid(unsigned long pfn) +{ + /* avoid include hell */ + extern unsigned long max_mapnr; + + return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr; +} 
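/*
 * (Editor's aside, illustrative only; not part of the patch.)  The usual
 * pattern this helper guards is pfn -> struct page conversion, roughly:
 *
 *	if (pfn_valid(pfn))
 *		page = pfn_to_page(pfn);
 */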
#elif defined(CONFIG_SPARSEMEM) diff --git a/trunk/arch/mips/include/asm/pgtable.h b/trunk/arch/mips/include/asm/pgtable.h index fdc62fb5630d..8b8f6b393363 100644 --- a/trunk/arch/mips/include/asm/pgtable.h +++ b/trunk/arch/mips/include/asm/pgtable.h @@ -8,6 +8,7 @@ #ifndef _ASM_PGTABLE_H #define _ASM_PGTABLE_H +#include #include #ifdef CONFIG_32BIT #include diff --git a/trunk/arch/mips/include/asm/processor.h b/trunk/arch/mips/include/asm/processor.h index 2a5fa7abb346..1470b7b68b0e 100644 --- a/trunk/arch/mips/include/asm/processor.h +++ b/trunk/arch/mips/include/asm/processor.h @@ -28,7 +28,6 @@ /* * System setup and hardware flags.. */ -extern void (*cpu_wait)(void); extern unsigned int vced_count, vcei_count; @@ -44,11 +43,16 @@ extern unsigned int vced_count, vcei_count; #define SPECIAL_PAGES_SIZE PAGE_SIZE #ifdef CONFIG_32BIT +#ifdef CONFIG_KVM_GUEST +/* User space process size is limited to 1GB in KVM Guest Mode */ +#define TASK_SIZE 0x3fff8000UL +#else /* * User space process size: 2GB. This is hardcoded into a few places, * so don't change it unless you know what you are doing. */ #define TASK_SIZE 0x7fff8000UL +#endif #ifdef __KERNEL__ #define STACK_TOP_MAX TASK_SIZE diff --git a/trunk/arch/mips/include/asm/prom.h b/trunk/arch/mips/include/asm/prom.h index 8808bf548b99..1e7e0961064b 100644 --- a/trunk/arch/mips/include/asm/prom.h +++ b/trunk/arch/mips/include/asm/prom.h @@ -48,4 +48,7 @@ extern void __dt_setup_arch(struct boot_param_header *bph); static inline void device_tree_init(void) { } #endif /* CONFIG_OF */ +extern char *mips_get_machine_name(void); +extern void mips_set_machine_name(const char *name); + #endif /* __ASM_PROM_H */ diff --git a/trunk/arch/mips/include/asm/sn/sn_private.h b/trunk/arch/mips/include/asm/sn/sn_private.h index 1a2c3025bf28..fdfae43d8b99 100644 --- a/trunk/arch/mips/include/asm/sn/sn_private.h +++ b/trunk/arch/mips/include/asm/sn/sn_private.h @@ -14,6 +14,6 @@ extern void install_cpu_nmi_handler(int slice); extern void install_ipi(void); extern void setup_replication_mask(void); extern void replicate_kernel_text(void); -extern pfn_t node_getfirstfree(cnodeid_t); +extern unsigned long node_getfirstfree(cnodeid_t); #endif /* __ASM_SN_SN_PRIVATE_H */ diff --git a/trunk/arch/mips/include/asm/sn/types.h b/trunk/arch/mips/include/asm/sn/types.h index c4813d67aec3..6d24d4e8b9ed 100644 --- a/trunk/arch/mips/include/asm/sn/types.h +++ b/trunk/arch/mips/include/asm/sn/types.h @@ -19,7 +19,6 @@ typedef signed char partid_t; /* partition ID type */ typedef signed short moduleid_t; /* user-visible module number type */ typedef signed short cmoduleid_t; /* kernel compact module id type */ typedef unsigned char clusterid_t; /* Clusterid of the cell */ -typedef unsigned long pfn_t; typedef dev_t vertex_hdl_t; /* hardware graph vertex handle */ diff --git a/trunk/arch/mips/include/asm/spinlock.h b/trunk/arch/mips/include/asm/spinlock.h index 5130c88d6420..78d201fb6c87 100644 --- a/trunk/arch/mips/include/asm/spinlock.h +++ b/trunk/arch/mips/include/asm/spinlock.h @@ -71,7 +71,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " nop \n" " srl %[my_ticket], %[ticket], 16 \n" " andi %[ticket], %[ticket], 0xffff \n" - " andi %[my_ticket], %[my_ticket], 0xffff \n" " bne %[ticket], %[my_ticket], 4f \n" " subu %[ticket], %[my_ticket], %[ticket] \n" "2: \n" @@ -105,7 +104,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " beqz %[my_ticket], 1b \n" " srl %[my_ticket], %[ticket], 16 \n" " andi %[ticket], %[ticket], 0xffff \n" - " andi %[my_ticket], 
%[my_ticket], 0xffff \n" " bne %[ticket], %[my_ticket], 4f \n" " subu %[ticket], %[my_ticket], %[ticket] \n" "2: \n" @@ -153,7 +151,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " \n" "1: ll %[ticket], %[ticket_ptr] \n" " srl %[my_ticket], %[ticket], 16 \n" - " andi %[my_ticket], %[my_ticket], 0xffff \n" " andi %[now_serving], %[ticket], 0xffff \n" " bne %[my_ticket], %[now_serving], 3f \n" " addu %[ticket], %[ticket], %[inc] \n" @@ -178,7 +175,6 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " \n" "1: ll %[ticket], %[ticket_ptr] \n" " srl %[my_ticket], %[ticket], 16 \n" - " andi %[my_ticket], %[my_ticket], 0xffff \n" " andi %[now_serving], %[ticket], 0xffff \n" " bne %[my_ticket], %[now_serving], 3f \n" " addu %[ticket], %[ticket], %[inc] \n" @@ -242,25 +238,16 @@ static inline void arch_read_lock(arch_rwlock_t *rw) : "m" (rw->lock) : "memory"); } else { - __asm__ __volatile__( - " .set noreorder # arch_read_lock \n" - "1: ll %1, %2 \n" - " bltz %1, 3f \n" - " addu %1, 1 \n" - "2: sc %1, %0 \n" - " beqz %1, 1b \n" - " nop \n" - " .subsection 2 \n" - "3: ll %1, %2 \n" - " bltz %1, 3b \n" - " addu %1, 1 \n" - " b 2b \n" - " nop \n" - " .previous \n" - " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) - : "memory"); + do { + __asm__ __volatile__( + "1: ll %1, %2 # arch_read_lock \n" + " bltz %1, 1b \n" + " addu %1, 1 \n" + "2: sc %1, %0 \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); + } while (unlikely(!tmp)); } smp_llsc_mb(); @@ -285,21 +272,15 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) : "m" (rw->lock) : "memory"); } else { - __asm__ __volatile__( - " .set noreorder # arch_read_unlock \n" - "1: ll %1, %2 \n" - " sub %1, 1 \n" - " sc %1, %0 \n" - " beqz %1, 2f \n" - " nop \n" - " .subsection 2 \n" - "2: b 1b \n" - " nop \n" - " .previous \n" - " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) - : "memory"); + do { + __asm__ __volatile__( + "1: ll %1, %2 # arch_read_unlock \n" + " sub %1, 1 \n" + " sc %1, %0 \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); + } while (unlikely(!tmp)); } } @@ -321,25 +302,16 @@ static inline void arch_write_lock(arch_rwlock_t *rw) : "m" (rw->lock) : "memory"); } else { - __asm__ __volatile__( - " .set noreorder # arch_write_lock \n" - "1: ll %1, %2 \n" - " bnez %1, 3f \n" - " lui %1, 0x8000 \n" - "2: sc %1, %0 \n" - " beqz %1, 3f \n" - " nop \n" - " .subsection 2 \n" - "3: ll %1, %2 \n" - " bnez %1, 3b \n" - " lui %1, 0x8000 \n" - " b 2b \n" - " nop \n" - " .previous \n" - " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp) - : "m" (rw->lock) - : "memory"); + do { + __asm__ __volatile__( + "1: ll %1, %2 # arch_write_lock \n" + " bnez %1, 1b \n" + " lui %1, 0x8000 \n" + "2: sc %1, %0 \n" + : "=m" (rw->lock), "=&r" (tmp) + : "m" (rw->lock) + : "memory"); + } while (unlikely(!tmp)); } smp_llsc_mb(); @@ -424,25 +396,21 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) : "m" (rw->lock) : "memory"); } else { - __asm__ __volatile__( - " .set noreorder # arch_write_trylock \n" - " li %2, 0 \n" - "1: ll %1, %3 \n" - " bnez %1, 2f \n" - " lui %1, 0x8000 \n" - " sc %1, %0 \n" - " beqz %1, 3f \n" - " li %2, 1 \n" - "2: \n" - __WEAK_LLSC_MB - " .subsection 2 \n" - "3: b 1b \n" - " li %2, 0 \n" - " .previous \n" - " .set reorder \n" - : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) - : "m" (rw->lock) - : "memory"); + do { + __asm__ __volatile__( + " ll %1, %3 # arch_write_trylock \n" + " li %2, 0 \n" + " bnez %1, 2f 
\n" + " lui %1, 0x8000 \n" + " sc %1, %0 \n" + " li %2, 1 \n" + "2: \n" + : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret) + : "m" (rw->lock) + : "memory"); + } while (unlikely(!tmp)); + + smp_llsc_mb(); } return ret; diff --git a/trunk/arch/mips/include/asm/stackframe.h b/trunk/arch/mips/include/asm/stackframe.h index c99384018161..a89d1b10d027 100644 --- a/trunk/arch/mips/include/asm/stackframe.h +++ b/trunk/arch/mips/include/asm/stackframe.h @@ -139,7 +139,7 @@ 1: move ra, k0 li k0, 3 mtc0 k0, $22 -#endif /* CONFIG_CPU_LOONGSON2F */ +#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */ #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) lui k1, %hi(kernelsp) #else @@ -189,6 +189,7 @@ LONG_S $0, PT_R0(sp) mfc0 v1, CP0_STATUS LONG_S $2, PT_R2(sp) + LONG_S v1, PT_STATUS(sp) #ifdef CONFIG_MIPS_MT_SMTC /* * Ideally, these instructions would be shuffled in @@ -200,21 +201,20 @@ LONG_S k0, PT_TCSTATUS(sp) #endif /* CONFIG_MIPS_MT_SMTC */ LONG_S $4, PT_R4(sp) - LONG_S $5, PT_R5(sp) - LONG_S v1, PT_STATUS(sp) mfc0 v1, CP0_CAUSE - LONG_S $6, PT_R6(sp) - LONG_S $7, PT_R7(sp) + LONG_S $5, PT_R5(sp) LONG_S v1, PT_CAUSE(sp) + LONG_S $6, PT_R6(sp) MFC0 v1, CP0_EPC + LONG_S $7, PT_R7(sp) #ifdef CONFIG_64BIT LONG_S $8, PT_R8(sp) LONG_S $9, PT_R9(sp) #endif + LONG_S v1, PT_EPC(sp) LONG_S $25, PT_R25(sp) LONG_S $28, PT_R28(sp) LONG_S $31, PT_R31(sp) - LONG_S v1, PT_EPC(sp) ori $28, sp, _THREAD_MASK xori $28, _THREAD_MASK #ifdef CONFIG_CPU_CAVIUM_OCTEON diff --git a/trunk/arch/mips/include/asm/thread_info.h b/trunk/arch/mips/include/asm/thread_info.h index 178f7924149a..895320e25662 100644 --- a/trunk/arch/mips/include/asm/thread_info.h +++ b/trunk/arch/mips/include/asm/thread_info.h @@ -58,8 +58,12 @@ struct thread_info { #define init_stack (init_thread_union.stack) /* How to get the thread information struct from C. 
*/ -register struct thread_info *__current_thread_info __asm__("$28"); -#define current_thread_info() __current_thread_info +static inline struct thread_info *current_thread_info(void) +{ + register struct thread_info *__current_thread_info __asm__("$28"); + + return __current_thread_info; +} #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/mips/include/asm/time.h b/trunk/arch/mips/include/asm/time.h index debc8009bd58..2d7b9df4542d 100644 --- a/trunk/arch/mips/include/asm/time.h +++ b/trunk/arch/mips/include/asm/time.h @@ -52,13 +52,15 @@ extern int (*perf_irq)(void); */ extern unsigned int __weak get_c0_compare_int(void); extern int r4k_clockevent_init(void); +extern int smtc_clockevent_init(void); +extern int gic_clockevent_init(void); static inline int mips_clockevent_init(void) { #ifdef CONFIG_MIPS_MT_SMTC - extern int smtc_clockevent_init(void); - return smtc_clockevent_init(); +#elif defined(CONFIG_CEVT_GIC) + return (gic_clockevent_init() | r4k_clockevent_init()); #elif defined(CONFIG_CEVT_R4K) return r4k_clockevent_init(); #else @@ -69,9 +71,7 @@ static inline int mips_clockevent_init(void) /* * Initialize the count register as a clocksource */ -#ifdef CONFIG_CSRC_R4K extern int init_r4k_clocksource(void); -#endif static inline int init_mips_clocksource(void) { diff --git a/trunk/arch/mips/include/asm/uaccess.h b/trunk/arch/mips/include/asm/uaccess.h index bd87e36bf26a..f3fa3750f577 100644 --- a/trunk/arch/mips/include/asm/uaccess.h +++ b/trunk/arch/mips/include/asm/uaccess.h @@ -23,7 +23,11 @@ */ #ifdef CONFIG_32BIT -#define __UA_LIMIT 0x80000000UL +#ifdef CONFIG_KVM_GUEST +#define __UA_LIMIT 0x40000000UL +#else +#define __UA_LIMIT 0x80000000UL +#endif #define __UA_ADDR ".word" #define __UA_LA "la" @@ -55,8 +59,13 @@ extern u64 __ua_limit; * address in this range it's the process's problem, not ours :-) */ +#ifdef CONFIG_KVM_GUEST +#define KERNEL_DS ((mm_segment_t) { 0x80000000UL }) +#define USER_DS ((mm_segment_t) { 0xC0000000UL }) +#else #define KERNEL_DS ((mm_segment_t) { 0UL }) #define USER_DS ((mm_segment_t) { __UA_LIMIT }) +#endif #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -261,6 +270,7 @@ do { \ __asm__ __volatile__( \ "1: " insn " %1, %3 \n" \ "2: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ " j 2b \n" \ @@ -287,7 +297,9 @@ do { \ __asm__ __volatile__( \ "1: lw %1, (%3) \n" \ "2: lw %D1, 4(%3) \n" \ - "3: .section .fixup,\"ax\" \n" \ + "3: \n" \ + " .insn \n" \ + " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ " move %1, $0 \n" \ " move %D1, $0 \n" \ @@ -355,6 +367,7 @@ do { \ __asm__ __volatile__( \ "1: " insn " %z2, %3 # __put_user_asm\n" \ "2: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ " j 2b \n" \ @@ -373,6 +386,7 @@ do { \ "1: sw %2, (%3) # __put_user_asm_ll32 \n" \ "2: sw %D2, 4(%3) \n" \ "3: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ " j 3b \n" \ @@ -524,6 +538,7 @@ do { \ __asm__ __volatile__( \ "1: " insn " %1, %3 \n" \ "2: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ " j 2b \n" \ @@ -549,7 +564,9 @@ do { \ "1: ulw %1, (%3) \n" \ "2: ulw %D1, 4(%3) \n" \ " move %0, $0 \n" \ - "3: .section .fixup,\"ax\" \n" \ + "3: \n" \ + " .insn \n" \ + " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ " move %1, $0 \n" \ " move %D1, $0 \n" \ @@ -616,6 +633,7 @@ do { \ __asm__ __volatile__( \ "1: " insn " %z2, %3 # __put_user_unaligned_asm\n" \ "2: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "3: li %0, %4 \n" \ " j 2b \n" \ @@ -634,6 +652,7 @@ 
do { \ "1: sw %2, (%3) # __put_user_unaligned_asm_ll32 \n" \ "2: sw %D2, 4(%3) \n" \ "3: \n" \ + " .insn \n" \ " .section .fixup,\"ax\" \n" \ "4: li %0, %4 \n" \ " j 3b \n" \ diff --git a/trunk/arch/mips/include/asm/uasm.h b/trunk/arch/mips/include/asm/uasm.h index 058e941626a6..370d967725c2 100644 --- a/trunk/arch/mips/include/asm/uasm.h +++ b/trunk/arch/mips/include/asm/uasm.h @@ -6,7 +6,7 @@ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) - * Copyright (C) 2012 MIPS Technologies, Inc. + * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. */ #include @@ -22,44 +22,75 @@ #define UASM_EXPORT_SYMBOL(sym) #endif +#define _UASM_ISA_CLASSIC 0 +#define _UASM_ISA_MICROMIPS 1 + +#ifndef UASM_ISA +#ifdef CONFIG_CPU_MICROMIPS +#define UASM_ISA _UASM_ISA_MICROMIPS +#else +#define UASM_ISA _UASM_ISA_CLASSIC +#endif +#endif + +#if (UASM_ISA == _UASM_ISA_CLASSIC) +#ifdef CONFIG_CPU_MICROMIPS +#define ISAOPC(op) CL_uasm_i##op +#define ISAFUNC(x) CL_##x +#else +#define ISAOPC(op) uasm_i##op +#define ISAFUNC(x) x +#endif +#elif (UASM_ISA == _UASM_ISA_MICROMIPS) +#ifdef CONFIG_CPU_MICROMIPS +#define ISAOPC(op) uasm_i##op +#define ISAFUNC(x) x +#else +#define ISAOPC(op) MM_uasm_i##op +#define ISAFUNC(x) MM_##x +#endif +#else +#error Unsupported micro-assembler ISA!!! +#endif + #define Ip_u1u2u3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u2u1u3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u3u1u2(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u1u2s3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) #define Ip_u2s3u1(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c) +ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c) #define Ip_u2u1s3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) #define Ip_u2u1msbu3(op) \ void __uasminit \ -uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ +ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ unsigned int d) #define Ip_u1u2(op) \ -void __uasminit uasm_i##op(u32 **buf, unsigned int a, unsigned int b) +void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b) #define Ip_u1s2(op) \ -void __uasminit uasm_i##op(u32 **buf, unsigned int a, signed int b) +void __uasminit ISAOPC(op)(u32 **buf, unsigned int a, signed int b) -#define Ip_u1(op) void __uasminit uasm_i##op(u32 **buf, unsigned int a) +#define Ip_u1(op) void __uasminit ISAOPC(op)(u32 **buf, unsigned int a) -#define Ip_0(op) void __uasminit uasm_i##op(u32 **buf) +#define Ip_0(op) void __uasminit ISAOPC(op)(u32 **buf) Ip_u2u1s3(_addiu); Ip_u3u1u2(_addu); @@ -132,19 +163,20 @@ struct uasm_label { int lab; }; -void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid); +void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, + int lid); #ifdef 
CONFIG_64BIT -int uasm_in_compat_space_p(long addr); +int ISAFUNC(uasm_in_compat_space_p)(long addr); #endif -int uasm_rel_hi(long val); -int uasm_rel_lo(long val); -void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr); -void UASM_i_LA(u32 **buf, unsigned int rs, long addr); +int ISAFUNC(uasm_rel_hi)(long val); +int ISAFUNC(uasm_rel_lo)(long val); +void ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr); +void ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr); #define UASM_L_LA(lb) \ -static inline void __uasminit uasm_l##lb(struct uasm_label **lab, u32 *addr) \ +static inline void __uasminit ISAFUNC(uasm_l##lb)(struct uasm_label **lab, u32 *addr) \ { \ - uasm_build_label(lab, addr, label##lb); \ + ISAFUNC(uasm_build_label)(lab, addr, label##lb); \ } /* convenience macros for instructions */ @@ -196,27 +228,27 @@ static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) - uasm_i_drotr(p, a1, a2, a3); + ISAOPC(_drotr)(p, a1, a2, a3); else - uasm_i_drotr32(p, a1, a2, a3 - 32); + ISAOPC(_drotr32)(p, a1, a2, a3 - 32); } static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) - uasm_i_dsll(p, a1, a2, a3); + ISAOPC(_dsll)(p, a1, a2, a3); else - uasm_i_dsll32(p, a1, a2, a3 - 32); + ISAOPC(_dsll32)(p, a1, a2, a3 - 32); } static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) - uasm_i_dsrl(p, a1, a2, a3); + ISAOPC(_dsrl)(p, a1, a2, a3); else - uasm_i_dsrl32(p, a1, a2, a3 - 32); + ISAOPC(_dsrl32)(p, a1, a2, a3 - 32); } /* Handle relocations. */ diff --git a/trunk/arch/mips/include/uapi/asm/inst.h b/trunk/arch/mips/include/uapi/asm/inst.h index 4d078815eaa5..0f4aec2ad1e6 100644 --- a/trunk/arch/mips/include/uapi/asm/inst.h +++ b/trunk/arch/mips/include/uapi/asm/inst.h @@ -7,6 +7,7 @@ * * Copyright (C) 1996, 2000 by Ralf Baechle * Copyright (C) 2006 by Thiemo Seufer + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ #ifndef _UAPI_ASM_INST_H #define _UAPI_ASM_INST_H @@ -192,6 +193,282 @@ enum lx_func { lbx_op = 0x16, }; +/* + * (microMIPS) Major opcodes. + */ +enum mm_major_op { + mm_pool32a_op, mm_pool16a_op, mm_lbu16_op, mm_move16_op, + mm_addi32_op, mm_lbu32_op, mm_sb32_op, mm_lb32_op, + mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op, + mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op, + mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op, + mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op, + mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op, + mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op, + mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op, + mm_slti32_op, mm_beq32_op, mm_swc132_op, mm_lwc132_op, + mm_reserved5_op, mm_reserved6_op, mm_sh16_op, mm_bnez16_op, + mm_sltiu32_op, mm_bne32_op, mm_sdc132_op, mm_ldc132_op, + mm_reserved7_op, mm_reserved8_op, mm_swsp16_op, mm_b16_op, + mm_andi32_op, mm_j32_op, mm_sd32_op, mm_ld32_op, + mm_reserved11_op, mm_reserved12_op, mm_sw16_op, mm_li16_op, + mm_jalx32_op, mm_jal32_op, mm_sw32_op, mm_lw32_op, +}; + +/* + * (microMIPS) POOL32I minor opcodes. 
+ */ +enum mm_32i_minor_op { + mm_bltz_op, mm_bltzal_op, mm_bgez_op, mm_bgezal_op, + mm_blez_op, mm_bnezc_op, mm_bgtz_op, mm_beqzc_op, + mm_tlti_op, mm_tgei_op, mm_tltiu_op, mm_tgeiu_op, + mm_tnei_op, mm_lui_op, mm_teqi_op, mm_reserved13_op, + mm_synci_op, mm_bltzals_op, mm_reserved14_op, mm_bgezals_op, + mm_bc2f_op, mm_bc2t_op, mm_reserved15_op, mm_reserved16_op, + mm_reserved17_op, mm_reserved18_op, mm_bposge64_op, mm_bposge32_op, + mm_bc1f_op, mm_bc1t_op, mm_reserved19_op, mm_reserved20_op, + mm_bc1any2f_op, mm_bc1any2t_op, mm_bc1any4f_op, mm_bc1any4t_op, +}; + +/* + * (microMIPS) POOL32A minor opcodes. + */ +enum mm_32a_minor_op { + mm_sll32_op = 0x000, + mm_ins_op = 0x00c, + mm_ext_op = 0x02c, + mm_pool32axf_op = 0x03c, + mm_srl32_op = 0x040, + mm_sra_op = 0x080, + mm_rotr_op = 0x0c0, + mm_lwxs_op = 0x118, + mm_addu32_op = 0x150, + mm_subu32_op = 0x1d0, + mm_and_op = 0x250, + mm_or32_op = 0x290, + mm_xor32_op = 0x310, +}; + +/* + * (microMIPS) POOL32B functions. + */ +enum mm_32b_func { + mm_lwc2_func = 0x0, + mm_lwp_func = 0x1, + mm_ldc2_func = 0x2, + mm_ldp_func = 0x4, + mm_lwm32_func = 0x5, + mm_cache_func = 0x6, + mm_ldm_func = 0x7, + mm_swc2_func = 0x8, + mm_swp_func = 0x9, + mm_sdc2_func = 0xa, + mm_sdp_func = 0xc, + mm_swm32_func = 0xd, + mm_sdm_func = 0xf, +}; + +/* + * (microMIPS) POOL32C functions. + */ +enum mm_32c_func { + mm_pref_func = 0x2, + mm_ll_func = 0x3, + mm_swr_func = 0x9, + mm_sc_func = 0xb, + mm_lwu_func = 0xe, +}; + +/* + * (microMIPS) POOL32AXF minor opcodes. + */ +enum mm_32axf_minor_op { + mm_mfc0_op = 0x003, + mm_mtc0_op = 0x00b, + mm_tlbp_op = 0x00d, + mm_jalr_op = 0x03c, + mm_tlbr_op = 0x04d, + mm_jalrhb_op = 0x07c, + mm_tlbwi_op = 0x08d, + mm_tlbwr_op = 0x0cd, + mm_jalrs_op = 0x13c, + mm_jalrshb_op = 0x17c, + mm_syscall_op = 0x22d, + mm_eret_op = 0x3cd, +}; + +/* + * (microMIPS) POOL32F minor opcodes. + */ +enum mm_32f_minor_op { + mm_32f_00_op = 0x00, + mm_32f_01_op = 0x01, + mm_32f_02_op = 0x02, + mm_32f_10_op = 0x08, + mm_32f_11_op = 0x09, + mm_32f_12_op = 0x0a, + mm_32f_20_op = 0x10, + mm_32f_30_op = 0x18, + mm_32f_40_op = 0x20, + mm_32f_41_op = 0x21, + mm_32f_42_op = 0x22, + mm_32f_50_op = 0x28, + mm_32f_51_op = 0x29, + mm_32f_52_op = 0x2a, + mm_32f_60_op = 0x30, + mm_32f_70_op = 0x38, + mm_32f_73_op = 0x3b, + mm_32f_74_op = 0x3c, +}; + +/* + * (microMIPS) POOL32F secondary minor opcodes. + */ +enum mm_32f_10_minor_op { + mm_lwxc1_op = 0x1, + mm_swxc1_op, + mm_ldxc1_op, + mm_sdxc1_op, + mm_luxc1_op, + mm_suxc1_op, +}; + +enum mm_32f_func { + mm_lwxc1_func = 0x048, + mm_swxc1_func = 0x088, + mm_ldxc1_func = 0x0c8, + mm_sdxc1_func = 0x108, +}; + +/* + * (microMIPS) POOL32F secondary minor opcodes. + */ +enum mm_32f_40_minor_op { + mm_fmovf_op, + mm_fmovt_op, +}; + +/* + * (microMIPS) POOL32F secondary minor opcodes. + */ +enum mm_32f_60_minor_op { + mm_fadd_op, + mm_fsub_op, + mm_fmul_op, + mm_fdiv_op, +}; + +/* + * (microMIPS) POOL32F secondary minor opcodes. + */ +enum mm_32f_70_minor_op { + mm_fmovn_op, + mm_fmovz_op, +}; + +/* + * (microMIPS) POOL32FXF secondary minor opcodes for POOL32F. 
+ */ +enum mm_32f_73_minor_op { + mm_fmov0_op = 0x01, + mm_fcvtl_op = 0x04, + mm_movf0_op = 0x05, + mm_frsqrt_op = 0x08, + mm_ffloorl_op = 0x0c, + mm_fabs0_op = 0x0d, + mm_fcvtw_op = 0x24, + mm_movt0_op = 0x25, + mm_fsqrt_op = 0x28, + mm_ffloorw_op = 0x2c, + mm_fneg0_op = 0x2d, + mm_cfc1_op = 0x40, + mm_frecip_op = 0x48, + mm_fceill_op = 0x4c, + mm_fcvtd0_op = 0x4d, + mm_ctc1_op = 0x60, + mm_fceilw_op = 0x6c, + mm_fcvts0_op = 0x6d, + mm_mfc1_op = 0x80, + mm_fmov1_op = 0x81, + mm_movf1_op = 0x85, + mm_ftruncl_op = 0x8c, + mm_fabs1_op = 0x8d, + mm_mtc1_op = 0xa0, + mm_movt1_op = 0xa5, + mm_ftruncw_op = 0xac, + mm_fneg1_op = 0xad, + mm_froundl_op = 0xcc, + mm_fcvtd1_op = 0xcd, + mm_froundw_op = 0xec, + mm_fcvts1_op = 0xed, +}; + +/* + * (microMIPS) POOL16C minor opcodes. + */ +enum mm_16c_minor_op { + mm_lwm16_op = 0x04, + mm_swm16_op = 0x05, + mm_jr16_op = 0x18, + mm_jrc_op = 0x1a, + mm_jalr16_op = 0x1c, + mm_jalrs16_op = 0x1e, +}; + +/* + * (microMIPS) POOL16D minor opcodes. + */ +enum mm_16d_minor_op { + mm_addius5_func, + mm_addiusp_func, +}; + +/* + * (MIPS16e) opcodes. + */ +enum MIPS16e_ops { + MIPS16e_jal_op = 003, + MIPS16e_ld_op = 007, + MIPS16e_i8_op = 014, + MIPS16e_sd_op = 017, + MIPS16e_lb_op = 020, + MIPS16e_lh_op = 021, + MIPS16e_lwsp_op = 022, + MIPS16e_lw_op = 023, + MIPS16e_lbu_op = 024, + MIPS16e_lhu_op = 025, + MIPS16e_lwpc_op = 026, + MIPS16e_lwu_op = 027, + MIPS16e_sb_op = 030, + MIPS16e_sh_op = 031, + MIPS16e_swsp_op = 032, + MIPS16e_sw_op = 033, + MIPS16e_rr_op = 035, + MIPS16e_extend_op = 036, + MIPS16e_i64_op = 037, +}; + +enum MIPS16e_i64_func { + MIPS16e_ldsp_func, + MIPS16e_sdsp_func, + MIPS16e_sdrasp_func, + MIPS16e_dadjsp_func, + MIPS16e_ldpc_func, +}; + +enum MIPS16e_rr_func { + MIPS16e_jr_func, +}; + +enum MIPS6e_i8_func { + MIPS16e_swrasp_func = 02, +}; + +/* + * (microMIPS & MIPS16e) NOP instruction. + */ +#define MM_NOP16 0x0c00 + /* * Damn ... bitfields depend from byteorder :-( */ @@ -311,6 +588,262 @@ struct v_format { /* MDMX vector format */ ;))))))) }; +/* + * microMIPS instruction formats (32-bit length) + * + * NOTE: + * Parenthesis denote whether the format is a microMIPS instruction or + * if it is MIPS32 instruction re-encoded for use in the microMIPS ASE. 
+ */ +struct fb_format { /* FPU branch format (MIPS32) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int bc : 5, + BITFIELD_FIELD(unsigned int cc : 3, + BITFIELD_FIELD(unsigned int flag : 2, + BITFIELD_FIELD(signed int simmediate : 16, + ;))))) +}; + +struct fp0_format { /* FPU multiply and add format (MIPS32) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int fmt : 5, + BITFIELD_FIELD(unsigned int ft : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp0_format { /* FPU multipy and add format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int ft : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int fmt : 3, + BITFIELD_FIELD(unsigned int op : 2, + BITFIELD_FIELD(unsigned int func : 6, + ;))))))) +}; + +struct fp1_format { /* FPU mfc1 and cfc1 format (MIPS32) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int op : 5, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp1_format { /* FPU mfc1 and cfc1 format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fmt : 2, + BITFIELD_FIELD(unsigned int op : 8, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp2_format { /* FPU movt and movf format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int cc : 3, + BITFIELD_FIELD(unsigned int zero : 2, + BITFIELD_FIELD(unsigned int fmt : 2, + BITFIELD_FIELD(unsigned int op : 3, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))))) +}; + +struct mm_fp3_format { /* FPU abs and neg format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fmt : 3, + BITFIELD_FIELD(unsigned int op : 7, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp4_format { /* FPU c.cond format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int cc : 3, + BITFIELD_FIELD(unsigned int fmt : 3, + BITFIELD_FIELD(unsigned int cond : 4, + BITFIELD_FIELD(unsigned int func : 6, + ;))))))) +}; + +struct mm_fp5_format { /* FPU lwxc1 and swxc1 format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int index : 5, + BITFIELD_FIELD(unsigned int base : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int op : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct fp6_format { /* FPU madd and msub format (MIPS IV) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int fr : 5, + BITFIELD_FIELD(unsigned int ft : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_fp6_format { /* FPU madd and msub format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int ft : 5, + BITFIELD_FIELD(unsigned int fs : 5, + BITFIELD_FIELD(unsigned int fd : 5, + BITFIELD_FIELD(unsigned int fr : 5, + 
BITFIELD_FIELD(unsigned int func : 6, + ;)))))) +}; + +struct mm_i_format { /* Immediate format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(unsigned int rs : 5, + BITFIELD_FIELD(signed int simmediate : 16, + ;)))) +}; + +struct mm_m_format { /* Multi-word load/store format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rd : 5, + BITFIELD_FIELD(unsigned int base : 5, + BITFIELD_FIELD(unsigned int func : 4, + BITFIELD_FIELD(signed int simmediate : 12, + ;))))) +}; + +struct mm_x_format { /* Scaled indexed load format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int index : 5, + BITFIELD_FIELD(unsigned int base : 5, + BITFIELD_FIELD(unsigned int rd : 5, + BITFIELD_FIELD(unsigned int func : 11, + ;))))) +}; + +/* + * microMIPS instruction formats (16-bit length) + */ +struct mm_b0_format { /* Unconditional branch format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(signed int simmediate : 10, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;))) +}; + +struct mm_b1_format { /* Conditional branch format (microMIPS) */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rs : 3, + BITFIELD_FIELD(signed int simmediate : 7, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;)))) +}; + +struct mm16_m_format { /* Multi-word load/store format */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int func : 4, + BITFIELD_FIELD(unsigned int rlist : 2, + BITFIELD_FIELD(unsigned int imm : 4, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;))))) +}; + +struct mm16_rb_format { /* Signed immediate format */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 3, + BITFIELD_FIELD(unsigned int base : 3, + BITFIELD_FIELD(signed int simmediate : 4, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;))))) +}; + +struct mm16_r3_format { /* Load from global pointer format */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 3, + BITFIELD_FIELD(signed int simmediate : 7, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;)))) +}; + +struct mm16_r5_format { /* Load/store from stack pointer format */ + BITFIELD_FIELD(unsigned int opcode : 6, + BITFIELD_FIELD(unsigned int rt : 5, + BITFIELD_FIELD(signed int simmediate : 5, + BITFIELD_FIELD(unsigned int : 16, /* Ignored */ + ;)))) +}; + +/* + * MIPS16e instruction formats (16-bit length) + */ +struct m16e_rr { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int rx : 3, + BITFIELD_FIELD(unsigned int nd : 1, + BITFIELD_FIELD(unsigned int l : 1, + BITFIELD_FIELD(unsigned int ra : 1, + BITFIELD_FIELD(unsigned int func : 5, + ;)))))) +}; + +struct m16e_jal { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int x : 1, + BITFIELD_FIELD(unsigned int imm20_16 : 5, + BITFIELD_FIELD(signed int imm25_21 : 5, + ;)))) +}; + +struct m16e_i64 { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int func : 3, + BITFIELD_FIELD(unsigned int imm : 8, + ;))) +}; + +struct m16e_ri64 { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int func : 3, + BITFIELD_FIELD(unsigned int ry : 3, + BITFIELD_FIELD(unsigned int imm : 5, + ;)))) +}; + +struct m16e_ri { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int rx : 3, + BITFIELD_FIELD(unsigned int imm : 8, + ;))) +}; + +struct m16e_rri { + BITFIELD_FIELD(unsigned int opcode : 5, + 
BITFIELD_FIELD(unsigned int rx : 3, + BITFIELD_FIELD(unsigned int ry : 3, + BITFIELD_FIELD(unsigned int imm : 5, + ;)))) +}; + +struct m16e_i8 { + BITFIELD_FIELD(unsigned int opcode : 5, + BITFIELD_FIELD(unsigned int func : 3, + BITFIELD_FIELD(unsigned int imm : 8, + ;))) +}; + union mips_instruction { unsigned int word; unsigned short halfword[2]; @@ -326,6 +859,37 @@ union mips_instruction { struct b_format b_format; struct ps_format ps_format; struct v_format v_format; + struct fb_format fb_format; + struct fp0_format fp0_format; + struct mm_fp0_format mm_fp0_format; + struct fp1_format fp1_format; + struct mm_fp1_format mm_fp1_format; + struct mm_fp2_format mm_fp2_format; + struct mm_fp3_format mm_fp3_format; + struct mm_fp4_format mm_fp4_format; + struct mm_fp5_format mm_fp5_format; + struct fp6_format fp6_format; + struct mm_fp6_format mm_fp6_format; + struct mm_i_format mm_i_format; + struct mm_m_format mm_m_format; + struct mm_x_format mm_x_format; + struct mm_b0_format mm_b0_format; + struct mm_b1_format mm_b1_format; + struct mm16_m_format mm16_m_format ; + struct mm16_rb_format mm16_rb_format; + struct mm16_r3_format mm16_r3_format; + struct mm16_r5_format mm16_r5_format; +}; + +union mips16e_instruction { + unsigned int full : 16; + struct m16e_rr rr; + struct m16e_jal jal; + struct m16e_i64 i64; + struct m16e_ri64 ri64; + struct m16e_ri ri; + struct m16e_rri rri; + struct m16e_i8 i8; }; #endif /* _UAPI_ASM_INST_H */ diff --git a/trunk/arch/mips/include/uapi/asm/kvm.h b/trunk/arch/mips/include/uapi/asm/kvm.h new file mode 100644 index 000000000000..85789eacbf18 --- /dev/null +++ b/trunk/arch/mips/include/uapi/asm/kvm.h @@ -0,0 +1,55 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal +*/ + +#ifndef __LINUX_KVM_MIPS_H +#define __LINUX_KVM_MIPS_H + +#include + +#define __KVM_MIPS + +#define N_MIPS_COPROC_REGS 32 +#define N_MIPS_COPROC_SEL 8 + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { + __u32 gprs[32]; + __u32 hi; + __u32 lo; + __u32 pc; + + __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { +}; + +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +struct kvm_mips_interrupt { + /* in */ + __u32 cpu; + __u32 irq; +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +#endif /* __LINUX_KVM_MIPS_H */ diff --git a/trunk/arch/mips/include/uapi/asm/unistd.h b/trunk/arch/mips/include/uapi/asm/unistd.h index 16338b84fa79..1dee279f9665 100644 --- a/trunk/arch/mips/include/uapi/asm/unistd.h +++ b/trunk/arch/mips/include/uapi/asm/unistd.h @@ -694,16 +694,17 @@ #define __NR_process_vm_writev (__NR_Linux + 305) #define __NR_kcmp (__NR_Linux + 306) #define __NR_finit_module (__NR_Linux + 307) +#define __NR_getdents64 (__NR_Linux + 308) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 307 +#define __NR_Linux_syscalls 308 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 307 +#define __NR_64_Linux_syscalls 308 #if _MIPS_SIM == _MIPS_SIM_NABI32 diff --git a/trunk/arch/mips/kernel/Makefile b/trunk/arch/mips/kernel/Makefile index 520a908d45d6..423d871a946b 100644 --- a/trunk/arch/mips/kernel/Makefile +++ b/trunk/arch/mips/kernel/Makefile @@ -4,8 +4,8 @@ extra-y := head.o vmlinux.lds -obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ - ptrace.o reset.o setup.o signal.o syscall.o \ +obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \ + prom.o ptrace.o reset.o setup.o signal.o syscall.o \ time.o topology.o traps.o unaligned.o watch.o vdso.o ifdef CONFIG_FUNCTION_TRACER @@ -19,15 +19,16 @@ obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o +obj-$(CONFIG_CEVT_GIC) += cevt-gic.o obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o +obj-$(CONFIG_CSRC_GIC) += csrc-gic.o obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o obj-$(CONFIG_CSRC_POWERTV) += csrc-powertv.o obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o -obj-$(CONFIG_CSRC_GIC) += csrc-gic.o obj-$(CONFIG_SYNC_R4K) += sync-r4k.o obj-$(CONFIG_STACKTRACE) += stacktrace.o @@ -86,8 +87,6 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o -obj-$(CONFIG_OF) += prom.o - CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o diff --git a/trunk/arch/mips/kernel/asm-offsets.c b/trunk/arch/mips/kernel/asm-offsets.c index 50285b2c7ffe..0845091ba480 100644 --- a/trunk/arch/mips/kernel/asm-offsets.c +++ b/trunk/arch/mips/kernel/asm-offsets.c @@ -17,6 +17,8 @@ #include #include +#include + void output_ptreg_defines(void) { COMMENT("MIPS pt_regs offsets."); @@ -328,3 +330,67 @@ void 
output_pbe_defines(void) BLANK(); } #endif + +void output_kvm_defines(void) +{ + COMMENT(" KVM/MIPS Specfic offsets. "); + DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch)); + OFFSET(VCPU_RUN, kvm_vcpu, run); + OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch); + + OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase); + OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase); + + OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack); + OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp); + + OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr); + OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause); + OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc); + OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi); + + OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst); + + OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]); + OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]); + OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]); + OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]); + OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]); + OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]); + OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]); + OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]); + OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]); + OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]); + OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]); + OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]); + OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]); + OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]); + OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]); + OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]); + OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]); + OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]); + OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]); + OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]); + OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]); + OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]); + OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]); + OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]); + OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]); + OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]); + OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]); + OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]); + OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]); + OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]); + OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]); + OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]); + OFFSET(VCPU_LO, kvm_vcpu_arch, lo); + OFFSET(VCPU_HI, kvm_vcpu_arch, hi); + OFFSET(VCPU_PC, kvm_vcpu_arch, pc); + OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0); + OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid); + OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid); + + OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]); + OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]); + BLANK(); +} diff --git a/trunk/arch/mips/kernel/binfmt_elfo32.c b/trunk/arch/mips/kernel/binfmt_elfo32.c index 556a4357d7fc..97c5a1668e53 100644 --- a/trunk/arch/mips/kernel/binfmt_elfo32.c +++ b/trunk/arch/mips/kernel/binfmt_elfo32.c @@ -48,7 +48,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; __res; \ }) +#ifdef CONFIG_KVM_GUEST +#define TASK32_SIZE 0x3fff8000UL +#else #define TASK32_SIZE 0x7fff8000UL +#endif #undef ELF_ET_DYN_BASE #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) diff --git a/trunk/arch/mips/kernel/branch.c b/trunk/arch/mips/kernel/branch.c index 83ffe950f710..46c2ad0703a0 100644 --- a/trunk/arch/mips/kernel/branch.c +++ b/trunk/arch/mips/kernel/branch.c @@ -14,10 +14,186 @@ #include #include #include +#include #include #include #include +/* + * Calculate and return exception PC in case of branch delay slot + * for microMIPS and MIPS16e. It does not clear the ISA mode bit. 
+ */ +int __isa_exception_epc(struct pt_regs *regs) +{ + unsigned short inst; + long epc = regs->cp0_epc; + + /* Calculate exception PC in branch delay slot. */ + if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) { + /* This should never happen because delay slot was checked. */ + force_sig(SIGSEGV, current); + return epc; + } + if (cpu_has_mips16) { + if (((union mips16e_instruction)inst).ri.opcode + == MIPS16e_jal_op) + epc += 4; + else + epc += 2; + } else if (mm_insn_16bit(inst)) + epc += 2; + else + epc += 4; + + return epc; +} + +/* + * Compute return address and emulate branch in microMIPS mode after an + * exception only. It does not handle compact branches/jumps and cannot + * be used in interrupt context. (Compact branches/jumps do not cause + * exceptions.) + */ +int __microMIPS_compute_return_epc(struct pt_regs *regs) +{ + u16 __user *pc16; + u16 halfword; + unsigned int word; + unsigned long contpc; + struct mm_decoded_insn mminsn = { 0 }; + + mminsn.micro_mips_mode = 1; + + /* This load never faults. */ + pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); + __get_user(halfword, pc16); + pc16++; + contpc = regs->cp0_epc + 2; + word = ((unsigned int)halfword << 16); + mminsn.pc_inc = 2; + + if (!mm_insn_16bit(halfword)) { + __get_user(halfword, pc16); + pc16++; + contpc = regs->cp0_epc + 4; + mminsn.pc_inc = 4; + word |= halfword; + } + mminsn.insn = word; + + if (get_user(halfword, pc16)) + goto sigsegv; + mminsn.next_pc_inc = 2; + word = ((unsigned int)halfword << 16); + + if (!mm_insn_16bit(halfword)) { + pc16++; + if (get_user(halfword, pc16)) + goto sigsegv; + mminsn.next_pc_inc = 4; + word |= halfword; + } + mminsn.next_insn = word; + + mm_isBranchInstr(regs, mminsn, &contpc); + + regs->cp0_epc = contpc; + + return 0; + +sigsegv: + force_sig(SIGSEGV, current); + return -EFAULT; +} + +/* + * Compute return address and emulate branch in MIPS16e mode after an + * exception only. It does not handle compact branches/jumps and cannot + * be used in interrupt context. (Compact branches/jumps do not cause + * exceptions.) + */ +int __MIPS16e_compute_return_epc(struct pt_regs *regs) +{ + u16 __user *addr; + union mips16e_instruction inst; + u16 inst2; + u32 fullinst; + long epc; + + epc = regs->cp0_epc; + + /* Read the instruction. */ + addr = (u16 __user *)msk_isa16_mode(epc); + if (__get_user(inst.full, addr)) { + force_sig(SIGSEGV, current); + return -EFAULT; + } + + switch (inst.ri.opcode) { + case MIPS16e_extend_op: + regs->cp0_epc += 4; + return 0; + + /* + * JAL and JALX in MIPS16e mode + */ + case MIPS16e_jal_op: + addr += 1; + if (__get_user(inst2, addr)) { + force_sig(SIGSEGV, current); + return -EFAULT; + } + fullinst = ((unsigned)inst.full << 16) | inst2; + regs->regs[31] = epc + 6; + epc += 4; + epc >>= 28; + epc <<= 28; + /* + * JAL:5 X:1 TARGET[20-16]:5 TARGET[25:21]:5 TARGET[15:0]:16 + * + * ......TARGET[15:0].................TARGET[20:16]........... + * ......TARGET[25:21] + */ + epc |= + ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) | + ((fullinst & 0x1f0000) << 7); + if (!inst.jal.x) + set_isa16_mode(epc); /* Set ISA mode bit. 
*/ + regs->cp0_epc = epc; + return 0; + + /* + * J(AL)R(C) + */ + case MIPS16e_rr_op: + if (inst.rr.func == MIPS16e_jr_func) { + + if (inst.rr.ra) + regs->cp0_epc = regs->regs[31]; + else + regs->cp0_epc = + regs->regs[reg16to32[inst.rr.rx]]; + + if (inst.rr.l) { + if (inst.rr.nd) + regs->regs[31] = epc + 2; + else + regs->regs[31] = epc + 4; + } + return 0; + } + break; + } + + /* + * All other cases have no branch delay slot and are 16-bits. + * Branches do not cause an exception. + */ + regs->cp0_epc += 2; + + return 0; +} + /** * __compute_return_epc_for_insn - Computes the return address and do emulate * branch simulation, if required. @@ -129,6 +305,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, epc <<= 28; epc |= (insn.j_format.target << 2); regs->cp0_epc = epc; + if (insn.i_format.opcode == jalx_op) + set_isa16_mode(regs->cp0_epc); break; /* diff --git a/trunk/arch/mips/kernel/cevt-gic.c b/trunk/arch/mips/kernel/cevt-gic.c new file mode 100644 index 000000000000..730eaf92c018 --- /dev/null +++ b/trunk/arch/mips/kernel/cevt-gic.c @@ -0,0 +1,104 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Imagination Technologies Ltd. + */ +#include +#include +#include +#include +#include + +#include +#include +#include + +DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device); +int gic_timer_irq_installed; + + +static int gic_next_event(unsigned long delta, struct clock_event_device *evt) +{ + u64 cnt; + int res; + + cnt = gic_read_count(); + cnt += (u64)delta; + gic_write_compare(cnt); + res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; + return res; +} + +void gic_set_clock_mode(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + /* Nothing to do ... 
*/ +} + +irqreturn_t gic_compare_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *cd; + int cpu = smp_processor_id(); + + gic_write_compare(gic_read_compare()); + cd = &per_cpu(gic_clockevent_device, cpu); + cd->event_handler(cd); + return IRQ_HANDLED; +} + +struct irqaction gic_compare_irqaction = { + .handler = gic_compare_interrupt, + .flags = IRQF_PERCPU | IRQF_TIMER, + .name = "timer", +}; + + +void gic_event_handler(struct clock_event_device *dev) +{ +} + +int __cpuinit gic_clockevent_init(void) +{ + unsigned int cpu = smp_processor_id(); + struct clock_event_device *cd; + unsigned int irq; + + if (!cpu_has_counter || !gic_frequency) + return -ENXIO; + + irq = MIPS_GIC_IRQ_BASE; + + cd = &per_cpu(gic_clockevent_device, cpu); + + cd->name = "MIPS GIC"; + cd->features = CLOCK_EVT_FEAT_ONESHOT; + + clockevent_set_clock(cd, gic_frequency); + + /* Calculate the min / max delta */ + cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); + cd->min_delta_ns = clockevent_delta2ns(0x300, cd); + + cd->rating = 300; + cd->irq = irq; + cd->cpumask = cpumask_of(cpu); + cd->set_next_event = gic_next_event; + cd->set_mode = gic_set_clock_mode; + cd->event_handler = gic_event_handler; + + clockevents_register_device(cd); + + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002); + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK); + + if (gic_timer_irq_installed) + return 0; + + gic_timer_irq_installed = 1; + + setup_irq(irq, &gic_compare_irqaction); + irq_set_handler(irq, handle_percpu_irq); + return 0; +} diff --git a/trunk/arch/mips/kernel/cevt-r4k.c b/trunk/arch/mips/kernel/cevt-r4k.c index 07b847d77f5d..02033eaf8825 100644 --- a/trunk/arch/mips/kernel/cevt-r4k.c +++ b/trunk/arch/mips/kernel/cevt-r4k.c @@ -23,7 +23,6 @@ */ #ifndef CONFIG_MIPS_MT_SMTC - static int mips_next_event(unsigned long delta, struct clock_event_device *evt) { @@ -49,7 +48,6 @@ DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); int cp0_timer_irq_installed; #ifndef CONFIG_MIPS_MT_SMTC - irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { const int r2 = cpu_has_mips_r2; @@ -74,6 +72,9 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id) /* Clear Count/Compare Interrupt */ write_c0_compare(read_c0_compare()); cd = &per_cpu(mips_clockevent_device, cpu); +#ifdef CONFIG_CEVT_GIC + if (!gic_present) +#endif cd->event_handler(cd); } @@ -118,6 +119,10 @@ int c0_compare_int_usable(void) unsigned int delta; unsigned int cnt; +#ifdef CONFIG_KVM_GUEST + return 1; +#endif + /* * IP7 already pending? Try to clear it by acking the timer. */ @@ -166,7 +171,6 @@ int c0_compare_int_usable(void) } #ifndef CONFIG_MIPS_MT_SMTC - int __cpuinit r4k_clockevent_init(void) { unsigned int cpu = smp_processor_id(); @@ -206,6 +210,9 @@ int __cpuinit r4k_clockevent_init(void) cd->set_mode = mips_set_clock_mode; cd->event_handler = mips_event_handler; +#ifdef CONFIG_CEVT_GIC + if (!gic_present) +#endif clockevents_register_device(cd); if (cp0_timer_irq_installed) diff --git a/trunk/arch/mips/kernel/cpu-probe.c b/trunk/arch/mips/kernel/cpu-probe.c index 5fe66a0c3224..c6568bf4b1b0 100644 --- a/trunk/arch/mips/kernel/cpu-probe.c +++ b/trunk/arch/mips/kernel/cpu-probe.c @@ -27,105 +27,6 @@ #include #include -/* - * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, - * the implementation of the "wait" feature differs between CPU families. This - * points to the function that implements CPU specific wait. 
- * The wait instruction stops the pipeline and reduces the power consumption of - * the CPU very much. - */ -void (*cpu_wait)(void); -EXPORT_SYMBOL(cpu_wait); - -static void r3081_wait(void) -{ - unsigned long cfg = read_c0_conf(); - write_c0_conf(cfg | R30XX_CONF_HALT); -} - -static void r39xx_wait(void) -{ - local_irq_disable(); - if (!need_resched()) - write_c0_conf(read_c0_conf() | TX39_CONF_HALT); - local_irq_enable(); -} - -extern void r4k_wait(void); - -/* - * This variant is preferable as it allows testing need_resched and going to - * sleep depending on the outcome atomically. Unfortunately the "It is - * implementation-dependent whether the pipeline restarts when a non-enabled - * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes - * using this version a gamble. - */ -void r4k_wait_irqoff(void) -{ - local_irq_disable(); - if (!need_resched()) - __asm__(" .set push \n" - " .set mips3 \n" - " wait \n" - " .set pop \n"); - local_irq_enable(); - __asm__(" .globl __pastwait \n" - "__pastwait: \n"); -} - -/* - * The RM7000 variant has to handle erratum 38. The workaround is to not - * have any pending stores when the WAIT instruction is executed. - */ -static void rm7k_wait_irqoff(void) -{ - local_irq_disable(); - if (!need_resched()) - __asm__( - " .set push \n" - " .set mips3 \n" - " .set noat \n" - " mfc0 $1, $12 \n" - " sync \n" - " mtc0 $1, $12 # stalls until W stage \n" - " wait \n" - " mtc0 $1, $12 # stalls until W stage \n" - " .set pop \n"); - local_irq_enable(); -} - -/* - * The Au1xxx wait is available only if using 32khz counter or - * external timer source, but specifically not CP0 Counter. - * alchemy/common/time.c may override cpu_wait! - */ -static void au1k_wait(void) -{ - __asm__(" .set mips3 \n" - " cache 0x14, 0(%0) \n" - " cache 0x14, 32(%0) \n" - " sync \n" - " nop \n" - " wait \n" - " nop \n" - " nop \n" - " nop \n" - " nop \n" - " .set mips0 \n" - : : "r" (au1k_wait)); -} - -static int __initdata nowait; - -static int __init wait_disable(char *s) -{ - nowait = 1; - - return 1; -} - -__setup("nowait", wait_disable); - static int __cpuinitdata mips_fpu_disabled; static int __init fpu_disable(char *s) @@ -150,105 +51,6 @@ static int __init dsp_disable(char *s) __setup("nodsp", dsp_disable); -void __init check_wait(void) -{ - struct cpuinfo_mips *c = ¤t_cpu_data; - - if (nowait) { - printk("Wait instruction disabled.\n"); - return; - } - - switch (c->cputype) { - case CPU_R3081: - case CPU_R3081E: - cpu_wait = r3081_wait; - break; - case CPU_TX3927: - cpu_wait = r39xx_wait; - break; - case CPU_R4200: -/* case CPU_R4300: */ - case CPU_R4600: - case CPU_R4640: - case CPU_R4650: - case CPU_R4700: - case CPU_R5000: - case CPU_R5500: - case CPU_NEVADA: - case CPU_4KC: - case CPU_4KEC: - case CPU_4KSC: - case CPU_5KC: - case CPU_25KF: - case CPU_PR4450: - case CPU_BMIPS3300: - case CPU_BMIPS4350: - case CPU_BMIPS4380: - case CPU_BMIPS5000: - case CPU_CAVIUM_OCTEON: - case CPU_CAVIUM_OCTEON_PLUS: - case CPU_CAVIUM_OCTEON2: - case CPU_JZRISC: - case CPU_LOONGSON1: - case CPU_XLR: - case CPU_XLP: - cpu_wait = r4k_wait; - break; - - case CPU_RM7000: - cpu_wait = rm7k_wait_irqoff; - break; - - case CPU_M14KC: - case CPU_M14KEC: - case CPU_24K: - case CPU_34K: - case CPU_1004K: - cpu_wait = r4k_wait; - if (read_c0_config7() & MIPS_CONF7_WII) - cpu_wait = r4k_wait_irqoff; - break; - - case CPU_74K: - cpu_wait = r4k_wait; - if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) - cpu_wait = r4k_wait_irqoff; - break; - - case CPU_TX49XX: - 
cpu_wait = r4k_wait_irqoff; - break; - case CPU_ALCHEMY: - cpu_wait = au1k_wait; - break; - case CPU_20KC: - /* - * WAIT on Rev1.0 has E1, E2, E3 and E16. - * WAIT on Rev2.0 and Rev3.0 has E16. - * Rev3.1 WAIT is nop, why bother - */ - if ((c->processor_id & 0xff) <= 0x64) - break; - - /* - * Another rev is incremeting c0_count at a reduced clock - * rate while in WAIT mode. So we basically have the choice - * between using the cp0 timer as clocksource or avoiding - * the WAIT instruction. Until more details are known, - * disable the use of WAIT for 20Kc entirely. - cpu_wait = r4k_wait; - */ - break; - case CPU_RM9000: - if ((c->processor_id & 0x00ff) >= 0x40) - cpu_wait = r4k_wait; - break; - default: - break; - } -} - static inline void check_errata(void) { struct cpuinfo_mips *c = ¤t_cpu_data; @@ -470,6 +272,9 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) c->options |= MIPS_CPU_ULRI; if (config3 & MIPS_CONF3_ISA) c->options |= MIPS_CPU_MICROMIPS; +#ifdef CONFIG_CPU_MICROMIPS + write_c0_config3(read_c0_config3() | MIPS_CONF3_ISA_OE); +#endif if (config3 & MIPS_CONF3_VZ) c->ases |= MIPS_ASE_VZ; diff --git a/trunk/arch/mips/kernel/crash_dump.c b/trunk/arch/mips/kernel/crash_dump.c index 35bed0d2342c..3be9e7bb30ff 100644 --- a/trunk/arch/mips/kernel/crash_dump.c +++ b/trunk/arch/mips/kernel/crash_dump.c @@ -2,6 +2,7 @@ #include #include #include +#include static int __init parse_savemaxmem(char *p) { diff --git a/trunk/arch/mips/kernel/csrc-gic.c b/trunk/arch/mips/kernel/csrc-gic.c index 5dca24bce51b..e02620901117 100644 --- a/trunk/arch/mips/kernel/csrc-gic.c +++ b/trunk/arch/mips/kernel/csrc-gic.c @@ -5,23 +5,14 @@ * * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. */ -#include #include +#include -#include #include static cycle_t gic_hpt_read(struct clocksource *cs) { - unsigned int hi, hi2, lo; - - do { - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); - GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); - } while (hi2 != hi); - - return (((cycle_t) hi) << 32) + lo; + return gic_read_count(); } static struct clocksource gic_clocksource = { diff --git a/trunk/arch/mips/kernel/genex.S b/trunk/arch/mips/kernel/genex.S index ecb347ce1b3d..31fa856829cb 100644 --- a/trunk/arch/mips/kernel/genex.S +++ b/trunk/arch/mips/kernel/genex.S @@ -5,8 +5,8 @@ * * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 2001 MIPS Technologies, Inc. * Copyright (C) 2002, 2007 Maciej W. Rozycki + * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved. */ #include @@ -21,8 +21,10 @@ #include #include +#ifdef CONFIG_MIPS_MT_SMTC #define PANIC_PIC(msg) \ - .set push; \ + .set push; \ + .set nomicromips; \ .set reorder; \ PTR_LA a0,8f; \ .set noat; \ @@ -31,17 +33,10 @@ 9: b 9b; \ .set pop; \ TEXT(msg) +#endif __INIT -NESTED(except_vec0_generic, 0, sp) - PANIC_PIC("Exception vector 0 called") - END(except_vec0_generic) - -NESTED(except_vec1_generic, 0, sp) - PANIC_PIC("Exception vector 1 called") - END(except_vec1_generic) - /* * General exception vector for all other CPUs. 
* @@ -127,7 +122,7 @@ handle_vcei: __FINIT .align 5 /* 32 byte rollback region */ -LEAF(r4k_wait) +LEAF(__r4k_wait) .set push .set noreorder /* start of rollback region */ @@ -138,20 +133,27 @@ LEAF(r4k_wait) nop nop nop +#ifdef CONFIG_CPU_MICROMIPS + nop + nop + nop + nop +#endif .set mips3 wait /* end of rollback region (the region size must be power of two) */ - .set pop 1: jr ra - END(r4k_wait) + nop + .set pop + END(__r4k_wait) .macro BUILD_ROLLBACK_PROLOGUE handler FEXPORT(rollback_\handler) .set push .set noat MFC0 k0, CP0_EPC - PTR_LA k1, r4k_wait + PTR_LA k1, __r4k_wait ori k0, 0x1f /* 32 byte rollback region */ xori k0, 0x1f bne k0, k1, 9f @@ -201,7 +203,11 @@ NESTED(handle_int, PT_SIZE, sp) LONG_L s0, TI_REGS($28) LONG_S sp, TI_REGS($28) PTR_LA ra, ret_from_irq - j plat_irq_dispatch + PTR_LA v0, plat_irq_dispatch + jr v0 +#ifdef CONFIG_CPU_MICROMIPS + nop +#endif END(handle_int) __INIT @@ -222,11 +228,14 @@ NESTED(except_vec4, 0, sp) /* * EJTAG debug exception handler. * The EJTAG debug exception entry point is 0xbfc00480, which - * normally is in the boot PROM, so the boot PROM must do a + * normally is in the boot PROM, so the boot PROM must do an * unconditional jump to this vector. */ NESTED(except_vec_ejtag_debug, 0, sp) j ejtag_debug_handler +#ifdef CONFIG_CPU_MICROMIPS + nop +#endif END(except_vec_ejtag_debug) __FINIT @@ -251,9 +260,10 @@ NESTED(except_vec_vi, 0, sp) FEXPORT(except_vec_vi_mori) ori a0, $0, 0 #endif /* CONFIG_MIPS_MT_SMTC */ + PTR_LA v1, except_vec_vi_handler FEXPORT(except_vec_vi_lui) lui v0, 0 /* Patched */ - j except_vec_vi_handler + jr v1 FEXPORT(except_vec_vi_ori) ori v0, 0 /* Patched */ .set pop @@ -354,6 +364,9 @@ EXPORT(ejtag_debug_buffer) */ NESTED(except_vec_nmi, 0, sp) j nmi_handler +#ifdef CONFIG_CPU_MICROMIPS + nop +#endif END(except_vec_nmi) __FINIT @@ -500,13 +513,35 @@ NESTED(nmi_handler, PT_SIZE, sp) .set push .set noat .set noreorder - /* 0x7c03e83b: rdhwr v1,$29 */ + /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ + /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ MFC0 k1, CP0_EPC - lui k0, 0x7c03 - lw k1, (k1) - ori k0, 0xe83b - .set reorder +#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) + and k0, k1, 1 + beqz k0, 1f + xor k1, k0 + lhu k0, (k1) + lhu k1, 2(k1) + ins k1, k0, 16, 16 + lui k0, 0x007d + b docheck + ori k0, 0x6b3c +1: + lui k0, 0x7c03 + lw k1, (k1) + ori k0, 0xe83b +#else + andi k0, k1, 1 + bnez k0, handle_ri + lui k0, 0x7c03 + lw k1, (k1) + ori k0, 0xe83b +#endif + .set reorder +docheck: bne k0, k1, handle_ri /* if not ours */ + +isrdhwr: /* The insn is rdhwr. No need to check CAUSE.BD here. */ get_saved_sp /* k1 := current_thread_info */ .set noreorder diff --git a/trunk/arch/mips/kernel/idle.c b/trunk/arch/mips/kernel/idle.c new file mode 100644 index 000000000000..3b09b888afa9 --- /dev/null +++ b/trunk/arch/mips/kernel/idle.c @@ -0,0 +1,244 @@ +/* + * MIPS idle loop and WAIT instruction support. + * + * Copyright (C) xxxx the Anonymous + * Copyright (C) 1994 - 2006 Ralf Baechle + * Copyright (C) 2003, 2004 Maciej W. Rozycki + * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, + * the implementation of the "wait" feature differs between CPU families. This + * points to the function that implements CPU specific wait. + * The wait instruction stops the pipeline and reduces the power consumption of + * the CPU very much. + */ +void (*cpu_wait)(void); +EXPORT_SYMBOL(cpu_wait); + +static void r3081_wait(void) +{ + unsigned long cfg = read_c0_conf(); + write_c0_conf(cfg | R30XX_CONF_HALT); + local_irq_enable(); +} + +static void r39xx_wait(void) +{ + if (!need_resched()) + write_c0_conf(read_c0_conf() | TX39_CONF_HALT); + local_irq_enable(); +} + +void r4k_wait(void) +{ + local_irq_enable(); + __r4k_wait(); +} + +/* + * This variant is preferable as it allows testing need_resched and going to + * sleep depending on the outcome atomically. Unfortunately the "It is + * implementation-dependent whether the pipeline restarts when a non-enabled + * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes + * using this version a gamble. + */ +void r4k_wait_irqoff(void) +{ + if (!need_resched()) + __asm__( + " .set push \n" + " .set mips3 \n" + " wait \n" + " .set pop \n"); + local_irq_enable(); + __asm__( + " .globl __pastwait \n" + "__pastwait: \n"); +} + +/* + * The RM7000 variant has to handle erratum 38. The workaround is to not + * have any pending stores when the WAIT instruction is executed. + */ +static void rm7k_wait_irqoff(void) +{ + if (!need_resched()) + __asm__( + " .set push \n" + " .set mips3 \n" + " .set noat \n" + " mfc0 $1, $12 \n" + " sync \n" + " mtc0 $1, $12 # stalls until W stage \n" + " wait \n" + " mtc0 $1, $12 # stalls until W stage \n" + " .set pop \n"); + local_irq_enable(); +} + +/* + * The Au1xxx wait is available only if using 32khz counter or + * external timer source, but specifically not CP0 Counter. + * alchemy/common/time.c may override cpu_wait! 
+ */ +static void au1k_wait(void) +{ + __asm__( + " .set mips3 \n" + " cache 0x14, 0(%0) \n" + " cache 0x14, 32(%0) \n" + " sync \n" + " nop \n" + " wait \n" + " nop \n" + " nop \n" + " nop \n" + " nop \n" + " .set mips0 \n" + : : "r" (au1k_wait)); + local_irq_enable(); +} + +static int __initdata nowait; + +static int __init wait_disable(char *s) +{ + nowait = 1; + + return 1; +} + +__setup("nowait", wait_disable); + +void __init check_wait(void) +{ + struct cpuinfo_mips *c = ¤t_cpu_data; + + if (nowait) { + printk("Wait instruction disabled.\n"); + return; + } + + switch (c->cputype) { + case CPU_R3081: + case CPU_R3081E: + cpu_wait = r3081_wait; + break; + case CPU_TX3927: + cpu_wait = r39xx_wait; + break; + case CPU_R4200: +/* case CPU_R4300: */ + case CPU_R4600: + case CPU_R4640: + case CPU_R4650: + case CPU_R4700: + case CPU_R5000: + case CPU_R5500: + case CPU_NEVADA: + case CPU_4KC: + case CPU_4KEC: + case CPU_4KSC: + case CPU_5KC: + case CPU_25KF: + case CPU_PR4450: + case CPU_BMIPS3300: + case CPU_BMIPS4350: + case CPU_BMIPS4380: + case CPU_BMIPS5000: + case CPU_CAVIUM_OCTEON: + case CPU_CAVIUM_OCTEON_PLUS: + case CPU_CAVIUM_OCTEON2: + case CPU_JZRISC: + case CPU_LOONGSON1: + case CPU_XLR: + case CPU_XLP: + cpu_wait = r4k_wait; + break; + + case CPU_RM7000: + cpu_wait = rm7k_wait_irqoff; + break; + + case CPU_M14KC: + case CPU_M14KEC: + case CPU_24K: + case CPU_34K: + case CPU_1004K: + cpu_wait = r4k_wait; + if (read_c0_config7() & MIPS_CONF7_WII) + cpu_wait = r4k_wait_irqoff; + break; + + case CPU_74K: + cpu_wait = r4k_wait; + if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) + cpu_wait = r4k_wait_irqoff; + break; + + case CPU_TX49XX: + cpu_wait = r4k_wait_irqoff; + break; + case CPU_ALCHEMY: + cpu_wait = au1k_wait; + break; + case CPU_20KC: + /* + * WAIT on Rev1.0 has E1, E2, E3 and E16. + * WAIT on Rev2.0 and Rev3.0 has E16. + * Rev3.1 WAIT is nop, why bother + */ + if ((c->processor_id & 0xff) <= 0x64) + break; + + /* + * Another rev is incremeting c0_count at a reduced clock + * rate while in WAIT mode. So we basically have the choice + * between using the cp0 timer as clocksource or avoiding + * the WAIT instruction. Until more details are known, + * disable the use of WAIT for 20Kc entirely. 
+ cpu_wait = r4k_wait; + */ + break; + case CPU_RM9000: + if ((c->processor_id & 0x00ff) >= 0x40) + cpu_wait = r4k_wait; + break; + default: + break; + } +} + +static void smtc_idle_hook(void) +{ +#ifdef CONFIG_MIPS_MT_SMTC + void smtc_idle_loop_hook(void); + + smtc_idle_loop_hook(); +#endif +} + +void arch_cpu_idle(void) +{ + smtc_idle_hook(); + if (cpu_wait) + cpu_wait(); + else + local_irq_enable(); +} diff --git a/trunk/arch/mips/kernel/irq-gic.c b/trunk/arch/mips/kernel/irq-gic.c index 485e6a961b31..c01b307317a9 100644 --- a/trunk/arch/mips/kernel/irq-gic.c +++ b/trunk/arch/mips/kernel/irq-gic.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -19,6 +20,8 @@ #include #include +unsigned int gic_frequency; +unsigned int gic_present; unsigned long _gic_base; unsigned int gic_irq_base; unsigned int gic_irq_flags[GIC_NUM_INTRS]; @@ -30,6 +33,39 @@ static struct gic_pcpu_mask pcpu_masks[NR_CPUS]; static struct gic_pending_regs pending_regs[NR_CPUS]; static struct gic_intrmask_regs intrmask_regs[NR_CPUS]; +#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC) +cycle_t gic_read_count(void) +{ + unsigned int hi, hi2, lo; + + do { + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi); + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo); + GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2); + } while (hi2 != hi); + + return (((cycle_t) hi) << 32) + lo; +} + +void gic_write_compare(cycle_t cnt) +{ + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), + (int)(cnt >> 32)); + GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), + (int)(cnt & 0xffffffff)); +} + +cycle_t gic_read_compare(void) +{ + unsigned int hi, lo; + + GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi); + GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo); + + return (((cycle_t) hi) << 32) + lo; +} +#endif + unsigned int gic_get_timer_pending(void) { unsigned int vpe_pending; @@ -116,6 +152,17 @@ static void __init vpe_local_setup(unsigned int numvpes) } } +unsigned int gic_compare_int(void) +{ + unsigned int pending; + + GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending); + if (pending & GIC_VPE_PEND_CMP_MSK) + return 1; + else + return 0; +} + unsigned int gic_get_int(void) { unsigned int i; diff --git a/trunk/arch/mips/kernel/kprobes.c b/trunk/arch/mips/kernel/kprobes.c index 12bc4ebdf55b..1f8187ab0997 100644 --- a/trunk/arch/mips/kernel/kprobes.c +++ b/trunk/arch/mips/kernel/kprobes.c @@ -207,7 +207,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p) { - free_insn_slot(p->ainsn.insn, 0); + if (p->ainsn.insn) { + free_insn_slot(p->ainsn.insn, 0); + p->ainsn.insn = NULL; + } } static void save_previous_kprobe(struct kprobe_ctlblk *kcb) diff --git a/trunk/arch/mips/kernel/linux32.c b/trunk/arch/mips/kernel/linux32.c index d1d576b765f5..0b29646bcee7 100644 --- a/trunk/arch/mips/kernel/linux32.c +++ b/trunk/arch/mips/kernel/linux32.c @@ -165,10 +165,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2, return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3), merge_64(len_a4, len_a5)); } - -SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags, - u64, a3, u64, a4, int, dfd, const char __user *, pathname) -{ - return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4), - dfd, pathname); -} diff --git a/trunk/arch/mips/kernel/mips_machine.c b/trunk/arch/mips/kernel/mips_machine.c index 411a058d2c53..876097529697 100644 --- a/trunk/arch/mips/kernel/mips_machine.c +++ b/trunk/arch/mips/kernel/mips_machine.c 
@@ -11,9 +11,9 @@ #include #include +#include static struct mips_machine *mips_machine __initdata; -static char *mips_machine_name = "Unknown"; #define for_each_machine(mach) \ for ((mach) = (struct mips_machine *)&__mips_machines_start; \ @@ -21,25 +21,6 @@ static char *mips_machine_name = "Unknown"; (unsigned long)(mach) < (unsigned long)&__mips_machines_end; \ (mach)++) -__init void mips_set_machine_name(const char *name) -{ - char *p; - - if (name == NULL) - return; - - p = kstrdup(name, GFP_KERNEL); - if (!p) - pr_err("MIPS: no memory for machine_name\n"); - - mips_machine_name = p; -} - -char *mips_get_machine_name(void) -{ - return mips_machine_name; -} - __init int mips_machtype_setup(char *id) { struct mips_machine *mach; @@ -79,7 +60,6 @@ __init void mips_machine_setup(void) return; mips_set_machine_name(mips_machine->mach_name); - pr_info("MIPS: machine is %s\n", mips_machine_name); if (mips_machine->mach_setup) mips_machine->mach_setup(); diff --git a/trunk/arch/mips/kernel/proc.c b/trunk/arch/mips/kernel/proc.c index 7a54f74b7818..acb34373679e 100644 --- a/trunk/arch/mips/kernel/proc.c +++ b/trunk/arch/mips/kernel/proc.c @@ -10,9 +10,10 @@ #include #include #include +#include #include #include -#include +#include unsigned int vced_count, vcei_count; @@ -99,6 +100,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) if (cpu_has_vz) seq_printf(m, "%s", " vz"); seq_printf(m, "\n"); + if (cpu_has_mmips) { + seq_printf(m, "micromips kernel\t: %s\n", + (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no"); + } seq_printf(m, "shadow register sets\t: %d\n", cpu_data[n].srsets); seq_printf(m, "kscratch registers\t: %d\n", diff --git a/trunk/arch/mips/kernel/process.c b/trunk/arch/mips/kernel/process.c index cfc742d75b7f..c6a041d9d05d 100644 --- a/trunk/arch/mips/kernel/process.c +++ b/trunk/arch/mips/kernel/process.c @@ -7,6 +7,7 @@ * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. * Copyright (C) 2004 Thiemo Seufer + * Copyright (C) 2013 Imagination Technologies Ltd. */ #include #include @@ -50,19 +51,6 @@ void arch_cpu_idle_dead(void) } #endif -void arch_cpu_idle(void) -{ -#ifdef CONFIG_MIPS_MT_SMTC - extern void smtc_idle_loop_hook(void); - - smtc_idle_loop_hook(); -#endif - if (cpu_wait) - (*cpu_wait)(); - else - local_irq_enable(); -} - asmlinkage void ret_from_fork(void); asmlinkage void ret_from_kernel_thread(void); @@ -223,36 +211,122 @@ struct mips_frame_info { int pc_offset; }; +#define J_TARGET(pc,target) \ + (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) + static inline int is_ra_save_ins(union mips_instruction *ip) { +#ifdef CONFIG_CPU_MICROMIPS + union mips_instruction mmi; + + /* + * swsp ra,offset + * swm16 reglist,offset(sp) + * swm32 reglist,offset(sp) + * sw32 ra,offset(sp) + * jradiussp - NOT SUPPORTED + * + * microMIPS is way more fun... 
+ */ + if (mm_insn_16bit(ip->halfword[0])) { + mmi.word = (ip->halfword[0] << 16); + return ((mmi.mm16_r5_format.opcode == mm_swsp16_op && + mmi.mm16_r5_format.rt == 31) || + (mmi.mm16_m_format.opcode == mm_pool16c_op && + mmi.mm16_m_format.func == mm_swm16_op)); + } + else { + mmi.halfword[0] = ip->halfword[1]; + mmi.halfword[1] = ip->halfword[0]; + return ((mmi.mm_m_format.opcode == mm_pool32b_op && + mmi.mm_m_format.rd > 9 && + mmi.mm_m_format.base == 29 && + mmi.mm_m_format.func == mm_swm32_func) || + (mmi.i_format.opcode == mm_sw32_op && + mmi.i_format.rs == 29 && + mmi.i_format.rt == 31)); + } +#else /* sw / sd $ra, offset($sp) */ return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && ip->i_format.rs == 29 && ip->i_format.rt == 31; +#endif } -static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) +static inline int is_jump_ins(union mips_instruction *ip) { +#ifdef CONFIG_CPU_MICROMIPS + /* + * jr16,jrc,jalr16,jalr16 + * jal + * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb + * jraddiusp - NOT SUPPORTED + * + * microMIPS is kind of more fun... + */ + union mips_instruction mmi; + + mmi.word = (ip->halfword[0] << 16); + + if ((mmi.mm16_r5_format.opcode == mm_pool16c_op && + (mmi.mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op) || + ip->j_format.opcode == mm_jal32_op) + return 1; + if (ip->r_format.opcode != mm_pool32a_op || + ip->r_format.func != mm_pool32axf_op) + return 0; + return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); +#else + if (ip->j_format.opcode == j_op) + return 1; if (ip->j_format.opcode == jal_op) return 1; if (ip->r_format.opcode != spec_op) return 0; return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; +#endif } static inline int is_sp_move_ins(union mips_instruction *ip) { +#ifdef CONFIG_CPU_MICROMIPS + /* + * addiusp -imm + * addius5 sp,-imm + * addiu32 sp,sp,-imm + * jradiussp - NOT SUPPORTED + * + * microMIPS is not more fun... + */ + if (mm_insn_16bit(ip->halfword[0])) { + union mips_instruction mmi; + + mmi.word = (ip->halfword[0] << 16); + return ((mmi.mm16_r3_format.opcode == mm_pool16d_op && + mmi.mm16_r3_format.simmediate && mm_addiusp_func) || + (mmi.mm16_r5_format.opcode == mm_pool16d_op && + mmi.mm16_r5_format.rt == 29)); + } + return (ip->mm_i_format.opcode == mm_addiu32_op && + ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29); +#else /* addiu/daddiu sp,sp,-imm */ if (ip->i_format.rs != 29 || ip->i_format.rt != 29) return 0; if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op) return 1; +#endif return 0; } static int get_frame_info(struct mips_frame_info *info) { +#ifdef CONFIG_CPU_MICROMIPS + union mips_instruction *ip = (void *) (((char *) info->func) - 1); +#else union mips_instruction *ip = info->func; +#endif unsigned max_insns = info->func_size / sizeof(union mips_instruction); unsigned i; @@ -268,11 +342,30 @@ static int get_frame_info(struct mips_frame_info *info) for (i = 0; i < max_insns; i++, ip++) { - if (is_jal_jalr_jr_ins(ip)) + if (is_jump_ins(ip)) break; if (!info->frame_size) { if (is_sp_move_ins(ip)) + { +#ifdef CONFIG_CPU_MICROMIPS + if (mm_insn_16bit(ip->halfword[0])) + { + unsigned short tmp; + + if (ip->halfword[0] & mm_addiusp_func) + { + tmp = (((ip->halfword[0] >> 1) & 0x1ff) << 2); + info->frame_size = -(signed short)(tmp | ((tmp & 0x100) ? 
0xfe00 : 0)); + } else { + tmp = (ip->halfword[0] >> 1); + info->frame_size = -(signed short)(tmp & 0xf); + } + ip = (void *) &ip->halfword[1]; + ip--; + } else +#endif info->frame_size = - ip->i_format.simmediate; + } continue; } if (info->pc_offset == -1 && is_ra_save_ins(ip)) { @@ -292,15 +385,42 @@ static int get_frame_info(struct mips_frame_info *info) static struct mips_frame_info schedule_mfi __read_mostly; +#ifdef CONFIG_KALLSYMS +static unsigned long get___schedule_addr(void) +{ + return kallsyms_lookup_name("__schedule"); +} +#else +static unsigned long get___schedule_addr(void) +{ + union mips_instruction *ip = (void *)schedule; + int max_insns = 8; + int i; + + for (i = 0; i < max_insns; i++, ip++) { + if (ip->j_format.opcode == j_op) + return J_TARGET(ip, ip->j_format.target); + } + return 0; +} +#endif + static int __init frame_info_init(void) { unsigned long size = 0; #ifdef CONFIG_KALLSYMS unsigned long ofs; +#endif + unsigned long addr; + + addr = get___schedule_addr(); + if (!addr) + addr = (unsigned long)schedule; - kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); +#ifdef CONFIG_KALLSYMS + kallsyms_lookup_size_offset(addr, &size, &ofs); #endif - schedule_mfi.func = schedule; + schedule_mfi.func = (void *)addr; schedule_mfi.func_size = size; get_frame_info(&schedule_mfi); diff --git a/trunk/arch/mips/kernel/prom.c b/trunk/arch/mips/kernel/prom.c index 028f6f837ef9..5712bb532245 100644 --- a/trunk/arch/mips/kernel/prom.c +++ b/trunk/arch/mips/kernel/prom.c @@ -23,6 +23,23 @@ #include #include +static char mips_machine_name[64] = "Unknown"; + +__init void mips_set_machine_name(const char *name) +{ + if (name == NULL) + return; + + strncpy(mips_machine_name, name, sizeof(mips_machine_name)); + pr_info("MIPS: machine is %s\n", mips_get_machine_name()); +} + +char *mips_get_machine_name(void) +{ + return mips_machine_name; +} + +#ifdef CONFIG_OF int __init early_init_dt_scan_memory_arch(unsigned long node, const char *uname, int depth, void *data) @@ -50,6 +67,18 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start, } #endif +int __init early_init_dt_scan_model(unsigned long node, const char *uname, + int depth, void *data) +{ + if (!depth) { + char *model = of_get_flat_dt_prop(node, "model", NULL); + + if (model) + mips_set_machine_name(model); + } + return 0; +} + void __init early_init_devtree(void *params) { /* Setup flat device-tree pointer */ @@ -65,6 +94,9 @@ void __init early_init_devtree(void *params) /* Scan memory nodes */ of_scan_flat_dt(early_init_dt_scan_root, NULL); of_scan_flat_dt(early_init_dt_scan_memory_arch, NULL); + + /* try to load the mips machine name */ + of_scan_flat_dt(early_init_dt_scan_model, NULL); } void __init __dt_setup_arch(struct boot_param_header *bph) @@ -79,3 +111,4 @@ void __init __dt_setup_arch(struct boot_param_header *bph) early_init_devtree(initial_boot_params); } +#endif diff --git a/trunk/arch/mips/kernel/scall32-o32.S b/trunk/arch/mips/kernel/scall32-o32.S index 9ea29649fc28..9b36424b03c5 100644 --- a/trunk/arch/mips/kernel/scall32-o32.S +++ b/trunk/arch/mips/kernel/scall32-o32.S @@ -138,9 +138,18 @@ stackargs: 5: jr t1 sw t5, 16(sp) # argument #5 to ksp +#ifdef CONFIG_CPU_MICROMIPS sw t8, 28(sp) # argument #8 to ksp + nop sw t7, 24(sp) # argument #7 to ksp + nop sw t6, 20(sp) # argument #6 to ksp + nop +#else + sw t8, 28(sp) # argument #8 to ksp + sw t7, 24(sp) # argument #7 to ksp + sw t6, 20(sp) # argument #6 to ksp +#endif 6: j stack_done # go back nop .set pop diff --git 
a/trunk/arch/mips/kernel/scall64-64.S b/trunk/arch/mips/kernel/scall64-64.S index 36cfd4060e1f..97a5909a61cf 100644 --- a/trunk/arch/mips/kernel/scall64-64.S +++ b/trunk/arch/mips/kernel/scall64-64.S @@ -423,4 +423,5 @@ sys_call_table: PTR sys_process_vm_writev /* 5305 */ PTR sys_kcmp PTR sys_finit_module + PTR sys_getdents64 .size sys_call_table,.-sys_call_table diff --git a/trunk/arch/mips/kernel/scall64-o32.S b/trunk/arch/mips/kernel/scall64-o32.S index 103bfe570fe8..74f485d3c0ef 100644 --- a/trunk/arch/mips/kernel/scall64-o32.S +++ b/trunk/arch/mips/kernel/scall64-o32.S @@ -529,7 +529,7 @@ sys_call_table: PTR sys_accept4 PTR compat_sys_recvmmsg /* 4335 */ PTR sys_fanotify_init - PTR sys_32_fanotify_mark + PTR compat_sys_fanotify_mark PTR sys_prlimit64 PTR sys_name_to_handle_at PTR compat_sys_open_by_handle_at /* 4340 */ diff --git a/trunk/arch/mips/kernel/setup.c b/trunk/arch/mips/kernel/setup.c index 4c774d5d5087..c7f90519e58c 100644 --- a/trunk/arch/mips/kernel/setup.c +++ b/trunk/arch/mips/kernel/setup.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -77,6 +78,8 @@ EXPORT_SYMBOL(mips_io_port_base); static struct resource code_resource = { .name = "Kernel code", }; static struct resource data_resource = { .name = "Kernel data", }; +static void *detect_magic __initdata = detect_memory_region; + void __init add_memory_region(phys_t start, phys_t size, long type) { int x = boot_mem_map.nr_map; @@ -122,6 +125,25 @@ void __init add_memory_region(phys_t start, phys_t size, long type) boot_mem_map.nr_map++; } +void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max) +{ + void *dm = &detect_magic; + phys_t size; + + for (size = sz_min; size < sz_max; size <<= 1) { + if (!memcmp(dm, dm + size, sizeof(detect_magic))) + break; + } + + pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n", + ((unsigned long long) size) / SZ_1M, + (unsigned long long) start, + ((unsigned long long) sz_min) / SZ_1M, + ((unsigned long long) sz_max) / SZ_1M); + + add_memory_region(start, size, BOOT_MEM_RAM); +} + static void __init print_memory_map(void) { int i; diff --git a/trunk/arch/mips/kernel/signal.c b/trunk/arch/mips/kernel/signal.c index b5e88fd83277..fd3ef2c2afbc 100644 --- a/trunk/arch/mips/kernel/signal.c +++ b/trunk/arch/mips/kernel/signal.c @@ -35,6 +35,7 @@ #include #include #include +#include #include "signal-common.h" @@ -480,7 +481,15 @@ static void handle_signal(unsigned long sig, siginfo_t *info, sigset_t *oldset = sigmask_to_save(); int ret; struct mips_abi *abi = current->thread.abi; +#ifdef CONFIG_CPU_MICROMIPS + void *vdso; + unsigned int tmp = (unsigned int)current->mm->context.vdso; + + set_isa16_mode(tmp); + vdso = (void *)tmp; +#else void *vdso = current->mm->context.vdso; +#endif if (regs->regs[0]) { switch(regs->regs[2]) { diff --git a/trunk/arch/mips/kernel/smp-mt.c b/trunk/arch/mips/kernel/smp-mt.c index bfede063d96a..3e5164c11cac 100644 --- a/trunk/arch/mips/kernel/smp-mt.c +++ b/trunk/arch/mips/kernel/smp-mt.c @@ -34,6 +34,7 @@ #include #include #include +#include static void __init smvp_copy_vpe_config(void) { @@ -151,8 +152,6 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action) static void __cpuinit vsmp_init_secondary(void) { #ifdef CONFIG_IRQ_GIC - extern int gic_present; - /* This is Malta specific: IPI,performance and timer interrupts */ if (gic_present) change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | diff --git a/trunk/arch/mips/kernel/smp.c 
b/trunk/arch/mips/kernel/smp.c index aee04af213c5..6e7862ab46cc 100644 --- a/trunk/arch/mips/kernel/smp.c +++ b/trunk/arch/mips/kernel/smp.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -83,6 +84,7 @@ static inline void set_cpu_sibling_map(int cpu) } struct plat_smp_ops *mp_ops; +EXPORT_SYMBOL(mp_ops); __cpuinit void register_smp_ops(struct plat_smp_ops *ops) { diff --git a/trunk/arch/mips/kernel/smtc-asm.S b/trunk/arch/mips/kernel/smtc-asm.S index 76016ac0a9c8..2866863a39df 100644 --- a/trunk/arch/mips/kernel/smtc-asm.S +++ b/trunk/arch/mips/kernel/smtc-asm.S @@ -49,6 +49,9 @@ CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED?? .text .align 5 FEXPORT(__smtc_ipi_vector) +#ifdef CONFIG_CPU_MICROMIPS + nop +#endif .set noat /* Disable thread scheduling to make Status update atomic */ DMT 27 # dmt k1 diff --git a/trunk/arch/mips/kernel/smtc.c b/trunk/arch/mips/kernel/smtc.c index 7186222dc5bb..75a4fd709841 100644 --- a/trunk/arch/mips/kernel/smtc.c +++ b/trunk/arch/mips/kernel/smtc.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -858,7 +859,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) unsigned long flags; int mtflags; unsigned long tcrestart; - extern void r4k_wait_irqoff(void), __pastwait(void); int set_resched_flag = (type == LINUX_SMP_IPI && action == SMP_RESCHEDULE_YOURSELF); @@ -914,8 +914,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) */ if (cpu_wait == r4k_wait_irqoff) { tcrestart = read_tc_c0_tcrestart(); - if (tcrestart >= (unsigned long)r4k_wait_irqoff - && tcrestart < (unsigned long)__pastwait) { + if (address_is_in_r4k_wait_irqoff(tcrestart)) { write_tc_c0_tcrestart(__pastwait); tcstatus &= ~TCSTATUS_IXMT; write_tc_c0_tcstatus(tcstatus); diff --git a/trunk/arch/mips/kernel/traps.c b/trunk/arch/mips/kernel/traps.c index 25225515451f..e3be67012d78 100644 --- a/trunk/arch/mips/kernel/traps.c +++ b/trunk/arch/mips/kernel/traps.c @@ -8,8 +8,8 @@ * Copyright (C) 1998 Ulf Carlsson * Copyright (C) 1999 Silicon Graphics, Inc. * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com - * Copyright (C) 2000, 01 MIPS Technologies, Inc. * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki + * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. 
*/ #include #include @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -57,12 +58,11 @@ #include extern void check_wait(void); -extern asmlinkage void r4k_wait(void); extern asmlinkage void rollback_handle_int(void); extern asmlinkage void handle_int(void); -extern asmlinkage void handle_tlbm(void); -extern asmlinkage void handle_tlbl(void); -extern asmlinkage void handle_tlbs(void); +extern u32 handle_tlbl[]; +extern u32 handle_tlbs[]; +extern u32 handle_tlbm[]; extern asmlinkage void handle_adel(void); extern asmlinkage void handle_ades(void); extern asmlinkage void handle_ibe(void); @@ -83,10 +83,6 @@ extern asmlinkage void handle_dsp(void); extern asmlinkage void handle_mcheck(void); extern asmlinkage void handle_reserved(void); -extern int fpu_emulator_cop1Handler(struct pt_regs *xcp, - struct mips_fpu_struct *ctx, int has_fpu, - void *__user *fault_addr); - void (*board_be_init)(void); int (*board_be_handler)(struct pt_regs *regs, int is_fixup); void (*board_nmi_handler_setup)(void); @@ -482,6 +478,12 @@ asmlinkage void do_be(struct pt_regs *regs) #define SYNC 0x0000000f #define RDHWR 0x0000003b +/* microMIPS definitions */ +#define MM_POOL32A_FUNC 0xfc00ffff +#define MM_RDHWR 0x00006b3c +#define MM_RS 0x001f0000 +#define MM_RT 0x03e00000 + /* * The ll_bit is cleared by r*_switch.S */ @@ -596,42 +598,62 @@ static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) * Simulate trapping 'rdhwr' instructions to provide user accessible * registers not implemented in hardware. */ -static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode) +static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt) { struct thread_info *ti = task_thread_info(current); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, + 1, regs, 0); + switch (rd) { + case 0: /* CPU number */ + regs->regs[rt] = smp_processor_id(); + return 0; + case 1: /* SYNCI length */ + regs->regs[rt] = min(current_cpu_data.dcache.linesz, + current_cpu_data.icache.linesz); + return 0; + case 2: /* Read count register */ + regs->regs[rt] = read_c0_count(); + return 0; + case 3: /* Count register resolution */ + switch (current_cpu_data.cputype) { + case CPU_20KC: + case CPU_25KF: + regs->regs[rt] = 1; + break; + default: + regs->regs[rt] = 2; + } + return 0; + case 29: + regs->regs[rt] = ti->tp_value; + return 0; + default: + return -1; + } +} + +static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode) +{ if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { int rd = (opcode & RD) >> 11; int rt = (opcode & RT) >> 16; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, - 1, regs, 0); - switch (rd) { - case 0: /* CPU number */ - regs->regs[rt] = smp_processor_id(); - return 0; - case 1: /* SYNCI length */ - regs->regs[rt] = min(current_cpu_data.dcache.linesz, - current_cpu_data.icache.linesz); - return 0; - case 2: /* Read count register */ - regs->regs[rt] = read_c0_count(); - return 0; - case 3: /* Count register resolution */ - switch (current_cpu_data.cputype) { - case CPU_20KC: - case CPU_25KF: - regs->regs[rt] = 1; - break; - default: - regs->regs[rt] = 2; - } - return 0; - case 29: - regs->regs[rt] = ti->tp_value; - return 0; - default: - return -1; - } + + simulate_rdhwr(regs, rd, rt); + return 0; + } + + /* Not ours. 
*/ + return -1; +} + +static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode) +{ + if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) { + int rd = (opcode & MM_RS) >> 16; + int rt = (opcode & MM_RT) >> 21; + simulate_rdhwr(regs, rd, rt); + return 0; } /* Not ours. */ @@ -662,7 +684,7 @@ asmlinkage void do_ov(struct pt_regs *regs) force_sig_info(SIGFPE, &info, current); } -static int process_fpemu_return(int sig, void __user *fault_addr) +int process_fpemu_return(int sig, void __user *fault_addr) { if (sig == SIGSEGV || sig == SIGBUS) { struct siginfo si = {0}; @@ -813,9 +835,29 @@ static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, asmlinkage void do_bp(struct pt_regs *regs) { unsigned int opcode, bcode; - - if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) - goto out_sigsegv; + unsigned long epc; + u16 instr[2]; + + if (get_isa16_mode(regs->cp0_epc)) { + /* Calculate EPC. */ + epc = exception_epc(regs); + if (cpu_has_mmips) { + if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) || + (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2))))) + goto out_sigsegv; + opcode = (instr[0] << 16) | instr[1]; + } else { + /* MIPS16e mode */ + if (__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) + goto out_sigsegv; + bcode = (instr[0] >> 6) & 0x3f; + do_trap_or_bp(regs, bcode, "Break"); + return; + } + } else { + if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) + goto out_sigsegv; + } /* * There is the ancient bug in the MIPS assemblers that the break @@ -856,13 +898,22 @@ asmlinkage void do_bp(struct pt_regs *regs) asmlinkage void do_tr(struct pt_regs *regs) { unsigned int opcode, tcode = 0; + u16 instr[2]; + unsigned long epc = exception_epc(regs); - if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) - goto out_sigsegv; + if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || + (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) + goto out_sigsegv; + opcode = (instr[0] << 16) | instr[1]; /* Immediate versions don't provide a code. 
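Some context for the simulate_rdhwr() split above: user space (glibc TLS in particular) executes RDHWR to read hardware registers 0-3 and 29, and on cores that do not implement a given register the instruction raises a Reserved Instruction exception that lands in do_ri(), which now dispatches to either the classic or the microMIPS decoder. A minimal user-space illustration (plain C with inline assembly, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            unsigned long tp;

            /* HWR 29 is the user-local / thread-pointer register; if the CPU
             * lacks it, the kernel emulates the read via simulate_rdhwr(). */
            __asm__ __volatile__(
                    ".set push\n\t"
                    ".set mips32r2\n\t"
                    "rdhwr %0, $29\n\t"
                    ".set pop"
                    : "=r" (tp));

            printf("HWR29 = %#lx\n", tp);
            return 0;
    }
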
*/ - if (!(opcode & OPCODE)) - tcode = ((opcode >> 6) & ((1 << 10) - 1)); + if (!(opcode & OPCODE)) { + if (get_isa16_mode(regs->cp0_epc)) + /* microMIPS */ + tcode = (opcode >> 12) & 0x1f; + else + tcode = ((opcode >> 6) & ((1 << 10) - 1)); + } do_trap_or_bp(regs, tcode, "Trap"); return; @@ -875,6 +926,7 @@ asmlinkage void do_ri(struct pt_regs *regs) { unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); unsigned long old_epc = regs->cp0_epc; + unsigned long old31 = regs->regs[31]; unsigned int opcode = 0; int status = -1; @@ -887,23 +939,37 @@ asmlinkage void do_ri(struct pt_regs *regs) if (unlikely(compute_return_epc(regs) < 0)) return; - if (unlikely(get_user(opcode, epc) < 0)) - status = SIGSEGV; + if (get_isa16_mode(regs->cp0_epc)) { + unsigned short mmop[2] = { 0 }; + + if (unlikely(get_user(mmop[0], epc) < 0)) + status = SIGSEGV; + if (unlikely(get_user(mmop[1], epc) < 0)) + status = SIGSEGV; + opcode = (mmop[0] << 16) | mmop[1]; - if (!cpu_has_llsc && status < 0) - status = simulate_llsc(regs, opcode); + if (status < 0) + status = simulate_rdhwr_mm(regs, opcode); + } else { + if (unlikely(get_user(opcode, epc) < 0)) + status = SIGSEGV; - if (status < 0) - status = simulate_rdhwr(regs, opcode); + if (!cpu_has_llsc && status < 0) + status = simulate_llsc(regs, opcode); - if (status < 0) - status = simulate_sync(regs, opcode); + if (status < 0) + status = simulate_rdhwr_normal(regs, opcode); + + if (status < 0) + status = simulate_sync(regs, opcode); + } if (status < 0) status = SIGILL; if (unlikely(status > 0)) { regs->cp0_epc = old_epc; /* Undo skip-over. */ + regs->regs[31] = old31; force_sig(status, current); } } @@ -973,7 +1039,7 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, asmlinkage void do_cpu(struct pt_regs *regs) { unsigned int __user *epc; - unsigned long old_epc; + unsigned long old_epc, old31; unsigned int opcode; unsigned int cpid; int status; @@ -987,26 +1053,41 @@ asmlinkage void do_cpu(struct pt_regs *regs) case 0: epc = (unsigned int __user *)exception_epc(regs); old_epc = regs->cp0_epc; + old31 = regs->regs[31]; opcode = 0; status = -1; if (unlikely(compute_return_epc(regs) < 0)) return; - if (unlikely(get_user(opcode, epc) < 0)) - status = SIGSEGV; + if (get_isa16_mode(regs->cp0_epc)) { + unsigned short mmop[2] = { 0 }; - if (!cpu_has_llsc && status < 0) - status = simulate_llsc(regs, opcode); + if (unlikely(get_user(mmop[0], epc) < 0)) + status = SIGSEGV; + if (unlikely(get_user(mmop[1], epc) < 0)) + status = SIGSEGV; + opcode = (mmop[0] << 16) | mmop[1]; - if (status < 0) - status = simulate_rdhwr(regs, opcode); + if (status < 0) + status = simulate_rdhwr_mm(regs, opcode); + } else { + if (unlikely(get_user(opcode, epc) < 0)) + status = SIGSEGV; + + if (!cpu_has_llsc && status < 0) + status = simulate_llsc(regs, opcode); + + if (status < 0) + status = simulate_rdhwr_normal(regs, opcode); + } if (status < 0) status = SIGILL; if (unlikely(status > 0)) { regs->cp0_epc = old_epc; /* Undo skip-over. */ + regs->regs[31] = old31; force_sig(status, current); } @@ -1320,7 +1401,7 @@ asmlinkage void cache_parity_error(void) void ejtag_exception_handler(struct pt_regs *regs) { const int field = 2 * sizeof(unsigned long); - unsigned long depc, old_epc; + unsigned long depc, old_epc, old_ra; unsigned int debug; printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); @@ -1335,10 +1416,12 @@ void ejtag_exception_handler(struct pt_regs *regs) * calculation. 
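All of the do_bp()/do_tr()/do_ri()/do_cpu() changes above key off bit 0 of the EPC, which encodes the ISA mode on microMIPS/MIPS16e-capable cores, and they fetch 16-bit instruction halves with that bit masked off. The helpers are defined elsewhere in the series; plausible definitions, inferred from how they are used here (an assumption, not quoted from the patch), are:

    /* Assumed shape of the ISA-mode helpers used above. */
    #define get_isa16_mode(x)       ((x) & 0x1)     /* 1 = microMIPS/MIPS16e */
    #define msk_isa16_mode(x)       ((x) & ~0x1ul)  /* address used for fetch */
    #define set_isa16_mode(x)       ((x) |= 0x1)    /* e.g. on the vdso address */
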
*/ old_epc = regs->cp0_epc; + old_ra = regs->regs[31]; regs->cp0_epc = depc; - __compute_return_epc(regs); + compute_return_epc(regs); depc = regs->cp0_epc; regs->cp0_epc = old_epc; + regs->regs[31] = old_ra; } else depc += 4; write_c0_depc(depc); @@ -1377,11 +1460,27 @@ unsigned long vi_handlers[64]; void __init *set_except_vector(int n, void *addr) { unsigned long handler = (unsigned long) addr; - unsigned long old_handler = exception_handlers[n]; + unsigned long old_handler; + +#ifdef CONFIG_CPU_MICROMIPS + /* + * Only the TLB handlers are cache aligned with an even + * address. All other handlers are on an odd address and + * require no modification. Otherwise, MIPS32 mode will + * be entered when handling any TLB exceptions. That + * would be bad...since we must stay in microMIPS mode. + */ + if (!(handler & 0x1)) + handler |= 1; +#endif + old_handler = xchg(&exception_handlers[n], handler); - exception_handlers[n] = handler; if (n == 0 && cpu_has_divec) { +#ifdef CONFIG_CPU_MICROMIPS + unsigned long jump_mask = ~((1 << 27) - 1); +#else unsigned long jump_mask = ~((1 << 28) - 1); +#endif u32 *buf = (u32 *)(ebase + 0x200); unsigned int k0 = 26; if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { @@ -1397,7 +1496,7 @@ void __init *set_except_vector(int n, void *addr) return (void *)old_handler; } -static asmlinkage void do_default_vi(void) +static void do_default_vi(void) { show_regs(get_irq_regs()); panic("Caught unexpected vectored interrupt."); @@ -1408,17 +1507,18 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) unsigned long handler; unsigned long old_handler = vi_handlers[n]; int srssets = current_cpu_data.srsets; - u32 *w; + u16 *h; unsigned char *b; BUG_ON(!cpu_has_veic && !cpu_has_vint); + BUG_ON((n < 0) && (n > 9)); if (addr == NULL) { handler = (unsigned long) do_default_vi; srs = 0; } else handler = (unsigned long) addr; - vi_handlers[n] = (unsigned long) addr; + vi_handlers[n] = handler; b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); @@ -1437,13 +1537,12 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) if (srs == 0) { /* * If no shadow set is selected then use the default handler - * that does normal register saving and a standard interrupt exit + * that does normal register saving and standard interrupt exit */ - extern char except_vec_vi, except_vec_vi_lui; extern char except_vec_vi_ori, except_vec_vi_end; extern char rollback_except_vec_vi; - char *vec_start = (cpu_wait == r4k_wait) ? + char *vec_start = using_rollback_handler() ? &rollback_except_vec_vi : &except_vec_vi; #ifdef CONFIG_MIPS_MT_SMTC /* @@ -1452,11 +1551,20 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) * Status.IM bit to be masked before going there. 
*/ extern char except_vec_vi_mori; +#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) + const int mori_offset = &except_vec_vi_mori - vec_start + 2; +#else const int mori_offset = &except_vec_vi_mori - vec_start; +#endif #endif /* CONFIG_MIPS_MT_SMTC */ - const int handler_len = &except_vec_vi_end - vec_start; +#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) + const int lui_offset = &except_vec_vi_lui - vec_start + 2; + const int ori_offset = &except_vec_vi_ori - vec_start + 2; +#else const int lui_offset = &except_vec_vi_lui - vec_start; const int ori_offset = &except_vec_vi_ori - vec_start; +#endif + const int handler_len = &except_vec_vi_end - vec_start; if (handler_len > VECTORSPACING) { /* @@ -1466,30 +1574,44 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) panic("VECTORSPACING too small"); } - memcpy(b, vec_start, handler_len); + set_handler(((unsigned long)b - ebase), vec_start, +#ifdef CONFIG_CPU_MICROMIPS + (handler_len - 1)); +#else + handler_len); +#endif #ifdef CONFIG_MIPS_MT_SMTC BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ - w = (u32 *)(b + mori_offset); - *w = (*w & 0xffff0000) | (0x100 << n); + h = (u16 *)(b + mori_offset); + *h = (0x100 << n); #endif /* CONFIG_MIPS_MT_SMTC */ - w = (u32 *)(b + lui_offset); - *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); - w = (u32 *)(b + ori_offset); - *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); + h = (u16 *)(b + lui_offset); + *h = (handler >> 16) & 0xffff; + h = (u16 *)(b + ori_offset); + *h = (handler & 0xffff); local_flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); } else { /* - * In other cases jump directly to the interrupt handler - * - * It is the handlers responsibility to save registers if required - * (eg hi/lo) and return from the exception using "eret" + * In other cases jump directly to the interrupt handler. It + * is the handler's responsibility to save registers if required + * (eg hi/lo) and return from the exception using "eret". 
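The switch to 16-bit stores above works because the vectored-interrupt stub conceptually builds the handler address in a register with a lui/ori pair before jumping to it, and the immediate halfwords now sit directly at except_vec_vi_lui/except_vec_vi_ori (offset by two bytes for microMIPS and big-endian layouts, as the #ifdefs show). Since the low half is supplied with ori rather than addiu, the address splits into plain zero-extended halves with no %hi/%lo carry correction. As a sketch (helper name invented):

    /* Sketch only: splitting the handler address for the lui/ori slots.
     * ori zero-extends its immediate, so no carry fix-up is required. */
    static void split_handler_address(unsigned long handler,
                                      unsigned short *hi, unsigned short *lo)
    {
            *hi = (handler >> 16) & 0xffff;
            *lo = handler & 0xffff;
    }
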
*/ - w = (u32 *)b; - *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03fffff); /* j handler */ - *w = 0; + u32 insn; + + h = (u16 *)b; + /* j handler */ +#ifdef CONFIG_CPU_MICROMIPS + insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); +#else + insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); +#endif + h[0] = (insn >> 16) & 0xffff; + h[1] = insn & 0xffff; + h[2] = 0; + h[3] = 0; local_flush_icache_range((unsigned long)b, (unsigned long)(b+8)); } @@ -1648,7 +1770,11 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) /* Install CPU exception handler */ void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size) { +#ifdef CONFIG_CPU_MICROMIPS + memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); +#else memcpy((void *)(ebase + offset), addr, size); +#endif local_flush_icache_range(ebase + offset, ebase + offset + size); } @@ -1682,13 +1808,12 @@ __setup("rdhwr_noopt", set_rdhwr_noopt); void __init trap_init(void) { - extern char except_vec3_generic, except_vec3_r4000; + extern char except_vec3_generic; extern char except_vec4; + extern char except_vec3_r4000; unsigned long i; - int rollback; check_wait(); - rollback = (cpu_wait == r4k_wait); #if defined(CONFIG_KGDB) if (kgdb_early_setup) @@ -1700,7 +1825,12 @@ void __init trap_init(void) ebase = (unsigned long) __alloc_bootmem(size, 1 << fls(size), 0); } else { - ebase = CKSEG0; +#ifdef CONFIG_KVM_GUEST +#define KVM_GUEST_KSEG0 0x40000000 + ebase = KVM_GUEST_KSEG0; +#else + ebase = CKSEG0; +#endif if (cpu_has_mips_r2) ebase += (read_c0_ebase() & 0x3ffff000); } @@ -1760,7 +1890,8 @@ void __init trap_init(void) if (board_be_init) board_be_init(); - set_except_vector(0, rollback ? rollback_handle_int : handle_int); + set_except_vector(0, using_rollback_handler() ? rollback_handle_int + : handle_int); set_except_vector(1, handle_tlbm); set_except_vector(2, handle_tlbl); set_except_vector(3, handle_tlbs); @@ -1816,11 +1947,11 @@ void __init trap_init(void) if (cpu_has_vce) /* Special exception: R4[04]00 uses also the divec space. 
*/ - memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); + set_handler(0x180, &except_vec3_r4000, 0x100); else if (cpu_has_4kex) - memcpy((void *)(ebase + 0x180), &except_vec3_generic, 0x80); + set_handler(0x180, &except_vec3_generic, 0x80); else - memcpy((void *)(ebase + 0x080), &except_vec3_generic, 0x80); + set_handler(0x080, &except_vec3_generic, 0x80); local_flush_icache_range(ebase, ebase + 0x400); flush_tlb_handlers(); diff --git a/trunk/arch/mips/kernel/unaligned.c b/trunk/arch/mips/kernel/unaligned.c index 6087a54c86a0..203d8857070d 100644 --- a/trunk/arch/mips/kernel/unaligned.c +++ b/trunk/arch/mips/kernel/unaligned.c @@ -83,8 +83,12 @@ #include #include #include +#include +#include #include #include +#include +#include #define STR(x) __STR(x) #define __STR(x) #x @@ -102,12 +106,332 @@ static u32 unaligned_action; #endif extern void show_registers(struct pt_regs *regs); +#ifdef __BIG_ENDIAN +#define LoadHW(addr, value, res) \ + __asm__ __volatile__ (".set\tnoat\n" \ + "1:\tlb\t%0, 0(%2)\n" \ + "2:\tlbu\t$1, 1(%2)\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + "3:\t.set\tat\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tlwl\t%0, (%2)\n" \ + "2:\tlwr\t%0, 3(%2)\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadHWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tnoat\n" \ + "1:\tlbu\t%0, 0(%2)\n" \ + "2:\tlbu\t$1, 1(%2)\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".set\tat\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tlwl\t%0, (%2)\n" \ + "2:\tlwr\t%0, 3(%2)\n\t" \ + "dsll\t%0, %0, 32\n\t" \ + "dsrl\t%0, %0, 32\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + "\t.section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tldl\t%0, (%2)\n" \ + "2:\tldr\t%0, 7(%2)\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + "\t.section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define StoreHW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tnoat\n" \ + "1:\tsb\t%1, 1(%2)\n\t" \ + "srl\t$1, %1, 0x8\n" \ + "2:\tsb\t$1, 0(%2)\n\t" \ + ".set\tat\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + 
".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); + +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tswl\t%1,(%2)\n" \ + "2:\tswr\t%1, 3(%2)\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tsdl\t%1,(%2)\n" \ + "2:\tsdr\t%1, 7(%2)\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); +#endif + +#ifdef __LITTLE_ENDIAN +#define LoadHW(addr, value, res) \ + __asm__ __volatile__ (".set\tnoat\n" \ + "1:\tlb\t%0, 1(%2)\n" \ + "2:\tlbu\t$1, 0(%2)\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + "3:\t.set\tat\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tlwl\t%0, 3(%2)\n" \ + "2:\tlwr\t%0, (%2)\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadHWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tnoat\n" \ + "1:\tlbu\t%0, 1(%2)\n" \ + "2:\tlbu\t$1, 0(%2)\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".set\tat\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tlwl\t%0, 3(%2)\n" \ + "2:\tlwr\t%0, (%2)\n\t" \ + "dsll\t%0, %0, 32\n\t" \ + "dsrl\t%0, %0, 32\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + "\t.section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tldl\t%0, 7(%2)\n" \ + "2:\tldr\t%0, (%2)\n\t" \ + "li\t%1, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + "\t.section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%1, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define StoreHW(addr, 
value, res) \ + __asm__ __volatile__ ( \ + ".set\tnoat\n" \ + "1:\tsb\t%1, 0(%2)\n\t" \ + "srl\t$1,%1, 0x8\n" \ + "2:\tsb\t$1, 1(%2)\n\t" \ + ".set\tat\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); + +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tswl\t%1, 3(%2)\n" \ + "2:\tswr\t%1, (%2)\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + "1:\tsdl\t%1, 7(%2)\n" \ + "2:\tsdr\t%1, (%2)\n\t" \ + "li\t%0, 0\n" \ + "3:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "4:\tli\t%0, %3\n\t" \ + "j\t3b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 4b\n\t" \ + STR(PTR)"\t2b, 4b\n\t" \ + ".previous" \ + : "=r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT)); +#endif + static void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int __user *pc) { union mips_instruction insn; unsigned long value; unsigned int res; + unsigned long origpc; + unsigned long orig31; + void __user *fault_addr = NULL; + + origpc = (unsigned long)pc; + orig31 = regs->regs[31]; perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); @@ -117,22 +441,22 @@ static void emulate_load_store_insn(struct pt_regs *regs, __get_user(insn.word, pc); switch (insn.i_format.opcode) { - /* - * These are instructions that a compiler doesn't generate. We - * can assume therefore that the code is MIPS-aware and - * really buggy. Emulating these instructions would break the - * semantics anyway. - */ + /* + * These are instructions that a compiler doesn't generate. We + * can assume therefore that the code is MIPS-aware and + * really buggy. Emulating these instructions would break the + * semantics anyway. + */ case ll_op: case lld_op: case sc_op: case scd_op: - /* - * For these instructions the only way to create an address - * error is an attempted access to kernel/supervisor address - * space. - */ + /* + * For these instructions the only way to create an address + * error is an attempted access to kernel/supervisor address + * space. + */ case ldl_op: case ldr_op: case lwl_op: @@ -146,36 +470,15 @@ static void emulate_load_store_insn(struct pt_regs *regs, case sb_op: goto sigbus; - /* - * The remaining opcodes are the ones that are really of interest. - */ + /* + * The remaining opcodes are the ones that are really of + * interest. 
+ */ case lh_op: if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; - __asm__ __volatile__ (".set\tnoat\n" -#ifdef __BIG_ENDIAN - "1:\tlb\t%0, 0(%2)\n" - "2:\tlbu\t$1, 1(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tlb\t%0, 1(%2)\n" - "2:\tlbu\t$1, 0(%2)\n\t" -#endif - "sll\t%0, 0x8\n\t" - "or\t%0, $1\n\t" - "li\t%1, 0\n" - "3:\t.set\tat\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadHW(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -186,26 +489,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tlwl\t%0, (%2)\n" - "2:\tlwr\t%0, 3(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tlwl\t%0, 3(%2)\n" - "2:\tlwr\t%0, (%2)\n\t" -#endif - "li\t%1, 0\n" - "3:\t.section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadW(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -216,30 +500,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 2)) goto sigbus; - __asm__ __volatile__ ( - ".set\tnoat\n" -#ifdef __BIG_ENDIAN - "1:\tlbu\t%0, 0(%2)\n" - "2:\tlbu\t$1, 1(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tlbu\t%0, 1(%2)\n" - "2:\tlbu\t$1, 0(%2)\n\t" -#endif - "sll\t%0, 0x8\n\t" - "or\t%0, $1\n\t" - "li\t%1, 0\n" - "3:\t.set\tat\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadHWU(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -258,28 +519,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 4)) goto sigbus; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tlwl\t%0, (%2)\n" - "2:\tlwr\t%0, 3(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tlwl\t%0, 3(%2)\n" - "2:\tlwr\t%0, (%2)\n\t" -#endif - "dsll\t%0, %0, 32\n\t" - "dsrl\t%0, %0, 32\n\t" - "li\t%1, 0\n" - "3:\t.section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadWU(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -302,26 +542,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_READ, addr, 8)) goto sigbus; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tldl\t%0, (%2)\n" - "2:\tldr\t%0, 7(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tldl\t%0, 7(%2)\n" - "2:\tldr\t%0, (%2)\n\t" -#endif - "li\t%1, 0\n" - "3:\t.section\t.fixup,\"ax\"\n\t" - "4:\tli\t%1, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=&r" (value), "=r" (res) - : "r" (addr), "i" (-EFAULT)); + LoadDW(addr, value, res); if (res) goto fault; compute_return_epc(regs); @@ -336,68 +557,22 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, addr, 
2)) goto sigbus; + compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - ".set\tnoat\n" - "1:\tsb\t%1, 1(%2)\n\t" - "srl\t$1, %1, 0x8\n" - "2:\tsb\t$1, 0(%2)\n\t" - ".set\tat\n\t" -#endif -#ifdef __LITTLE_ENDIAN - ".set\tnoat\n" - "1:\tsb\t%1, 0(%2)\n\t" - "srl\t$1,%1, 0x8\n" - "2:\tsb\t$1, 1(%2)\n\t" - ".set\tat\n\t" -#endif - "li\t%0, 0\n" - "3:\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%0, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=r" (res) - : "r" (value), "r" (addr), "i" (-EFAULT)); + StoreHW(addr, value, res); if (res) goto fault; - compute_return_epc(regs); break; case sw_op: if (!access_ok(VERIFY_WRITE, addr, 4)) goto sigbus; + compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tswl\t%1,(%2)\n" - "2:\tswr\t%1, 3(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tswl\t%1, 3(%2)\n" - "2:\tswr\t%1, (%2)\n\t" -#endif - "li\t%0, 0\n" - "3:\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%0, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=r" (res) - : "r" (value), "r" (addr), "i" (-EFAULT)); + StoreW(addr, value, res); if (res) goto fault; - compute_return_epc(regs); break; case sd_op: @@ -412,31 +587,11 @@ static void emulate_load_store_insn(struct pt_regs *regs, if (!access_ok(VERIFY_WRITE, addr, 8)) goto sigbus; + compute_return_epc(regs); value = regs->regs[insn.i_format.rt]; - __asm__ __volatile__ ( -#ifdef __BIG_ENDIAN - "1:\tsdl\t%1,(%2)\n" - "2:\tsdr\t%1, 7(%2)\n\t" -#endif -#ifdef __LITTLE_ENDIAN - "1:\tsdl\t%1, 7(%2)\n" - "2:\tsdr\t%1, (%2)\n\t" -#endif - "li\t%0, 0\n" - "3:\n\t" - ".section\t.fixup,\"ax\"\n\t" - "4:\tli\t%0, %3\n\t" - "j\t3b\n\t" - ".previous\n\t" - ".section\t__ex_table,\"a\"\n\t" - STR(PTR)"\t1b, 4b\n\t" - STR(PTR)"\t2b, 4b\n\t" - ".previous" - : "=r" (res) - : "r" (value), "r" (addr), "i" (-EFAULT)); + StoreDW(addr, value, res); if (res) goto fault; - compute_return_epc(regs); break; #endif /* CONFIG_64BIT */ @@ -447,10 +602,21 @@ static void emulate_load_store_insn(struct pt_regs *regs, case ldc1_op: case swc1_op: case sdc1_op: - /* - * I herewith declare: this does not happen. So send SIGBUS. - */ - goto sigbus; + die_if_kernel("Unaligned FP access in kernel code", regs); + BUG_ON(!used_math()); + BUG_ON(!is_fpu_owner()); + + lose_fpu(1); /* Save FPU state for the emulator. */ + res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, + &fault_addr); + own_fpu(1); /* Restore FPU state. */ + + /* Signal if something went wrong. */ + process_fpemu_return(res, fault_addr); + + if (res == 0) + break; + return; /* * COP2 is available to implementor for application specific use. @@ -488,6 +654,9 @@ static void emulate_load_store_insn(struct pt_regs *regs, return; fault: + /* roll back jump/branch */ + regs->cp0_epc = origpc; + regs->regs[31] = orig31; /* Did we have an exception handler installed? */ if (fixup_exception(regs)) return; @@ -504,10 +673,881 @@ static void emulate_load_store_insn(struct pt_regs *regs, return; sigill: - die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs); + die_if_kernel + ("Unhandled kernel unaligned access or invalid instruction", regs); force_sig(SIGILL, current); } +/* Recode table from 16-bit register notation to 32-bit GPR. 
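The LoadHW/LoadW/StoreW macro families introduced above all follow one pattern: assemble the misaligned access out of narrower or partial accesses that the hardware accepts (lb/lbu pairs, or lwl/lwr and swl/swr), and report -EFAULT through the __ex_table fixup instead of faulting. With the fixup machinery stripped away, the halfword case reduces to the following portable sketch (an illustration, not kernel code):

    #include <stdint.h>

    /* Mirrors the __BIG_ENDIAN LoadHW macro: sign-extending byte at offset 0,
     * zero-extending byte at offset 1, then shift and or. */
    static int16_t load_hw_be(const uint8_t *addr)
    {
            return (int16_t)(((int8_t)addr[0] << 8) | addr[1]);
    }

    /* Mirrors the __LITTLE_ENDIAN variant, with the byte roles swapped. */
    static int16_t load_hw_le(const uint8_t *addr)
    {
            return (int16_t)(((int8_t)addr[1] << 8) | addr[0]);
    }
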
*/ +const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; + +/* Recode table from 16-bit STORE register notation to 32-bit GPR. */ +const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; + +void emulate_load_store_microMIPS(struct pt_regs *regs, void __user * addr) +{ + unsigned long value; + unsigned int res; + int i; + unsigned int reg = 0, rvar; + unsigned long orig31; + u16 __user *pc16; + u16 halfword; + unsigned int word; + unsigned long origpc, contpc; + union mips_instruction insn; + struct mm_decoded_insn mminsn; + void __user *fault_addr = NULL; + + origpc = regs->cp0_epc; + orig31 = regs->regs[31]; + + mminsn.micro_mips_mode = 1; + + /* + * This load never faults. + */ + pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); + __get_user(halfword, pc16); + pc16++; + contpc = regs->cp0_epc + 2; + word = ((unsigned int)halfword << 16); + mminsn.pc_inc = 2; + + if (!mm_insn_16bit(halfword)) { + __get_user(halfword, pc16); + pc16++; + contpc = regs->cp0_epc + 4; + mminsn.pc_inc = 4; + word |= halfword; + } + mminsn.insn = word; + + if (get_user(halfword, pc16)) + goto fault; + mminsn.next_pc_inc = 2; + word = ((unsigned int)halfword << 16); + + if (!mm_insn_16bit(halfword)) { + pc16++; + if (get_user(halfword, pc16)) + goto fault; + mminsn.next_pc_inc = 4; + word |= halfword; + } + mminsn.next_insn = word; + + insn = (union mips_instruction)(mminsn.insn); + if (mm_isBranchInstr(regs, mminsn, &contpc)) + insn = (union mips_instruction)(mminsn.next_insn); + + /* Parse instruction to find what to do */ + + switch (insn.mm_i_format.opcode) { + + case mm_pool32a_op: + switch (insn.mm_x_format.func) { + case mm_lwxs_op: + reg = insn.mm_x_format.rd; + goto loadW; + } + + goto sigbus; + + case mm_pool32b_op: + switch (insn.mm_m_format.func) { + case mm_lwp_func: + reg = insn.mm_m_format.rd; + if (reg == 31) + goto sigbus; + + if (!access_ok(VERIFY_READ, addr, 8)) + goto sigbus; + + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + addr += 4; + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[reg + 1] = value; + goto success; + + case mm_swp_func: + reg = insn.mm_m_format.rd; + if (reg == 31) + goto sigbus; + + if (!access_ok(VERIFY_WRITE, addr, 8)) + goto sigbus; + + value = regs->regs[reg]; + StoreW(addr, value, res); + if (res) + goto fault; + addr += 4; + value = regs->regs[reg + 1]; + StoreW(addr, value, res); + if (res) + goto fault; + goto success; + + case mm_ldp_func: +#ifdef CONFIG_64BIT + reg = insn.mm_m_format.rd; + if (reg == 31) + goto sigbus; + + if (!access_ok(VERIFY_READ, addr, 16)) + goto sigbus; + + LoadDW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + addr += 8; + LoadDW(addr, value, res); + if (res) + goto fault; + regs->regs[reg + 1] = value; + goto success; +#endif /* CONFIG_64BIT */ + + goto sigill; + + case mm_sdp_func: +#ifdef CONFIG_64BIT + reg = insn.mm_m_format.rd; + if (reg == 31) + goto sigbus; + + if (!access_ok(VERIFY_WRITE, addr, 16)) + goto sigbus; + + value = regs->regs[reg]; + StoreDW(addr, value, res); + if (res) + goto fault; + addr += 8; + value = regs->regs[reg + 1]; + StoreDW(addr, value, res); + if (res) + goto fault; + goto success; +#endif /* CONFIG_64BIT */ + + goto sigill; + + case mm_lwm32_func: + reg = insn.mm_m_format.rd; + rvar = reg & 0xf; + if ((rvar > 9) || !reg) + goto sigill; + if (reg & 0x10) { + if (!access_ok + (VERIFY_READ, addr, 4 * (rvar + 1))) + goto sigbus; + } else { + if (!access_ok(VERIFY_READ, addr, 4 * rvar)) + goto sigbus; + } + if (rvar == 9) + 
rvar = 8; + for (i = 16; rvar; rvar--, i++) { + LoadW(addr, value, res); + if (res) + goto fault; + addr += 4; + regs->regs[i] = value; + } + if ((reg & 0xf) == 9) { + LoadW(addr, value, res); + if (res) + goto fault; + addr += 4; + regs->regs[30] = value; + } + if (reg & 0x10) { + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[31] = value; + } + goto success; + + case mm_swm32_func: + reg = insn.mm_m_format.rd; + rvar = reg & 0xf; + if ((rvar > 9) || !reg) + goto sigill; + if (reg & 0x10) { + if (!access_ok + (VERIFY_WRITE, addr, 4 * (rvar + 1))) + goto sigbus; + } else { + if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) + goto sigbus; + } + if (rvar == 9) + rvar = 8; + for (i = 16; rvar; rvar--, i++) { + value = regs->regs[i]; + StoreW(addr, value, res); + if (res) + goto fault; + addr += 4; + } + if ((reg & 0xf) == 9) { + value = regs->regs[30]; + StoreW(addr, value, res); + if (res) + goto fault; + addr += 4; + } + if (reg & 0x10) { + value = regs->regs[31]; + StoreW(addr, value, res); + if (res) + goto fault; + } + goto success; + + case mm_ldm_func: +#ifdef CONFIG_64BIT + reg = insn.mm_m_format.rd; + rvar = reg & 0xf; + if ((rvar > 9) || !reg) + goto sigill; + if (reg & 0x10) { + if (!access_ok + (VERIFY_READ, addr, 8 * (rvar + 1))) + goto sigbus; + } else { + if (!access_ok(VERIFY_READ, addr, 8 * rvar)) + goto sigbus; + } + if (rvar == 9) + rvar = 8; + + for (i = 16; rvar; rvar--, i++) { + LoadDW(addr, value, res); + if (res) + goto fault; + addr += 4; + regs->regs[i] = value; + } + if ((reg & 0xf) == 9) { + LoadDW(addr, value, res); + if (res) + goto fault; + addr += 8; + regs->regs[30] = value; + } + if (reg & 0x10) { + LoadDW(addr, value, res); + if (res) + goto fault; + regs->regs[31] = value; + } + goto success; +#endif /* CONFIG_64BIT */ + + goto sigill; + + case mm_sdm_func: +#ifdef CONFIG_64BIT + reg = insn.mm_m_format.rd; + rvar = reg & 0xf; + if ((rvar > 9) || !reg) + goto sigill; + if (reg & 0x10) { + if (!access_ok + (VERIFY_WRITE, addr, 8 * (rvar + 1))) + goto sigbus; + } else { + if (!access_ok(VERIFY_WRITE, addr, 8 * rvar)) + goto sigbus; + } + if (rvar == 9) + rvar = 8; + + for (i = 16; rvar; rvar--, i++) { + value = regs->regs[i]; + StoreDW(addr, value, res); + if (res) + goto fault; + addr += 8; + } + if ((reg & 0xf) == 9) { + value = regs->regs[30]; + StoreDW(addr, value, res); + if (res) + goto fault; + addr += 8; + } + if (reg & 0x10) { + value = regs->regs[31]; + StoreDW(addr, value, res); + if (res) + goto fault; + } + goto success; +#endif /* CONFIG_64BIT */ + + goto sigill; + + /* LWC2, SWC2, LDC2, SDC2 are not serviced */ + } + + goto sigbus; + + case mm_pool32c_op: + switch (insn.mm_m_format.func) { + case mm_lwu_func: + reg = insn.mm_m_format.rd; + goto loadWU; + } + + /* LL,SC,LLD,SCD are not serviced */ + goto sigbus; + + case mm_pool32f_op: + switch (insn.mm_x_format.func) { + case mm_lwxc1_func: + case mm_swxc1_func: + case mm_ldxc1_func: + case mm_sdxc1_func: + goto fpu_emul; + } + + goto sigbus; + + case mm_ldc132_op: + case mm_sdc132_op: + case mm_lwc132_op: + case mm_swc132_op: +fpu_emul: + /* roll back jump/branch */ + regs->cp0_epc = origpc; + regs->regs[31] = orig31; + + die_if_kernel("Unaligned FP access in kernel code", regs); + BUG_ON(!used_math()); + BUG_ON(!is_fpu_owner()); + + lose_fpu(1); /* save the FPU state for the emulator */ + res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, + &fault_addr); + own_fpu(1); /* restore FPU state */ + + /* If something went wrong, signal */ + process_fpemu_return(res, fault_addr); + 
+ if (res == 0) + goto success; + return; + + case mm_lh32_op: + reg = insn.mm_i_format.rt; + goto loadHW; + + case mm_lhu32_op: + reg = insn.mm_i_format.rt; + goto loadHWU; + + case mm_lw32_op: + reg = insn.mm_i_format.rt; + goto loadW; + + case mm_sh32_op: + reg = insn.mm_i_format.rt; + goto storeHW; + + case mm_sw32_op: + reg = insn.mm_i_format.rt; + goto storeW; + + case mm_ld32_op: + reg = insn.mm_i_format.rt; + goto loadDW; + + case mm_sd32_op: + reg = insn.mm_i_format.rt; + goto storeDW; + + case mm_pool16c_op: + switch (insn.mm16_m_format.func) { + case mm_lwm16_op: + reg = insn.mm16_m_format.rlist; + rvar = reg + 1; + if (!access_ok(VERIFY_READ, addr, 4 * rvar)) + goto sigbus; + + for (i = 16; rvar; rvar--, i++) { + LoadW(addr, value, res); + if (res) + goto fault; + addr += 4; + regs->regs[i] = value; + } + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[31] = value; + + goto success; + + case mm_swm16_op: + reg = insn.mm16_m_format.rlist; + rvar = reg + 1; + if (!access_ok(VERIFY_WRITE, addr, 4 * rvar)) + goto sigbus; + + for (i = 16; rvar; rvar--, i++) { + value = regs->regs[i]; + StoreW(addr, value, res); + if (res) + goto fault; + addr += 4; + } + value = regs->regs[31]; + StoreW(addr, value, res); + if (res) + goto fault; + + goto success; + + } + + goto sigbus; + + case mm_lhu16_op: + reg = reg16to32[insn.mm16_rb_format.rt]; + goto loadHWU; + + case mm_lw16_op: + reg = reg16to32[insn.mm16_rb_format.rt]; + goto loadW; + + case mm_sh16_op: + reg = reg16to32st[insn.mm16_rb_format.rt]; + goto storeHW; + + case mm_sw16_op: + reg = reg16to32st[insn.mm16_rb_format.rt]; + goto storeW; + + case mm_lwsp16_op: + reg = insn.mm16_r5_format.rt; + goto loadW; + + case mm_swsp16_op: + reg = insn.mm16_r5_format.rt; + goto storeW; + + case mm_lwgp16_op: + reg = reg16to32[insn.mm16_r3_format.rt]; + goto loadW; + + default: + goto sigill; + } + +loadHW: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + LoadHW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; + +loadHWU: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + LoadHWU(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; + +loadW: + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + LoadW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; + +loadWU: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + LoadWU(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + +loadDW: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. 
+ */ + if (!access_ok(VERIFY_READ, addr, 8)) + goto sigbus; + + LoadDW(addr, value, res); + if (res) + goto fault; + regs->regs[reg] = value; + goto success; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + +storeHW: + if (!access_ok(VERIFY_WRITE, addr, 2)) + goto sigbus; + + value = regs->regs[reg]; + StoreHW(addr, value, res); + if (res) + goto fault; + goto success; + +storeW: + if (!access_ok(VERIFY_WRITE, addr, 4)) + goto sigbus; + + value = regs->regs[reg]; + StoreW(addr, value, res); + if (res) + goto fault; + goto success; + +storeDW: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_WRITE, addr, 8)) + goto sigbus; + + value = regs->regs[reg]; + StoreDW(addr, value, res); + if (res) + goto fault; + goto success; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + +success: + regs->cp0_epc = contpc; /* advance or branch */ + +#ifdef CONFIG_DEBUG_FS + unaligned_instructions++; +#endif + return; + +fault: + /* roll back jump/branch */ + regs->cp0_epc = origpc; + regs->regs[31] = orig31; + /* Did we have an exception handler installed? */ + if (fixup_exception(regs)) + return; + + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGSEGV, current); + + return; + +sigbus: + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGBUS, current); + + return; + +sigill: + die_if_kernel + ("Unhandled kernel unaligned access or invalid instruction", regs); + force_sig(SIGILL, current); +} + +static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr) +{ + unsigned long value; + unsigned int res; + int reg; + unsigned long orig31; + u16 __user *pc16; + unsigned long origpc; + union mips16e_instruction mips16inst, oldinst; + + origpc = regs->cp0_epc; + orig31 = regs->regs[31]; + pc16 = (unsigned short __user *)msk_isa16_mode(origpc); + /* + * This load never faults. 
+ */ + __get_user(mips16inst.full, pc16); + oldinst = mips16inst; + + /* skip EXTEND instruction */ + if (mips16inst.ri.opcode == MIPS16e_extend_op) { + pc16++; + __get_user(mips16inst.full, pc16); + } else if (delay_slot(regs)) { + /* skip jump instructions */ + /* JAL/JALX are 32 bits but have OPCODE in first short int */ + if (mips16inst.ri.opcode == MIPS16e_jal_op) + pc16++; + pc16++; + if (get_user(mips16inst.full, pc16)) + goto sigbus; + } + + switch (mips16inst.ri.opcode) { + case MIPS16e_i64_op: /* I64 or RI64 instruction */ + switch (mips16inst.i64.func) { /* I64/RI64 func field check */ + case MIPS16e_ldpc_func: + case MIPS16e_ldsp_func: + reg = reg16to32[mips16inst.ri64.ry]; + goto loadDW; + + case MIPS16e_sdsp_func: + reg = reg16to32[mips16inst.ri64.ry]; + goto writeDW; + + case MIPS16e_sdrasp_func: + reg = 29; /* GPRSP */ + goto writeDW; + } + + goto sigbus; + + case MIPS16e_swsp_op: + case MIPS16e_lwpc_op: + case MIPS16e_lwsp_op: + reg = reg16to32[mips16inst.ri.rx]; + break; + + case MIPS16e_i8_op: + if (mips16inst.i8.func != MIPS16e_swrasp_func) + goto sigbus; + reg = 29; /* GPRSP */ + break; + + default: + reg = reg16to32[mips16inst.rri.ry]; + break; + } + + switch (mips16inst.ri.opcode) { + + case MIPS16e_lb_op: + case MIPS16e_lbu_op: + case MIPS16e_sb_op: + goto sigbus; + + case MIPS16e_lh_op: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + LoadHW(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; + + case MIPS16e_lhu_op: + if (!access_ok(VERIFY_READ, addr, 2)) + goto sigbus; + + LoadHWU(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; + + case MIPS16e_lw_op: + case MIPS16e_lwpc_op: + case MIPS16e_lwsp_op: + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + LoadW(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; + + case MIPS16e_lwu_op: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_READ, addr, 4)) + goto sigbus; + + LoadWU(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + case MIPS16e_ld_op: +loadDW: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. 
+ */ + if (!access_ok(VERIFY_READ, addr, 8)) + goto sigbus; + + LoadDW(addr, value, res); + if (res) + goto fault; + MIPS16e_compute_return_epc(regs, &oldinst); + regs->regs[reg] = value; + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + case MIPS16e_sh_op: + if (!access_ok(VERIFY_WRITE, addr, 2)) + goto sigbus; + + MIPS16e_compute_return_epc(regs, &oldinst); + value = regs->regs[reg]; + StoreHW(addr, value, res); + if (res) + goto fault; + break; + + case MIPS16e_sw_op: + case MIPS16e_swsp_op: + case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ + if (!access_ok(VERIFY_WRITE, addr, 4)) + goto sigbus; + + MIPS16e_compute_return_epc(regs, &oldinst); + value = regs->regs[reg]; + StoreW(addr, value, res); + if (res) + goto fault; + break; + + case MIPS16e_sd_op: +writeDW: +#ifdef CONFIG_64BIT + /* + * A 32-bit kernel might be running on a 64-bit processor. But + * if we're on a 32-bit processor and an i-cache incoherency + * or race makes us see a 64-bit instruction here the sdl/sdr + * would blow up, so for now we don't handle unaligned 64-bit + * instructions on 32-bit kernels. + */ + if (!access_ok(VERIFY_WRITE, addr, 8)) + goto sigbus; + + MIPS16e_compute_return_epc(regs, &oldinst); + value = regs->regs[reg]; + StoreDW(addr, value, res); + if (res) + goto fault; + break; +#endif /* CONFIG_64BIT */ + + /* Cannot handle 64-bit instructions in 32-bit kernel */ + goto sigill; + + default: + /* + * Pheeee... We encountered an yet unknown instruction or + * cache coherence problem. Die sucker, die ... + */ + goto sigill; + } + +#ifdef CONFIG_DEBUG_FS + unaligned_instructions++; +#endif + + return; + +fault: + /* roll back jump/branch */ + regs->cp0_epc = origpc; + regs->regs[31] = orig31; + /* Did we have an exception handler installed? */ + if (fixup_exception(regs)) + return; + + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGSEGV, current); + + return; + +sigbus: + die_if_kernel("Unhandled kernel unaligned access", regs); + force_sig(SIGBUS, current); + + return; + +sigill: + die_if_kernel + ("Unhandled kernel unaligned access or invalid instruction", regs); + force_sig(SIGILL, current); +} asmlinkage void do_ade(struct pt_regs *regs) { unsigned int __user *pc; @@ -517,23 +1557,62 @@ asmlinkage void do_ade(struct pt_regs *regs) 1, regs, regs->cp0_badvaddr); /* * Did we catch a fault trying to load an instruction? - * Or are we running in MIPS16 mode? */ - if ((regs->cp0_badvaddr == regs->cp0_epc) || (regs->cp0_epc & 0x1)) + if (regs->cp0_badvaddr == regs->cp0_epc) goto sigbus; - pc = (unsigned int __user *) exception_epc(regs); if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) goto sigbus; if (unaligned_action == UNALIGNED_ACTION_SIGNAL) goto sigbus; - else if (unaligned_action == UNALIGNED_ACTION_SHOW) - show_registers(regs); /* * Do branch emulation only if we didn't forward the exception. * This is all so but ugly ... */ + + /* + * Are we running in microMIPS mode? + */ + if (get_isa16_mode(regs->cp0_epc)) { + /* + * Did we catch a fault trying to load an instruction in + * 16-bit mode? 
+ */ + if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) + goto sigbus; + if (unaligned_action == UNALIGNED_ACTION_SHOW) + show_registers(regs); + + if (cpu_has_mmips) { + seg = get_fs(); + if (!user_mode(regs)) + set_fs(KERNEL_DS); + emulate_load_store_microMIPS(regs, + (void __user *)regs->cp0_badvaddr); + set_fs(seg); + + return; + } + + if (cpu_has_mips16) { + seg = get_fs(); + if (!user_mode(regs)) + set_fs(KERNEL_DS); + emulate_load_store_MIPS16e(regs, + (void __user *)regs->cp0_badvaddr); + set_fs(seg); + + return; + } + + goto sigbus; + } + + if (unaligned_action == UNALIGNED_ACTION_SHOW) + show_registers(regs); + pc = (unsigned int __user *)exception_epc(regs); + seg = get_fs(); if (!user_mode(regs)) set_fs(KERNEL_DS); diff --git a/trunk/arch/mips/kvm/00README.txt b/trunk/arch/mips/kvm/00README.txt new file mode 100644 index 000000000000..51617e481aa3 --- /dev/null +++ b/trunk/arch/mips/kvm/00README.txt @@ -0,0 +1,31 @@ +KVM/MIPS Trap & Emulate Release Notes +===================================== + +(1) KVM/MIPS should support MIPS32R2 and beyond. It has been tested on the following platforms: + Malta Board with FPGA based 34K + Sigma Designs TangoX board with a 24K based 8654 SoC. + Malta Board with 74K @ 1GHz + +(2) Both Guest kernel and Guest Userspace execute in UM. + Guest User address space: 0x00000000 -> 0x40000000 + Guest Kernel Unmapped: 0x40000000 -> 0x60000000 + Guest Kernel Mapped: 0x60000000 -> 0x80000000 + + Guest Usermode virtual memory is limited to 1GB. + +(2) 16K Page Sizes: Both Host Kernel and Guest Kernel should have the same page size, currently at least 16K. + Note that due to cache aliasing issues, 4K page sizes are NOT supported. + +(3) No HugeTLB Support + Both the host kernel and Guest kernel should have the page size set to 16K. + This will be implemented in a future release. + +(4) KVM/MIPS does not have support for SMP Guests + Linux-3.7-rc2 based SMP guest hangs due to the following code sequence in the generated TLB handlers: + LL/TLBP/SC. Since the TLBP instruction causes a trap the reservation gets cleared + when we ERET back to the guest. This causes the guest to hang in an infinite loop. + This will be fixed in a future release. + +(5) Use Host FPU + Currently KVM/MIPS emulates a 24K CPU without a FPU. + This will be fixed in a future release diff --git a/trunk/arch/mips/kvm/Kconfig b/trunk/arch/mips/kvm/Kconfig new file mode 100644 index 000000000000..2c15590e55f7 --- /dev/null +++ b/trunk/arch/mips/kvm/Kconfig @@ -0,0 +1,49 @@ +# +# KVM configuration +# +source "virt/kvm/Kconfig" + +menuconfig VIRTUALIZATION + bool "Virtualization" + depends on HAVE_KVM + ---help--- + Say Y here to get to see options for using your Linux host to run + other operating systems inside virtual machines (guests). + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if VIRTUALIZATION + +config KVM + tristate "Kernel-based Virtual Machine (KVM) support" + depends on HAVE_KVM + select PREEMPT_NOTIFIERS + select ANON_INODES + select KVM_MMIO + ---help--- + Support for hosting Guest kernels. + Currently supported on MIPS32 processors. + +config KVM_MIPS_DYN_TRANS + bool "KVM/MIPS: Dynamic binary translation to reduce traps" + depends on KVM + ---help--- + When running in Trap & Emulate mode patch privileged + instructions to reduce the number of traps. + + If unsure, say Y. 
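The address-space table in the KVM/MIPS release notes above pins the guest to the low 2 GB: 1 GB of user space, then 512 MB of unmapped and 512 MB of mapped guest-kernel space. A small sketch of the resulting classification (constants copied from that table; helper and macro names invented for illustration):

    /* Guest virtual-address ranges from the KVM/MIPS release notes above. */
    #define GUEST_USER_END          0x40000000UL    /* end of guest user space */
    #define GUEST_KSEG_UNMAPPED_END 0x60000000UL    /* guest kernel unmapped   */
    #define GUEST_KSEG_MAPPED_END   0x80000000UL    /* guest kernel mapped     */

    static inline int guest_gva_is_kernel(unsigned long gva)
    {
            return gva >= GUEST_USER_END && gva < GUEST_KSEG_MAPPED_END;
    }
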
+ +config KVM_MIPS_DEBUG_COP0_COUNTERS + bool "Maintain counters for COP0 accesses" + depends on KVM + ---help--- + Maintain statistics for Guest COP0 accesses. + A histogram of COP0 accesses is printed when the VM is + shutdown. + + If unsure, say N. + +source drivers/vhost/Kconfig + +endif # VIRTUALIZATION diff --git a/trunk/arch/mips/kvm/Makefile b/trunk/arch/mips/kvm/Makefile new file mode 100644 index 000000000000..78d87bbc99db --- /dev/null +++ b/trunk/arch/mips/kvm/Makefile @@ -0,0 +1,13 @@ +# Makefile for KVM support for MIPS +# + +common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) + +EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm + +kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \ + kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \ + kvm_mips_dyntrans.o kvm_trap_emul.o + +obj-$(CONFIG_KVM) += kvm.o +obj-y += kvm_cb.o kvm_tlb.o diff --git a/trunk/arch/mips/kvm/kvm_cb.c b/trunk/arch/mips/kvm/kvm_cb.c new file mode 100644 index 000000000000..313c2e37b978 --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_cb.c @@ -0,0 +1,14 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Yann Le Du + */ + +#include +#include + +struct kvm_mips_callbacks *kvm_mips_callbacks; +EXPORT_SYMBOL(kvm_mips_callbacks); diff --git a/trunk/arch/mips/kvm/kvm_locore.S b/trunk/arch/mips/kvm/kvm_locore.S new file mode 100644 index 000000000000..dca2aa665993 --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_locore.S @@ -0,0 +1,650 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Main entry point for the guest, exception handling. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +#include +#include +#include +#include +#include +#include + + +#define _C_LABEL(x) x +#define MIPSX(name) mips32_ ## name +#define CALLFRAME_SIZ 32 + +/* + * VECTOR + * exception vector entrypoint + */ +#define VECTOR(x, regmask) \ + .ent _C_LABEL(x),0; \ + EXPORT(x); + +#define VECTOR_END(x) \ + EXPORT(x); + +/* Overload, Danger Will Robinson!! */ +#define PT_HOST_ASID PT_BVADDR +#define PT_HOST_USERLOCAL PT_EPC + +#define CP0_DDATA_LO $28,3 +#define CP0_EBASE $15,1 + +#define CP0_INTCTL $12,1 +#define CP0_SRSCTL $12,2 +#define CP0_SRSMAP $12,3 +#define CP0_HWRENA $7,0 + +/* Resume Flags */ +#define RESUME_FLAG_HOST (1<<1) /* Resume host? 
*/ + +#define RESUME_GUEST 0 +#define RESUME_HOST RESUME_FLAG_HOST + +/* + * __kvm_mips_vcpu_run: entry point to the guest + * a0: run + * a1: vcpu + */ + +FEXPORT(__kvm_mips_vcpu_run) + .set push + .set noreorder + .set noat + + /* k0/k1 not being used in host kernel context */ + addiu k1,sp, -PT_SIZE + LONG_S $0, PT_R0(k1) + LONG_S $1, PT_R1(k1) + LONG_S $2, PT_R2(k1) + LONG_S $3, PT_R3(k1) + + LONG_S $4, PT_R4(k1) + LONG_S $5, PT_R5(k1) + LONG_S $6, PT_R6(k1) + LONG_S $7, PT_R7(k1) + + LONG_S $8, PT_R8(k1) + LONG_S $9, PT_R9(k1) + LONG_S $10, PT_R10(k1) + LONG_S $11, PT_R11(k1) + LONG_S $12, PT_R12(k1) + LONG_S $13, PT_R13(k1) + LONG_S $14, PT_R14(k1) + LONG_S $15, PT_R15(k1) + LONG_S $16, PT_R16(k1) + LONG_S $17, PT_R17(k1) + + LONG_S $18, PT_R18(k1) + LONG_S $19, PT_R19(k1) + LONG_S $20, PT_R20(k1) + LONG_S $21, PT_R21(k1) + LONG_S $22, PT_R22(k1) + LONG_S $23, PT_R23(k1) + LONG_S $24, PT_R24(k1) + LONG_S $25, PT_R25(k1) + + /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */ + + LONG_S $28, PT_R28(k1) + LONG_S $29, PT_R29(k1) + LONG_S $30, PT_R30(k1) + LONG_S $31, PT_R31(k1) + + /* Save hi/lo */ + mflo v0 + LONG_S v0, PT_LO(k1) + mfhi v1 + LONG_S v1, PT_HI(k1) + + /* Save host status */ + mfc0 v0, CP0_STATUS + LONG_S v0, PT_STATUS(k1) + + /* Save host ASID, shove it into the BVADDR location */ + mfc0 v1,CP0_ENTRYHI + andi v1, 0xff + LONG_S v1, PT_HOST_ASID(k1) + + /* Save DDATA_LO, will be used to store pointer to vcpu */ + mfc0 v1, CP0_DDATA_LO + LONG_S v1, PT_HOST_USERLOCAL(k1) + + /* DDATA_LO has pointer to vcpu */ + mtc0 a1,CP0_DDATA_LO + + /* Offset into vcpu->arch */ + addiu k1, a1, VCPU_HOST_ARCH + + /* Save the host stack to VCPU, used for exception processing when we exit from the Guest */ + LONG_S sp, VCPU_HOST_STACK(k1) + + /* Save the kernel gp as well */ + LONG_S gp, VCPU_HOST_GP(k1) + + /* Setup status register for running the guest in UM, interrupts are disabled */ + li k0,(ST0_EXL | KSU_USER| ST0_BEV) + mtc0 k0,CP0_STATUS + ehb + + /* load up the new EBASE */ + LONG_L k0, VCPU_GUEST_EBASE(k1) + mtc0 k0,CP0_EBASE + + /* Now that the new EBASE has been loaded, unset BEV, set interrupt mask as it was + * but make sure that timer interrupts are enabled + */ + li k0,(ST0_EXL | KSU_USER | ST0_IE) + andi v0, v0, ST0_IM + or k0, k0, v0 + mtc0 k0,CP0_STATUS + ehb + + + /* Set Guest EPC */ + LONG_L t0, VCPU_PC(k1) + mtc0 t0, CP0_EPC + +FEXPORT(__kvm_mips_load_asid) + /* Set the ASID for the Guest Kernel */ + sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ + /* addresses shift to 0x80000000 */ + bltz t0, 1f /* If kernel */ + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ +1: + /* t1: contains the base of the ASID array, need to get the cpu id */ + LONG_L t2, TI_CPU($28) /* smp_processor_id */ + sll t2, t2, 2 /* x4 */ + addu t3, t1, t2 + LONG_L k0, (t3) + andi k0, k0, 0xff + mtc0 k0,CP0_ENTRYHI + ehb + + /* Disable RDHWR access */ + mtc0 zero, CP0_HWRENA + + /* Now load up the Guest Context from VCPU */ + LONG_L $1, VCPU_R1(k1) + LONG_L $2, VCPU_R2(k1) + LONG_L $3, VCPU_R3(k1) + + LONG_L $4, VCPU_R4(k1) + LONG_L $5, VCPU_R5(k1) + LONG_L $6, VCPU_R6(k1) + LONG_L $7, VCPU_R7(k1) + + LONG_L $8, VCPU_R8(k1) + LONG_L $9, VCPU_R9(k1) + LONG_L $10, VCPU_R10(k1) + LONG_L $11, VCPU_R11(k1) + LONG_L $12, VCPU_R12(k1) + LONG_L $13, VCPU_R13(k1) + LONG_L $14, VCPU_R14(k1) + LONG_L $15, VCPU_R15(k1) + LONG_L $16, VCPU_R16(k1) + LONG_L $17, VCPU_R17(k1) + LONG_L $18, VCPU_R18(k1) + LONG_L $19, VCPU_R19(k1) + 
LONG_L $20, VCPU_R20(k1) + LONG_L $21, VCPU_R21(k1) + LONG_L $22, VCPU_R22(k1) + LONG_L $23, VCPU_R23(k1) + LONG_L $24, VCPU_R24(k1) + LONG_L $25, VCPU_R25(k1) + + /* k0/k1 loaded up later */ + + LONG_L $28, VCPU_R28(k1) + LONG_L $29, VCPU_R29(k1) + LONG_L $30, VCPU_R30(k1) + LONG_L $31, VCPU_R31(k1) + + /* Restore hi/lo */ + LONG_L k0, VCPU_LO(k1) + mtlo k0 + + LONG_L k0, VCPU_HI(k1) + mthi k0 + +FEXPORT(__kvm_mips_load_k0k1) + /* Restore the guest's k0/k1 registers */ + LONG_L k0, VCPU_R26(k1) + LONG_L k1, VCPU_R27(k1) + + /* Jump to guest */ + eret + .set pop + +VECTOR(MIPSX(exception), unknown) +/* + * Find out what mode we came from and jump to the proper handler. + */ + .set push + .set noat + .set noreorder + mtc0 k0, CP0_ERROREPC #01: Save guest k0 + ehb #02: + + mfc0 k0, CP0_EBASE #02: Get EBASE + srl k0, k0, 10 #03: Get rid of CPUNum + sll k0, k0, 10 #04 + LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 + addiu k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000 + j k0 #07: jump to the function + nop #08: branch delay slot + .set push +VECTOR_END(MIPSX(exceptionEnd)) +.end MIPSX(exception) + +/* + * Generic Guest exception handler. We end up here when the guest + * does something that causes a trap to kernel mode. + * + */ +NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) + .set push + .set noat + .set noreorder + + /* Get the VCPU pointer from DDTATA_LO */ + mfc0 k1, CP0_DDATA_LO + addiu k1, k1, VCPU_HOST_ARCH + + /* Start saving Guest context to VCPU */ + LONG_S $0, VCPU_R0(k1) + LONG_S $1, VCPU_R1(k1) + LONG_S $2, VCPU_R2(k1) + LONG_S $3, VCPU_R3(k1) + LONG_S $4, VCPU_R4(k1) + LONG_S $5, VCPU_R5(k1) + LONG_S $6, VCPU_R6(k1) + LONG_S $7, VCPU_R7(k1) + LONG_S $8, VCPU_R8(k1) + LONG_S $9, VCPU_R9(k1) + LONG_S $10, VCPU_R10(k1) + LONG_S $11, VCPU_R11(k1) + LONG_S $12, VCPU_R12(k1) + LONG_S $13, VCPU_R13(k1) + LONG_S $14, VCPU_R14(k1) + LONG_S $15, VCPU_R15(k1) + LONG_S $16, VCPU_R16(k1) + LONG_S $17,VCPU_R17(k1) + LONG_S $18, VCPU_R18(k1) + LONG_S $19, VCPU_R19(k1) + LONG_S $20, VCPU_R20(k1) + LONG_S $21, VCPU_R21(k1) + LONG_S $22, VCPU_R22(k1) + LONG_S $23, VCPU_R23(k1) + LONG_S $24, VCPU_R24(k1) + LONG_S $25, VCPU_R25(k1) + + /* Guest k0/k1 saved later */ + + LONG_S $28, VCPU_R28(k1) + LONG_S $29, VCPU_R29(k1) + LONG_S $30, VCPU_R30(k1) + LONG_S $31, VCPU_R31(k1) + + /* We need to save hi/lo and restore them on + * the way out + */ + mfhi t0 + LONG_S t0, VCPU_HI(k1) + + mflo t0 + LONG_S t0, VCPU_LO(k1) + + /* Finally save guest k0/k1 to VCPU */ + mfc0 t0, CP0_ERROREPC + LONG_S t0, VCPU_R26(k1) + + /* Get GUEST k1 and save it in VCPU */ + la t1, ~0x2ff + mfc0 t0, CP0_EBASE + and t0, t0, t1 + LONG_L t0, 0x3000(t0) + LONG_S t0, VCPU_R27(k1) + + /* Now that context has been saved, we can use other registers */ + + /* Restore vcpu */ + mfc0 a1, CP0_DDATA_LO + move s1, a1 + + /* Restore run (vcpu->run) */ + LONG_L a0, VCPU_RUN(a1) + /* Save pointer to run in s0, will be saved by the compiler */ + move s0, a0 + + + /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process the exception */ + mfc0 k0,CP0_EPC + LONG_S k0, VCPU_PC(k1) + + mfc0 k0, CP0_BADVADDR + LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1) + + mfc0 k0, CP0_CAUSE + LONG_S k0, VCPU_HOST_CP0_CAUSE(k1) + + mfc0 k0, CP0_ENTRYHI + LONG_S k0, VCPU_HOST_ENTRYHI(k1) + + /* Now restore the host state just enough to run the handlers */ + + /* Swtich EBASE to the one used by Linux */ + /* load up the host EBASE */ + mfc0 v0, CP0_STATUS + + .set at + or k0, v0, ST0_BEV + .set noat + + mtc0 k0, CP0_STATUS + ehb + 
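+	/* With BEV set, exceptions are taken through the bootstrap vectors,
+	 * so it is now safe to switch EBASE over to the host handlers below. */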
+ LONG_L k0, VCPU_HOST_EBASE(k1) + mtc0 k0,CP0_EBASE + + + /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ + .set at + and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) + or v0, v0, ST0_CU0 + .set noat + mtc0 v0, CP0_STATUS + ehb + + /* Load up host GP */ + LONG_L gp, VCPU_HOST_GP(k1) + + /* Need a stack before we can jump to "C" */ + LONG_L sp, VCPU_HOST_STACK(k1) + + /* Saved host state */ + addiu sp,sp, -PT_SIZE + + /* XXXKYMA do we need to load the host ASID, maybe not because the + * kernel entries are marked GLOBAL, need to verify + */ + + /* Restore host DDATA_LO */ + LONG_L k0, PT_HOST_USERLOCAL(sp) + mtc0 k0, CP0_DDATA_LO + + /* Restore RDHWR access */ + la k0, 0x2000000F + mtc0 k0, CP0_HWRENA + + /* Jump to handler */ +FEXPORT(__kvm_mips_jump_to_handler) + /* XXXKYMA: not sure if this is safe, how large is the stack?? */ + /* Now jump to the kvm_mips_handle_exit() to see if we can deal with this in the kernel */ + la t9,kvm_mips_handle_exit + jalr.hb t9 + addiu sp,sp, -CALLFRAME_SIZ /* BD Slot */ + + /* Return from handler Make sure interrupts are disabled */ + di + ehb + + /* XXXKYMA: k0/k1 could have been blown away if we processed an exception + * while we were handling the exception from the guest, reload k1 + */ + move k1, s1 + addiu k1, k1, VCPU_HOST_ARCH + + /* Check return value, should tell us if we are returning to the host (handle I/O etc) + * or resuming the guest + */ + andi t0, v0, RESUME_HOST + bnez t0, __kvm_mips_return_to_host + nop + +__kvm_mips_return_to_guest: + /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */ + mtc0 s1, CP0_DDATA_LO + + /* Load up the Guest EBASE to minimize the window where BEV is set */ + LONG_L t0, VCPU_GUEST_EBASE(k1) + + /* Switch EBASE back to the one used by KVM */ + mfc0 v1, CP0_STATUS + .set at + or k0, v1, ST0_BEV + .set noat + mtc0 k0, CP0_STATUS + ehb + mtc0 t0,CP0_EBASE + + /* Setup status register for running guest in UM */ + .set at + or v1, v1, (ST0_EXL | KSU_USER | ST0_IE) + and v1, v1, ~ST0_CU0 + .set noat + mtc0 v1, CP0_STATUS + ehb + + + /* Set Guest EPC */ + LONG_L t0, VCPU_PC(k1) + mtc0 t0, CP0_EPC + + /* Set the ASID for the Guest Kernel */ + sll t0, t0, 1 /* with kseg0 @ 0x40000000, kernel */ + /* addresses shift to 0x80000000 */ + bltz t0, 1f /* If kernel */ + addiu t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ + addiu t1, k1, VCPU_GUEST_USER_ASID /* else user */ +1: + /* t1: contains the base of the ASID array, need to get the cpu id */ + LONG_L t2, TI_CPU($28) /* smp_processor_id */ + sll t2, t2, 2 /* x4 */ + addu t3, t1, t2 + LONG_L k0, (t3) + andi k0, k0, 0xff + mtc0 k0,CP0_ENTRYHI + ehb + + /* Disable RDHWR access */ + mtc0 zero, CP0_HWRENA + + /* load the guest context from VCPU and return */ + LONG_L $0, VCPU_R0(k1) + LONG_L $1, VCPU_R1(k1) + LONG_L $2, VCPU_R2(k1) + LONG_L $3, VCPU_R3(k1) + LONG_L $4, VCPU_R4(k1) + LONG_L $5, VCPU_R5(k1) + LONG_L $6, VCPU_R6(k1) + LONG_L $7, VCPU_R7(k1) + LONG_L $8, VCPU_R8(k1) + LONG_L $9, VCPU_R9(k1) + LONG_L $10, VCPU_R10(k1) + LONG_L $11, VCPU_R11(k1) + LONG_L $12, VCPU_R12(k1) + LONG_L $13, VCPU_R13(k1) + LONG_L $14, VCPU_R14(k1) + LONG_L $15, VCPU_R15(k1) + LONG_L $16, VCPU_R16(k1) + LONG_L $17, VCPU_R17(k1) + LONG_L $18, VCPU_R18(k1) + LONG_L $19, VCPU_R19(k1) + LONG_L $20, VCPU_R20(k1) + LONG_L $21, VCPU_R21(k1) + LONG_L $22, VCPU_R22(k1) + LONG_L $23, VCPU_R23(k1) + LONG_L $24, VCPU_R24(k1) + LONG_L $25, VCPU_R25(k1) + + /* $/k1 loaded later */ + LONG_L $28, VCPU_R28(k1) + LONG_L $29, VCPU_R29(k1) + LONG_L $30, VCPU_R30(k1) + LONG_L 
$31, VCPU_R31(k1) + +FEXPORT(__kvm_mips_skip_guest_restore) + LONG_L k0, VCPU_HI(k1) + mthi k0 + + LONG_L k0, VCPU_LO(k1) + mtlo k0 + + LONG_L k0, VCPU_R26(k1) + LONG_L k1, VCPU_R27(k1) + + eret + +__kvm_mips_return_to_host: + /* EBASE is already pointing to Linux */ + LONG_L k1, VCPU_HOST_STACK(k1) + addiu k1,k1, -PT_SIZE + + /* Restore host DDATA_LO */ + LONG_L k0, PT_HOST_USERLOCAL(k1) + mtc0 k0, CP0_DDATA_LO + + /* Restore host ASID */ + LONG_L k0, PT_HOST_ASID(sp) + andi k0, 0xff + mtc0 k0,CP0_ENTRYHI + ehb + + /* Load context saved on the host stack */ + LONG_L $0, PT_R0(k1) + LONG_L $1, PT_R1(k1) + + /* r2/v0 is the return code, shift it down by 2 (arithmetic) to recover the err code */ + sra k0, v0, 2 + move $2, k0 + + LONG_L $3, PT_R3(k1) + LONG_L $4, PT_R4(k1) + LONG_L $5, PT_R5(k1) + LONG_L $6, PT_R6(k1) + LONG_L $7, PT_R7(k1) + LONG_L $8, PT_R8(k1) + LONG_L $9, PT_R9(k1) + LONG_L $10, PT_R10(k1) + LONG_L $11, PT_R11(k1) + LONG_L $12, PT_R12(k1) + LONG_L $13, PT_R13(k1) + LONG_L $14, PT_R14(k1) + LONG_L $15, PT_R15(k1) + LONG_L $16, PT_R16(k1) + LONG_L $17, PT_R17(k1) + LONG_L $18, PT_R18(k1) + LONG_L $19, PT_R19(k1) + LONG_L $20, PT_R20(k1) + LONG_L $21, PT_R21(k1) + LONG_L $22, PT_R22(k1) + LONG_L $23, PT_R23(k1) + LONG_L $24, PT_R24(k1) + LONG_L $25, PT_R25(k1) + + /* Host k0/k1 were not saved */ + + LONG_L $28, PT_R28(k1) + LONG_L $29, PT_R29(k1) + LONG_L $30, PT_R30(k1) + + LONG_L k0, PT_HI(k1) + mthi k0 + + LONG_L k0, PT_LO(k1) + mtlo k0 + + /* Restore RDHWR access */ + la k0, 0x2000000F + mtc0 k0, CP0_HWRENA + + + /* Restore RA, which is the address we will return to */ + LONG_L ra, PT_R31(k1) + j ra + nop + + .set pop +VECTOR_END(MIPSX(GuestExceptionEnd)) +.end MIPSX(GuestException) + +MIPSX(exceptions): + #### + ##### The exception handlers. + ##### + .word _C_LABEL(MIPSX(GuestException)) # 0 + .word _C_LABEL(MIPSX(GuestException)) # 1 + .word _C_LABEL(MIPSX(GuestException)) # 2 + .word _C_LABEL(MIPSX(GuestException)) # 3 + .word _C_LABEL(MIPSX(GuestException)) # 4 + .word _C_LABEL(MIPSX(GuestException)) # 5 + .word _C_LABEL(MIPSX(GuestException)) # 6 + .word _C_LABEL(MIPSX(GuestException)) # 7 + .word _C_LABEL(MIPSX(GuestException)) # 8 + .word _C_LABEL(MIPSX(GuestException)) # 9 + .word _C_LABEL(MIPSX(GuestException)) # 10 + .word _C_LABEL(MIPSX(GuestException)) # 11 + .word _C_LABEL(MIPSX(GuestException)) # 12 + .word _C_LABEL(MIPSX(GuestException)) # 13 + .word _C_LABEL(MIPSX(GuestException)) # 14 + .word _C_LABEL(MIPSX(GuestException)) # 15 + .word _C_LABEL(MIPSX(GuestException)) # 16 + .word _C_LABEL(MIPSX(GuestException)) # 17 + .word _C_LABEL(MIPSX(GuestException)) # 18 + .word _C_LABEL(MIPSX(GuestException)) # 19 + .word _C_LABEL(MIPSX(GuestException)) # 20 + .word _C_LABEL(MIPSX(GuestException)) # 21 + .word _C_LABEL(MIPSX(GuestException)) # 22 + .word _C_LABEL(MIPSX(GuestException)) # 23 + .word _C_LABEL(MIPSX(GuestException)) # 24 + .word _C_LABEL(MIPSX(GuestException)) # 25 + .word _C_LABEL(MIPSX(GuestException)) # 26 + .word _C_LABEL(MIPSX(GuestException)) # 27 + .word _C_LABEL(MIPSX(GuestException)) # 28 + .word _C_LABEL(MIPSX(GuestException)) # 29 + .word _C_LABEL(MIPSX(GuestException)) # 30 + .word _C_LABEL(MIPSX(GuestException)) # 31 + + +/* This routine makes changes to the instruction stream effective to the hardware. + * It should be called after the instruction stream is written. + * On return, the new instructions are effective. 
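+ * The range is walked with synci using the step size reported by
+ * rdhwr (HW_SYNCI_Step); a reported step of zero means no synci is needed.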
+ * Inputs: + * a0 = Start address of new instruction stream + * a1 = Size, in bytes, of new instruction stream + */ + +#define HW_SYNCI_Step $1 +LEAF(MIPSX(SyncICache)) + .set push + .set mips32r2 + beq a1, zero, 20f + nop + addu a1, a0, a1 + rdhwr v0, HW_SYNCI_Step + beq v0, zero, 20f + nop + +10: + synci 0(a0) + addu a0, a0, v0 + sltu v1, a0, a1 + bne v1, zero, 10b + nop + sync +20: + jr.hb ra + nop + .set pop +END(MIPSX(SyncICache)) diff --git a/trunk/arch/mips/kvm/kvm_mips.c b/trunk/arch/mips/kvm/kvm_mips.c new file mode 100644 index 000000000000..e0dad0289797 --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips.c @@ -0,0 +1,958 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * KVM/MIPS: MIPS specific KVM APIs + * + * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. + * Authors: Sanjay Lal +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "kvm_mips_int.h" +#include "kvm_mips_comm.h" + +#define CREATE_TRACE_POINTS +#include "trace.h" + +#ifndef VECTORSPACING +#define VECTORSPACING 0x100 /* for EI/VI mode */ +#endif + +#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU +struct kvm_stats_debugfs_item debugfs_entries[] = { + { "wait", VCPU_STAT(wait_exits) }, + { "cache", VCPU_STAT(cache_exits) }, + { "signal", VCPU_STAT(signal_exits) }, + { "interrupt", VCPU_STAT(int_exits) }, + { "cop_unsuable", VCPU_STAT(cop_unusable_exits) }, + { "tlbmod", VCPU_STAT(tlbmod_exits) }, + { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) }, + { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) }, + { "addrerr_st", VCPU_STAT(addrerr_st_exits) }, + { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) }, + { "syscall", VCPU_STAT(syscall_exits) }, + { "resvd_inst", VCPU_STAT(resvd_inst_exits) }, + { "break_inst", VCPU_STAT(break_inst_exits) }, + { "flush_dcache", VCPU_STAT(flush_dcache_exits) }, + { "halt_wakeup", VCPU_STAT(halt_wakeup) }, + {NULL} +}; + +static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) +{ + int i; + for_each_possible_cpu(i) { + vcpu->arch.guest_kernel_asid[i] = 0; + vcpu->arch.guest_user_asid[i] = 0; + } + return 0; +} + +gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn) +{ + return gfn; +} + +/* XXXKYMA: We are simulatoring a processor that has the WII bit set in Config7, so we + * are "runnable" if interrupts are pending + */ +int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) +{ + return !!(vcpu->arch.pending_exceptions); +} + +int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) +{ + return 1; +} + +int kvm_arch_hardware_enable(void *garbage) +{ + return 0; +} + +void kvm_arch_hardware_disable(void *garbage) +{ +} + +int kvm_arch_hardware_setup(void) +{ + return 0; +} + +void kvm_arch_hardware_unsetup(void) +{ +} + +void kvm_arch_check_processor_compat(void *rtn) +{ + int *r = (int *)rtn; + *r = 0; + return; +} + +static void kvm_mips_init_tlbs(struct kvm *kvm) +{ + unsigned long wired; + + /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */ + wired = read_c0_wired(); + write_c0_wired(wired + 1); + mtc0_tlbw_hazard(); + kvm->arch.commpage_tlb = wired; + + kvm_debug("[%d] commpage TLB: %d\n", smp_processor_id(), + kvm->arch.commpage_tlb); +} + +static void kvm_mips_init_vm_percpu(void *arg) +{ + struct kvm *kvm = (struct kvm *)arg; + + kvm_mips_init_tlbs(kvm); + kvm_mips_callbacks->vm_init(kvm); + +} + +int kvm_arch_init_vm(struct kvm *kvm, 
unsigned long type) +{ + if (atomic_inc_return(&kvm_mips_instance) == 1) { + kvm_info("%s: 1st KVM instance, setup host TLB parameters\n", + __func__); + on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); + } + + + return 0; +} + +void kvm_mips_free_vcpus(struct kvm *kvm) +{ + unsigned int i; + struct kvm_vcpu *vcpu; + + /* Put the pages we reserved for the guest pmap */ + for (i = 0; i < kvm->arch.guest_pmap_npages; i++) { + if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE) + kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]); + } + + if (kvm->arch.guest_pmap) + kfree(kvm->arch.guest_pmap); + + kvm_for_each_vcpu(i, vcpu, kvm) { + kvm_arch_vcpu_free(vcpu); + } + + mutex_lock(&kvm->lock); + + for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) + kvm->vcpus[i] = NULL; + + atomic_set(&kvm->online_vcpus, 0); + + mutex_unlock(&kvm->lock); +} + +void kvm_arch_sync_events(struct kvm *kvm) +{ +} + +static void kvm_mips_uninit_tlbs(void *arg) +{ + /* Restore wired count */ + write_c0_wired(0); + mtc0_tlbw_hazard(); + /* Clear out all the TLBs */ + kvm_local_flush_tlb_all(); +} + +void kvm_arch_destroy_vm(struct kvm *kvm) +{ + kvm_mips_free_vcpus(kvm); + + /* If this is the last instance, restore wired count */ + if (atomic_dec_return(&kvm_mips_instance) == 0) { + kvm_info("%s: last KVM instance, restoring TLB parameters\n", + __func__); + on_each_cpu(kvm_mips_uninit_tlbs, NULL, 1); + } +} + +long +kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + return -EINVAL; +} + +void kvm_arch_free_memslot(struct kvm_memory_slot *free, + struct kvm_memory_slot *dont) +{ +} + +int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) +{ + return 0; +} + +int kvm_arch_prepare_memory_region(struct kvm *kvm, + struct kvm_memory_slot *memslot, + struct kvm_userspace_memory_region *mem, + enum kvm_mr_change change) +{ + return 0; +} + +void kvm_arch_commit_memory_region(struct kvm *kvm, + struct kvm_userspace_memory_region *mem, + const struct kvm_memory_slot *old, + enum kvm_mr_change change) +{ + unsigned long npages = 0; + int i, err = 0; + + kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", + __func__, kvm, mem->slot, mem->guest_phys_addr, + mem->memory_size, mem->userspace_addr); + + /* Setup Guest PMAP table */ + if (!kvm->arch.guest_pmap) { + if (mem->slot == 0) + npages = mem->memory_size >> PAGE_SHIFT; + + if (npages) { + kvm->arch.guest_pmap_npages = npages; + kvm->arch.guest_pmap = + kzalloc(npages * sizeof(unsigned long), GFP_KERNEL); + + if (!kvm->arch.guest_pmap) { + kvm_err("Failed to allocate guest PMAP"); + err = -ENOMEM; + goto out; + } + + kvm_info + ("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", + npages, kvm->arch.guest_pmap); + + /* Now setup the page table */ + for (i = 0; i < npages; i++) { + kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; + } + } + } +out: + return; +} + +void kvm_arch_flush_shadow_all(struct kvm *kvm) +{ +} + +void kvm_arch_flush_shadow_memslot(struct kvm *kvm, + struct kvm_memory_slot *slot) +{ +} + +void kvm_arch_flush_shadow(struct kvm *kvm) +{ +} + +struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) +{ + extern char mips32_exception[], mips32_exceptionEnd[]; + extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; + int err, size, offset; + void *gebase; + int i; + + struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); + + if (!vcpu) { + err = -ENOMEM; + goto out; + } + + err = kvm_vcpu_init(vcpu, kvm, id); + + if (err) + goto out_free_cpu; + + 
kvm_info("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); + + /* Allocate space for host mode exception handlers that handle + * guest mode exits + */ + if (cpu_has_veic || cpu_has_vint) { + size = 0x200 + VECTORSPACING * 64; + } else { + size = 0x200; + } + + /* Save Linux EBASE */ + vcpu->arch.host_ebase = (void *)read_c0_ebase(); + + gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); + + if (!gebase) { + err = -ENOMEM; + goto out_free_cpu; + } + kvm_info("Allocated %d bytes for KVM Exception Handlers @ %p\n", + ALIGN(size, PAGE_SIZE), gebase); + + /* Save new ebase */ + vcpu->arch.guest_ebase = gebase; + + /* Copy L1 Guest Exception handler to correct offset */ + + /* TLB Refill, EXL = 0 */ + memcpy(gebase, mips32_exception, + mips32_exceptionEnd - mips32_exception); + + /* General Exception Entry point */ + memcpy(gebase + 0x180, mips32_exception, + mips32_exceptionEnd - mips32_exception); + + /* For vectored interrupts poke the exception code @ all offsets 0-7 */ + for (i = 0; i < 8; i++) { + kvm_debug("L1 Vectored handler @ %p\n", + gebase + 0x200 + (i * VECTORSPACING)); + memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception, + mips32_exceptionEnd - mips32_exception); + } + + /* General handler, relocate to unmapped space for sanity's sake */ + offset = 0x2000; + kvm_info("Installing KVM Exception handlers @ %p, %#x bytes\n", + gebase + offset, + mips32_GuestExceptionEnd - mips32_GuestException); + + memcpy(gebase + offset, mips32_GuestException, + mips32_GuestExceptionEnd - mips32_GuestException); + + /* Invalidate the icache for these ranges */ + mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE)); + + /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */ + vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); + + if (!vcpu->arch.kseg0_commpage) { + err = -ENOMEM; + goto out_free_gebase; + } + + kvm_info("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); + kvm_mips_commpage_init(vcpu); + + /* Init */ + vcpu->arch.last_sched_cpu = -1; + + /* Start off the timer */ + kvm_mips_emulate_count(vcpu); + + return vcpu; + +out_free_gebase: + kfree(gebase); + +out_free_cpu: + kfree(vcpu); + +out: + return ERR_PTR(err); +} + +void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) +{ + hrtimer_cancel(&vcpu->arch.comparecount_timer); + + kvm_vcpu_uninit(vcpu); + + kvm_mips_dump_stats(vcpu); + + if (vcpu->arch.guest_ebase) + kfree(vcpu->arch.guest_ebase); + + if (vcpu->arch.kseg0_commpage) + kfree(vcpu->arch.kseg0_commpage); + +} + +void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) +{ + kvm_arch_vcpu_free(vcpu); +} + +int +kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, + struct kvm_guest_debug *dbg) +{ + return -EINVAL; +} + +int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + int r = 0; + sigset_t sigsaved; + + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); + + if (vcpu->mmio_needed) { + if (!vcpu->mmio_is_write) + kvm_mips_complete_mmio_load(vcpu, run); + vcpu->mmio_needed = 0; + } + + /* Check if we have any exceptions/interrupts pending */ + kvm_mips_deliver_interrupts(vcpu, + kvm_read_c0_guest_cause(vcpu->arch.cop0)); + + local_irq_disable(); + kvm_guest_enter(); + + r = __kvm_mips_vcpu_run(run, vcpu); + + kvm_guest_exit(); + local_irq_enable(); + + if (vcpu->sigset_active) + sigprocmask(SIG_SETMASK, &sigsaved, NULL); + + return r; +} + +int +kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) +{ + int 
intr = (int)irq->irq; + struct kvm_vcpu *dvcpu = NULL; + + if (intr == 3 || intr == -3 || intr == 4 || intr == -4) + kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, + (int)intr); + + if (irq->cpu == -1) + dvcpu = vcpu; + else + dvcpu = vcpu->kvm->vcpus[irq->cpu]; + + if (intr == 2 || intr == 3 || intr == 4) { + kvm_mips_callbacks->queue_io_int(dvcpu, irq); + + } else if (intr == -2 || intr == -3 || intr == -4) { + kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); + } else { + kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, + irq->cpu, irq->irq); + return -EINVAL; + } + + dvcpu->arch.wait = 0; + + if (waitqueue_active(&dvcpu->wq)) { + wake_up_interruptible(&dvcpu->wq); + } + + return 0; +} + +int +kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -EINVAL; +} + +int +kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, + struct kvm_mp_state *mp_state) +{ + return -EINVAL; +} + +long +kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + struct kvm_vcpu *vcpu = filp->private_data; + void __user *argp = (void __user *)arg; + long r; + int intr; + + switch (ioctl) { + case KVM_NMI: + /* Treat the NMI as a CPU reset */ + r = kvm_mips_reset_vcpu(vcpu); + break; + case KVM_INTERRUPT: + { + struct kvm_mips_interrupt irq; + r = -EFAULT; + if (copy_from_user(&irq, argp, sizeof(irq))) + goto out; + + intr = (int)irq.irq; + + kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, + irq.irq); + + r = kvm_vcpu_ioctl_interrupt(vcpu, &irq); + break; + } + default: + r = -EINVAL; + } + +out: + return r; +} + +/* + * Get (and clear) the dirty memory log for a memory slot. + */ +int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) +{ + struct kvm_memory_slot *memslot; + unsigned long ga, ga_end; + int is_dirty = 0; + int r; + unsigned long n; + + mutex_lock(&kvm->slots_lock); + + r = kvm_get_dirty_log(kvm, log, &is_dirty); + if (r) + goto out; + + /* If nothing is dirty, don't bother messing with page tables. 
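kvm_get_dirty_log() has already copied the log out to user space, so all that is left here is to clear the slot's dirty bitmap.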
*/ + if (is_dirty) { + memslot = &kvm->memslots->memslots[log->slot]; + + ga = memslot->base_gfn << PAGE_SHIFT; + ga_end = ga + (memslot->npages << PAGE_SHIFT); + + printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, + ga_end); + + n = kvm_dirty_bitmap_bytes(memslot); + memset(memslot->dirty_bitmap, 0, n); + } + + r = 0; +out: + mutex_unlock(&kvm->slots_lock); + return r; + +} + +long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) +{ + long r; + + switch (ioctl) { + default: + r = -EINVAL; + } + + return r; +} + +int kvm_arch_init(void *opaque) +{ + int ret; + + if (kvm_mips_callbacks) { + kvm_err("kvm: module already exists\n"); + return -EEXIST; + } + + ret = kvm_mips_emulation_init(&kvm_mips_callbacks); + + return ret; +} + +void kvm_arch_exit(void) +{ + kvm_mips_callbacks = NULL; +} + +int +kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOTSUPP; +} + +int +kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) +{ + return -ENOTSUPP; +} + +int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) +{ + return 0; +} + +int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -ENOTSUPP; +} + +int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) +{ + return -ENOTSUPP; +} + +int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) +{ + return VM_FAULT_SIGBUS; +} + +int kvm_dev_ioctl_check_extension(long ext) +{ + int r; + + switch (ext) { + case KVM_CAP_COALESCED_MMIO: + r = KVM_COALESCED_MMIO_PAGE_OFFSET; + break; + default: + r = 0; + break; + } + return r; + +} + +int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) +{ + return kvm_mips_pending_timer(vcpu); +} + +int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) +{ + int i; + struct mips_coproc *cop0; + + if (!vcpu) + return -1; + + printk("VCPU Register Dump:\n"); + printk("\tpc = 0x%08lx\n", vcpu->arch.pc);; + printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); + + for (i = 0; i < 32; i += 4) { + printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, + vcpu->arch.gprs[i], + vcpu->arch.gprs[i + 1], + vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); + } + printk("\thi: 0x%08lx\n", vcpu->arch.hi); + printk("\tlo: 0x%08lx\n", vcpu->arch.lo); + + cop0 = vcpu->arch.cop0; + printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n", + kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0)); + + printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); + + return 0; +} + +int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 0; i < 32; i++) + vcpu->arch.gprs[i] = regs->gprs[i]; + + vcpu->arch.hi = regs->hi; + vcpu->arch.lo = regs->lo; + vcpu->arch.pc = regs->pc; + + return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); +} + +int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + int i; + + for (i = 0; i < 32; i++) + regs->gprs[i] = vcpu->arch.gprs[i]; + + regs->hi = vcpu->arch.hi; + regs->lo = vcpu->arch.lo; + regs->pc = vcpu->arch.pc; + + return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); +} + +void kvm_mips_comparecount_func(unsigned long data) +{ + struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data; + + kvm_mips_callbacks->queue_timer_int(vcpu); + + vcpu->arch.wait = 0; + if (waitqueue_active(&vcpu->wq)) { + wake_up_interruptible(&vcpu->wq); + } +} + +/* + * low level hrtimer wake routine. 
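+ * It queues the guest timer interrupt, wakes a waiting vcpu, and re-arms
+ * itself 10ms ahead via hrtimer_forward_now().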
+ */ +enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) +{ + struct kvm_vcpu *vcpu; + + vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); + kvm_mips_comparecount_func((unsigned long) vcpu); + hrtimer_forward_now(&vcpu->arch.comparecount_timer, + ktime_set(0, MS_TO_NS(10))); + return HRTIMER_RESTART; +} + +int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) +{ + kvm_mips_callbacks->vcpu_init(vcpu); + hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_REL); + vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; + kvm_mips_init_shadow_tlb(vcpu); + return 0; +} + +void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) +{ + return; +} + +int +kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) +{ + return 0; +} + +/* Initial guest state */ +int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) +{ + return kvm_mips_callbacks->vcpu_setup(vcpu); +} + +static +void kvm_mips_set_c0_status(void) +{ + uint32_t status = read_c0_status(); + + if (cpu_has_fpu) + status |= (ST0_CU1); + + if (cpu_has_dsp) + status |= (ST0_MX); + + write_c0_status(status); + ehb(); +} + +/* + * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) + */ +int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + uint32_t cause = vcpu->arch.host_cp0_cause; + uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + /* Set a default exit reason */ + run->exit_reason = KVM_EXIT_UNKNOWN; + run->ready_for_interrupt_injection = 1; + + /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */ + kvm_mips_set_c0_status(); + + local_irq_enable(); + + kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", + cause, opc, run, vcpu); + + /* Do a privilege check, if in UM most of these exit conditions end up + * causing an exception to be delivered to the Guest Kernel + */ + er = kvm_mips_check_privilege(cause, opc, run, vcpu); + if (er == EMULATE_PRIV_FAIL) { + goto skip_emul; + } else if (er == EMULATE_FAIL) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + goto skip_emul; + } + + switch (exccode) { + case T_INT: + kvm_debug("[%d]T_INT @ %p\n", vcpu->vcpu_id, opc); + + ++vcpu->stat.int_exits; + trace_kvm_exit(vcpu, INT_EXITS); + + if (need_resched()) { + cond_resched(); + } + + ret = RESUME_GUEST; + break; + + case T_COP_UNUSABLE: + kvm_debug("T_COP_UNUSABLE: @ PC: %p\n", opc); + + ++vcpu->stat.cop_unusable_exits; + trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); + ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); + /* XXXKYMA: Might need to return to user space */ + if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) { + ret = RESUME_HOST; + } + break; + + case T_TLB_MOD: + ++vcpu->stat.tlbmod_exits; + trace_kvm_exit(vcpu, TLBMOD_EXITS); + ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); + break; + + case T_TLB_ST_MISS: + kvm_debug + ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", + cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, + badvaddr); + + ++vcpu->stat.tlbmiss_st_exits; + trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); + ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); + break; + + case T_TLB_LD_MISS: + kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + + 
++vcpu->stat.tlbmiss_ld_exits; + trace_kvm_exit(vcpu, TLBMISS_LD_EXITS); + ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); + break; + + case T_ADDR_ERR_ST: + ++vcpu->stat.addrerr_st_exits; + trace_kvm_exit(vcpu, ADDRERR_ST_EXITS); + ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); + break; + + case T_ADDR_ERR_LD: + ++vcpu->stat.addrerr_ld_exits; + trace_kvm_exit(vcpu, ADDRERR_LD_EXITS); + ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); + break; + + case T_SYSCALL: + ++vcpu->stat.syscall_exits; + trace_kvm_exit(vcpu, SYSCALL_EXITS); + ret = kvm_mips_callbacks->handle_syscall(vcpu); + break; + + case T_RES_INST: + ++vcpu->stat.resvd_inst_exits; + trace_kvm_exit(vcpu, RESVD_INST_EXITS); + ret = kvm_mips_callbacks->handle_res_inst(vcpu); + break; + + case T_BREAK: + ++vcpu->stat.break_inst_exits; + trace_kvm_exit(vcpu, BREAK_INST_EXITS); + ret = kvm_mips_callbacks->handle_break(vcpu); + break; + + default: + kvm_err + ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", + exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, + kvm_read_c0_guest_status(vcpu->arch.cop0)); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + break; + + } + +skip_emul: + local_irq_disable(); + + if (er == EMULATE_DONE && !(ret & RESUME_HOST)) + kvm_mips_deliver_interrupts(vcpu, cause); + + if (!(ret & RESUME_HOST)) { + /* Only check for signals if not already exiting to userspace */ + if (signal_pending(current)) { + run->exit_reason = KVM_EXIT_INTR; + ret = (-EINTR << 2) | RESUME_HOST; + ++vcpu->stat.signal_exits; + trace_kvm_exit(vcpu, SIGNAL_EXITS); + } + } + + return ret; +} + +int __init kvm_mips_init(void) +{ + int ret; + + ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); + + if (ret) + return ret; + + /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs. + * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c) + * to avoid the possibility of double faulting. The issue is that the TLB code + * references routines that are part of the the KVM module, + * which are only available once the module is loaded. + */ + kvm_mips_gfn_to_pfn = gfn_to_pfn; + kvm_mips_release_pfn_clean = kvm_release_pfn_clean; + kvm_mips_is_error_pfn = is_error_pfn; + + pr_info("KVM/MIPS Initialized\n"); + return 0; +} + +void __exit kvm_mips_exit(void) +{ + kvm_exit(); + + kvm_mips_gfn_to_pfn = NULL; + kvm_mips_release_pfn_clean = NULL; + kvm_mips_is_error_pfn = NULL; + + pr_info("KVM/MIPS unloaded\n"); +} + +module_init(kvm_mips_init); +module_exit(kvm_mips_exit); + +EXPORT_TRACEPOINT_SYMBOL(kvm_exit); diff --git a/trunk/arch/mips/kvm/kvm_mips_comm.h b/trunk/arch/mips/kvm/kvm_mips_comm.h new file mode 100644 index 000000000000..a4a8c85cc8f7 --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips_comm.h @@ -0,0 +1,23 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: commpage: mapped into get kernel space +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal +*/ + +#ifndef __KVM_MIPS_COMMPAGE_H__ +#define __KVM_MIPS_COMMPAGE_H__ + +struct kvm_mips_commpage { + struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */ +}; + +#define KVM_MIPS_COMM_EIDI_OFFSET 0x0 + +extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); + +#endif /* __KVM_MIPS_COMMPAGE_H__ */ diff --git a/trunk/arch/mips/kvm/kvm_mips_commpage.c b/trunk/arch/mips/kvm/kvm_mips_commpage.c new file mode 100644 index 000000000000..3873b1ecc40f --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips_commpage.c @@ -0,0 +1,37 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* commpage, currently used for Virtual COP0 registers. +* Mapped into the guest kernel @ 0x0. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "kvm_mips_comm.h" + +void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) +{ + struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; + memset(page, 0, sizeof(struct kvm_mips_commpage)); + + /* Specific init values for fields */ + vcpu->arch.cop0 = &page->cop0; + memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc)); + + return; +} diff --git a/trunk/arch/mips/kvm/kvm_mips_dyntrans.c b/trunk/arch/mips/kvm/kvm_mips_dyntrans.c new file mode 100644 index 000000000000..96528e2d1ea6 --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips_dyntrans.c @@ -0,0 +1,149 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Binary Patching for privileged instructions, reduces traps. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +#include +#include +#include +#include +#include +#include +#include + +#include "kvm_mips_comm.h" + +#define SYNCI_TEMPLATE 0x041f0000 +#define SYNCI_BASE(x) (((x) >> 21) & 0x1f) +#define SYNCI_OFFSET ((x) & 0xffff) + +#define LW_TEMPLATE 0x8c000000 +#define CLEAR_TEMPLATE 0x00000020 +#define SW_TEMPLATE 0xac000000 + +int +kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu) +{ + int result = 0; + unsigned long kseg0_opc; + uint32_t synci_inst = 0x0; + + /* Replace the CACHE instruction, with a NOP */ + kseg0_opc = + CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa + (vcpu, (unsigned long) opc)); + memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); + mips32_SyncICache(kseg0_opc, 32); + + return result; +} + +/* + * Address based CACHE instructions are transformed into synci(s). 
A little heavy + * for just D-cache invalidates, but avoids an expensive trap + */ +int +kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, + struct kvm_vcpu *vcpu) +{ + int result = 0; + unsigned long kseg0_opc; + uint32_t synci_inst = SYNCI_TEMPLATE, base, offset; + + base = (inst >> 21) & 0x1f; + offset = inst & 0xffff; + synci_inst |= (base << 21); + synci_inst |= offset; + + kseg0_opc = + CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa + (vcpu, (unsigned long) opc)); + memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t)); + mips32_SyncICache(kseg0_opc, 32); + + return result; +} + +int +kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) +{ + int32_t rt, rd, sel; + uint32_t mfc0_inst; + unsigned long kseg0_opc, flags; + + rt = (inst >> 16) & 0x1f; + rd = (inst >> 11) & 0x1f; + sel = inst & 0x7; + + if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { + mfc0_inst = CLEAR_TEMPLATE; + mfc0_inst |= ((rt & 0x1f) << 16); + } else { + mfc0_inst = LW_TEMPLATE; + mfc0_inst |= ((rt & 0x1f) << 16); + mfc0_inst |= + offsetof(struct mips_coproc, + reg[rd][sel]) + offsetof(struct kvm_mips_commpage, + cop0); + } + + if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { + kseg0_opc = + CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa + (vcpu, (unsigned long) opc)); + memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t)); + mips32_SyncICache(kseg0_opc, 32); + } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { + local_irq_save(flags); + memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t)); + mips32_SyncICache((unsigned long) opc, 32); + local_irq_restore(flags); + } else { + kvm_err("%s: Invalid address: %p\n", __func__, opc); + return -EFAULT; + } + + return 0; +} + +int +kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) +{ + int32_t rt, rd, sel; + uint32_t mtc0_inst = SW_TEMPLATE; + unsigned long kseg0_opc, flags; + + rt = (inst >> 16) & 0x1f; + rd = (inst >> 11) & 0x1f; + sel = inst & 0x7; + + mtc0_inst |= ((rt & 0x1f) << 16); + mtc0_inst |= + offsetof(struct mips_coproc, + reg[rd][sel]) + offsetof(struct kvm_mips_commpage, cop0); + + if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { + kseg0_opc = + CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa + (vcpu, (unsigned long) opc)); + memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t)); + mips32_SyncICache(kseg0_opc, 32); + } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { + local_irq_save(flags); + memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t)); + mips32_SyncICache((unsigned long) opc, 32); + local_irq_restore(flags); + } else { + kvm_err("%s: Invalid address: %p\n", __func__, opc); + return -EFAULT; + } + + return 0; +} diff --git a/trunk/arch/mips/kvm/kvm_mips_emul.c b/trunk/arch/mips/kvm/kvm_mips_emul.c new file mode 100644 index 000000000000..4b6274b47f33 --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips_emul.c @@ -0,0 +1,1829 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Instruction/Exception emulation +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal +*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#undef CONFIG_MIPS_MT +#include +#define CONFIG_MIPS_MT + +#include "kvm_mips_opcode.h" +#include "kvm_mips_int.h" +#include "kvm_mips_comm.h" + +#include "trace.h" + +/* + * Compute the return address and do emulate branch simulation, if required. + * This function should be called only in branch delay slot active. + */ +unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, + unsigned long instpc) +{ + unsigned int dspcontrol; + union mips_instruction insn; + struct kvm_vcpu_arch *arch = &vcpu->arch; + long epc = instpc; + long nextpc = KVM_INVALID_INST; + + if (epc & 3) + goto unaligned; + + /* + * Read the instruction + */ + insn.word = kvm_get_inst((uint32_t *) epc, vcpu); + + if (insn.word == KVM_INVALID_INST) + return KVM_INVALID_INST; + + switch (insn.i_format.opcode) { + /* + * jr and jalr are in r_format format. + */ + case spec_op: + switch (insn.r_format.func) { + case jalr_op: + arch->gprs[insn.r_format.rd] = epc + 8; + /* Fall through */ + case jr_op: + nextpc = arch->gprs[insn.r_format.rs]; + break; + } + break; + + /* + * This group contains: + * bltz_op, bgez_op, bltzl_op, bgezl_op, + * bltzal_op, bgezal_op, bltzall_op, bgezall_op. + */ + case bcond_op: + switch (insn.i_format.rt) { + case bltz_op: + case bltzl_op: + if ((long)arch->gprs[insn.i_format.rs] < 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bgez_op: + case bgezl_op: + if ((long)arch->gprs[insn.i_format.rs] >= 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bltzal_op: + case bltzall_op: + arch->gprs[31] = epc + 8; + if ((long)arch->gprs[insn.i_format.rs] < 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bgezal_op: + case bgezall_op: + arch->gprs[31] = epc + 8; + if ((long)arch->gprs[insn.i_format.rs] >= 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + case bposge32_op: + if (!cpu_has_dsp) + goto sigill; + + dspcontrol = rddsp(0x01); + + if (dspcontrol >= 32) { + epc = epc + 4 + (insn.i_format.simmediate << 2); + } else + epc += 8; + nextpc = epc; + break; + } + break; + + /* + * These are unconditional and in j_format. + */ + case jal_op: + arch->gprs[31] = instpc + 8; + case j_op: + epc += 4; + epc >>= 28; + epc <<= 28; + epc |= (insn.j_format.target << 2); + nextpc = epc; + break; + + /* + * These are conditional and in i_format. 
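+	 * A taken branch resumes at PC + 4 + (sign-extended offset << 2),
+	 * a not-taken branch at PC + 8.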
+ */ + case beq_op: + case beql_op: + if (arch->gprs[insn.i_format.rs] == + arch->gprs[insn.i_format.rt]) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bne_op: + case bnel_op: + if (arch->gprs[insn.i_format.rs] != + arch->gprs[insn.i_format.rt]) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case blez_op: /* not really i_format */ + case blezl_op: + /* rt field assumed to be zero */ + if ((long)arch->gprs[insn.i_format.rs] <= 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + case bgtz_op: + case bgtzl_op: + /* rt field assumed to be zero */ + if ((long)arch->gprs[insn.i_format.rs] > 0) + epc = epc + 4 + (insn.i_format.simmediate << 2); + else + epc += 8; + nextpc = epc; + break; + + /* + * And now the FPA/cp1 branch instructions. + */ + case cop1_op: + printk("%s: unsupported cop1_op\n", __func__); + break; + } + + return nextpc; + +unaligned: + printk("%s: unaligned epc\n", __func__); + return nextpc; + +sigill: + printk("%s: DSP branch but not DSP ASE\n", __func__); + return nextpc; +} + +enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) +{ + unsigned long branch_pc; + enum emulation_result er = EMULATE_DONE; + + if (cause & CAUSEF_BD) { + branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc); + if (branch_pc == KVM_INVALID_INST) { + er = EMULATE_FAIL; + } else { + vcpu->arch.pc = branch_pc; + kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc); + } + } else + vcpu->arch.pc += 4; + + kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc); + + return er; +} + +/* Everytime the compare register is written to, we need to decide when to fire + * the timer that represents timer ticks to the GUEST. + * + */ +enum emulation_result kvm_mips_emulate_count(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + + /* If COUNT is enabled */ + if (!(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC)) { + hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); + hrtimer_start(&vcpu->arch.comparecount_timer, + ktime_set(0, MS_TO_NS(10)), HRTIMER_MODE_REL); + } else { + hrtimer_try_to_cancel(&vcpu->arch.comparecount_timer); + } + + return er; +} + +enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + + if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { + kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, + kvm_read_c0_guest_epc(cop0)); + kvm_clear_c0_guest_status(cop0, ST0_EXL); + vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); + + } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { + kvm_clear_c0_guest_status(cop0, ST0_ERL); + vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); + } else { + printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", + vcpu->arch.pc); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + + kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, + vcpu->arch.pending_exceptions); + + ++vcpu->stat.wait_exits; + trace_kvm_exit(vcpu, WAIT_EXITS); + if (!vcpu->arch.pending_exceptions) { + vcpu->arch.wait = 1; + kvm_vcpu_block(vcpu); + + /* We we are runnable, then definitely go off to user space to check if any + * I/O interrupts are pending. 
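+		 * A pending KVM_REQ_UNHALT is turned into a
+		 * KVM_EXIT_IRQ_WINDOW_OPEN exit for exactly that purpose.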
+ */ + if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { + clear_bit(KVM_REQ_UNHALT, &vcpu->requests); + vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; + } + } + + return er; +} + +/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch + * this, if things ever change + */ +enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_FAIL; + uint32_t pc = vcpu->arch.pc; + + printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); + return er; +} + +/* Write Guest TLB Entry @ Index */ +enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + int index = kvm_read_c0_guest_index(cop0); + enum emulation_result er = EMULATE_DONE; + struct kvm_mips_tlb *tlb = NULL; + uint32_t pc = vcpu->arch.pc; + + if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { + printk("%s: illegal index: %d\n", __func__, index); + printk + ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), + kvm_read_c0_guest_entrylo1(cop0), + kvm_read_c0_guest_pagemask(cop0)); + index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; + } + + tlb = &vcpu->arch.guest_tlb[index]; +#if 1 + /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ + kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); +#endif + + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); + tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); + tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); + + kvm_debug + ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), + kvm_read_c0_guest_pagemask(cop0)); + + return er; +} + +/* Write Guest TLB Entry @ Random Index */ +enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + struct kvm_mips_tlb *tlb = NULL; + uint32_t pc = vcpu->arch.pc; + int index; + +#if 1 + get_random_bytes(&index, sizeof(index)); + index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); +#else + index = jiffies % KVM_MIPS_GUEST_TLB_SIZE; +#endif + + if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { + printk("%s: illegal index: %d\n", __func__, index); + return EMULATE_FAIL; + } + + tlb = &vcpu->arch.guest_tlb[index]; + +#if 1 + /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ + kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); +#endif + + tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); + tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); + tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); + tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); + + kvm_debug + ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", + pc, index, kvm_read_c0_guest_entryhi(cop0), + kvm_read_c0_guest_entrylo0(cop0), + kvm_read_c0_guest_entrylo1(cop0)); + + return er; +} + +enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + long entryhi = kvm_read_c0_guest_entryhi(cop0); + enum emulation_result er = EMULATE_DONE; + uint32_t pc = vcpu->arch.pc; + int index = -1; + + index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); + + kvm_write_c0_guest_index(cop0, 
index); + + kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, + index); + + return er; +} + +enum emulation_result +kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + enum emulation_result er = EMULATE_DONE; + int32_t rt, rd, copz, sel, co_bit, op; + uint32_t pc = vcpu->arch.pc; + unsigned long curr_pc; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) { + return er; + } + + copz = (inst >> 21) & 0x1f; + rt = (inst >> 16) & 0x1f; + rd = (inst >> 11) & 0x1f; + sel = inst & 0x7; + co_bit = (inst >> 25) & 1; + + /* Verify that the register is valid */ + if (rd > MIPS_CP0_DESAVE) { + printk("Invalid rd: %d\n", rd); + er = EMULATE_FAIL; + goto done; + } + + if (co_bit) { + op = (inst) & 0xff; + + switch (op) { + case tlbr_op: /* Read indexed TLB entry */ + er = kvm_mips_emul_tlbr(vcpu); + break; + case tlbwi_op: /* Write indexed */ + er = kvm_mips_emul_tlbwi(vcpu); + break; + case tlbwr_op: /* Write random */ + er = kvm_mips_emul_tlbwr(vcpu); + break; + case tlbp_op: /* TLB Probe */ + er = kvm_mips_emul_tlbp(vcpu); + break; + case rfe_op: + printk("!!!COP0_RFE!!!\n"); + break; + case eret_op: + er = kvm_mips_emul_eret(vcpu); + goto dont_update_pc; + break; + case wait_op: + er = kvm_mips_emul_wait(vcpu); + break; + } + } else { + switch (copz) { + case mfc_op: +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS + cop0->stat[rd][sel]++; +#endif + /* Get reg */ + if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { + /* XXXKYMA: Run the Guest count register @ 1/4 the rate of the host */ + vcpu->arch.gprs[rt] = (read_c0_count() >> 2); + } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { + vcpu->arch.gprs[rt] = 0x0; +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_mfc0(inst, opc, vcpu); +#endif + } + else { + vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_mfc0(inst, opc, vcpu); +#endif + } + + kvm_debug + ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n", + pc, rd, sel, rt, vcpu->arch.gprs[rt]); + + break; + + case dmfc_op: + vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; + break; + + case mtc_op: +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS + cop0->stat[rd][sel]++; +#endif + if ((rd == MIPS_CP0_TLB_INDEX) + && (vcpu->arch.gprs[rt] >= + KVM_MIPS_GUEST_TLB_SIZE)) { + printk("Invalid TLB Index: %ld", + vcpu->arch.gprs[rt]); + er = EMULATE_FAIL; + break; + } +#define C0_EBASE_CORE_MASK 0xff + if ((rd == MIPS_CP0_PRID) && (sel == 1)) { + /* Preserve CORE number */ + kvm_change_c0_guest_ebase(cop0, + ~(C0_EBASE_CORE_MASK), + vcpu->arch.gprs[rt]); + printk("MTCz, cop0->reg[EBASE]: %#lx\n", + kvm_read_c0_guest_ebase(cop0)); + } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { + uint32_t nasid = + vcpu->arch.gprs[rt] & ASID_MASK; + if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) + && + ((kvm_read_c0_guest_entryhi(cop0) & + ASID_MASK) != nasid)) { + + kvm_debug + ("MTCz, change ASID from %#lx to %#lx\n", + kvm_read_c0_guest_entryhi(cop0) & + ASID_MASK, + vcpu->arch.gprs[rt] & ASID_MASK); + + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + } + kvm_write_c0_guest_entryhi(cop0, + vcpu->arch.gprs[rt]); + } + /* Are we writing to COUNT */ + else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { + /* Linux doesn't seem to write into COUNT, we throw an error + * if we notice a write to COUNT + */ + /*er 
= EMULATE_FAIL; */ + goto done; + } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { + kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n", + pc, kvm_read_c0_guest_compare(cop0), + vcpu->arch.gprs[rt]); + + /* If we are writing to COMPARE */ + /* Clear pending timer interrupt, if any */ + kvm_mips_callbacks->dequeue_timer_int(vcpu); + kvm_write_c0_guest_compare(cop0, + vcpu->arch.gprs[rt]); + } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { + kvm_write_c0_guest_status(cop0, + vcpu->arch.gprs[rt]); + /* Make sure that CU1 and NMI bits are never set */ + kvm_clear_c0_guest_status(cop0, + (ST0_CU1 | ST0_NMI)); + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_mtc0(inst, opc, vcpu); +#endif + } else { + cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_mtc0(inst, opc, vcpu); +#endif + } + + kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc, + rd, sel, cop0->reg[rd][sel]); + break; + + case dmtc_op: + printk + ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", + vcpu->arch.pc, rt, rd, sel); + er = EMULATE_FAIL; + break; + + case mfmcz_op: +#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS + cop0->stat[MIPS_CP0_STATUS][0]++; +#endif + if (rt != 0) { + vcpu->arch.gprs[rt] = + kvm_read_c0_guest_status(cop0); + } + /* EI */ + if (inst & 0x20) { + kvm_debug("[%#lx] mfmcz_op: EI\n", + vcpu->arch.pc); + kvm_set_c0_guest_status(cop0, ST0_IE); + } else { + kvm_debug("[%#lx] mfmcz_op: DI\n", + vcpu->arch.pc); + kvm_clear_c0_guest_status(cop0, ST0_IE); + } + + break; + + case wrpgpr_op: + { + uint32_t css = + cop0->reg[MIPS_CP0_STATUS][2] & 0xf; + uint32_t pss = + (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; + /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */ + if (css || pss) { + er = EMULATE_FAIL; + break; + } + kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, + vcpu->arch.gprs[rt]); + vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; + } + break; + default: + printk + ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", + vcpu->arch.pc, copz); + er = EMULATE_FAIL; + break; + } + } + +done: + /* + * Rollback PC only if emulation was unsuccessful + */ + if (er == EMULATE_FAIL) { + vcpu->arch.pc = curr_pc; + } + +dont_update_pc: + /* + * This is for special instructions whose emulation + * updates the PC, so do not overwrite the PC under + * any circumstances + */ + + return er; +} + +enum emulation_result +kvm_mips_emulate_store(uint32_t inst, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DO_MMIO; + int32_t op, base, rt, offset; + uint32_t bytes; + void *data = run->mmio.data; + unsigned long curr_pc; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + + rt = (inst >> 16) & 0x1f; + base = (inst >> 21) & 0x1f; + offset = inst & 0xffff; + op = (inst >> 26) & 0x3f; + + switch (op) { + case sb_op: + bytes = 1; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 
+ host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + run->mmio.len = bytes; + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + *(u8 *) data = vcpu->arch.gprs[rt]; + kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt], + *(uint8_t *) data); + + break; + + case sw_op: + bytes = 4; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. + host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + *(uint32_t *) data = vcpu->arch.gprs[rt]; + + kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(uint32_t *) data); + break; + + case sh_op: + bytes = 2; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. + host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 1; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 1; + *(uint16_t *) data = vcpu->arch.gprs[rt]; + + kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", + vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, + vcpu->arch.gprs[rt], *(uint32_t *) data); + break; + + default: + printk("Store not yet supported"); + er = EMULATE_FAIL; + break; + } + + /* + * Rollback PC if emulation was unsuccessful + */ + if (er == EMULATE_FAIL) { + vcpu->arch.pc = curr_pc; + } + + return er; +} + +enum emulation_result +kvm_mips_emulate_load(uint32_t inst, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DO_MMIO; + int32_t op, base, rt, offset; + uint32_t bytes; + + rt = (inst >> 16) & 0x1f; + base = (inst >> 21) & 0x1f; + offset = inst & 0xffff; + op = (inst >> 26) & 0x3f; + + vcpu->arch.pending_load_cause = cause; + vcpu->arch.io_gpr = rt; + + switch (op) { + case lw_op: + bytes = 4; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + er = EMULATE_FAIL; + break; + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. + host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 0; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 0; + break; + + case lh_op: + case lhu_op: + bytes = 2; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + er = EMULATE_FAIL; + break; + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. 
+ host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 0; + vcpu->mmio_needed = 1; + vcpu->mmio_is_write = 0; + + if (op == lh_op) + vcpu->mmio_needed = 2; + else + vcpu->mmio_needed = 1; + + break; + + case lbu_op: + case lb_op: + bytes = 1; + if (bytes > sizeof(run->mmio.data)) { + kvm_err("%s: bad MMIO length: %d\n", __func__, + run->mmio.len); + er = EMULATE_FAIL; + break; + } + run->mmio.phys_addr = + kvm_mips_callbacks->gva_to_gpa(vcpu->arch. + host_cp0_badvaddr); + if (run->mmio.phys_addr == KVM_INVALID_ADDR) { + er = EMULATE_FAIL; + break; + } + + run->mmio.len = bytes; + run->mmio.is_write = 0; + vcpu->mmio_is_write = 0; + + if (op == lb_op) + vcpu->mmio_needed = 2; + else + vcpu->mmio_needed = 1; + + break; + + default: + printk("Load not yet supported"); + er = EMULATE_FAIL; + break; + } + + return er; +} + +int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) +{ + unsigned long offset = (va & ~PAGE_MASK); + struct kvm *kvm = vcpu->kvm; + unsigned long pa; + gfn_t gfn; + pfn_t pfn; + + gfn = va >> PAGE_SHIFT; + + if (gfn >= kvm->arch.guest_pmap_npages) { + printk("%s: Invalid gfn: %#llx\n", __func__, gfn); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + return -1; + } + pfn = kvm->arch.guest_pmap[gfn]; + pa = (pfn << PAGE_SHIFT) | offset; + + printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa)); + + mips32_SyncICache(CKSEG0ADDR(pa), 32); + return 0; +} + +#define MIPS_CACHE_OP_INDEX_INV 0x0 +#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1 +#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2 +#define MIPS_CACHE_OP_IMP 0x3 +#define MIPS_CACHE_OP_HIT_INV 0x4 +#define MIPS_CACHE_OP_FILL_WB_INV 0x5 +#define MIPS_CACHE_OP_HIT_HB 0x6 +#define MIPS_CACHE_OP_FETCH_LOCK 0x7 + +#define MIPS_CACHE_ICACHE 0x0 +#define MIPS_CACHE_DCACHE 0x1 +#define MIPS_CACHE_SEC 0x3 + +enum emulation_result +kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + extern void (*r4k_blast_dcache) (void); + extern void (*r4k_blast_icache) (void); + enum emulation_result er = EMULATE_DONE; + int32_t offset, cache, op_inst, op, base; + struct kvm_vcpu_arch *arch = &vcpu->arch; + unsigned long va; + unsigned long curr_pc; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + + base = (inst >> 21) & 0x1f; + op_inst = (inst >> 16) & 0x1f; + offset = inst & 0xffff; + cache = (inst >> 16) & 0x3; + op = (inst >> 18) & 0x7; + + va = arch->gprs[base] + offset; + + kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + cache, op, base, arch->gprs[base], offset); + + /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate + * the caches entirely by stepping through all the ways/indexes + */ + if (op == MIPS_CACHE_OP_INDEX_INV) { + kvm_debug + ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, + arch->gprs[base], offset); + + if (cache == MIPS_CACHE_DCACHE) + r4k_blast_dcache(); + else if (cache == MIPS_CACHE_ICACHE) + r4k_blast_icache(); + else { + printk("%s: unsupported CACHE INDEX operation\n", + __func__); + return EMULATE_FAIL; + } + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + kvm_mips_trans_cache_index(inst, opc, vcpu); 
+#endif + goto done; + } + + preempt_disable(); + if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { + + if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) { + kvm_mips_handle_kseg0_tlb_fault(va, vcpu); + } + } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || + KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { + int index; + + /* If an entry already exists then skip */ + if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) { + goto skip_fault; + } + + /* If address not in the guest TLB, then give the guest a fault, the + * resulting handler will do the right thing + */ + index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | + (kvm_read_c0_guest_entryhi + (cop0) & ASID_MASK)); + + if (index < 0) { + vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); + vcpu->arch.host_cp0_badvaddr = va; + er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run, + vcpu); + preempt_enable(); + goto dont_update_pc; + } else { + struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; + /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ + if (!TLB_IS_VALID(*tlb, va)) { + er = kvm_mips_emulate_tlbinv_ld(cause, NULL, + run, vcpu); + preempt_enable(); + goto dont_update_pc; + } else { + /* We fault an entry from the guest tlb to the shadow host TLB */ + kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, + NULL, + NULL); + } + } + } else { + printk + ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + cache, op, base, arch->gprs[base], offset); + er = EMULATE_FAIL; + preempt_enable(); + goto dont_update_pc; + + } + +skip_fault: + /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ + if (cache == MIPS_CACHE_DCACHE + && (op == MIPS_CACHE_OP_FILL_WB_INV + || op == MIPS_CACHE_OP_HIT_INV)) { + flush_dcache_line(va); + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */ + kvm_mips_trans_cache_va(inst, opc, vcpu); +#endif + } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { + flush_dcache_line(va); + flush_icache_line(va); + +#ifdef CONFIG_KVM_MIPS_DYN_TRANS + /* Replace the CACHE instruction, with a SYNCI */ + kvm_mips_trans_cache_va(inst, opc, vcpu); +#endif + } else { + printk + ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", + cache, op, base, arch->gprs[base], offset); + er = EMULATE_FAIL; + preempt_enable(); + goto dont_update_pc; + } + + preempt_enable(); + + dont_update_pc: + /* + * Rollback PC + */ + vcpu->arch.pc = curr_pc; + done: + return er; +} + +enum emulation_result +kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + uint32_t inst; + + /* + * Fetch the instruction. 
+ */ + if (cause & CAUSEF_BD) { + opc += 1; + } + + inst = kvm_get_inst(opc, vcpu); + + switch (((union mips_instruction)inst).r_format.opcode) { + case cop0_op: + er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu); + break; + case sb_op: + case sh_op: + case sw_op: + er = kvm_mips_emulate_store(inst, cause, run, vcpu); + break; + case lb_op: + case lbu_op: + case lhu_op: + case lh_op: + case lw_op: + er = kvm_mips_emulate_load(inst, cause, run, vcpu); + break; + + case cache_op: + ++vcpu->stat.cache_exits; + trace_kvm_exit(vcpu, CACHE_EXITS); + er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu); + break; + + default: + printk("Instruction emulation not supported (%p/%#x)\n", opc, + inst); + kvm_arch_vcpu_dump_regs(vcpu); + er = EMULATE_FAIL; + break; + } + + return er; +} + +enum emulation_result +kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_SYSCALL << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + printk("Trying to deliver SYSCALL when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", + arch->pc); + + /* set pc to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x0; + + } else { + kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", + arch->pc); + + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TLB_LD_MISS << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? 
*/ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long entryhi = + (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", + arch->pc); + + /* set pc to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", + arch->pc); + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TLB_LD_MISS << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? */ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", + arch->pc); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x0; + } else { + kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", + arch->pc); + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TLB_ST_MISS << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? 
*/ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", + arch->pc); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } else { + kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", + arch->pc); + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_TLB_ST_MISS << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? */ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +/* TLBMOD: store into address matching TLB with Dirty bit off */ +enum emulation_result +kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + +#ifdef DEBUG + /* + * If address not in the guest TLB, then we are in trouble + */ + index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); + if (index < 0) { + /* XXXKYMA Invalidate and retry */ + kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr); + kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n", + __func__, entryhi); + kvm_mips_dump_guest_tlbs(vcpu); + kvm_mips_dump_host_tlbs(); + return EMULATE_FAIL; + } +#endif + + er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu); + return er; +} + +enum emulation_result +kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | + (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", + arch->pc); + + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } else { + kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", + arch->pc); + arch->pc = KVM_GUEST_KSEG0 + 0x180; + } + + kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE)); + + /* setup badvaddr, context and entryhi registers for the guest */ + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + /* XXXKYMA: is the context register used by linux??? 
*/ + kvm_write_c0_guest_entryhi(cop0, entryhi); + /* Blow away the shadow host TLBs */ + kvm_mips_flush_host_tlb(1); + + return er; +} + +enum emulation_result +kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + } + + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_COP_UNUSABLE << CAUSEB_EXCCODE)); + kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); + + return er; +} + +enum emulation_result +kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_RES_INST << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + kvm_err("Trying to deliver RI when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result +kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); + + kvm_change_c0_guest_cause(cop0, (0xff), + (T_BREAK << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + } else { + printk("Trying to deliver BP when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +/* + * ll/sc, rdhwr, sync emulation + */ + +#define OPCODE 0xfc000000 +#define BASE 0x03e00000 +#define RT 0x001f0000 +#define OFFSET 0x0000ffff +#define LL 0xc0000000 +#define SC 0xe0000000 +#define SPEC0 0x00000000 +#define SPEC3 0x7c000000 +#define RD 0x0000f800 +#define FUNC 0x0000003f +#define SYNC 0x0000000f +#define RDHWR 0x0000003b + +enum emulation_result +kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + unsigned long curr_pc; + uint32_t inst; + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, cause); + if (er == EMULATE_FAIL) + return er; + + /* + * Fetch the 
instruction. + */ + if (cause & CAUSEF_BD) + opc += 1; + + inst = kvm_get_inst(opc, vcpu); + + if (inst == KVM_INVALID_INST) { + printk("%s: Cannot get inst @ %p\n", __func__, opc); + return EMULATE_FAIL; + } + + if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) { + int rd = (inst & RD) >> 11; + int rt = (inst & RT) >> 16; + switch (rd) { + case 0: /* CPU number */ + arch->gprs[rt] = 0; + break; + case 1: /* SYNCI length */ + arch->gprs[rt] = min(current_cpu_data.dcache.linesz, + current_cpu_data.icache.linesz); + break; + case 2: /* Read count register */ + printk("RDHWR: Count register\n"); + arch->gprs[rt] = kvm_read_c0_guest_count(cop0); + break; + case 3: /* Count register resolution */ + switch (current_cpu_data.cputype) { + case CPU_20KC: + case CPU_25KF: + arch->gprs[rt] = 1; + break; + default: + arch->gprs[rt] = 2; + } + break; + case 29: +#if 1 + arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); +#else + /* UserLocal not implemented */ + er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); +#endif + break; + + default: + printk("RDHWR not supported\n"); + er = EMULATE_FAIL; + break; + } + } else { + printk("Emulate RI not supported @ %p: %#x\n", opc, inst); + er = EMULATE_FAIL; + } + + /* + * Rollback PC only if emulation was unsuccessful + */ + if (er == EMULATE_FAIL) { + vcpu->arch.pc = curr_pc; + } + return er; +} + +enum emulation_result +kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; + enum emulation_result er = EMULATE_DONE; + unsigned long curr_pc; + + if (run->mmio.len > sizeof(*gpr)) { + printk("Bad MMIO length: %d", run->mmio.len); + er = EMULATE_FAIL; + goto done; + } + + /* + * Update PC and hold onto current PC in case there is + * an error and we want to rollback the PC + */ + curr_pc = vcpu->arch.pc; + er = update_pc(vcpu, vcpu->arch.pending_load_cause); + if (er == EMULATE_FAIL) + return er; + + switch (run->mmio.len) { + case 4: + *gpr = *(int32_t *) run->mmio.data; + break; + + case 2: + if (vcpu->mmio_needed == 2) + *gpr = *(int16_t *) run->mmio.data; + else + *gpr = *(uint16_t *) run->mmio.data; + + break; + case 1: + if (vcpu->mmio_needed == 2) + *gpr = *(int8_t *) run->mmio.data; + else + *gpr = *(u8 *) run->mmio.data; + break; + } + + if (vcpu->arch.pending_load_cause & CAUSEF_BD) + kvm_debug + ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", + vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, + vcpu->mmio_needed); + +done: + return er; +} + +static enum emulation_result +kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_vcpu_arch *arch = &vcpu->arch; + enum emulation_result er = EMULATE_DONE; + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_change_c0_guest_cause(cop0, (0xff), + (exccode << CAUSEB_EXCCODE)); + + /* Set PC to the exception entry point */ + arch->pc = KVM_GUEST_KSEG0 + 0x180; + kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); + + kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", + exccode, kvm_read_c0_guest_epc(cop0), + kvm_read_c0_guest_badvaddr(cop0)); + } else { + printk("Trying to deliver
EXC when EXL is already set\n"); + er = EMULATE_FAIL; + } + + return er; +} + +enum emulation_result +kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + + int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); + + if (usermode) { + switch (exccode) { + case T_INT: + case T_SYSCALL: + case T_BREAK: + case T_RES_INST: + break; + + case T_COP_UNUSABLE: + if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) + er = EMULATE_PRIV_FAIL; + break; + + case T_TLB_MOD: + break; + + case T_TLB_LD_MISS: + /* If we are accessing Guest kernel space, then send an address error exception to the guest */ + if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { + printk("%s: LD MISS @ %#lx\n", __func__, + badvaddr); + cause &= ~0xff; + cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); + er = EMULATE_PRIV_FAIL; + } + break; + + case T_TLB_ST_MISS: + /* If we are accessing Guest kernel space, then send an address error exception to the guest */ + if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { + printk("%s: ST MISS @ %#lx\n", __func__, + badvaddr); + cause &= ~0xff; + cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); + er = EMULATE_PRIV_FAIL; + } + break; + + case T_ADDR_ERR_ST: + printk("%s: address error ST @ %#lx\n", __func__, + badvaddr); + if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { + cause &= ~0xff; + cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); + } + er = EMULATE_PRIV_FAIL; + break; + case T_ADDR_ERR_LD: + printk("%s: address error LD @ %#lx\n", __func__, + badvaddr); + if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { + cause &= ~0xff; + cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); + } + er = EMULATE_PRIV_FAIL; + break; + default: + er = EMULATE_PRIV_FAIL; + break; + } + } + + if (er == EMULATE_PRIV_FAIL) { + kvm_mips_emulate_exc(cause, opc, run, vcpu); + } + return er; +} + +/* User Address (UA) fault, this could happen if + * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this + * case we pass on the fault to the guest kernel and let it handle it. + * (2) TLB entry is present in the Guest TLB but not in the shadow, in this + * case we inject the TLB from the Guest TLB into the shadow host TLB + */ +enum emulation_result +kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, + struct kvm_run *run, struct kvm_vcpu *vcpu) +{ + enum emulation_result er = EMULATE_DONE; + uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; + unsigned long va = vcpu->arch.host_cp0_badvaddr; + int index; + + kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", + vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); + + /* KVM would not have got the exception if this entry was valid in the shadow host TLB + * Check the Guest TLB, if the entry is not there then send the guest an + * exception.
The guest exc handler should then inject an entry into the + * guest TLB + */ + index = kvm_mips_guest_tlb_lookup(vcpu, + (va & VPN2_MASK) | + (kvm_read_c0_guest_entryhi + (vcpu->arch.cop0) & ASID_MASK)); + if (index < 0) { + if (exccode == T_TLB_LD_MISS) { + er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); + } else if (exccode == T_TLB_ST_MISS) { + er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); + } else { + printk("%s: invalid exc code: %d\n", __func__, exccode); + er = EMULATE_FAIL; + } + } else { + struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; + + /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ + if (!TLB_IS_VALID(*tlb, va)) { + if (exccode == T_TLB_LD_MISS) { + er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, + vcpu); + } else if (exccode == T_TLB_ST_MISS) { + er = kvm_mips_emulate_tlbinv_st(cause, opc, run, + vcpu); + } else { + printk("%s: invalid exc code: %d\n", __func__, + exccode); + er = EMULATE_FAIL; + } + } else { +#ifdef DEBUG + kvm_debug + ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", + tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); +#endif + /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ + kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, + NULL); + } + } + + return er; +} diff --git a/trunk/arch/mips/kvm/kvm_mips_int.c b/trunk/arch/mips/kvm/kvm_mips_int.c new file mode 100644 index 000000000000..1e5de16afe29 --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips_int.c @@ -0,0 +1,243 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Interrupt delivery +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal +*/ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "kvm_mips_int.h" + +void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) +{ + set_bit(priority, &vcpu->arch.pending_exceptions); +} + +void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority) +{ + clear_bit(priority, &vcpu->arch.pending_exceptions); +} + +void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) +{ + /* Cause bits to reflect the pending timer interrupt, + * the EXC code will be set when we are actually + * delivering the interrupt: + */ + kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); + + /* Queue up an INT exception for the core */ + kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER); + +} + +void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) +{ + kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); + kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); +} + +void +kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) +{ + int intr = (int)irq->irq; + + /* Cause bits to reflect the pending IO interrupt, + * the EXC code will be set when we are actually + * delivering the interrupt: + */ + switch (intr) { + case 2: + kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); + /* Queue up an INT exception for the core */ + kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IO); + break; + + case 3: + kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1)); + kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_1); + break; + + case 4: + kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2)); + kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_IPI_2); + break; + + default: + break; + } + +} + +void +kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq) +{ + int intr = (int)irq->irq; + switch (intr) { + case -2: + kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); + kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IO); + break; + + case -3: + kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ1)); + kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1); + break; + + case -4: + kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ2)); + kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2); + break; + + default: + break; + } + +} + +/* Deliver the interrupt of the corresponding priority, if possible. 
*/ +int +kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause) +{ + int allowed = 0; + uint32_t exccode; + + struct kvm_vcpu_arch *arch = &vcpu->arch; + struct mips_coproc *cop0 = vcpu->arch.cop0; + + switch (priority) { + case MIPS_EXC_INT_TIMER: + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & IE_IRQ5)) { + allowed = 1; + exccode = T_INT; + } + break; + + case MIPS_EXC_INT_IO: + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & IE_IRQ0)) { + allowed = 1; + exccode = T_INT; + } + break; + + case MIPS_EXC_INT_IPI_1: + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & IE_IRQ1)) { + allowed = 1; + exccode = T_INT; + } + break; + + case MIPS_EXC_INT_IPI_2: + if ((kvm_read_c0_guest_status(cop0) & ST0_IE) + && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) + && (kvm_read_c0_guest_status(cop0) & IE_IRQ2)) { + allowed = 1; + exccode = T_INT; + } + break; + + default: + break; + } + + /* Are we allowed to deliver the interrupt ??? */ + if (allowed) { + + if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { + /* save old pc */ + kvm_write_c0_guest_epc(cop0, arch->pc); + kvm_set_c0_guest_status(cop0, ST0_EXL); + + if (cause & CAUSEF_BD) + kvm_set_c0_guest_cause(cop0, CAUSEF_BD); + else + kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); + + kvm_debug("Delivering INT @ pc %#lx\n", arch->pc); + + } else + kvm_err("Trying to deliver interrupt when EXL is already set\n"); + + kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE, + (exccode << CAUSEB_EXCCODE)); + + /* XXXSL Set PC to the interrupt exception entry point */ + if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV) + arch->pc = KVM_GUEST_KSEG0 + 0x200; + else + arch->pc = KVM_GUEST_KSEG0 + 0x180; + + clear_bit(priority, &vcpu->arch.pending_exceptions); + } + + return allowed; +} + +int +kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause) +{ + return 1; +} + +void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause) +{ + unsigned long *pending = &vcpu->arch.pending_exceptions; + unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr; + unsigned int priority; + + if (!(*pending) && !(*pending_clr)) + return; + + priority = __ffs(*pending_clr); + while (priority <= MIPS_EXC_MAX) { + if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) { + if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE) + break; + } + + priority = find_next_bit(pending_clr, + BITS_PER_BYTE * sizeof(*pending_clr), + priority + 1); + } + + priority = __ffs(*pending); + while (priority <= MIPS_EXC_MAX) { + if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) { + if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE) + break; + } + + priority = find_next_bit(pending, + BITS_PER_BYTE * sizeof(*pending), + priority + 1); + } + +} + +int kvm_mips_pending_timer(struct kvm_vcpu *vcpu) +{ + return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions); +} diff --git a/trunk/arch/mips/kvm/kvm_mips_int.h b/trunk/arch/mips/kvm/kvm_mips_int.h new file mode 100644 index 000000000000..20da7d29eede --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips_int.h @@ -0,0 +1,49 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. 
See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Interrupts +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +/* MIPS Exception Priorities, exceptions (including interrupts) are queued up + * for the guest in the order specified by their priorities + */ + +#define MIPS_EXC_RESET 0 +#define MIPS_EXC_SRESET 1 +#define MIPS_EXC_DEBUG_ST 2 +#define MIPS_EXC_DEBUG 3 +#define MIPS_EXC_DDB 4 +#define MIPS_EXC_NMI 5 +#define MIPS_EXC_MCHK 6 +#define MIPS_EXC_INT_TIMER 7 +#define MIPS_EXC_INT_IO 8 +#define MIPS_EXC_EXECUTE 9 +#define MIPS_EXC_INT_IPI_1 10 +#define MIPS_EXC_INT_IPI_2 11 +#define MIPS_EXC_MAX 12 +/* XXXSL More to follow */ + +#define C_TI (_ULCAST_(1) << 30) + +#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) +#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0) + +void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority); +void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority); +int kvm_mips_pending_timer(struct kvm_vcpu *vcpu); + +void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu); +void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu); +void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); +void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, + struct kvm_mips_interrupt *irq); +int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause); +int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, + uint32_t cause); +void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause); diff --git a/trunk/arch/mips/kvm/kvm_mips_opcode.h b/trunk/arch/mips/kvm/kvm_mips_opcode.h new file mode 100644 index 000000000000..86d3b4cc348b --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips_opcode.h @@ -0,0 +1,24 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +/* + * Define opcode values not defined in + */ + +#ifndef __KVM_MIPS_OPCODE_H__ +#define __KVM_MIPS_OPCODE_H__ + +/* COP0 Ops */ +#define mfmcz_op 0x0b /* 01011 */ +#define wrpgpr_op 0x0e /* 01110 */ + +/* COP0 opcodes (only if COP0 and CO=1): */ +#define wait_op 0x20 /* 100000 */ + +#endif /* __KVM_MIPS_OPCODE_H__ */ diff --git a/trunk/arch/mips/kvm/kvm_mips_stats.c b/trunk/arch/mips/kvm/kvm_mips_stats.c new file mode 100644 index 000000000000..075904bcac1b --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_mips_stats.c @@ -0,0 +1,82 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: COP0 access histogram +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal +*/ + +#include + +char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = { + "WAIT", + "CACHE", + "Signal", + "Interrupt", + "COP0/1 Unusable", + "TLB Mod", + "TLB Miss (LD)", + "TLB Miss (ST)", + "Address Err (ST)", + "Address Error (LD)", + "System Call", + "Reserved Inst", + "Break Inst", + "D-Cache Flushes", +}; + +char *kvm_cop0_str[N_MIPS_COPROC_REGS] = { + "Index", + "Random", + "EntryLo0", + "EntryLo1", + "Context", + "PG Mask", + "Wired", + "HWREna", + "BadVAddr", + "Count", + "EntryHI", + "Compare", + "Status", + "Cause", + "EXC PC", + "PRID", + "Config", + "LLAddr", + "Watch Lo", + "Watch Hi", + "X Context", + "Reserved", + "Impl Dep", + "Debug", + "DEPC", + "PerfCnt", + "ErrCtl", + "CacheErr", + "TagLo", + "TagHi", + "ErrorEPC", + "DESAVE" +}; + +int kvm_mips_dump_stats(struct kvm_vcpu *vcpu) +{ +#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS + int i, j; + + printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); + for (i = 0; i < N_MIPS_COPROC_REGS; i++) { + for (j = 0; j < N_MIPS_COPROC_SEL; j++) { + if (vcpu->arch.cop0->stat[i][j]) + printk("%s[%d]: %lu\n", kvm_cop0_str[i], j, + vcpu->arch.cop0->stat[i][j]); + } + } +#endif + + return 0; +} diff --git a/trunk/arch/mips/kvm/kvm_tlb.c b/trunk/arch/mips/kvm/kvm_tlb.c new file mode 100644 index 000000000000..c777dd36d4a8 --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_tlb.c @@ -0,0 +1,949 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that +* TLB handlers run from KSEG0 +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include +#include +#include +#include + +#undef CONFIG_MIPS_MT +#include +#define CONFIG_MIPS_MT + +#define KVM_GUEST_PC_TLB 0 +#define KVM_GUEST_SP_TLB 1 + +#define PRIx64 "llx" + +/* Use VZ EntryHi.EHINV to invalidate TLB entries */ +#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) + +atomic_t kvm_mips_instance; +EXPORT_SYMBOL(kvm_mips_instance); + +/* These function pointers are initialized once the KVM module is loaded */ +pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); +EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); + +void (*kvm_mips_release_pfn_clean) (pfn_t pfn); +EXPORT_SYMBOL(kvm_mips_release_pfn_clean); + +bool(*kvm_mips_is_error_pfn) (pfn_t pfn); +EXPORT_SYMBOL(kvm_mips_is_error_pfn); + +uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; +} + + +uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) +{ + return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; +} + +inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) +{ + return vcpu->kvm->arch.commpage_tlb; +} + + +/* + * Structure defining an tlb entry data set. 
+ */ + +void kvm_mips_dump_host_tlbs(void) +{ + unsigned long old_entryhi; + unsigned long old_pagemask; + struct kvm_mips_tlb tlb; + unsigned long flags; + int i; + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + old_pagemask = read_c0_pagemask(); + + printk("HOST TLBs:\n"); + printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); + + for (i = 0; i < current_cpu_data.tlbsize; i++) { + write_c0_index(i); + mtc0_tlbw_hazard(); + + tlb_read(); + tlbw_use_hazard(); + + tlb.tlb_hi = read_c0_entryhi(); + tlb.tlb_lo0 = read_c0_entrylo0(); + tlb.tlb_lo1 = read_c0_entrylo1(); + tlb.tlb_mask = read_c0_pagemask(); + + printk("TLB%c%3d Hi 0x%08lx ", + (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', + i, tlb.tlb_hi); + printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo0 >> 3) & 7); + printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); + } + write_c0_entryhi(old_entryhi); + write_c0_pagemask(old_pagemask); + mtc0_tlbw_hazard(); + local_irq_restore(flags); +} + +void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + struct kvm_mips_tlb tlb; + int i; + + printk("Guest TLBs:\n"); + printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); + + for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { + tlb = vcpu->arch.guest_tlb[i]; + printk("TLB%c%3d Hi 0x%08lx ", + (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', + i, tlb.tlb_hi); + printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo0 >> 3) & 7); + printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); + } +} + +void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu) +{ + int i; + volatile struct kvm_mips_tlb tlb; + + printk("Shadow TLBs:\n"); + for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { + tlb = vcpu->arch.shadow_tlb[smp_processor_id()][i]; + printk("TLB%c%3d Hi 0x%08lx ", + (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', + i, tlb.tlb_hi); + printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), + (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', + (tlb.tlb_lo0 >> 3) & 7); + printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", + (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), + (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', + (tlb.tlb_lo1 & MIPS3_PG_G) ? 
'G' : ' ', + (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); + } +} + +static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) +{ + int srcu_idx, err = 0; + pfn_t pfn; + + if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) + return 0; + + srcu_idx = srcu_read_lock(&kvm->srcu); + pfn = kvm_mips_gfn_to_pfn(kvm, gfn); + + if (kvm_mips_is_error_pfn(pfn)) { + kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); + err = -EFAULT; + goto out; + } + + kvm->arch.guest_pmap[gfn] = pfn; +out: + srcu_read_unlock(&kvm->srcu, srcu_idx); + return err; +} + +/* Translate guest KSEG0 addresses to Host PA */ +unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, + unsigned long gva) +{ + gfn_t gfn; + uint32_t offset = gva & ~PAGE_MASK; + struct kvm *kvm = vcpu->kvm; + + if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) { + kvm_err("%s/%p: Invalid gva: %#lx\n", __func__, + __builtin_return_address(0), gva); + return KVM_INVALID_PAGE; + } + + gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT); + + if (gfn >= kvm->arch.guest_pmap_npages) { + kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn, + gva); + return KVM_INVALID_PAGE; + } + + if (kvm_mips_map_page(vcpu->kvm, gfn) < 0) + return KVM_INVALID_ADDR; + + return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; +} + +/* XXXKYMA: Must be called with interrupts disabled */ +/* set flush_dcache_mask == 0 if no dcache flush required */ +int +kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, + unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask) +{ + unsigned long flags; + unsigned long old_entryhi; + volatile int idx; + + local_irq_save(flags); + + + old_entryhi = read_c0_entryhi(); + write_c0_entryhi(entryhi); + mtc0_tlbw_hazard(); + + tlb_probe(); + tlb_probe_hazard(); + idx = read_c0_index(); + + if (idx > current_cpu_data.tlbsize) { + kvm_err("%s: Invalid Index: %d\n", __func__, idx); + kvm_mips_dump_host_tlbs(); + return -1; + } + + if (idx < 0) { + idx = read_c0_random() % current_cpu_data.tlbsize; + write_c0_index(idx); + mtc0_tlbw_hazard(); + } + write_c0_entrylo0(entrylo0); + write_c0_entrylo1(entrylo1); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + tlbw_use_hazard(); + +#ifdef DEBUG + if (debug) { + kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] " + "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n", + vcpu->arch.pc, idx, read_c0_entryhi(), + read_c0_entrylo0(), read_c0_entrylo1()); + } +#endif + + /* Flush D-cache */ + if (flush_dcache_mask) { + if (entrylo0 & MIPS3_PG_V) { + ++vcpu->stat.flush_dcache_exits; + flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask); + } + if (entrylo1 & MIPS3_PG_V) { + ++vcpu->stat.flush_dcache_exits; + flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) | + (0x1 << PAGE_SHIFT)); + } + } + + /* Restore old ASID */ + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + local_irq_restore(flags); + return 0; +} + + +/* XXXKYMA: Must be called with interrupts disabled */ +int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, + struct kvm_vcpu *vcpu) +{ + gfn_t gfn; + pfn_t pfn0, pfn1; + unsigned long vaddr = 0; + unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; + int even; + struct kvm *kvm = vcpu->kvm; + const int flush_dcache_mask = 0; + + + if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { + kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); + kvm_mips_dump_host_tlbs(); + return -1; + } + + gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT); + if (gfn >= kvm->arch.guest_pmap_npages) { + 
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__, + gfn, badvaddr); + kvm_mips_dump_host_tlbs(); + return -1; + } + even = !(gfn & 0x1); + vaddr = badvaddr & (PAGE_MASK << 1); + + if (kvm_mips_map_page(vcpu->kvm, gfn) < 0) + return -1; + + if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0) + return -1; + + if (even) { + pfn0 = kvm->arch.guest_pmap[gfn]; + pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1]; + } else { + pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1]; + pfn1 = kvm->arch.guest_pmap[gfn]; + } + + entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | + (0x1 << 1); + entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | + (0x1 << 1); + + return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, + flush_dcache_mask); +} + +int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, + struct kvm_vcpu *vcpu) +{ + pfn_t pfn0, pfn1; + unsigned long flags, old_entryhi = 0, vaddr = 0; + unsigned long entrylo0 = 0, entrylo1 = 0; + + + pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; + pfn1 = 0; + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | + (0x1 << 1); + entrylo1 = 0; + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + vaddr = badvaddr & (PAGE_MASK << 1); + write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu)); + mtc0_tlbw_hazard(); + write_c0_entrylo0(entrylo0); + mtc0_tlbw_hazard(); + write_c0_entrylo1(entrylo1); + mtc0_tlbw_hazard(); + write_c0_index(kvm_mips_get_commpage_asid(vcpu)); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + +#ifdef DEBUG + kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", + vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), + read_c0_entrylo0(), read_c0_entrylo1()); +#endif + + /* Restore old ASID */ + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + local_irq_restore(flags); + + return 0; +} + +int +kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, + struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1) +{ + unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; + struct kvm *kvm = vcpu->kvm; + pfn_t pfn0, pfn1; + + + if ((tlb->tlb_hi & VPN2_MASK) == 0) { + pfn0 = 0; + pfn1 = 0; + } else { + if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) + return -1; + + if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) + return -1; + + pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; + pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; + } + + if (hpa0) + *hpa0 = pfn0 << PAGE_SHIFT; + + if (hpa1) + *hpa1 = pfn1 << PAGE_SHIFT; + + /* Get attributes from the Guest TLB */ + entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? 
+ kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); + entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | + (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); + entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | + (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); + +#ifdef DEBUG + kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, + tlb->tlb_lo0, tlb->tlb_lo1); +#endif + + return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, + tlb->tlb_mask); +} + +int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) +{ + int i; + int index = -1; + struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; + + + for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { + if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && + (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { + index = i; + break; + } + } + +#ifdef DEBUG + kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n", + __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1); +#endif + + return index; +} + +int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) +{ + unsigned long old_entryhi, flags; + volatile int idx; + + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + + if (KVM_GUEST_KERNEL_MODE(vcpu)) + write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu)); + else { + write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); + } + + mtc0_tlbw_hazard(); + + tlb_probe(); + tlb_probe_hazard(); + idx = read_c0_index(); + + /* Restore old ASID */ + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + local_irq_restore(flags); + +#ifdef DEBUG + kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx); +#endif + + return idx; +} + +int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) +{ + int idx; + unsigned long flags, old_entryhi; + + local_irq_save(flags); + + + old_entryhi = read_c0_entryhi(); + + write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); + mtc0_tlbw_hazard(); + + tlb_probe(); + tlb_probe_hazard(); + idx = read_c0_index(); + + if (idx >= current_cpu_data.tlbsize) + BUG(); + + if (idx > 0) { + write_c0_entryhi(UNIQUE_ENTRYHI(idx)); + mtc0_tlbw_hazard(); + + write_c0_entrylo0(0); + mtc0_tlbw_hazard(); + + write_c0_entrylo1(0); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + mtc0_tlbw_hazard(); + } + + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + local_irq_restore(flags); + +#ifdef DEBUG + if (idx > 0) { + kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__, + (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx); + } +#endif + + return 0; +} + +/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/ +int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) +{ + unsigned long flags, old_entryhi; + + if (index >= current_cpu_data.tlbsize) + BUG(); + + local_irq_save(flags); + + + old_entryhi = read_c0_entryhi(); + + write_c0_entryhi(UNIQUE_ENTRYHI(index)); + mtc0_tlbw_hazard(); + + write_c0_index(index); + mtc0_tlbw_hazard(); + + write_c0_entrylo0(0); + mtc0_tlbw_hazard(); + + write_c0_entrylo1(0); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + write_c0_entryhi(old_entryhi); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + local_irq_restore(flags); + + return 0; +} + +void kvm_mips_flush_host_tlb(int skip_kseg0) +{ + unsigned 
long flags; + unsigned long old_entryhi, entryhi; + unsigned long old_pagemask; + int entry = 0; + int maxentry = current_cpu_data.tlbsize; + + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + old_pagemask = read_c0_pagemask(); + + /* Blast 'em all away. */ + for (entry = 0; entry < maxentry; entry++) { + + write_c0_index(entry); + mtc0_tlbw_hazard(); + + if (skip_kseg0) { + tlb_read(); + tlbw_use_hazard(); + + entryhi = read_c0_entryhi(); + + /* Don't blow away guest kernel entries */ + if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) { + continue; + } + } + + /* Make sure all entries differ. */ + write_c0_entryhi(UNIQUE_ENTRYHI(entry)); + mtc0_tlbw_hazard(); + write_c0_entrylo0(0); + mtc0_tlbw_hazard(); + write_c0_entrylo1(0); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + mtc0_tlbw_hazard(); + } + + tlbw_use_hazard(); + + write_c0_entryhi(old_entryhi); + write_c0_pagemask(old_pagemask); + mtc0_tlbw_hazard(); + tlbw_use_hazard(); + + local_irq_restore(flags); +} + +void +kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, + struct kvm_vcpu *vcpu) +{ + unsigned long asid = asid_cache(cpu); + + if (!((asid += ASID_INC) & ASID_MASK)) { + if (cpu_has_vtag_icache) { + flush_icache_all(); + } + + kvm_local_flush_tlb_all(); /* start new asid cycle */ + + if (!asid) /* fix version if needed */ + asid = ASID_FIRST_VERSION; + } + + cpu_context(cpu, mm) = asid_cache(cpu) = asid; +} + +void kvm_shadow_tlb_put(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + unsigned long old_entryhi; + unsigned long old_pagemask; + int entry = 0; + int cpu = smp_processor_id(); + + local_irq_save(flags); + + old_entryhi = read_c0_entryhi(); + old_pagemask = read_c0_pagemask(); + + for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { + write_c0_index(entry); + mtc0_tlbw_hazard(); + tlb_read(); + tlbw_use_hazard(); + + vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = read_c0_entryhi(); + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = read_c0_entrylo0(); + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = read_c0_entrylo1(); + vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = read_c0_pagemask(); + } + + write_c0_entryhi(old_entryhi); + write_c0_pagemask(old_pagemask); + mtc0_tlbw_hazard(); + + local_irq_restore(flags); + +} + +void kvm_shadow_tlb_load(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + unsigned long old_ctx; + int entry; + int cpu = smp_processor_id(); + + local_irq_save(flags); + + old_ctx = read_c0_entryhi(); + + for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { + write_c0_entryhi(vcpu->arch.shadow_tlb[cpu][entry].tlb_hi); + mtc0_tlbw_hazard(); + write_c0_entrylo0(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0); + write_c0_entrylo1(vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1); + + write_c0_index(entry); + mtc0_tlbw_hazard(); + + tlb_write_indexed(); + tlbw_use_hazard(); + } + + tlbw_use_hazard(); + write_c0_entryhi(old_ctx); + mtc0_tlbw_hazard(); + local_irq_restore(flags); +} + + +void kvm_local_flush_tlb_all(void) +{ + unsigned long flags; + unsigned long old_ctx; + int entry = 0; + + local_irq_save(flags); + /* Save old context and create impossible VPN2 value */ + old_ctx = read_c0_entryhi(); + write_c0_entrylo0(0); + write_c0_entrylo1(0); + + /* Blast 'em all away. */ + while (entry < current_cpu_data.tlbsize) { + /* Make sure all entries differ. 
*/ + write_c0_entryhi(UNIQUE_ENTRYHI(entry)); + write_c0_index(entry); + mtc0_tlbw_hazard(); + tlb_write_indexed(); + entry++; + } + tlbw_use_hazard(); + write_c0_entryhi(old_ctx); + mtc0_tlbw_hazard(); + + local_irq_restore(flags); +} + +void kvm_mips_init_shadow_tlb(struct kvm_vcpu *vcpu) +{ + int cpu, entry; + + for_each_possible_cpu(cpu) { + for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { + vcpu->arch.shadow_tlb[cpu][entry].tlb_hi = + UNIQUE_ENTRYHI(entry); + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0 = 0x0; + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1 = 0x0; + vcpu->arch.shadow_tlb[cpu][entry].tlb_mask = + read_c0_pagemask(); +#ifdef DEBUG + kvm_debug + ("shadow_tlb[%d][%d]: tlb_hi: %#lx, lo0: %#lx, lo1: %#lx\n", + cpu, entry, + vcpu->arch.shadow_tlb[cpu][entry].tlb_hi, + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo0, + vcpu->arch.shadow_tlb[cpu][entry].tlb_lo1); +#endif + } + } +} + +/* Restore ASID once we are scheduled back after preemption */ +void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) +{ + unsigned long flags; + int newasid = 0; + +#ifdef DEBUG + kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); +#endif + + /* Allocate new kernel and user ASIDs if needed */ + + local_irq_save(flags); + + if (((vcpu->arch. + guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) { + kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu); + vcpu->arch.guest_kernel_asid[cpu] = + vcpu->arch.guest_kernel_mm.context.asid[cpu]; + kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu); + vcpu->arch.guest_user_asid[cpu] = + vcpu->arch.guest_user_mm.context.asid[cpu]; + newasid++; + + kvm_info("[%d]: cpu_context: %#lx\n", cpu, + cpu_context(cpu, current->mm)); + kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n", + cpu, vcpu->arch.guest_kernel_asid[cpu]); + kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu, + vcpu->arch.guest_user_asid[cpu]); + } + + if (vcpu->arch.last_sched_cpu != cpu) { + kvm_info("[%d->%d]KVM VCPU[%d] switch\n", + vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); + } + + /* Only reload shadow host TLB if new ASIDs haven't been allocated */ +#if 0 + if ((atomic_read(&kvm_mips_instance) > 1) && !newasid) { + kvm_mips_flush_host_tlb(0); + kvm_shadow_tlb_load(vcpu); + } +#endif + + if (!newasid) { + /* If we preempted while the guest was executing, then reload the pre-empted ASID */ + if (current->flags & PF_VCPU) { + write_c0_entryhi(vcpu->arch. + preempt_entryhi & ASID_MASK); + ehb(); + } + } else { + /* New ASIDs were allocated for the VM */ + + /* Were we in guest context? If so then the pre-empted ASID is no longer + * valid, we need to set it to what it should be based on the mode of + * the Guest (Kernel/User) + */ + if (current->flags & PF_VCPU) { + if (KVM_GUEST_KERNEL_MODE(vcpu)) + write_c0_entryhi(vcpu->arch. + guest_kernel_asid[cpu] & + ASID_MASK); + else + write_c0_entryhi(vcpu->arch.
+ guest_user_asid[cpu] & + ASID_MASK); + ehb(); + } + } + + local_irq_restore(flags); + +} + +/* ASID can change if another task is scheduled during preemption */ +void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) +{ + unsigned long flags; + uint32_t cpu; + + local_irq_save(flags); + + cpu = smp_processor_id(); + + + vcpu->arch.preempt_entryhi = read_c0_entryhi(); + vcpu->arch.last_sched_cpu = cpu; + +#if 0 + if ((atomic_read(&kvm_mips_instance) > 1)) { + kvm_shadow_tlb_put(vcpu); + } +#endif + + if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) & + ASID_VERSION_MASK)) { + kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__, + cpu_context(cpu, current->mm)); + drop_mmu_context(current->mm, cpu); + } + write_c0_entryhi(cpu_asid(cpu, current->mm)); + ehb(); + + local_irq_restore(flags); +} + +uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + unsigned long paddr, flags; + uint32_t inst; + int index; + + if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 || + KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) { + local_irq_save(flags); + index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc); + if (index >= 0) { + inst = *(opc); + } else { + index = + kvm_mips_guest_tlb_lookup(vcpu, + ((unsigned long) opc & VPN2_MASK) + | + (kvm_read_c0_guest_entryhi + (cop0) & ASID_MASK)); + if (index < 0) { + kvm_err + ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", + __func__, opc, vcpu, read_c0_entryhi()); + kvm_mips_dump_host_tlbs(); + local_irq_restore(flags); + return KVM_INVALID_INST; + } + kvm_mips_handle_mapped_seg_tlb_fault(vcpu, + &vcpu->arch. + guest_tlb[index], + NULL, NULL); + inst = *(opc); + } + local_irq_restore(flags); + } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { + paddr = + kvm_mips_translate_guest_kseg0_to_hpa(vcpu, + (unsigned long) opc); + inst = *(uint32_t *) CKSEG0ADDR(paddr); + } else { + kvm_err("%s: illegal address: %p\n", __func__, opc); + return KVM_INVALID_INST; + } + + return inst; +} + +EXPORT_SYMBOL(kvm_local_flush_tlb_all); +EXPORT_SYMBOL(kvm_shadow_tlb_put); +EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); +EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); +EXPORT_SYMBOL(kvm_mips_init_shadow_tlb); +EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); +EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); +EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); +EXPORT_SYMBOL(kvm_mips_flush_host_tlb); +EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); +EXPORT_SYMBOL(kvm_mips_host_tlb_inv); +EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); +EXPORT_SYMBOL(kvm_shadow_tlb_load); +EXPORT_SYMBOL(kvm_mips_dump_shadow_tlbs); +EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); +EXPORT_SYMBOL(kvm_get_inst); +EXPORT_SYMBOL(kvm_arch_vcpu_load); +EXPORT_SYMBOL(kvm_arch_vcpu_put); diff --git a/trunk/arch/mips/kvm/kvm_trap_emul.c b/trunk/arch/mips/kvm/kvm_trap_emul.c new file mode 100644 index 000000000000..466aeef044bd --- /dev/null +++ b/trunk/arch/mips/kvm/kvm_trap_emul.c @@ -0,0 +1,482 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* KVM/MIPS: Deliver/Emulate exceptions to the guest kernel +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
+* Authors: Sanjay Lal +*/ + +#include +#include +#include +#include + +#include + +#include "kvm_mips_opcode.h" +#include "kvm_mips_int.h" + +static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) +{ + gpa_t gpa; + uint32_t kseg = KSEGX(gva); + + if ((kseg == CKSEG0) || (kseg == CKSEG1)) + gpa = CPHYSADDR(gva); + else { + printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); + kvm_mips_dump_host_tlbs(); + gpa = KVM_INVALID_ADDR; + } + +#ifdef DEBUG + kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); +#endif + + return gpa; +} + + +static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { + er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); + } else + er = kvm_mips_emulate_inst(cause, opc, run, vcpu); + + switch (er) { + case EMULATE_DONE: + ret = RESUME_GUEST; + break; + + case EMULATE_FAIL: + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + break; + + case EMULATE_WAIT: + run->exit_reason = KVM_EXIT_INTR; + ret = RESUME_HOST; + break; + + default: + BUG(); + } + return ret; +} + +static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 + || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { +#ifdef DEBUG + kvm_debug + ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); +#endif + er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); + + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { + /* XXXKYMA: The guest kernel does not expect to get this fault when we are not + * using HIGHMEM. 
Need to address this in a HIGHMEM kernel + */ + printk + ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } else { + printk + ("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) + && KVM_GUEST_KERNEL_MODE(vcpu)) { + if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 + || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { +#ifdef DEBUG + kvm_debug + ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); +#endif + er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { + /* All KSEG0 faults are handled by KVM, as the guest kernel does not + * expect to ever get them + */ + if (kvm_mips_handle_kseg0_tlb_fault + (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else { + kvm_err + ("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) + && KVM_GUEST_KERNEL_MODE(vcpu)) { + if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 + || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { +#ifdef DEBUG + kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", + vcpu->arch.pc, badvaddr); +#endif + + /* User Address (UA) fault, this could happen if + * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this + * case we pass on the fault to the guest kernel and let it handle it. 
+ * (2) TLB entry is present in the Guest TLB but not in the shadow, in this + * case we inject the TLB from the Guest TLB into the shadow host TLB + */ + + er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { + if (kvm_mips_handle_kseg0_tlb_fault + (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + } else { + printk + ("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + kvm_mips_dump_host_tlbs(); + kvm_arch_vcpu_dump_regs(vcpu); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (KVM_GUEST_KERNEL_MODE(vcpu) + && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { +#ifdef DEBUG + kvm_debug("Emulate Store to MMIO space\n"); +#endif + er = kvm_mips_emulate_inst(cause, opc, run, vcpu); + if (er == EMULATE_FAIL) { + printk("Emulate Store to MMIO space failed\n"); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } else { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } + } else { + printk + ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { +#ifdef DEBUG + kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); +#endif + er = kvm_mips_emulate_inst(cause, opc, run, vcpu); + if (er == EMULATE_FAIL) { + printk("Emulate Load from MMIO space failed\n"); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } else { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } + } else { + printk + ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", + cause, opc, badvaddr); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + er = EMULATE_FAIL; + } + return ret; +} + +static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_syscall(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = 
RESUME_GUEST; + + er = kvm_mips_handle_ri(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc; + unsigned long cause = vcpu->arch.host_cp0_cause; + enum emulation_result er = EMULATE_DONE; + int ret = RESUME_GUEST; + + er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu); + if (er == EMULATE_DONE) + ret = RESUME_GUEST; + else { + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } + return ret; +} + +static int +kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]); + kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]); + kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]); + kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]); + kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]); + + kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]); + kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]); + kvm_write_c0_guest_pagemask(cop0, + regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]); + kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]); + kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]); + + return 0; +} + +static int +kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + + regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0); + regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0); + regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0); + regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0); + regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0); + + regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0); + regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0); + regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] = + kvm_read_c0_guest_pagemask(cop0); + regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0); + regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0); + + regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0); + regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0); + + return 0; +} + +static int kvm_trap_emul_vm_init(struct kvm *kvm) +{ + return 0; +} + +static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) +{ + return 0; +} + +static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) +{ + struct mips_coproc *cop0 = vcpu->arch.cop0; + uint32_t config1; + int vcpu_id = vcpu->vcpu_id; + + /* Arch specific stuff, set up config registers properly so that the + * guest will come up as expected, for now we simulate a + * MIPS 24kc + */ + kvm_write_c0_guest_prid(cop0, 0x00019300); + kvm_write_c0_guest_config(cop0, + MIPS_CONFIG0 | (0x1 << CP0C0_AR) | + (MMU_TYPE_R4000 << CP0C0_MT)); + + /* Read the cache characteristics from the host Config1 Register */ + config1 = (read_c0_config1() & ~0x7f); + + /* Set up MMU size */ + config1 
&= ~(0x3f << 25); + config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); + + /* We unset some bits that we aren't emulating */ + config1 &= + ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) | + (1 << CP0C1_WR) | (1 << CP0C1_CA)); + kvm_write_c0_guest_config1(cop0, config1); + + kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); + /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ + kvm_write_c0_guest_config3(cop0, + MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 << + CP0C3_ULRI)); + + /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ + kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); + + /* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */ + kvm_write_c0_guest_intctl(cop0, 0xFC000000); + + /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ + kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF)); + + return 0; +} + +static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { + /* exit handlers */ + .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, + .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, + .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, + .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, + .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, + .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, + .handle_syscall = kvm_trap_emul_handle_syscall, + .handle_res_inst = kvm_trap_emul_handle_res_inst, + .handle_break = kvm_trap_emul_handle_break, + + .vm_init = kvm_trap_emul_vm_init, + .vcpu_init = kvm_trap_emul_vcpu_init, + .vcpu_setup = kvm_trap_emul_vcpu_setup, + .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, + .queue_timer_int = kvm_mips_queue_timer_int_cb, + .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, + .queue_io_int = kvm_mips_queue_io_int_cb, + .dequeue_io_int = kvm_mips_dequeue_io_int_cb, + .irq_deliver = kvm_mips_irq_deliver_cb, + .irq_clear = kvm_mips_irq_clear_cb, + .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs, + .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs, +}; + +int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) +{ + *install_callbacks = &kvm_trap_emul_callbacks; + return 0; +} diff --git a/trunk/arch/mips/kvm/trace.h b/trunk/arch/mips/kvm/trace.h new file mode 100644 index 000000000000..bc9e0f406c08 --- /dev/null +++ b/trunk/arch/mips/kvm/trace.h @@ -0,0 +1,46 @@ +/* +* This file is subject to the terms and conditions of the GNU General Public +* License. See the file "COPYING" in the main directory of this archive +* for more details. +* +* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. +* Authors: Sanjay Lal +*/ + +#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_KVM_H + +#include + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM kvm +#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace + +/* + * Tracepoints for VM exits + */ +extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; + +TRACE_EVENT(kvm_exit, + TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), + TP_ARGS(vcpu, reason), + TP_STRUCT__entry( + __field(struct kvm_vcpu *, vcpu) + __field(unsigned int, reason) + ), + + TP_fast_assign( + __entry->vcpu = vcpu; + __entry->reason = reason; + ), + + TP_printk("[%s]PC: 0x%08lx", + kvm_mips_exit_types_str[__entry->reason], + __entry->vcpu->arch.pc) +); + +#endif /* _TRACE_KVM_H */ + +/* This part must be outside protection */ +#include diff --git a/trunk/arch/mips/lantiq/xway/gptu.c b/trunk/arch/mips/lantiq/xway/gptu.c index 9861c8669fab..850821df924c 100644 --- a/trunk/arch/mips/lantiq/xway/gptu.c +++ b/trunk/arch/mips/lantiq/xway/gptu.c @@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Failed to get resource\n"); - return -ENOMEM; - } /* remap gptu register range */ gptu_membase = devm_ioremap_resource(&pdev->dev, res); @@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev) if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { dev_err(&pdev->dev, "Failed to find magic\n"); gptu_hwexit(); + clk_disable(clk); + clk_put(clk); return -ENAVAIL; } diff --git a/trunk/arch/mips/lib/bitops.c b/trunk/arch/mips/lib/bitops.c index a64daee740ee..3b2a1e78a543 100644 --- a/trunk/arch/mips/lib/bitops.c +++ b/trunk/arch/mips/lib/bitops.c @@ -19,7 +19,7 @@ */ void __mips_set_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -41,7 +41,7 @@ EXPORT_SYMBOL(__mips_set_bit); */ void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -63,7 +63,7 @@ EXPORT_SYMBOL(__mips_clear_bit); */ void __mips_change_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -86,7 +86,7 @@ EXPORT_SYMBOL(__mips_change_bit); int __mips_test_and_set_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -112,7 +112,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit); int __mips_test_and_set_bit_lock(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -137,7 +137,7 @@ EXPORT_SYMBOL(__mips_test_and_set_bit_lock); */ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; @@ -162,7 +162,7 @@ EXPORT_SYMBOL(__mips_test_and_clear_bit); */ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr) { - volatile unsigned long *a = addr; + unsigned long *a = (unsigned long *)addr; unsigned bit = nr & SZLONG_MASK; unsigned long mask; unsigned long flags; diff --git
a/trunk/arch/mips/lib/memset.S b/trunk/arch/mips/lib/memset.S index 053d3b0b0317..0580194e7402 100644 --- a/trunk/arch/mips/lib/memset.S +++ b/trunk/arch/mips/lib/memset.S @@ -5,7 +5,8 @@ * * Copyright (C) 1998, 1999, 2000 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. - * Copyright (C) 2007 Maciej W. Rozycki + * Copyright (C) 2007 by Maciej W. Rozycki + * Copyright (C) 2011, 2012 MIPS Technologies, Inc. */ #include #include @@ -19,6 +20,20 @@ #define LONG_S_R sdr #endif +#ifdef CONFIG_CPU_MICROMIPS +#define STORSIZE (LONGSIZE * 2) +#define STORMASK (STORSIZE - 1) +#define FILL64RG t8 +#define FILLPTRG t7 +#undef LONG_S +#define LONG_S LONG_SP +#else +#define STORSIZE LONGSIZE +#define STORMASK LONGMASK +#define FILL64RG a1 +#define FILLPTRG t0 +#endif + #define EX(insn,reg,addr,handler) \ 9: insn reg, addr; \ .section __ex_table,"a"; \ @@ -26,23 +41,25 @@ .previous .macro f_fill64 dst, offset, val, fixup - EX(LONG_S, \val, (\offset + 0 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 1 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 2 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 3 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 4 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 5 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 6 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 7 * LONGSIZE)(\dst), \fixup) -#if LONGSIZE == 4 - EX(LONG_S, \val, (\offset + 8 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 9 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 10 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 11 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 12 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 13 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 14 * LONGSIZE)(\dst), \fixup) - EX(LONG_S, \val, (\offset + 15 * LONGSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 0 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 1 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 2 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 3 * STORSIZE)(\dst), \fixup) +#if ((defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) || !defined(CONFIG_CPU_MICROMIPS)) + EX(LONG_S, \val, (\offset + 4 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 5 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 6 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 7 * STORSIZE)(\dst), \fixup) +#endif +#if (!defined(CONFIG_CPU_MICROMIPS) && (LONGSIZE == 4)) + EX(LONG_S, \val, (\offset + 8 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 9 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 10 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 11 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 12 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 13 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 14 * STORSIZE)(\dst), \fixup) + EX(LONG_S, \val, (\offset + 15 * STORSIZE)(\dst), \fixup) #endif .endm @@ -71,16 +88,20 @@ LEAF(memset) 1: FEXPORT(__bzero) - sltiu t0, a2, LONGSIZE /* very small region? */ + sltiu t0, a2, STORSIZE /* very small region? */ bnez t0, .Lsmall_memset - andi t0, a0, LONGMASK /* aligned? */ + andi t0, a0, STORMASK /* aligned? 
*/ +#ifdef CONFIG_CPU_MICROMIPS + move t8, a1 /* used by 'swp' instruction */ + move t9, a1 +#endif #ifndef CONFIG_CPU_DADDI_WORKAROUNDS beqz t0, 1f - PTR_SUBU t0, LONGSIZE /* alignment in bytes */ + PTR_SUBU t0, STORSIZE /* alignment in bytes */ #else .set noat - li AT, LONGSIZE + li AT, STORSIZE beqz t0, 1f PTR_SUBU t0, AT /* alignment in bytes */ .set at @@ -99,24 +120,27 @@ FEXPORT(__bzero) 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f beqz t1, .Lmemset_partial /* no block to fill */ - andi t0, a2, 0x40-LONGSIZE + andi t0, a2, 0x40-STORSIZE PTR_ADDU t1, a0 /* end address */ .set reorder 1: PTR_ADDIU a0, 64 R10KCBARRIER(0(ra)) - f_fill64 a0, -64, a1, .Lfwd_fixup + f_fill64 a0, -64, FILL64RG, .Lfwd_fixup bne t1, a0, 1b .set noreorder .Lmemset_partial: R10KCBARRIER(0(ra)) PTR_LA t1, 2f /* where to start */ +#ifdef CONFIG_CPU_MICROMIPS + LONG_SRL t7, t0, 1 +#endif #if LONGSIZE == 4 - PTR_SUBU t1, t0 + PTR_SUBU t1, FILLPTRG #else .set noat - LONG_SRL AT, t0, 1 + LONG_SRL AT, FILLPTRG, 1 PTR_SUBU t1, AT .set at #endif @@ -126,9 +150,9 @@ FEXPORT(__bzero) .set push .set noreorder .set nomacro - f_fill64 a0, -64, a1, .Lpartial_fixup /* ... but first do longs ... */ + f_fill64 a0, -64, FILL64RG, .Lpartial_fixup /* ... but first do longs ... */ 2: .set pop - andi a2, LONGMASK /* At most one long to go */ + andi a2, STORMASK /* At most one long to go */ beqz a2, 1f PTR_ADDU a0, a2 /* What's left */ @@ -169,7 +193,7 @@ FEXPORT(__bzero) .Lpartial_fixup: PTR_L t0, TI_TASK($28) - andi a2, LONGMASK + andi a2, STORMASK LONG_L t0, THREAD_BUADDR(t0) LONG_ADDU a2, t1 jr ra @@ -177,4 +201,4 @@ FEXPORT(__bzero) .Llast_fixup: jr ra - andi v1, a2, LONGMASK + andi v1, a2, STORMASK diff --git a/trunk/arch/mips/lib/mips-atomic.c b/trunk/arch/mips/lib/mips-atomic.c index cd160be3ce4d..6807f7172eaf 100644 --- a/trunk/arch/mips/lib/mips-atomic.c +++ b/trunk/arch/mips/lib/mips-atomic.c @@ -13,6 +13,7 @@ #include #include #include +#include #if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) @@ -34,8 +35,11 @@ * * Workaround: mask EXL bit of the result or place a nop before mfc0. 
*/ -__asm__( - " .macro arch_local_irq_disable\n" +notrace void arch_local_irq_disable(void) +{ + preempt_disable(); + + __asm__ __volatile__( " .set push \n" " .set noat \n" #ifdef CONFIG_MIPS_MT_SMTC @@ -52,108 +56,98 @@ __asm__( " .set noreorder \n" " mtc0 $1,$12 \n" #endif - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); + : /* no outputs */ + : /* no inputs */ + : "memory"); -notrace void arch_local_irq_disable(void) -{ - preempt_disable(); - __asm__ __volatile__( - "arch_local_irq_disable" - : /* no outputs */ - : /* no inputs */ - : "memory"); preempt_enable(); } EXPORT_SYMBOL(arch_local_irq_disable); -__asm__( - " .macro arch_local_irq_save result \n" +notrace unsigned long arch_local_irq_save(void) +{ + unsigned long flags; + + preempt_disable(); + + __asm__ __volatile__( " .set push \n" " .set reorder \n" " .set noat \n" #ifdef CONFIG_MIPS_MT_SMTC - " mfc0 \\result, $2, 1 \n" - " ori $1, \\result, 0x400 \n" + " mfc0 %[flags], $2, 1 \n" + " ori $1, %[flags], 0x400 \n" " .set noreorder \n" " mtc0 $1, $2, 1 \n" - " andi \\result, \\result, 0x400 \n" + " andi %[flags], %[flags], 0x400 \n" #elif defined(CONFIG_CPU_MIPSR2) /* see irqflags.h for inline function */ #else - " mfc0 \\result, $12 \n" - " ori $1, \\result, 0x1f \n" + " mfc0 %[flags], $12 \n" + " ori $1, %[flags], 0x1f \n" " xori $1, 0x1f \n" " .set noreorder \n" " mtc0 $1, $12 \n" #endif - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); + : [flags] "=r" (flags) + : /* no inputs */ + : "memory"); -notrace unsigned long arch_local_irq_save(void) -{ - unsigned long flags; - preempt_disable(); - asm volatile("arch_local_irq_save\t%0" - : "=r" (flags) - : /* no inputs */ - : "memory"); preempt_enable(); + return flags; } EXPORT_SYMBOL(arch_local_irq_save); +notrace void arch_local_irq_restore(unsigned long flags) +{ + unsigned long __tmp1; + +#ifdef CONFIG_MIPS_MT_SMTC + /* + * SMTC kernel needs to do a software replay of queued + * IPIs, at the cost of branch and call overhead on each + * local_irq_restore() + */ + if (unlikely(!(flags & 0x0400))) + smtc_ipi_replay(); +#endif + preempt_disable(); -__asm__( - " .macro arch_local_irq_restore flags \n" + __asm__ __volatile__( " .set push \n" " .set noreorder \n" " .set noat \n" #ifdef CONFIG_MIPS_MT_SMTC - "mfc0 $1, $2, 1 \n" - "andi \\flags, 0x400 \n" - "ori $1, 0x400 \n" - "xori $1, 0x400 \n" - "or \\flags, $1 \n" - "mtc0 \\flags, $2, 1 \n" + " mfc0 $1, $2, 1 \n" + " andi %[flags], 0x400 \n" + " ori $1, 0x400 \n" + " xori $1, 0x400 \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $2, 1 \n" #elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) /* see irqflags.h for inline function */ #elif defined(CONFIG_CPU_MIPSR2) /* see irqflags.h for inline function */ #else " mfc0 $1, $12 \n" - " andi \\flags, 1 \n" + " andi %[flags], 1 \n" " ori $1, 0x1f \n" " xori $1, 0x1f \n" - " or \\flags, $1 \n" - " mtc0 \\flags, $12 \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $12 \n" #endif - " irq_disable_hazard \n" + " " __stringify(__irq_disable_hazard) " \n" " .set pop \n" - " .endm \n"); + : [flags] "=r" (__tmp1) + : "0" (flags) + : "memory"); -notrace void arch_local_irq_restore(unsigned long flags) -{ - unsigned long __tmp1; - -#ifdef CONFIG_MIPS_MT_SMTC - /* - * SMTC kernel needs to do a software replay of queued - * IPIs, at the cost of branch and call overhead on each - * local_irq_restore() - */ - if (unlikely(!(flags & 0x0400))) - smtc_ipi_replay(); -#endif - 
preempt_disable(); - __asm__ __volatile__( - "arch_local_irq_restore\t%0" - : "=r" (__tmp1) - : "0" (flags) - : "memory"); preempt_enable(); } EXPORT_SYMBOL(arch_local_irq_restore); @@ -164,11 +158,36 @@ notrace void __arch_local_irq_restore(unsigned long flags) unsigned long __tmp1; preempt_disable(); + __asm__ __volatile__( - "arch_local_irq_restore\t%0" - : "=r" (__tmp1) - : "0" (flags) - : "memory"); + " .set push \n" + " .set noreorder \n" + " .set noat \n" +#ifdef CONFIG_MIPS_MT_SMTC + " mfc0 $1, $2, 1 \n" + " andi %[flags], 0x400 \n" + " ori $1, 0x400 \n" + " xori $1, 0x400 \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $2, 1 \n" +#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU) + /* see irqflags.h for inline function */ +#elif defined(CONFIG_CPU_MIPSR2) + /* see irqflags.h for inline function */ +#else + " mfc0 $1, $12 \n" + " andi %[flags], 1 \n" + " ori $1, 0x1f \n" + " xori $1, 0x1f \n" + " or %[flags], $1 \n" + " mtc0 %[flags], $12 \n" +#endif + " " __stringify(__irq_disable_hazard) " \n" + " .set pop \n" + : [flags] "=r" (__tmp1) + : "0" (flags) + : "memory"); + preempt_enable(); } EXPORT_SYMBOL(__arch_local_irq_restore); diff --git a/trunk/arch/mips/lib/strlen_user.S b/trunk/arch/mips/lib/strlen_user.S index fdbb970f670d..e362dcdc69d1 100644 --- a/trunk/arch/mips/lib/strlen_user.S +++ b/trunk/arch/mips/lib/strlen_user.S @@ -3,8 +3,9 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 1996, 1998, 1999, 2004 by Ralf Baechle - * Copyright (c) 1999 Silicon Graphics, Inc. + * Copyright (C) 1996, 1998, 1999, 2004 by Ralf Baechle + * Copyright (C) 1999 Silicon Graphics, Inc. + * Copyright (C) 2011 MIPS Technologies, Inc. */ #include #include @@ -28,9 +29,9 @@ LEAF(__strlen_user_asm) FEXPORT(__strlen_user_nocheck_asm) move v0, a0 -1: EX(lb, t0, (v0), .Lfault) +1: EX(lbu, v1, (v0), .Lfault) PTR_ADDIU v0, 1 - bnez t0, 1b + bnez v1, 1b PTR_SUBU v0, a0 jr ra END(__strlen_user_asm) diff --git a/trunk/arch/mips/lib/strncpy_user.S b/trunk/arch/mips/lib/strncpy_user.S index bad539487503..92870b6b53ea 100644 --- a/trunk/arch/mips/lib/strncpy_user.S +++ b/trunk/arch/mips/lib/strncpy_user.S @@ -3,7 +3,8 @@ * License. See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (c) 1996, 1999 by Ralf Baechle + * Copyright (C) 1996, 1999 by Ralf Baechle + * Copyright (C) 2011 MIPS Technologies, Inc. */ #include #include @@ -33,26 +34,27 @@ LEAF(__strncpy_from_user_asm) bnez v0, .Lfault FEXPORT(__strncpy_from_user_nocheck_asm) - move v0, zero - move v1, a1 .set noreorder -1: EX(lbu, t0, (v1), .Lfault) + move t0, zero + move v1, a1 +1: EX(lbu, v0, (v1), .Lfault) PTR_ADDIU v1, 1 R10KCBARRIER(0(ra)) - beqz t0, 2f - sb t0, (a0) - PTR_ADDIU v0, 1 - .set reorder - PTR_ADDIU a0, 1 - bne v0, a2, 1b -2: PTR_ADDU t0, a1, v0 - xor t0, a1 - bltz t0, .Lfault + beqz v0, 2f + sb v0, (a0) + PTR_ADDIU t0, 1 + bne t0, a2, 1b + PTR_ADDIU a0, 1 +2: PTR_ADDU v0, a1, t0 + xor v0, a1 + bltz v0, .Lfault + nop jr ra # return n + move v0, t0 END(__strncpy_from_user_asm) -.Lfault: li v0, -EFAULT - jr ra +.Lfault: jr ra + li v0, -EFAULT .section __ex_table,"a" PTR 1b, .Lfault diff --git a/trunk/arch/mips/lib/strnlen_user.S b/trunk/arch/mips/lib/strnlen_user.S index beea03c8c0ce..fcacea5e61f1 100644 --- a/trunk/arch/mips/lib/strnlen_user.S +++ b/trunk/arch/mips/lib/strnlen_user.S @@ -35,7 +35,7 @@ FEXPORT(__strnlen_user_nocheck_asm) PTR_ADDU a1, a0 # stop pointer 1: beq v0, a1, 1f # limit reached? 
EX(lb, t0, (v0), .Lfault) - PTR_ADDU v0, 1 + PTR_ADDIU v0, 1 bnez t0, 1b 1: PTR_SUBU v0, a0 jr ra diff --git a/trunk/arch/mips/loongson/common/Makefile b/trunk/arch/mips/loongson/common/Makefile index e526488df655..4c57b3e5743f 100644 --- a/trunk/arch/mips/loongson/common/Makefile +++ b/trunk/arch/mips/loongson/common/Makefile @@ -4,7 +4,7 @@ obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \ pci.o bonito-irq.o mem.o machtype.o platform.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_GPIOLIB) += gpio.o # # Serial port support diff --git a/trunk/arch/mips/loongson/common/reset.c b/trunk/arch/mips/loongson/common/reset.c index 35c8c6468494..65bfbb5d06f4 100644 --- a/trunk/arch/mips/loongson/common/reset.c +++ b/trunk/arch/mips/loongson/common/reset.c @@ -12,6 +12,7 @@ #include #include +#include #include #include diff --git a/trunk/arch/mips/loongson1/common/reset.c b/trunk/arch/mips/loongson1/common/reset.c index d4f610f9604a..547f34b69e4c 100644 --- a/trunk/arch/mips/loongson1/common/reset.c +++ b/trunk/arch/mips/loongson1/common/reset.c @@ -9,6 +9,7 @@ #include #include +#include #include #include diff --git a/trunk/arch/mips/math-emu/cp1emu.c b/trunk/arch/mips/math-emu/cp1emu.c index afb5a0bcf7a5..f03771900813 100644 --- a/trunk/arch/mips/math-emu/cp1emu.c +++ b/trunk/arch/mips/math-emu/cp1emu.c @@ -45,6 +45,7 @@ #include #include #include +#include #include #include @@ -81,6 +82,11 @@ DEFINE_PER_CPU(struct mips_fpu_emulator_stats, fpuemustats); /* Determine rounding mode from the RM bits of the FCSR */ #define modeindex(v) ((v) & FPU_CSR_RM) +/* microMIPS bitfields */ +#define MM_POOL32A_MINOR_MASK 0x3f +#define MM_POOL32A_MINOR_SHIFT 0x6 +#define MM_MIPS32_COND_FC 0x30 + /* Convert Mips rounding mode (0..3) to IEEE library modes. */ static const unsigned char ieee_rm[4] = { [FPU_CSR_RN] = IEEE754_RN, @@ -110,6 +116,556 @@ static const unsigned int fpucondbit[8] = { }; #endif +/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */ +static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7}; + +/* (microMIPS) Convert certain microMIPS instructions to MIPS32 format. */ +static const int sd_format[] = {16, 17, 0, 0, 0, 0, 0, 0}; +static const int sdps_format[] = {16, 17, 22, 0, 0, 0, 0, 0}; +static const int dwl_format[] = {17, 20, 21, 0, 0, 0, 0, 0}; +static const int swl_format[] = {16, 20, 21, 0, 0, 0, 0, 0}; + +/* + * This function translates a 32-bit microMIPS instruction + * into a 32-bit MIPS32 instruction. Returns 0 on success + * and SIGILL otherwise.
+ */ +static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr) +{ + union mips_instruction insn = *insn_ptr; + union mips_instruction mips32_insn = insn; + int func, fmt, op; + + switch (insn.mm_i_format.opcode) { + case mm_ldc132_op: + mips32_insn.mm_i_format.opcode = ldc1_op; + mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; + mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; + break; + case mm_lwc132_op: + mips32_insn.mm_i_format.opcode = lwc1_op; + mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; + mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; + break; + case mm_sdc132_op: + mips32_insn.mm_i_format.opcode = sdc1_op; + mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; + mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; + break; + case mm_swc132_op: + mips32_insn.mm_i_format.opcode = swc1_op; + mips32_insn.mm_i_format.rt = insn.mm_i_format.rs; + mips32_insn.mm_i_format.rs = insn.mm_i_format.rt; + break; + case mm_pool32i_op: + /* NOTE: offset is << by 1 if in microMIPS mode. */ + if ((insn.mm_i_format.rt == mm_bc1f_op) || + (insn.mm_i_format.rt == mm_bc1t_op)) { + mips32_insn.fb_format.opcode = cop1_op; + mips32_insn.fb_format.bc = bc_op; + mips32_insn.fb_format.flag = + (insn.mm_i_format.rt == mm_bc1t_op) ? 1 : 0; + } else + return SIGILL; + break; + case mm_pool32f_op: + switch (insn.mm_fp0_format.func) { + case mm_32f_01_op: + case mm_32f_11_op: + case mm_32f_02_op: + case mm_32f_12_op: + case mm_32f_41_op: + case mm_32f_51_op: + case mm_32f_42_op: + case mm_32f_52_op: + op = insn.mm_fp0_format.func; + if (op == mm_32f_01_op) + func = madd_s_op; + else if (op == mm_32f_11_op) + func = madd_d_op; + else if (op == mm_32f_02_op) + func = nmadd_s_op; + else if (op == mm_32f_12_op) + func = nmadd_d_op; + else if (op == mm_32f_41_op) + func = msub_s_op; + else if (op == mm_32f_51_op) + func = msub_d_op; + else if (op == mm_32f_42_op) + func = nmsub_s_op; + else + func = nmsub_d_op; + mips32_insn.fp6_format.opcode = cop1x_op; + mips32_insn.fp6_format.fr = insn.mm_fp6_format.fr; + mips32_insn.fp6_format.ft = insn.mm_fp6_format.ft; + mips32_insn.fp6_format.fs = insn.mm_fp6_format.fs; + mips32_insn.fp6_format.fd = insn.mm_fp6_format.fd; + mips32_insn.fp6_format.func = func; + break; + case mm_32f_10_op: + func = -1; /* Invalid */ + op = insn.mm_fp5_format.op & 0x7; + if (op == mm_ldxc1_op) + func = ldxc1_op; + else if (op == mm_sdxc1_op) + func = sdxc1_op; + else if (op == mm_lwxc1_op) + func = lwxc1_op; + else if (op == mm_swxc1_op) + func = swxc1_op; + + if (func != -1) { + mips32_insn.r_format.opcode = cop1x_op; + mips32_insn.r_format.rs = + insn.mm_fp5_format.base; + mips32_insn.r_format.rt = + insn.mm_fp5_format.index; + mips32_insn.r_format.rd = 0; + mips32_insn.r_format.re = insn.mm_fp5_format.fd; + mips32_insn.r_format.func = func; + } else + return SIGILL; + break; + case mm_32f_40_op: + op = -1; /* Invalid */ + if (insn.mm_fp2_format.op == mm_fmovt_op) + op = 1; + else if (insn.mm_fp2_format.op == mm_fmovf_op) + op = 0; + if (op != -1) { + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp2_format.fmt]; + mips32_insn.fp0_format.ft = + (insn.mm_fp2_format.cc<<2) + op; + mips32_insn.fp0_format.fs = + insn.mm_fp2_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp2_format.fd; + mips32_insn.fp0_format.func = fmovc_op; + } else + return SIGILL; + break; + case mm_32f_60_op: + func = -1; /* Invalid */ + if (insn.mm_fp0_format.op == mm_fadd_op) + func = fadd_op; + else if (insn.mm_fp0_format.op == mm_fsub_op) + func = fsub_op; 
+ else if (insn.mm_fp0_format.op == mm_fmul_op) + func = fmul_op; + else if (insn.mm_fp0_format.op == mm_fdiv_op) + func = fdiv_op; + if (func != -1) { + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp0_format.fmt]; + mips32_insn.fp0_format.ft = + insn.mm_fp0_format.ft; + mips32_insn.fp0_format.fs = + insn.mm_fp0_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp0_format.fd; + mips32_insn.fp0_format.func = func; + } else + return SIGILL; + break; + case mm_32f_70_op: + func = -1; /* Invalid */ + if (insn.mm_fp0_format.op == mm_fmovn_op) + func = fmovn_op; + else if (insn.mm_fp0_format.op == mm_fmovz_op) + func = fmovz_op; + if (func != -1) { + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp0_format.fmt]; + mips32_insn.fp0_format.ft = + insn.mm_fp0_format.ft; + mips32_insn.fp0_format.fs = + insn.mm_fp0_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp0_format.fd; + mips32_insn.fp0_format.func = func; + } else + return SIGILL; + break; + case mm_32f_73_op: /* POOL32FXF */ + switch (insn.mm_fp1_format.op) { + case mm_movf0_op: + case mm_movf1_op: + case mm_movt0_op: + case mm_movt1_op: + if ((insn.mm_fp1_format.op & 0x7f) == + mm_movf0_op) + op = 0; + else + op = 1; + mips32_insn.r_format.opcode = spec_op; + mips32_insn.r_format.rs = insn.mm_fp4_format.fs; + mips32_insn.r_format.rt = + (insn.mm_fp4_format.cc << 2) + op; + mips32_insn.r_format.rd = insn.mm_fp4_format.rt; + mips32_insn.r_format.re = 0; + mips32_insn.r_format.func = movc_op; + break; + case mm_fcvtd0_op: + case mm_fcvtd1_op: + case mm_fcvts0_op: + case mm_fcvts1_op: + if ((insn.mm_fp1_format.op & 0x7f) == + mm_fcvtd0_op) { + func = fcvtd_op; + fmt = swl_format[insn.mm_fp3_format.fmt]; + } else { + func = fcvts_op; + fmt = dwl_format[insn.mm_fp3_format.fmt]; + } + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = fmt; + mips32_insn.fp0_format.ft = 0; + mips32_insn.fp0_format.fs = + insn.mm_fp3_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp3_format.rt; + mips32_insn.fp0_format.func = func; + break; + case mm_fmov0_op: + case mm_fmov1_op: + case mm_fabs0_op: + case mm_fabs1_op: + case mm_fneg0_op: + case mm_fneg1_op: + if ((insn.mm_fp1_format.op & 0x7f) == + mm_fmov0_op) + func = fmov_op; + else if ((insn.mm_fp1_format.op & 0x7f) == + mm_fabs0_op) + func = fabs_op; + else + func = fneg_op; + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp3_format.fmt]; + mips32_insn.fp0_format.ft = 0; + mips32_insn.fp0_format.fs = + insn.mm_fp3_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp3_format.rt; + mips32_insn.fp0_format.func = func; + break; + case mm_ffloorl_op: + case mm_ffloorw_op: + case mm_fceill_op: + case mm_fceilw_op: + case mm_ftruncl_op: + case mm_ftruncw_op: + case mm_froundl_op: + case mm_froundw_op: + case mm_fcvtl_op: + case mm_fcvtw_op: + if (insn.mm_fp1_format.op == mm_ffloorl_op) + func = ffloorl_op; + else if (insn.mm_fp1_format.op == mm_ffloorw_op) + func = ffloor_op; + else if (insn.mm_fp1_format.op == mm_fceill_op) + func = fceill_op; + else if (insn.mm_fp1_format.op == mm_fceilw_op) + func = fceil_op; + else if (insn.mm_fp1_format.op == mm_ftruncl_op) + func = ftruncl_op; + else if (insn.mm_fp1_format.op == mm_ftruncw_op) + func = ftrunc_op; + else if (insn.mm_fp1_format.op == mm_froundl_op) + func = froundl_op; + else if (insn.mm_fp1_format.op == mm_froundw_op) + func = fround_op; + else if (insn.mm_fp1_format.op == mm_fcvtl_op) + 
func = fcvtl_op; + else + func = fcvtw_op; + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sd_format[insn.mm_fp1_format.fmt]; + mips32_insn.fp0_format.ft = 0; + mips32_insn.fp0_format.fs = + insn.mm_fp1_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp1_format.rt; + mips32_insn.fp0_format.func = func; + break; + case mm_frsqrt_op: + case mm_fsqrt_op: + case mm_frecip_op: + if (insn.mm_fp1_format.op == mm_frsqrt_op) + func = frsqrt_op; + else if (insn.mm_fp1_format.op == mm_fsqrt_op) + func = fsqrt_op; + else + func = frecip_op; + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp1_format.fmt]; + mips32_insn.fp0_format.ft = 0; + mips32_insn.fp0_format.fs = + insn.mm_fp1_format.fs; + mips32_insn.fp0_format.fd = + insn.mm_fp1_format.rt; + mips32_insn.fp0_format.func = func; + break; + case mm_mfc1_op: + case mm_mtc1_op: + case mm_cfc1_op: + case mm_ctc1_op: + if (insn.mm_fp1_format.op == mm_mfc1_op) + op = mfc_op; + else if (insn.mm_fp1_format.op == mm_mtc1_op) + op = mtc_op; + else if (insn.mm_fp1_format.op == mm_cfc1_op) + op = cfc_op; + else + op = ctc_op; + mips32_insn.fp1_format.opcode = cop1_op; + mips32_insn.fp1_format.op = op; + mips32_insn.fp1_format.rt = + insn.mm_fp1_format.rt; + mips32_insn.fp1_format.fs = + insn.mm_fp1_format.fs; + mips32_insn.fp1_format.fd = 0; + mips32_insn.fp1_format.func = 0; + break; + default: + return SIGILL; + break; + } + break; + case mm_32f_74_op: /* c.cond.fmt */ + mips32_insn.fp0_format.opcode = cop1_op; + mips32_insn.fp0_format.fmt = + sdps_format[insn.mm_fp4_format.fmt]; + mips32_insn.fp0_format.ft = insn.mm_fp4_format.rt; + mips32_insn.fp0_format.fs = insn.mm_fp4_format.fs; + mips32_insn.fp0_format.fd = insn.mm_fp4_format.cc << 2; + mips32_insn.fp0_format.func = + insn.mm_fp4_format.cond | MM_MIPS32_COND_FC; + break; + default: + return SIGILL; + break; + } + break; + default: + return SIGILL; + break; + } + + *insn_ptr = mips32_insn; + return 0; +} + +int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, + unsigned long *contpc) +{ + union mips_instruction insn = (union mips_instruction)dec_insn.insn; + int bc_false = 0; + unsigned int fcr31; + unsigned int bit; + + switch (insn.mm_i_format.opcode) { + case mm_pool32a_op: + if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == + mm_pool32axf_op) { + switch (insn.mm_i_format.simmediate >> + MM_POOL32A_MINOR_SHIFT) { + case mm_jalr_op: + case mm_jalrhb_op: + case mm_jalrs_op: + case mm_jalrshb_op: + if (insn.mm_i_format.rt != 0) /* Not mm_jr */ + regs->regs[insn.mm_i_format.rt] = + regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + *contpc = regs->regs[insn.mm_i_format.rs]; + return 1; + break; + } + } + break; + case mm_pool32i_op: + switch (insn.mm_i_format.rt) { + case mm_bltzals_op: + case mm_bltzal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case mm_bltz_op: + if ((long)regs->regs[insn.mm_i_format.rs] < 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_bgezals_op: + case mm_bgezal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case mm_bgez_op: + if ((long)regs->regs[insn.mm_i_format.rs] >= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + 
dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_blez_op: + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_bgtz_op: + if ((long)regs->regs[insn.mm_i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_bc2f_op: + case mm_bc1f_op: + bc_false = 1; + /* Fall through */ + case mm_bc2t_op: + case mm_bc1t_op: + preempt_disable(); + if (is_fpu_owner()) + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + if (bc_false) + fcr31 = ~fcr31; + + bit = (insn.mm_i_format.rs >> 2); + bit += (bit != 0); + bit += 23; + if (fcr31 & (1 << bit)) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + break; + } + break; + case mm_pool16c_op: + switch (insn.mm_i_format.rt) { + case mm_jalr16_op: + case mm_jalrs16_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + /* Fall through */ + case mm_jr16_op: + *contpc = regs->regs[insn.mm_i_format.rs]; + return 1; + break; + } + break; + case mm_beqz16_op: + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_b1_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + break; + case mm_bnez16_op: + if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_b1_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + break; + case mm_b16_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc + + (insn.mm_b0_format.simmediate << 1); + return 1; + break; + case mm_beq32_op: + if (regs->regs[insn.mm_i_format.rs] == + regs->regs[insn.mm_i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case mm_bne32_op: + if (regs->regs[insn.mm_i_format.rs] != + regs->regs[insn.mm_i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.mm_i_format.simmediate << 1); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + return 1; + break; + case mm_jalx32_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 28; + *contpc <<= 28; + *contpc |= (insn.j_format.target << 2); + return 1; + break; + case mm_jals32_op: + case mm_jal32_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + dec_insn.next_pc_inc; + /* Fall through */ + case mm_j32_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 27; + *contpc <<= 27; + *contpc |= (insn.j_format.target << 1); + set_isa16_mode(*contpc); + return 1; + break; + } + return 0; +} /* * Redundant with logic already in kernel/branch.c, @@ -117,53 +673,177 @@ static const unsigned int fpucondbit[8] = { * a single subroutine should be used across both * modules. 
*/ -static int isBranchInstr(mips_instruction * i) +static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, + unsigned long *contpc) { - switch (MIPSInst_OPCODE(*i)) { + union mips_instruction insn = (union mips_instruction)dec_insn.insn; + unsigned int fcr31; + unsigned int bit = 0; + + switch (insn.i_format.opcode) { case spec_op: - switch (MIPSInst_FUNC(*i)) { + switch (insn.r_format.func) { case jalr_op: + regs->regs[insn.r_format.rd] = + regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ case jr_op: + *contpc = regs->regs[insn.r_format.rs]; return 1; + break; } break; - case bcond_op: - switch (MIPSInst_RT(*i)) { + switch (insn.i_format.rt) { + case bltzal_op: + case bltzall_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ case bltz_op: - case bgez_op: case bltzl_op: - case bgezl_op: - case bltzal_op: + if ((long)regs->regs[insn.i_format.rs] < 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; case bgezal_op: - case bltzall_op: case bgezall_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case bgez_op: + case bgezl_op: + if ((long)regs->regs[insn.i_format.rs] >= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; return 1; + break; } break; - - case j_op: - case jal_op: case jalx_op: + set_isa16_mode(bit); + case jal_op: + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + /* Fall through */ + case j_op: + *contpc = regs->cp0_epc + dec_insn.pc_inc; + *contpc >>= 28; + *contpc <<= 28; + *contpc |= (insn.j_format.target << 2); + /* Set microMIPS mode bit: XOR for jalx. 
*/ + *contpc ^= bit; + return 1; + break; case beq_op: - case bne_op: - case blez_op: - case bgtz_op: case beql_op: + if (regs->regs[insn.i_format.rs] == + regs->regs[insn.i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case bne_op: case bnel_op: + if (regs->regs[insn.i_format.rs] != + regs->regs[insn.i_format.rt]) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case blez_op: case blezl_op: + if ((long)regs->regs[insn.i_format.rs] <= 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case bgtz_op: case bgtzl_op: + if ((long)regs->regs[insn.i_format.rs] > 0) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; return 1; - + break; case cop0_op: case cop1_op: case cop2_op: case cop1x_op: - if (MIPSInst_RS(*i) == bc_op) - return 1; + if (insn.i_format.rs == bc_op) { + preempt_disable(); + if (is_fpu_owner()) + asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + bit = (insn.i_format.rt >> 2); + bit += (bit != 0); + bit += 23; + switch (insn.i_format.rt & 3) { + case 0: /* bc1f */ + case 2: /* bc1fl */ + if (~fcr31 & (1 << bit)) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + case 1: /* bc1t */ + case 3: /* bc1tl */ + if (fcr31 & (1 << bit)) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + return 1; + break; + } + } break; } - return 0; } @@ -210,26 +890,23 @@ static inline int cop1_64bit(struct pt_regs *xcp) */ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, - void *__user *fault_addr) + struct mm_decoded_insn dec_insn, void *__user *fault_addr) { mips_instruction ir; - unsigned long emulpc, contpc; + unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc; unsigned int cond; - - if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGBUS; - } - if (__get_user(ir, (mips_instruction __user *) xcp->cp0_epc)) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGSEGV; - } + int pc_inc; /* XXX NEC Vr54xx bug workaround */ - if ((xcp->cp0_cause & CAUSEF_BD) && !isBranchInstr(&ir)) - xcp->cp0_cause &= ~CAUSEF_BD; + if (xcp->cp0_cause & CAUSEF_BD) { + if (dec_insn.micro_mips_mode) { + if (!mm_isBranchInstr(xcp, dec_insn, &contpc)) + xcp->cp0_cause &= ~CAUSEF_BD; + } else { + if (!isBranchInstr(xcp, dec_insn, &contpc)) + xcp->cp0_cause &= ~CAUSEF_BD; + } + } if (xcp->cp0_cause & CAUSEF_BD) { /* @@ -244,32 +921,33 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * Linux MIPS branch emulator operates on context, updating the * cp0_epc. 
*/ - emulpc = xcp->cp0_epc + 4; /* Snapshot emulation target */ + ir = dec_insn.next_insn; /* process delay slot instr */ + pc_inc = dec_insn.next_pc_inc; + } else { + ir = dec_insn.insn; /* process current instr */ + pc_inc = dec_insn.pc_inc; + } - if (__compute_return_epc(xcp) < 0) { -#ifdef CP1DBG - printk("failed to emulate branch at %p\n", - (void *) (xcp->cp0_epc)); -#endif + /* + * Since microMIPS FPU instructios are a subset of MIPS32 FPU + * instructions, we want to convert microMIPS FPU instructions + * into MIPS32 instructions so that we could reuse all of the + * FPU emulation code. + * + * NOTE: We cannot do this for branch instructions since they + * are not a subset. Example: Cannot emulate a 16-bit + * aligned target address with a MIPS32 instruction. + */ + if (dec_insn.micro_mips_mode) { + /* + * If next instruction is a 16-bit instruction, then it + * it cannot be a FPU instruction. This could happen + * since we can be called for non-FPU instructions. + */ + if ((pc_inc == 2) || + (microMIPS32_to_MIPS32((union mips_instruction *)&ir) + == SIGILL)) return SIGILL; - } - if (!access_ok(VERIFY_READ, emulpc, sizeof(mips_instruction))) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)emulpc; - return SIGBUS; - } - if (__get_user(ir, (mips_instruction __user *) emulpc)) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)emulpc; - return SIGSEGV; - } - /* __compute_return_epc() will have updated cp0_epc */ - contpc = xcp->cp0_epc; - /* In order not to confuse ptrace() et al, tweak context */ - xcp->cp0_epc = emulpc - 4; - } else { - emulpc = xcp->cp0_epc; - contpc = xcp->cp0_epc + 4; } emul: @@ -474,22 +1152,35 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, /* branch taken: emulate dslot * instruction */ - xcp->cp0_epc += 4; - contpc = (xcp->cp0_epc + - (MIPSInst_SIMM(ir) << 2)); - - if (!access_ok(VERIFY_READ, xcp->cp0_epc, - sizeof(mips_instruction))) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGBUS; - } - if (__get_user(ir, - (mips_instruction __user *) xcp->cp0_epc)) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGSEGV; - } + xcp->cp0_epc += dec_insn.pc_inc; + + contpc = MIPSInst_SIMM(ir); + ir = dec_insn.next_insn; + if (dec_insn.micro_mips_mode) { + contpc = (xcp->cp0_epc + (contpc << 1)); + + /* If 16-bit instruction, not FPU. */ + if ((dec_insn.next_pc_inc == 2) || + (microMIPS32_to_MIPS32((union mips_instruction *)&ir) == SIGILL)) { + + /* + * Since this instruction will + * be put on the stack with + * 32-bit words, get around + * this problem by putting a + * NOP16 as the second one. + */ + if (dec_insn.next_pc_inc == 2) + ir = (ir & (~0xffff)) | MM_NOP16; + + /* + * Single step the non-CP1 + * instruction in the dslot. 
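A hedged sketch of the padding step described above, using the names this patch already defines (MM_NOP16 is the 16-bit microMIPS NOP): a 16-bit delay-slot instruction still has to occupy a full 32-bit emuframe word, so it is kept in the upper halfword with a 16-bit NOP filling the lower one. The helper name is hypothetical.

        static inline u32 mm_pad_dslot(u16 insn16)
        {
                /* upper half: the real instruction, lower half: microMIPS NOP16 */
                return ((u32)insn16 << 16) | MM_NOP16;
        }

This is what ir = (ir & ~0xffff) | MM_NOP16 achieves in the hunk above, since ir already holds the halfword duplicated into both halves at that point.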
+ */ + return mips_dsemul(xcp, ir, contpc); + } + } else + contpc = (xcp->cp0_epc + (contpc << 2)); switch (MIPSInst_OPCODE(ir)) { case lwc1_op: @@ -525,8 +1216,8 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * branch likely nullifies * dslot if not taken */ - xcp->cp0_epc += 4; - contpc += 4; + xcp->cp0_epc += dec_insn.pc_inc; + contpc += dec_insn.pc_inc; /* * else continue & execute * dslot as normal insn @@ -1313,25 +2004,75 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, int has_fpu, void *__user *fault_addr) { unsigned long oldepc, prevepc; - mips_instruction insn; + struct mm_decoded_insn dec_insn; + u16 instr[4]; + u16 *instr_ptr; int sig = 0; oldepc = xcp->cp0_epc; do { prevepc = xcp->cp0_epc; - if (!access_ok(VERIFY_READ, xcp->cp0_epc, sizeof(mips_instruction))) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGBUS; - } - if (__get_user(insn, (mips_instruction __user *) xcp->cp0_epc)) { - MIPS_FPU_EMU_INC_STATS(errors); - *fault_addr = (mips_instruction __user *)xcp->cp0_epc; - return SIGSEGV; + if (get_isa16_mode(prevepc) && cpu_has_mmips) { + /* + * Get next 2 microMIPS instructions and convert them + * into 32-bit instructions. + */ + if ((get_user(instr[0], (u16 __user *)msk_isa16_mode(xcp->cp0_epc))) || + (get_user(instr[1], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 2))) || + (get_user(instr[2], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 4))) || + (get_user(instr[3], (u16 __user *)msk_isa16_mode(xcp->cp0_epc + 6)))) { + MIPS_FPU_EMU_INC_STATS(errors); + return SIGBUS; + } + instr_ptr = instr; + + /* Get first instruction. */ + if (mm_insn_16bit(*instr_ptr)) { + /* Duplicate the half-word. */ + dec_insn.insn = (*instr_ptr << 16) | + (*instr_ptr); + /* 16-bit instruction. */ + dec_insn.pc_inc = 2; + instr_ptr += 1; + } else { + dec_insn.insn = (*instr_ptr << 16) | + *(instr_ptr+1); + /* 32-bit instruction. */ + dec_insn.pc_inc = 4; + instr_ptr += 2; + } + /* Get second instruction. */ + if (mm_insn_16bit(*instr_ptr)) { + /* Duplicate the half-word. */ + dec_insn.next_insn = (*instr_ptr << 16) | + (*instr_ptr); + /* 16-bit instruction. */ + dec_insn.next_pc_inc = 2; + } else { + dec_insn.next_insn = (*instr_ptr << 16) | + *(instr_ptr+1); + /* 32-bit instruction. 
*/ + dec_insn.next_pc_inc = 4; + } + dec_insn.micro_mips_mode = 1; + } else { + if ((get_user(dec_insn.insn, + (mips_instruction __user *) xcp->cp0_epc)) || + (get_user(dec_insn.next_insn, + (mips_instruction __user *)(xcp->cp0_epc+4)))) { + MIPS_FPU_EMU_INC_STATS(errors); + return SIGBUS; + } + dec_insn.pc_inc = 4; + dec_insn.next_pc_inc = 4; + dec_insn.micro_mips_mode = 0; } - if (insn == 0) - xcp->cp0_epc += 4; /* skip nops */ + + if ((dec_insn.insn == 0) || + ((dec_insn.pc_inc == 2) && + ((dec_insn.insn & 0xffff) == MM_NOP16))) + xcp->cp0_epc += dec_insn.pc_inc; /* Skip NOPs */ else { /* * The 'ieee754_csr' is an alias of @@ -1341,7 +2082,7 @@ int fpu_emulator_cop1Handler(struct pt_regs *xcp, struct mips_fpu_struct *ctx, */ /* convert to ieee library modes */ ieee754_csr.rm = ieee_rm[ieee754_csr.rm]; - sig = cop1Emulate(xcp, ctx, fault_addr); + sig = cop1Emulate(xcp, ctx, dec_insn, fault_addr); /* revert to mips rounding mode */ ieee754_csr.rm = mips_rm[ieee754_csr.rm]; } diff --git a/trunk/arch/mips/math-emu/dsemul.c b/trunk/arch/mips/math-emu/dsemul.c index 384a3b0091ea..7ea622ab8dad 100644 --- a/trunk/arch/mips/math-emu/dsemul.c +++ b/trunk/arch/mips/math-emu/dsemul.c @@ -55,7 +55,9 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) struct emuframe __user *fr; int err; - if (ir == 0) { /* a nop is easy */ + if ((get_isa16_mode(regs->cp0_epc) && ((ir >> 16) == MM_NOP16)) || + (ir == 0)) { + /* NOP is easy */ regs->cp0_epc = cpc; regs->cp0_cause &= ~CAUSEF_BD; return 0; @@ -91,8 +93,16 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe)))) return SIGBUS; - err = __put_user(ir, &fr->emul); - err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst); + if (get_isa16_mode(regs->cp0_epc)) { + err = __put_user(ir >> 16, (u16 __user *)(&fr->emul)); + err |= __put_user(ir & 0xffff, (u16 __user *)((long)(&fr->emul) + 2)); + err |= __put_user(BREAK_MATH >> 16, (u16 __user *)(&fr->badinst)); + err |= __put_user(BREAK_MATH & 0xffff, (u16 __user *)((long)(&fr->badinst) + 2)); + } else { + err = __put_user(ir, &fr->emul); + err |= __put_user((mips_instruction)BREAK_MATH, &fr->badinst); + } + err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie); err |= __put_user(cpc, &fr->epc); @@ -101,7 +111,8 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc) return SIGBUS; } - regs->cp0_epc = (unsigned long) &fr->emul; + regs->cp0_epc = ((unsigned long) &fr->emul) | + get_isa16_mode(regs->cp0_epc); flush_cache_sigtramp((unsigned long)&fr->badinst); @@ -114,9 +125,10 @@ int do_dsemulret(struct pt_regs *xcp) unsigned long epc; u32 insn, cookie; int err = 0; + u16 instr[2]; fr = (struct emuframe __user *) - (xcp->cp0_epc - sizeof(mips_instruction)); + (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction)); /* * If we can't even access the area, something is very wrong, but we'll @@ -131,7 +143,13 @@ int do_dsemulret(struct pt_regs *xcp) * - Is the instruction pointed to by the EPC an BREAK_MATH? * - Is the following memory word the BD_COOKIE? 
*/ - err = __get_user(insn, &fr->badinst); + if (get_isa16_mode(xcp->cp0_epc)) { + err = __get_user(instr[0], (u16 __user *)(&fr->badinst)); + err |= __get_user(instr[1], (u16 __user *)((long)(&fr->badinst) + 2)); + insn = (instr[0] << 16) | instr[1]; + } else { + err = __get_user(insn, &fr->badinst); + } err |= __get_user(cookie, &fr->cookie); if (unlikely(err || (insn != BREAK_MATH) || (cookie != BD_COOKIE))) { diff --git a/trunk/arch/mips/mm/Makefile b/trunk/arch/mips/mm/Makefile index 1dcec30ad1c4..e87aae1f2e80 100644 --- a/trunk/arch/mips/mm/Makefile +++ b/trunk/arch/mips/mm/Makefile @@ -4,7 +4,7 @@ obj-y += cache.o dma-default.o extable.o fault.o \ gup.o init.o mmap.o page.o page-funcs.o \ - tlbex.o tlbex-fault.o uasm.o + tlbex.o tlbex-fault.o uasm-mips.o obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o obj-$(CONFIG_64BIT) += pgtable-64.o @@ -22,3 +22,5 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o + +obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o diff --git a/trunk/arch/mips/mm/c-r4k.c b/trunk/arch/mips/mm/c-r4k.c index 2078915eacb9..21813beec7a5 100644 --- a/trunk/arch/mips/mm/c-r4k.c +++ b/trunk/arch/mips/mm/c-r4k.c @@ -33,6 +33,7 @@ #include #include /* for run_uncached() */ #include +#include /* * Special Variant of smp_call_function for use by cache functions: @@ -136,7 +137,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; } -static void (* r4k_blast_dcache)(void); +void (* r4k_blast_dcache)(void); +EXPORT_SYMBOL(r4k_blast_dcache); static void __cpuinit r4k_blast_dcache_setup(void) { @@ -264,7 +266,8 @@ static void __cpuinit r4k_blast_icache_page_indexed_setup(void) r4k_blast_icache_page_indexed = blast_icache64_page_indexed; } -static void (* r4k_blast_icache)(void); +void (* r4k_blast_icache)(void); +EXPORT_SYMBOL(r4k_blast_icache); static void __cpuinit r4k_blast_icache_setup(void) { @@ -1377,20 +1380,6 @@ static void __cpuinit coherency_setup(void) } } -#if defined(CONFIG_DMA_NONCOHERENT) - -static int __cpuinitdata coherentio; - -static int __init setcoherentio(char *str) -{ - coherentio = 1; - - return 0; -} - -early_param("coherentio", setcoherentio); -#endif - static void __cpuinit r4k_cache_error_setup(void) { extern char __weak except_vec2_generic; @@ -1472,9 +1461,14 @@ void __cpuinit r4k_cache_init(void) build_clear_page(); build_copy_page(); -#if !defined(CONFIG_MIPS_CMP) + + /* + * We want to run CMP kernels on core with and without coherent + * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether + * or not to flush caches. + */ local_r4k___flush_cache_all(NULL); -#endif + coherency_setup(); board_cache_error_setup = r4k_cache_error_setup; } diff --git a/trunk/arch/mips/mm/cache.c b/trunk/arch/mips/mm/cache.c index 07cec4407b0c..5aeb3eb0b72f 100644 --- a/trunk/arch/mips/mm/cache.c +++ b/trunk/arch/mips/mm/cache.c @@ -48,6 +48,7 @@ void (*flush_icache_all)(void); EXPORT_SYMBOL_GPL(local_flush_data_cache_page); EXPORT_SYMBOL(flush_data_cache_page); +EXPORT_SYMBOL(flush_icache_all); #ifdef CONFIG_DMA_NONCOHERENT diff --git a/trunk/arch/mips/mm/dma-default.c b/trunk/arch/mips/mm/dma-default.c index f9ef83829a52..caf92ecb37d6 100644 --- a/trunk/arch/mips/mm/dma-default.c +++ b/trunk/arch/mips/mm/dma-default.c @@ -22,6 +22,26 @@ #include +int coherentio = 0; /* User defined DMA coherency from command line. 
*/ +EXPORT_SYMBOL_GPL(coherentio); +int hw_coherentio = 0; /* Actual hardware supported DMA coherency setting. */ + +static int __init setcoherentio(char *str) +{ + coherentio = 1; + pr_info("Hardware DMA cache coherency (command line)\n"); + return 0; +} +early_param("coherentio", setcoherentio); + +static int __init setnocoherentio(char *str) +{ + coherentio = 0; + pr_info("Software DMA cache coherency (command line)\n"); + return 0; +} +early_param("nocoherentio", setnocoherentio); + static inline struct page *dma_addr_to_page(struct device *dev, dma_addr_t dma_addr) { @@ -115,7 +135,8 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size, if (!plat_device_is_coherent(dev)) { dma_cache_wback_inv((unsigned long) ret, size); - ret = UNCAC_ADDR(ret); + if (!hw_coherentio) + ret = UNCAC_ADDR(ret); } } @@ -142,7 +163,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr, plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL); - if (!plat_device_is_coherent(dev)) + if (!plat_device_is_coherent(dev) && !hw_coherentio) addr = CAC_ADDR(addr); free_pages(addr, get_order(size)); diff --git a/trunk/arch/mips/mm/page.c b/trunk/arch/mips/mm/page.c index a29fba55b53e..4eb8dcfaf1ce 100644 --- a/trunk/arch/mips/mm/page.c +++ b/trunk/arch/mips/mm/page.c @@ -247,6 +247,11 @@ void __cpuinit build_clear_page(void) struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; + static atomic_t run_once = ATOMIC_INIT(0); + + if (atomic_xchg(&run_once, 1)) { + return; + } memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -389,6 +394,11 @@ void __cpuinit build_copy_page(void) struct uasm_label *l = labels; struct uasm_reloc *r = relocs; int i; + static atomic_t run_once = ATOMIC_INIT(0); + + if (atomic_xchg(&run_once, 1)) { + return; + } memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); diff --git a/trunk/arch/mips/mm/tlb-r4k.c b/trunk/arch/mips/mm/tlb-r4k.c index 493131c81a29..c643de4c473a 100644 --- a/trunk/arch/mips/mm/tlb-r4k.c +++ b/trunk/arch/mips/mm/tlb-r4k.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -94,6 +95,7 @@ void local_flush_tlb_all(void) FLUSH_ITLB; EXIT_CRITICAL(flags); } +EXPORT_SYMBOL(local_flush_tlb_all); /* All entries common to a mm share an asid. To effectively flush these entries, we just bump the asid. 
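The run-once guard added to build_clear_page() and build_copy_page() above is a small idiom worth spelling out; here is a hedged stand-alone sketch (the function name is made up, only the guard itself mirrors the patch).

        #include <linux/atomic.h>

        void build_something_once(void)
        {
                static atomic_t run_once = ATOMIC_INIT(0);

                /* atomic_xchg() returns the previous value, so only the
                 * first caller observes 0 and proceeds. */
                if (atomic_xchg(&run_once, 1))
                        return;

                /* ...one-time table/handler generation would go here... */
        }

The exchange makes repeated calls harmless, presumably because these builders can now be reached more than once during cache setup.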
*/ diff --git a/trunk/arch/mips/mm/tlbex.c b/trunk/arch/mips/mm/tlbex.c index 820e6612d744..ce9818eef7d3 100644 --- a/trunk/arch/mips/mm/tlbex.c +++ b/trunk/arch/mips/mm/tlbex.c @@ -1458,17 +1458,17 @@ u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned; u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned; #ifdef CONFIG_MIPS_PGD_C0_CONTEXT -u32 tlbmiss_handler_setup_pgd[16] __cacheline_aligned; +u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned; static void __cpuinit build_r4000_setup_pgd(void) { const int a0 = 4; const int a1 = 5; - u32 *p = tlbmiss_handler_setup_pgd; + u32 *p = tlbmiss_handler_setup_pgd_array; struct uasm_label *l = labels; struct uasm_reloc *r = relocs; - memset(tlbmiss_handler_setup_pgd, 0, sizeof(tlbmiss_handler_setup_pgd)); + memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array)); memset(labels, 0, sizeof(labels)); memset(relocs, 0, sizeof(relocs)); @@ -1496,15 +1496,15 @@ static void __cpuinit build_r4000_setup_pgd(void) uasm_i_jr(&p, 31); UASM_i_MTC0(&p, a0, 31, pgd_reg); } - if (p - tlbmiss_handler_setup_pgd > ARRAY_SIZE(tlbmiss_handler_setup_pgd)) - panic("tlbmiss_handler_setup_pgd space exceeded"); + if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)) + panic("tlbmiss_handler_setup_pgd_array space exceeded"); uasm_resolve_relocs(relocs, labels); - pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n", - (unsigned int)(p - tlbmiss_handler_setup_pgd)); + pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n", + (unsigned int)(p - tlbmiss_handler_setup_pgd_array)); dump_handler("tlbmiss_handler", - tlbmiss_handler_setup_pgd, - ARRAY_SIZE(tlbmiss_handler_setup_pgd)); + tlbmiss_handler_setup_pgd_array, + ARRAY_SIZE(tlbmiss_handler_setup_pgd_array)); } #endif @@ -2030,6 +2030,13 @@ static void __cpuinit build_r4000_tlb_load_handler(void) uasm_l_nopage_tlbl(&l, p); build_restore_work_registers(&p); +#ifdef CONFIG_CPU_MICROMIPS + if ((unsigned long)tlb_do_page_fault_0 & 1) { + uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0)); + uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0)); + uasm_i_jr(&p, K0); + } else +#endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); uasm_i_nop(&p); @@ -2077,6 +2084,13 @@ static void __cpuinit build_r4000_tlb_store_handler(void) uasm_l_nopage_tlbs(&l, p); build_restore_work_registers(&p); +#ifdef CONFIG_CPU_MICROMIPS + if ((unsigned long)tlb_do_page_fault_1 & 1) { + uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); + uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); + uasm_i_jr(&p, K0); + } else +#endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -2125,6 +2139,13 @@ static void __cpuinit build_r4000_tlb_modify_handler(void) uasm_l_nopage_tlbm(&l, p); build_restore_work_registers(&p); +#ifdef CONFIG_CPU_MICROMIPS + if ((unsigned long)tlb_do_page_fault_1 & 1) { + uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); + uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); + uasm_i_jr(&p, K0); + } else +#endif uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); uasm_i_nop(&p); @@ -2162,8 +2183,11 @@ void __cpuinit build_tlb_refill_handler(void) case CPU_TX3922: case CPU_TX3927: #ifndef CONFIG_MIPS_PGD_C0_CONTEXT - build_r3000_tlb_refill_handler(); + if (cpu_has_local_ebase) + build_r3000_tlb_refill_handler(); if (!run_once) { + if (!cpu_has_local_ebase) + 
build_r3000_tlb_refill_handler(); build_r3000_tlb_load_handler(); build_r3000_tlb_store_handler(); build_r3000_tlb_modify_handler(); @@ -2192,9 +2216,12 @@ void __cpuinit build_tlb_refill_handler(void) build_r4000_tlb_load_handler(); build_r4000_tlb_store_handler(); build_r4000_tlb_modify_handler(); + if (!cpu_has_local_ebase) + build_r4000_tlb_refill_handler(); run_once++; } - build_r4000_tlb_refill_handler(); + if (cpu_has_local_ebase) + build_r4000_tlb_refill_handler(); } } @@ -2207,7 +2234,7 @@ void __cpuinit flush_tlb_handlers(void) local_flush_icache_range((unsigned long)handle_tlbm, (unsigned long)handle_tlbm + sizeof(handle_tlbm)); #ifdef CONFIG_MIPS_PGD_C0_CONTEXT - local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd, - (unsigned long)tlbmiss_handler_setup_pgd + sizeof(handle_tlbm)); + local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array, + (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(handle_tlbm)); #endif } diff --git a/trunk/arch/mips/mm/uasm-micromips.c b/trunk/arch/mips/mm/uasm-micromips.c new file mode 100644 index 000000000000..162ee6d62788 --- /dev/null +++ b/trunk/arch/mips/mm/uasm-micromips.c @@ -0,0 +1,221 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * A small micro-assembler. It is intentionally kept simple, does only + * support a subset of instructions, and does not try to hide pipeline + * effects like branch delay slots. + * + * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer + * Copyright (C) 2005, 2007 Maciej W. Rozycki + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. + */ + +#include +#include +#include + +#include +#include +#include +#define UASM_ISA _UASM_ISA_MICROMIPS +#include + +#define RS_MASK 0x1f +#define RS_SH 16 +#define RT_MASK 0x1f +#define RT_SH 21 +#define SCIMM_MASK 0x3ff +#define SCIMM_SH 16 + +/* This macro sets the non-variable bits of an instruction. */ +#define M(a, b, c, d, e, f) \ + ((a) << OP_SH \ + | (b) << RT_SH \ + | (c) << RS_SH \ + | (d) << RD_SH \ + | (e) << RE_SH \ + | (f) << FUNC_SH) + +/* Define these when we are not the ISA the kernel is being compiled with. 
*/ +#ifndef CONFIG_CPU_MICROMIPS +#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) +#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) +#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) +#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) +#endif + +#include "uasm.c" + +static struct insn insn_table_MM[] __uasminitdata = { + { insn_addu, M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD }, + { insn_addiu, M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, + { insn_and, M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD }, + { insn_andi, M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, + { insn_beq, M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_beql, 0, 0 }, + { insn_bgez, M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM }, + { insn_bgezl, 0, 0 }, + { insn_bltz, M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM }, + { insn_bltzl, 0, 0 }, + { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM }, + { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM }, + { insn_daddu, 0, 0 }, + { insn_daddiu, 0, 0 }, + { insn_dmfc0, 0, 0 }, + { insn_dmtc0, 0, 0 }, + { insn_dsll, 0, 0 }, + { insn_dsll32, 0, 0 }, + { insn_dsra, 0, 0 }, + { insn_dsrl, 0, 0 }, + { insn_dsrl32, 0, 0 }, + { insn_drotr, 0, 0 }, + { insn_drotr32, 0, 0 }, + { insn_dsubu, 0, 0 }, + { insn_eret, M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0 }, + { insn_ins, M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE }, + { insn_ext, M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE }, + { insn_j, M(mm_j32_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jal, M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jr, M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS }, + { insn_ld, 0, 0 }, + { insn_ll, M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM }, + { insn_lld, 0, 0 }, + { insn_lui, M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM }, + { insn_lw, M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, + { insn_mfc0, M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD }, + { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD }, + { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD }, + { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, + { insn_pref, M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM }, + { insn_rfe, 0, 0 }, + { insn_sc, M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM }, + { insn_scd, 0, 0 }, + { insn_sd, 0, 0 }, + { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, + { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD }, + { insn_srl, M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD }, + { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD }, + { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD }, + { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, + { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 }, + { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, + { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, + { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, + { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD }, + { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, + { insn_dins, 0, 0 }, + { insn_dinsm, 0, 0 }, + { insn_syscall, M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, 
mm_pool32axf_op), SCIMM}, + { insn_bbit0, 0, 0 }, + { insn_bbit1, 0, 0 }, + { insn_lwx, 0, 0 }, + { insn_ldx, 0, 0 }, + { insn_invalid, 0, 0 } +}; + +#undef M + +static inline __uasminit u32 build_bimm(s32 arg) +{ + WARN(arg > 0xffff || arg < -0x10000, + KERN_WARNING "Micro-assembler field overflow\n"); + + WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n"); + + return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff); +} + +static inline __uasminit u32 build_jimm(u32 arg) +{ + + WARN(arg & ~((JIMM_MASK << 2) | 1), + KERN_WARNING "Micro-assembler field overflow\n"); + + return (arg >> 1) & JIMM_MASK; +} + +/* + * The order of opcode arguments is implicitly left to right, + * starting with RS and ending with FUNC or IMM. + */ +static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) +{ + struct insn *ip = NULL; + unsigned int i; + va_list ap; + u32 op; + + for (i = 0; insn_table_MM[i].opcode != insn_invalid; i++) + if (insn_table_MM[i].opcode == opc) { + ip = &insn_table_MM[i]; + break; + } + + if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) + panic("Unsupported Micro-assembler instruction %d", opc); + + op = ip->match; + va_start(ap, opc); + if (ip->fields & RS) { + if (opc == insn_mfc0 || opc == insn_mtc0) + op |= build_rt(va_arg(ap, u32)); + else + op |= build_rs(va_arg(ap, u32)); + } + if (ip->fields & RT) { + if (opc == insn_mfc0 || opc == insn_mtc0) + op |= build_rs(va_arg(ap, u32)); + else + op |= build_rt(va_arg(ap, u32)); + } + if (ip->fields & RD) + op |= build_rd(va_arg(ap, u32)); + if (ip->fields & RE) + op |= build_re(va_arg(ap, u32)); + if (ip->fields & SIMM) + op |= build_simm(va_arg(ap, s32)); + if (ip->fields & UIMM) + op |= build_uimm(va_arg(ap, u32)); + if (ip->fields & BIMM) + op |= build_bimm(va_arg(ap, s32)); + if (ip->fields & JIMM) + op |= build_jimm(va_arg(ap, u32)); + if (ip->fields & FUNC) + op |= build_func(va_arg(ap, u32)); + if (ip->fields & SET) + op |= build_set(va_arg(ap, u32)); + if (ip->fields & SCIMM) + op |= build_scimm(va_arg(ap, u32)); + va_end(ap); + +#ifdef CONFIG_CPU_LITTLE_ENDIAN + **buf = ((op & 0xffff) << 16) | (op >> 16); +#else + **buf = op; +#endif + (*buf)++; +} + +static inline void __uasminit +__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) +{ + long laddr = (long)lab->addr; + long raddr = (long)rel->addr; + + switch (rel->type) { + case R_MIPS_PC16: +#ifdef CONFIG_CPU_LITTLE_ENDIAN + *rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16); +#else + *rel->addr |= build_bimm(laddr - (raddr + 4)); +#endif + break; + + default: + panic("Unsupported Micro-assembler relocation %d", + rel->type); + } +} diff --git a/trunk/arch/mips/mm/uasm-mips.c b/trunk/arch/mips/mm/uasm-mips.c new file mode 100644 index 000000000000..5fcdd8fe3e83 --- /dev/null +++ b/trunk/arch/mips/mm/uasm-mips.c @@ -0,0 +1,205 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * A small micro-assembler. It is intentionally kept simple, does only + * support a subset of instructions, and does not try to hide pipeline + * effects like branch delay slots. + * + * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer + * Copyright (C) 2005, 2007 Maciej W. Rozycki + * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. 
+ */ + +#include +#include +#include + +#include +#include +#include +#define UASM_ISA _UASM_ISA_CLASSIC +#include + +#define RS_MASK 0x1f +#define RS_SH 21 +#define RT_MASK 0x1f +#define RT_SH 16 +#define SCIMM_MASK 0xfffff +#define SCIMM_SH 6 + +/* This macro sets the non-variable bits of an instruction. */ +#define M(a, b, c, d, e, f) \ + ((a) << OP_SH \ + | (b) << RS_SH \ + | (c) << RT_SH \ + | (d) << RD_SH \ + | (e) << RE_SH \ + | (f) << FUNC_SH) + +/* Define these when we are not the ISA the kernel is being compiled with. */ +#ifdef CONFIG_CPU_MICROMIPS +#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) +#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) +#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) +#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) +#endif + +#include "uasm.c" + +static struct insn insn_table[] __uasminitdata = { + { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, + { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, + { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, + { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, + { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, + { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, + { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, + { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, + { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, + { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, + { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, + { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, + { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE }, + { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, + { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, + { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, + { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, + { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, + { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, + { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, + { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE }, + { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, + { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, + { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, + { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, + { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, + { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, + { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, + { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | 
SET}, + { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, + { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, + { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, + { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, + { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, + { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, + { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, + { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, + { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, + { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, + { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, + { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, + { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, + { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, + { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, + { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, + { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, + { insn_invalid, 0, 0 } +}; + +#undef M + +static inline __uasminit u32 build_bimm(s32 arg) +{ + WARN(arg > 0x1ffff || arg < -0x20000, + KERN_WARNING "Micro-assembler field overflow\n"); + + WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n"); + + return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); +} + +static inline __uasminit u32 build_jimm(u32 arg) +{ + WARN(arg & ~(JIMM_MASK << 2), + KERN_WARNING "Micro-assembler field overflow\n"); + + return (arg >> 2) & JIMM_MASK; +} + +/* + * The order of opcode arguments is implicitly left to right, + * starting with RS and ending with FUNC or IMM. + */ +static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) 
+{ + struct insn *ip = NULL; + unsigned int i; + va_list ap; + u32 op; + + for (i = 0; insn_table[i].opcode != insn_invalid; i++) + if (insn_table[i].opcode == opc) { + ip = &insn_table[i]; + break; + } + + if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) + panic("Unsupported Micro-assembler instruction %d", opc); + + op = ip->match; + va_start(ap, opc); + if (ip->fields & RS) + op |= build_rs(va_arg(ap, u32)); + if (ip->fields & RT) + op |= build_rt(va_arg(ap, u32)); + if (ip->fields & RD) + op |= build_rd(va_arg(ap, u32)); + if (ip->fields & RE) + op |= build_re(va_arg(ap, u32)); + if (ip->fields & SIMM) + op |= build_simm(va_arg(ap, s32)); + if (ip->fields & UIMM) + op |= build_uimm(va_arg(ap, u32)); + if (ip->fields & BIMM) + op |= build_bimm(va_arg(ap, s32)); + if (ip->fields & JIMM) + op |= build_jimm(va_arg(ap, u32)); + if (ip->fields & FUNC) + op |= build_func(va_arg(ap, u32)); + if (ip->fields & SET) + op |= build_set(va_arg(ap, u32)); + if (ip->fields & SCIMM) + op |= build_scimm(va_arg(ap, u32)); + va_end(ap); + + **buf = op; + (*buf)++; +} + +static inline void __uasminit +__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) +{ + long laddr = (long)lab->addr; + long raddr = (long)rel->addr; + + switch (rel->type) { + case R_MIPS_PC16: + *rel->addr |= build_bimm(laddr - (raddr + 4)); + break; + + default: + panic("Unsupported Micro-assembler relocation %d", + rel->type); + } +} diff --git a/trunk/arch/mips/mm/uasm.c b/trunk/arch/mips/mm/uasm.c index 942ff6c2eba2..7eb5e4355d25 100644 --- a/trunk/arch/mips/mm/uasm.c +++ b/trunk/arch/mips/mm/uasm.c @@ -10,17 +10,9 @@ * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005, 2007 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. */ -#include -#include -#include - -#include -#include -#include -#include - enum fields { RS = 0x001, RT = 0x002, @@ -37,10 +29,6 @@ enum fields { #define OP_MASK 0x3f #define OP_SH 26 -#define RS_MASK 0x1f -#define RS_SH 21 -#define RT_MASK 0x1f -#define RT_SH 16 #define RD_MASK 0x1f #define RD_SH 11 #define RE_MASK 0x1f @@ -53,8 +41,6 @@ enum fields { #define FUNC_SH 0 #define SET_MASK 0x7 #define SET_SH 0 -#define SCIMM_MASK 0xfffff -#define SCIMM_SH 6 enum opcode { insn_invalid, @@ -77,85 +63,6 @@ struct insn { enum fields fields; }; -/* This macro sets the non-variable bits of an instruction. 
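The two build_bimm() variants in this patch differ only in how the byte offset is scaled: classic MIPS branch offsets count 32-bit words, microMIPS 32-bit branch offsets count halfwords. A hedged worked example (the offset value is arbitrary):

        /*
         * Byte offset of +8 to the branch target:
         *   classic MIPS:  (8 >> 2) & 0x7fff == 2   (two words)
         *   microMIPS:     (8 >> 1) & 0x7fff == 4   (four halfwords)
         * which is also why the overflow check shrinks from roughly
         * +/-128 KB (classic) to +/-64 KB (microMIPS).
         */

The sign handling (the 1 << 15 bit for negative offsets) and the low-two-bit alignment warning are shared by both variants.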
*/ -#define M(a, b, c, d, e, f) \ - ((a) << OP_SH \ - | (b) << RS_SH \ - | (c) << RT_SH \ - | (d) << RD_SH \ - | (e) << RE_SH \ - | (f) << FUNC_SH) - -static struct insn insn_table[] __uasminitdata = { - { insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD }, - { insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD }, - { insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM }, - { insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM }, - { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, - { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, - { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, - { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, - { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, - { insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE }, - { insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE }, - { insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE }, - { insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE }, - { insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE }, - { insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE }, - { insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE }, - { insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE }, - { insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD }, - { insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 }, - { insn_ext, M(spec3_op, 0, 0, 0, 0, ext_op), RS | RT | RD | RE }, - { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, - { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, - { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, - { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, - { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, - { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, - { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, - { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, - { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, - { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, - { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, - { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, - { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, - { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, - { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | 
RD | RE }, - { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, - { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, - { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, - { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, - { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, - { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, - { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, - { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, - { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, - { insn_invalid, 0, 0 } -}; - -#undef M - static inline __uasminit u32 build_rs(u32 arg) { WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n"); @@ -199,24 +106,6 @@ static inline __uasminit u32 build_uimm(u32 arg) return arg & IMM_MASK; } -static inline __uasminit u32 build_bimm(s32 arg) -{ - WARN(arg > 0x1ffff || arg < -0x20000, - KERN_WARNING "Micro-assembler field overflow\n"); - - WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n"); - - return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff); -} - -static inline __uasminit u32 build_jimm(u32 arg) -{ - WARN(arg & ~(JIMM_MASK << 2), - KERN_WARNING "Micro-assembler field overflow\n"); - - return (arg >> 2) & JIMM_MASK; -} - static inline __uasminit u32 build_scimm(u32 arg) { WARN(arg & ~SCIMM_MASK, @@ -239,55 +128,7 @@ static inline __uasminit u32 build_set(u32 arg) return arg & SET_MASK; } -/* - * The order of opcode arguments is implicitly left to right, - * starting with RS and ending with FUNC or IMM. - */ -static void __uasminit build_insn(u32 **buf, enum opcode opc, ...) -{ - struct insn *ip = NULL; - unsigned int i; - va_list ap; - u32 op; - - for (i = 0; insn_table[i].opcode != insn_invalid; i++) - if (insn_table[i].opcode == opc) { - ip = &insn_table[i]; - break; - } - - if (!ip || (opc == insn_daddiu && r4k_daddiu_bug())) - panic("Unsupported Micro-assembler instruction %d", opc); - - op = ip->match; - va_start(ap, opc); - if (ip->fields & RS) - op |= build_rs(va_arg(ap, u32)); - if (ip->fields & RT) - op |= build_rt(va_arg(ap, u32)); - if (ip->fields & RD) - op |= build_rd(va_arg(ap, u32)); - if (ip->fields & RE) - op |= build_re(va_arg(ap, u32)); - if (ip->fields & SIMM) - op |= build_simm(va_arg(ap, s32)); - if (ip->fields & UIMM) - op |= build_uimm(va_arg(ap, u32)); - if (ip->fields & BIMM) - op |= build_bimm(va_arg(ap, s32)); - if (ip->fields & JIMM) - op |= build_jimm(va_arg(ap, u32)); - if (ip->fields & FUNC) - op |= build_func(va_arg(ap, u32)); - if (ip->fields & SET) - op |= build_set(va_arg(ap, u32)); - if (ip->fields & SCIMM) - op |= build_scimm(va_arg(ap, u32)); - va_end(ap); - - **buf = op; - (*buf)++; -} +static void __uasminit build_insn(u32 **buf, enum opcode opc, ...); #define I_u1u2u3(op) \ Ip_u1u2u3(op) \ @@ -445,7 +286,7 @@ I_u3u1u2(_ldx) #ifdef CONFIG_CPU_CAVIUM_OCTEON #include -void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, +void __uasminit ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, unsigned int c) { if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) @@ -457,21 +298,21 @@ void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b, else build_insn(buf, insn_pref, c, a, b); } -UASM_EXPORT_SYMBOL(uasm_i_pref); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_i_pref)); #else I_u2s3u1(_pref) #endif /* Handle labels. 
*/ -void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid) +void __uasminit ISAFUNC(uasm_build_label)(struct uasm_label **lab, u32 *addr, int lid) { (*lab)->addr = addr; (*lab)->lab = lid; (*lab)++; } -UASM_EXPORT_SYMBOL(uasm_build_label); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label)); -int __uasminit uasm_in_compat_space_p(long addr) +int __uasminit ISAFUNC(uasm_in_compat_space_p)(long addr) { /* Is this address in 32bit compat space? */ #ifdef CONFIG_64BIT @@ -480,7 +321,7 @@ int __uasminit uasm_in_compat_space_p(long addr) return 1; #endif } -UASM_EXPORT_SYMBOL(uasm_in_compat_space_p); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p)); static int __uasminit uasm_rel_highest(long val) { @@ -500,77 +341,66 @@ static int __uasminit uasm_rel_higher(long val) #endif } -int __uasminit uasm_rel_hi(long val) +int __uasminit ISAFUNC(uasm_rel_hi)(long val) { return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000; } -UASM_EXPORT_SYMBOL(uasm_rel_hi); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_hi)); -int __uasminit uasm_rel_lo(long val) +int __uasminit ISAFUNC(uasm_rel_lo)(long val) { return ((val & 0xffff) ^ 0x8000) - 0x8000; } -UASM_EXPORT_SYMBOL(uasm_rel_lo); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_rel_lo)); -void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr) +void __uasminit ISAFUNC(UASM_i_LA_mostly)(u32 **buf, unsigned int rs, long addr) { - if (!uasm_in_compat_space_p(addr)) { - uasm_i_lui(buf, rs, uasm_rel_highest(addr)); + if (!ISAFUNC(uasm_in_compat_space_p)(addr)) { + ISAFUNC(uasm_i_lui)(buf, rs, uasm_rel_highest(addr)); if (uasm_rel_higher(addr)) - uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr)); - if (uasm_rel_hi(addr)) { - uasm_i_dsll(buf, rs, rs, 16); - uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr)); - uasm_i_dsll(buf, rs, rs, 16); + ISAFUNC(uasm_i_daddiu)(buf, rs, rs, uasm_rel_higher(addr)); + if (ISAFUNC(uasm_rel_hi(addr))) { + ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16); + ISAFUNC(uasm_i_daddiu)(buf, rs, rs, + ISAFUNC(uasm_rel_hi)(addr)); + ISAFUNC(uasm_i_dsll)(buf, rs, rs, 16); } else - uasm_i_dsll32(buf, rs, rs, 0); + ISAFUNC(uasm_i_dsll32)(buf, rs, rs, 0); } else - uasm_i_lui(buf, rs, uasm_rel_hi(addr)); + ISAFUNC(uasm_i_lui)(buf, rs, ISAFUNC(uasm_rel_hi(addr))); } -UASM_EXPORT_SYMBOL(UASM_i_LA_mostly); +UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA_mostly)); -void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr) +void __uasminit ISAFUNC(UASM_i_LA)(u32 **buf, unsigned int rs, long addr) { - UASM_i_LA_mostly(buf, rs, addr); - if (uasm_rel_lo(addr)) { - if (!uasm_in_compat_space_p(addr)) - uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr)); + ISAFUNC(UASM_i_LA_mostly)(buf, rs, addr); + if (ISAFUNC(uasm_rel_lo(addr))) { + if (!ISAFUNC(uasm_in_compat_space_p)(addr)) + ISAFUNC(uasm_i_daddiu)(buf, rs, rs, + ISAFUNC(uasm_rel_lo(addr))); else - uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr)); + ISAFUNC(uasm_i_addiu)(buf, rs, rs, + ISAFUNC(uasm_rel_lo(addr))); } } -UASM_EXPORT_SYMBOL(UASM_i_LA); +UASM_EXPORT_SYMBOL(ISAFUNC(UASM_i_LA)); /* Handle relocations. 
*/ void __uasminit -uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid) +ISAFUNC(uasm_r_mips_pc16)(struct uasm_reloc **rel, u32 *addr, int lid) { (*rel)->addr = addr; (*rel)->type = R_MIPS_PC16; (*rel)->lab = lid; (*rel)++; } -UASM_EXPORT_SYMBOL(uasm_r_mips_pc16); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_r_mips_pc16)); static inline void __uasminit -__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) -{ - long laddr = (long)lab->addr; - long raddr = (long)rel->addr; - - switch (rel->type) { - case R_MIPS_PC16: - *rel->addr |= build_bimm(laddr - (raddr + 4)); - break; - - default: - panic("Unsupported Micro-assembler relocation %d", - rel->type); - } -} +__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); void __uasminit -uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) +ISAFUNC(uasm_resolve_relocs)(struct uasm_reloc *rel, struct uasm_label *lab) { struct uasm_label *l; @@ -579,40 +409,40 @@ uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab) if (rel->lab == l->lab) __resolve_relocs(rel, l); } -UASM_EXPORT_SYMBOL(uasm_resolve_relocs); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_resolve_relocs)); void __uasminit -uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off) +ISAFUNC(uasm_move_relocs)(struct uasm_reloc *rel, u32 *first, u32 *end, long off) { for (; rel->lab != UASM_LABEL_INVALID; rel++) if (rel->addr >= first && rel->addr < end) rel->addr += off; } -UASM_EXPORT_SYMBOL(uasm_move_relocs); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_relocs)); void __uasminit -uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off) +ISAFUNC(uasm_move_labels)(struct uasm_label *lab, u32 *first, u32 *end, long off) { for (; lab->lab != UASM_LABEL_INVALID; lab++) if (lab->addr >= first && lab->addr < end) lab->addr += off; } -UASM_EXPORT_SYMBOL(uasm_move_labels); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_move_labels)); void __uasminit -uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, +ISAFUNC(uasm_copy_handler)(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, u32 *end, u32 *target) { long off = (long)(target - first); memcpy(target, first, (end - first) * sizeof(u32)); - uasm_move_relocs(rel, first, end, off); - uasm_move_labels(lab, first, end, off); + ISAFUNC(uasm_move_relocs(rel, first, end, off)); + ISAFUNC(uasm_move_labels(lab, first, end, off)); } -UASM_EXPORT_SYMBOL(uasm_copy_handler); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_copy_handler)); -int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) +int __uasminit ISAFUNC(uasm_insn_has_bdelay)(struct uasm_reloc *rel, u32 *addr) { for (; rel->lab != UASM_LABEL_INVALID; rel++) { if (rel->addr == addr @@ -623,88 +453,88 @@ int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr) return 0; } -UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_insn_has_bdelay)); /* Convenience functions for labeled branches. 
*/ void __uasminit -uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_bltz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bltz(p, reg, 0); + ISAFUNC(uasm_i_bltz)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bltz); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bltz)); void __uasminit -uasm_il_b(u32 **p, struct uasm_reloc **r, int lid) +ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_b(p, 0); + ISAFUNC(uasm_i_b)(p, 0); } -UASM_EXPORT_SYMBOL(uasm_il_b); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); void __uasminit -uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_beqz(p, reg, 0); + ISAFUNC(uasm_i_beqz)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_beqz); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqz)); void __uasminit -uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_beqzl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_beqzl(p, reg, 0); + ISAFUNC(uasm_i_beqzl)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_beqzl); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beqzl)); void __uasminit -uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, +ISAFUNC(uasm_il_bne)(u32 **p, struct uasm_reloc **r, unsigned int reg1, unsigned int reg2, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bne(p, reg1, reg2, 0); + ISAFUNC(uasm_i_bne)(p, reg1, reg2, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bne); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bne)); void __uasminit -uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_bnez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bnez(p, reg, 0); + ISAFUNC(uasm_i_bnez)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bnez); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bnez)); void __uasminit -uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_bgezl)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bgezl(p, reg, 0); + ISAFUNC(uasm_i_bgezl)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bgezl); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgezl)); void __uasminit -uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) +ISAFUNC(uasm_il_bgez)(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bgez(p, reg, 0); + ISAFUNC(uasm_i_bgez)(p, reg, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bgez); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bgez)); void __uasminit -uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, +ISAFUNC(uasm_il_bbit0)(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bbit0(p, reg, bit, 0); + ISAFUNC(uasm_i_bbit0)(p, reg, bit, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bbit0); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit0)); void __uasminit -uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, +ISAFUNC(uasm_il_bbit1)(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid) { uasm_r_mips_pc16(r, *p, lid); - uasm_i_bbit1(p, reg, bit, 0); + ISAFUNC(uasm_i_bbit1)(p, reg, bit, 0); } -UASM_EXPORT_SYMBOL(uasm_il_bbit1); +UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_bbit1)); diff --git a/trunk/arch/mips/mti-malta/Makefile 
b/trunk/arch/mips/mti-malta/Makefile index 6079ef33b5f0..0388fc8b5613 100644 --- a/trunk/arch/mips/mti-malta/Makefile +++ b/trunk/arch/mips/mti-malta/Makefile @@ -5,9 +5,8 @@ # Copyright (C) 2008 Wind River Systems, Inc. # written by Ralf Baechle # -obj-y := malta-amon.o malta-cmdline.o \ - malta-display.o malta-init.o malta-int.o \ - malta-memory.o malta-platform.o \ +obj-y := malta-amon.o malta-display.o malta-init.o \ + malta-int.o malta-memory.o malta-platform.o \ malta-reset.o malta-setup.o malta-time.o obj-$(CONFIG_EARLY_PRINTK) += malta-console.o diff --git a/trunk/arch/mips/mti-malta/Platform b/trunk/arch/mips/mti-malta/Platform index 5b548b5a4fcf..2cc72c9b38e3 100644 --- a/trunk/arch/mips/mti-malta/Platform +++ b/trunk/arch/mips/mti-malta/Platform @@ -3,5 +3,9 @@ # platform-$(CONFIG_MIPS_MALTA) += mti-malta/ cflags-$(CONFIG_MIPS_MALTA) += -I$(srctree)/arch/mips/include/asm/mach-malta -load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 +ifdef CONFIG_KVM_GUEST + load-$(CONFIG_MIPS_MALTA) += 0x0000000040100000 +else + load-$(CONFIG_MIPS_MALTA) += 0xffffffff80100000 +endif all-$(CONFIG_MIPS_MALTA) := $(COMPRESSION_FNAME).bin diff --git a/trunk/arch/mips/mti-malta/malta-cmdline.c b/trunk/arch/mips/mti-malta/malta-cmdline.c deleted file mode 100644 index 5576a306a145..000000000000 --- a/trunk/arch/mips/mti-malta/malta-cmdline.c +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. - * - * Kernel command line creation using the prom monitor (YAMON) argc/argv. - */ -#include -#include - -#include - -extern int prom_argc; -extern int *_prom_argv; - -/* - * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer. - * This macro take care of sign extension. - */ -#define prom_argv(index) ((char *)(long)_prom_argv[(index)]) - -char * __init prom_getcmdline(void) -{ - return &(arcs_cmdline[0]); -} - - -void __init prom_init_cmdline(void) -{ - char *cp; - int actr; - - actr = 1; /* Always ignore argv[0] */ - - cp = &(arcs_cmdline[0]); - while(actr < prom_argc) { - strcpy(cp, prom_argv(actr)); - cp += strlen(prom_argv(actr)); - *cp++ = ' '; - actr++; - } - if (cp != &(arcs_cmdline[0])) { - /* get rid of trailing space */ - --cp; - *cp = '\0'; - } -} diff --git a/trunk/arch/mips/mti-malta/malta-display.c b/trunk/arch/mips/mti-malta/malta-display.c index 9bc58a24e80a..d4f807191ecd 100644 --- a/trunk/arch/mips/mti-malta/malta-display.c +++ b/trunk/arch/mips/mti-malta/malta-display.c @@ -1,28 +1,20 @@ /* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. * * Display routines for display messages in MIPS boards ascii display. + * + * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard + * Steven J. Hill */ - #include #include -#include +#include + #include -#include extern const char display_string[]; static unsigned int display_count; @@ -36,11 +28,11 @@ void mips_display_message(const char *str) if (unlikely(display == NULL)) display = ioremap(ASCII_DISPLAY_POS_BASE, 16*sizeof(int)); - for (i = 0; i <= 14; i=i+2) { - if (*str) - __raw_writel(*str++, display + i); - else - __raw_writel(' ', display + i); + for (i = 0; i <= 14; i += 2) { + if (*str) + __raw_writel(*str++, display + i); + else + __raw_writel(' ', display + i); } } diff --git a/trunk/arch/mips/mti-malta/malta-init.c b/trunk/arch/mips/mti-malta/malta-init.c index c2cbce9e435e..ff8caffd3266 100644 --- a/trunk/arch/mips/mti-malta/malta-init.c +++ b/trunk/arch/mips/mti-malta/malta-init.c @@ -1,54 +1,28 @@ /* - * Copyright (C) 1999, 2000, 2004, 2005 MIPS Technologies, Inc. - * All rights reserved. - * Authors: Carsten Langgaard - * Maciej W. Rozycki - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. * * PROM library initialisation code. + * + * Copyright (C) 1999,2000,2004,2005,2012 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard + * Maciej W. Rozycki + * Steven J. Hill */ #include #include #include -#include -#include -#include #include #include #include - +#include #include -#include #include -#include -#include - #include -int prom_argc; -int *_prom_argv, *_prom_envp; - -/* - * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer. - * This macro take care of sign extension, if running in 64-bit mode. - */ -#define prom_envp(index) ((char *)(long)_prom_envp[(index)]) - -int init_debug; - static int mips_revision_corid; int mips_revision_sconid; @@ -62,74 +36,6 @@ unsigned long _pcictrl_gt64120; /* MIPS System controller register base */ unsigned long _pcictrl_msc; -char *prom_getenv(char *envname) -{ - /* - * Return a pointer to the given environment variable. 
- * In 64-bit mode: we're using 64-bit pointers, but all pointers - * in the PROM structures are only 32-bit, so we need some - * workarounds, if we are running in 64-bit mode. - */ - int i, index=0; - - i = strlen(envname); - - while (prom_envp(index)) { - if(strncmp(envname, prom_envp(index), i) == 0) { - return(prom_envp(index+1)); - } - index += 2; - } - - return NULL; -} - -static inline unsigned char str2hexnum(unsigned char c) -{ - if (c >= '0' && c <= '9') - return c - '0'; - if (c >= 'a' && c <= 'f') - return c - 'a' + 10; - return 0; /* foo */ -} - -static inline void str2eaddr(unsigned char *ea, unsigned char *str) -{ - int i; - - for (i = 0; i < 6; i++) { - unsigned char num; - - if((*str == '.') || (*str == ':')) - str++; - num = str2hexnum(*str++) << 4; - num |= (str2hexnum(*str++)); - ea[i] = num; - } -} - -int get_ethernet_addr(char *ethernet_addr) -{ - char *ethaddr_str; - - ethaddr_str = prom_getenv("ethaddr"); - if (!ethaddr_str) { - printk("ethaddr not set in boot prom\n"); - return -1; - } - str2eaddr(ethernet_addr, ethaddr_str); - - if (init_debug > 1) { - int i; - printk("get_ethernet_addr: "); - for (i=0; i<5; i++) - printk("%02x:", (unsigned char)*(ethernet_addr+i)); - printk("%02x\n", *(ethernet_addr+i)); - } - - return 0; -} - #ifdef CONFIG_SERIAL_8250_CONSOLE static void __init console_config(void) { @@ -138,17 +44,23 @@ static void __init console_config(void) char parity = '\0', bits = '\0', flow = '\0'; char *s; - if ((strstr(prom_getcmdline(), "console=")) == NULL) { - s = prom_getenv("modetty0"); + if ((strstr(fw_getcmdline(), "console=")) == NULL) { + s = fw_getenv("modetty0"); if (s) { while (*s >= '0' && *s <= '9') baud = baud*10 + *s++ - '0'; - if (*s == ',') s++; - if (*s) parity = *s++; - if (*s == ',') s++; - if (*s) bits = *s++; - if (*s == ',') s++; - if (*s == 'h') flow = 'r'; + if (*s == ',') + s++; + if (*s) + parity = *s++; + if (*s == ',') + s++; + if (*s) + bits = *s++; + if (*s == ',') + s++; + if (*s == 'h') + flow = 'r'; } if (baud == 0) baud = 38400; @@ -158,8 +70,9 @@ static void __init console_config(void) bits = '8'; if (flow == '\0') flow = 'r'; - sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow); - strcat(prom_getcmdline(), console_string); + sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, + parity, bits, flow); + strcat(fw_getcmdline(), console_string); pr_info("Config serial console:%s\n", console_string); } } @@ -193,10 +106,6 @@ extern struct plat_smp_ops msmtc_smp_ops; void __init prom_init(void) { - prom_argc = fw_arg0; - _prom_argv = (int *) fw_arg1; - _prom_envp = (int *) fw_arg2; - mips_display_message("LINUX"); /* @@ -306,7 +215,7 @@ void __init prom_init(void) case MIPS_REVISION_SCON_SOCIT: case MIPS_REVISION_SCON_ROCIT: _pcictrl_msc = (unsigned long)ioremap(MIPS_MSC01_PCI_REG_BASE, 0x2000); - mips_pci_controller: +mips_pci_controller: mb(); MSC_READ(MSC01_PCI_CFG, data); MSC_WRITE(MSC01_PCI_CFG, data & ~MSC01_PCI_CFG_EN_BIT); @@ -348,13 +257,13 @@ void __init prom_init(void) default: /* Unknown system controller */ mips_display_message("SC Error"); - while (1); /* We die here... */ + while (1); /* We die here... 
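console_config() above turns a YAMON "modetty0" environment string into a Linux "console=" argument, with defaults of 38400n8r when fields are missing. The exact shape of the firmware string (baud, parity, data bits, then a flow-control field starting with 'h' for hardware) is inferred from the parsing code itself; the sample value in the standalone sketch below is hypothetical.

#include <stdio.h>

/* Build " console=ttyS0,<baud><parity><bits><flow>" the way the Malta
 * console_config() does, from a modetty0-style string. */
static void build_console_arg(const char *s, char *out, size_t outlen)
{
	int baud = 0;
	char parity = '\0', bits = '\0', flow = '\0';

	while (*s >= '0' && *s <= '9')
		baud = baud * 10 + *s++ - '0';
	if (*s == ',')
		s++;
	if (*s)
		parity = *s++;
	if (*s == ',')
		s++;
	if (*s)
		bits = *s++;
	if (*s == ',')
		s++;
	if (*s == 'h')			/* 'h...' is taken as hardware flow control */
		flow = 'r';

	/* Same fallbacks the kernel code applies. */
	if (baud == 0)
		baud = 38400;
	if (parity != 'n' && parity != 'o' && parity != 'e')
		parity = 'n';
	if (bits != '7' && bits != '8')
		bits = '8';
	if (flow == '\0')
		flow = 'r';

	snprintf(out, outlen, " console=ttyS0,%d%c%c%c", baud, parity, bits, flow);
}

int main(void)
{
	char arg[40];

	/* Hypothetical YAMON value; the real one comes from fw_getenv(). */
	build_console_arg("38400,n,8,1,hw", arg, sizeof(arg));
	puts(arg);	/* -> " console=ttyS0,38400n8r" */
	return 0;
}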
*/ } board_nmi_handler_setup = mips_nmi_setup; board_ejtag_handler_setup = mips_ejtag_setup; - prom_init_cmdline(); - prom_meminit(); + fw_init_cmdline(); + fw_meminit(); #ifdef CONFIG_SERIAL_8250_CONSOLE console_config(); #endif diff --git a/trunk/arch/mips/mti-malta/malta-int.c b/trunk/arch/mips/mti-malta/malta-int.c index e364af70e6cf..0a1339ac3ec8 100644 --- a/trunk/arch/mips/mti-malta/malta-int.c +++ b/trunk/arch/mips/mti-malta/malta-int.c @@ -47,7 +47,6 @@ #include int gcmp_present = -1; -int gic_present; static unsigned long _msc01_biu_base; static unsigned long _gcmp_base; static unsigned int ipi_map[NR_CPUS]; @@ -134,6 +133,9 @@ static void malta_ipi_irqdispatch(void) { int irq; + if (gic_compare_int()) + do_IRQ(MIPS_GIC_IRQ_BASE); + irq = gic_get_int(); if (irq < 0) return; /* interrupt has already been cleared */ diff --git a/trunk/arch/mips/mti-malta/malta-memory.c b/trunk/arch/mips/mti-malta/malta-memory.c index f3d43aa023a9..1f73d63e92a7 100644 --- a/trunk/arch/mips/mti-malta/malta-memory.c +++ b/trunk/arch/mips/mti-malta/malta-memory.c @@ -1,73 +1,45 @@ /* - * Carsten Langgaard, carstenl@mips.com - * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. - * - * This program is free software; you can distribute it and/or modify it - * under the terms of the GNU General Public License (Version 2) as - * published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License along - * with this program; if not, write to the Free Software Foundation, Inc., - * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. * * PROM library functions for acquiring/using memory descriptors given to * us from the YAMON. + * + * Copyright (C) 1999,2000,2012 MIPS Technologies, Inc. + * All rights reserved. + * Authors: Carsten Langgaard + * Steven J. 
Hill */ #include -#include #include -#include #include #include -#include #include +#include -#include - -/*#define DEBUG*/ - -enum yamon_memtypes { - yamon_dontuse, - yamon_prom, - yamon_free, -}; -static struct prom_pmemblock mdesc[PROM_MAX_PMEMBLOCKS]; - -#ifdef DEBUG -static char *mtypes[3] = { - "Dont use memory", - "YAMON PROM memory", - "Free memory", -}; -#endif +static fw_memblock_t mdesc[FW_MAX_MEMBLOCKS]; /* determined physical memory size, not overridden by command line args */ unsigned long physical_memsize = 0L; -static struct prom_pmemblock * __init prom_getmdesc(void) +fw_memblock_t * __init fw_getmdesc(void) { - char *memsize_str; + char *memsize_str, *ptr; unsigned int memsize; - char *ptr; static char cmdline[COMMAND_LINE_SIZE] __initdata; + long val; + int tmp; /* otherwise look in the environment */ - memsize_str = prom_getenv("memsize"); + memsize_str = fw_getenv("memsize"); if (!memsize_str) { - printk(KERN_WARNING - "memsize not set in boot prom, set to default (32Mb)\n"); + pr_warn("memsize not set in YAMON, set to default (32Mb)\n"); physical_memsize = 0x02000000; } else { -#ifdef DEBUG - pr_debug("prom_memsize = %s\n", memsize_str); -#endif - physical_memsize = simple_strtol(memsize_str, NULL, 0); + tmp = kstrtol(memsize_str, 0, &val); + physical_memsize = (unsigned long)val; } #ifdef CONFIG_CPU_BIG_ENDIAN @@ -90,11 +62,11 @@ static struct prom_pmemblock * __init prom_getmdesc(void) memset(mdesc, 0, sizeof(mdesc)); - mdesc[0].type = yamon_dontuse; + mdesc[0].type = fw_dontuse; mdesc[0].base = 0x00000000; mdesc[0].size = 0x00001000; - mdesc[1].type = yamon_prom; + mdesc[1].type = fw_code; mdesc[1].base = 0x00001000; mdesc[1].size = 0x000ef000; @@ -105,55 +77,45 @@ static struct prom_pmemblock * __init prom_getmdesc(void) * This mean that this area can't be used as DMA memory for PCI * devices. 
*/ - mdesc[2].type = yamon_dontuse; + mdesc[2].type = fw_dontuse; mdesc[2].base = 0x000f0000; mdesc[2].size = 0x00010000; - mdesc[3].type = yamon_dontuse; + mdesc[3].type = fw_dontuse; mdesc[3].base = 0x00100000; - mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - mdesc[3].base; + mdesc[3].size = CPHYSADDR(PFN_ALIGN((unsigned long)&_end)) - + mdesc[3].base; - mdesc[4].type = yamon_free; + mdesc[4].type = fw_free; mdesc[4].base = CPHYSADDR(PFN_ALIGN(&_end)); mdesc[4].size = memsize - mdesc[4].base; return &mdesc[0]; } -static int __init prom_memtype_classify(unsigned int type) +static int __init fw_memtype_classify(unsigned int type) { switch (type) { - case yamon_free: + case fw_free: return BOOT_MEM_RAM; - case yamon_prom: + case fw_code: return BOOT_MEM_ROM_DATA; default: return BOOT_MEM_RESERVED; } } -void __init prom_meminit(void) +void __init fw_meminit(void) { - struct prom_pmemblock *p; + fw_memblock_t *p; -#ifdef DEBUG - pr_debug("YAMON MEMORY DESCRIPTOR dump:\n"); - p = prom_getmdesc(); - while (p->size) { - int i = 0; - pr_debug("[%d,%p]: base<%08lx> size<%08lx> type<%s>\n", - i, p, p->base, p->size, mtypes[p->type]); - p++; - i++; - } -#endif - p = prom_getmdesc(); + p = fw_getmdesc(); while (p->size) { long type; unsigned long base, size; - type = prom_memtype_classify(p->type); + type = fw_memtype_classify(p->type); base = p->base; size = p->size; @@ -172,7 +134,7 @@ void __init prom_free_prom_memory(void) continue; addr = boot_mem_map.map[i].addr; - free_init_pages("prom memory", + free_init_pages("YAMON memory", addr, addr + boot_mem_map.map[i].size); } } diff --git a/trunk/arch/mips/mti-malta/malta-setup.c b/trunk/arch/mips/mti-malta/malta-setup.c index 200f64df2c9b..c72a06936781 100644 --- a/trunk/arch/mips/mti-malta/malta-setup.c +++ b/trunk/arch/mips/mti-malta/malta-setup.c @@ -25,13 +25,13 @@ #include #include -#include +#include #include -#include #include #include #include #include +#include #ifdef CONFIG_VT #include #endif @@ -105,6 +105,66 @@ static void __init fd_activate(void) } #endif +static int __init plat_enable_iocoherency(void) +{ + int supported = 0; + if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) { + if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { + BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; + pr_info("Enabled Bonito CPU coherency\n"); + supported = 1; + } + if (strstr(fw_getcmdline(), "iobcuncached")) { + BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN; + BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG & + ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | + BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); + pr_info("Disabled Bonito IOBC coherency\n"); + } else { + BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN; + BONITO_PCIMEMBASECFG |= + (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | + BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); + pr_info("Enabled Bonito IOBC coherency\n"); + } + } else if (gcmp_niocu() != 0) { + /* Nothing special needs to be done to enable coherency */ + pr_info("CMP IOCU detected\n"); + if ((*(unsigned int *)0xbf403000 & 0x81) != 0x81) { + pr_crit("IOCU OPERATION DISABLED BY SWITCH - DEFAULTING TO SW IO COHERENCY\n"); + return 0; + } + supported = 1; + } + hw_coherentio = supported; + return supported; +} + +static void __init plat_setup_iocoherency(void) +{ +#ifdef CONFIG_DMA_NONCOHERENT + /* + * Kernel has been configured with software coherency + * but we might choose to turn it off and use hardware + * coherency instead. 
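fw_meminit() above walks a small table of firmware memory blocks and registers each one with the boot memory map according to its type. A standalone sketch of that classify-and-walk pattern follows; the enum names mirror the ones introduced by the patch, the last two block sizes are illustrative (the real ones depend on _end and the detected memsize), and the printf stands in for add_memory_region().

#include <stdio.h>

enum fw_memtypes { fw_dontuse, fw_code, fw_free };

typedef struct { unsigned long base, size; unsigned int type; } fw_memblock_t;

static const char *classify(unsigned int type)
{
	switch (type) {
	case fw_free:
		return "RAM";		/* usable by the kernel */
	case fw_code:
		return "ROM data";	/* YAMON code/data, reclaimable later */
	default:
		return "reserved";	/* never touched */
	}
}

int main(void)
{
	/* Layout modelled on the Malta descriptors built in fw_getmdesc(). */
	fw_memblock_t mdesc[] = {
		{ 0x00000000, 0x00001000, fw_dontuse },
		{ 0x00001000, 0x000ef000, fw_code    },
		{ 0x000f0000, 0x00010000, fw_dontuse },
		{ 0x00100000, 0x00300000, fw_dontuse },	/* up to _end; size illustrative */
		{ 0x00400000, 0x01c00000, fw_free    },	/* remainder of a 32 MB system */
		{ 0, 0, 0 },
	};

	for (fw_memblock_t *p = mdesc; p->size; p++)
		printf("base %08lx size %08lx -> %s\n",
		       p->base, p->size, classify(p->type));
	return 0;
}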
+ */ + if (plat_enable_iocoherency()) { + if (coherentio == 0) + pr_info("Hardware DMA cache coherency disabled\n"); + else + pr_info("Hardware DMA cache coherency enabled\n"); + } else { + if (coherentio == 1) + pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n"); + else + pr_info("Software DMA cache coherency enabled\n"); + } +#else + if (!plat_enable_iocoherency()) + panic("Hardware DMA cache coherency not supported!"); +#endif +} + #ifdef CONFIG_BLK_DEV_IDE static void __init pci_clock_check(void) { @@ -115,16 +175,15 @@ static void __init pci_clock_check(void) 33, 20, 25, 30, 12, 16, 37, 10 }; int pciclock = pciclocks[jmpr]; - char *argptr = prom_getcmdline(); + char *argptr = fw_getcmdline(); if (pciclock != 33 && !strstr(argptr, "idebus=")) { - printk(KERN_WARNING "WARNING: PCI clock is %dMHz, " - "setting idebus\n", pciclock); + pr_warn("WARNING: PCI clock is %dMHz, setting idebus\n", + pciclock); argptr += strlen(argptr); sprintf(argptr, " idebus=%d", pciclock); if (pciclock < 20 || pciclock > 66) - printk(KERN_WARNING "WARNING: IDE timing " - "calculations will be incorrect\n"); + pr_warn("WARNING: IDE timing calculations will be incorrect\n"); } } #endif @@ -153,31 +212,31 @@ static void __init bonito_quirks_setup(void) { char *argptr; - argptr = prom_getcmdline(); + argptr = fw_getcmdline(); if (strstr(argptr, "debug")) { BONITO_BONGENCFG |= BONITO_BONGENCFG_DEBUGMODE; - printk(KERN_INFO "Enabled Bonito debug mode\n"); + pr_info("Enabled Bonito debug mode\n"); } else BONITO_BONGENCFG &= ~BONITO_BONGENCFG_DEBUGMODE; #ifdef CONFIG_DMA_COHERENT if (BONITO_PCICACHECTRL & BONITO_PCICACHECTRL_CPUCOH_PRES) { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_CPUCOH_EN; - printk(KERN_INFO "Enabled Bonito CPU coherency\n"); + pr_info("Enabled Bonito CPU coherency\n"); - argptr = prom_getcmdline(); + argptr = fw_getcmdline(); if (strstr(argptr, "iobcuncached")) { BONITO_PCICACHECTRL &= ~BONITO_PCICACHECTRL_IOBCCOH_EN; BONITO_PCIMEMBASECFG = BONITO_PCIMEMBASECFG & ~(BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); - printk(KERN_INFO "Disabled Bonito IOBC coherency\n"); + pr_info("Disabled Bonito IOBC coherency\n"); } else { BONITO_PCICACHECTRL |= BONITO_PCICACHECTRL_IOBCCOH_EN; BONITO_PCIMEMBASECFG |= (BONITO_PCIMEMBASECFG_MEMBASE0_CACHED | BONITO_PCIMEMBASECFG_MEMBASE1_CACHED); - printk(KERN_INFO "Enabled Bonito IOBC coherency\n"); + pr_info("Enabled Bonito IOBC coherency\n"); } } else panic("Hardware DMA cache coherency not supported"); @@ -207,6 +266,8 @@ void __init plat_mem_setup(void) if (mips_revision_sconid == MIPS_REVISION_SCON_BONITO) bonito_quirks_setup(); + plat_setup_iocoherency(); + #ifdef CONFIG_BLK_DEV_IDE pci_clock_check(); #endif diff --git a/trunk/arch/mips/mti-malta/malta-time.c b/trunk/arch/mips/mti-malta/malta-time.c index a144b89cf9ba..0ad305f75802 100644 --- a/trunk/arch/mips/mti-malta/malta-time.c +++ b/trunk/arch/mips/mti-malta/malta-time.c @@ -39,12 +39,9 @@ #include #include -#include - #include unsigned long cpu_khz; -int gic_frequency; static int mips_cpu_timer_irq; static int mips_cpu_perf_irq; @@ -74,7 +71,24 @@ static void __init estimate_frequencies(void) { unsigned long flags; unsigned int count, start; +#ifdef CONFIG_IRQ_GIC unsigned int giccount = 0, gicstart = 0; +#endif + +#if defined (CONFIG_KVM_GUEST) && defined (CONFIG_KVM_HOST_FREQ) + unsigned int prid = read_c0_prid() & 0xffff00; + + /* + * XXXKYMA: hardwire the CPU frequency to Host Freq/4 + */ + count = (CONFIG_KVM_HOST_FREQ * 1000000) >> 
3; + if ((prid != (PRID_COMP_MIPS | PRID_IMP_20KC)) && + (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) + count *= 2; + + mips_hpt_frequency = count; + return; +#endif local_irq_save(flags); @@ -84,26 +98,32 @@ static void __init estimate_frequencies(void) /* Initialize counters. */ start = read_c0_count(); +#ifdef CONFIG_IRQ_GIC if (gic_present) GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart); +#endif /* Read counter exactly on falling edge of update flag. */ while (CMOS_READ(RTC_REG_A) & RTC_UIP); while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); count = read_c0_count(); +#ifdef CONFIG_IRQ_GIC if (gic_present) GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount); +#endif local_irq_restore(flags); count -= start; - if (gic_present) - giccount -= gicstart; - mips_hpt_frequency = count; - if (gic_present) + +#ifdef CONFIG_IRQ_GIC + if (gic_present) { + giccount -= gicstart; gic_frequency = giccount; + } +#endif } void read_persistent_clock(struct timespec *ts) @@ -159,24 +179,27 @@ void __init plat_time_init(void) (prid != (PRID_COMP_MIPS | PRID_IMP_25KF))) freq *= 2; freq = freqround(freq, 5000); - pr_debug("CPU frequency %d.%02d MHz\n", freq/1000000, + printk("CPU frequency %d.%02d MHz\n", freq/1000000, (freq%1000000)*100/1000000); cpu_khz = freq / 1000; - if (gic_present) { - freq = freqround(gic_frequency, 5000); - pr_debug("GIC frequency %d.%02d MHz\n", freq/1000000, - (freq%1000000)*100/1000000); - gic_clocksource_init(gic_frequency); - } else - init_r4k_clocksource(); + mips_scroll_message(); #ifdef CONFIG_I8253 /* Only Malta has a PIT. */ setup_pit_timer(); #endif - mips_scroll_message(); +#ifdef CONFIG_IRQ_GIC + if (gic_present) { + freq = freqround(gic_frequency, 5000); + printk("GIC frequency %d.%02d MHz\n", freq/1000000, + (freq%1000000)*100/1000000); +#ifdef CONFIG_CSRC_GIC + gic_clocksource_init(gic_frequency); +#endif + } +#endif plat_perf_setup(); } diff --git a/trunk/arch/mips/mti-sead3/Makefile b/trunk/arch/mips/mti-sead3/Makefile index 10ec701ce6c7..be114209217c 100644 --- a/trunk/arch/mips/mti-sead3/Makefile +++ b/trunk/arch/mips/mti-sead3/Makefile @@ -8,10 +8,10 @@ # Copyright (C) 2012 MIPS Technoligies, Inc. All rights reserved. # Steven J. 
Hill # -obj-y := sead3-lcd.o sead3-cmdline.o \ - sead3-display.o sead3-init.o sead3-int.o \ - sead3-mtd.o sead3-net.o sead3-platform.o \ - sead3-reset.o sead3-setup.o sead3-time.o +obj-y := sead3-lcd.o sead3-display.o sead3-init.o \ + sead3-int.o sead3-mtd.o sead3-net.o \ + sead3-platform.o sead3-reset.o \ + sead3-setup.o sead3-time.o obj-y += sead3-i2c-dev.o sead3-i2c.o \ sead3-pic32-i2c-drv.o sead3-pic32-bus.o \ diff --git a/trunk/arch/mips/mti-sead3/leds-sead3.c b/trunk/arch/mips/mti-sead3/leds-sead3.c index 322148c353ed..0a168c948b01 100644 --- a/trunk/arch/mips/mti-sead3/leds-sead3.c +++ b/trunk/arch/mips/mti-sead3/leds-sead3.c @@ -34,33 +34,15 @@ static void sead3_fled_set(struct led_classdev *led_cdev, static struct led_classdev sead3_pled = { .name = "sead3::pled", .brightness_set = sead3_pled_set, + .flags = LED_CORE_SUSPENDRESUME, }; static struct led_classdev sead3_fled = { .name = "sead3::fled", .brightness_set = sead3_fled_set, + .flags = LED_CORE_SUSPENDRESUME, }; -#ifdef CONFIG_PM -static int sead3_led_suspend(struct platform_device *dev, - pm_message_t state) -{ - led_classdev_suspend(&sead3_pled); - led_classdev_suspend(&sead3_fled); - return 0; -} - -static int sead3_led_resume(struct platform_device *dev) -{ - led_classdev_resume(&sead3_pled); - led_classdev_resume(&sead3_fled); - return 0; -} -#else -#define sead3_led_suspend NULL -#define sead3_led_resume NULL -#endif - static int sead3_led_probe(struct platform_device *pdev) { int ret; @@ -86,8 +68,6 @@ static int sead3_led_remove(struct platform_device *pdev) static struct platform_driver sead3_led_driver = { .probe = sead3_led_probe, .remove = sead3_led_remove, - .suspend = sead3_led_suspend, - .resume = sead3_led_resume, .driver = { .name = DRVNAME, .owner = THIS_MODULE, diff --git a/trunk/arch/mips/mti-sead3/sead3-cmdline.c b/trunk/arch/mips/mti-sead3/sead3-cmdline.c deleted file mode 100644 index a2e6cec67f57..000000000000 --- a/trunk/arch/mips/mti-sead3/sead3-cmdline.c +++ /dev/null @@ -1,46 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. - */ -#include -#include - -#include - -extern int prom_argc; -extern int *_prom_argv; - -/* - * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer. - * This macro take care of sign extension. 
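The deleted *-cmdline.c files (Malta earlier in this patch, SEAD-3 here) did nothing more than join the YAMON argv words, skipping argv[0], into one space-separated command line; the shared fw_init_cmdline() now provides that. A standalone sketch of the same join shows the behaviour being consolidated; the bounds checking is an addition for the example, not something the removed code did.

#include <stdio.h>
#include <string.h>

/* Join argv[1..argc-1] into buf, space separated, no trailing space --
 * the same effect as the removed prom_init_cmdline(). */
static void join_cmdline(int argc, char **argv, char *buf, size_t len)
{
	char *cp = buf;
	size_t left = len;

	buf[0] = '\0';
	for (int i = 1; i < argc; i++) {
		size_t n = strlen(argv[i]);

		if (n + 2 > left)
			break;			/* stop when the buffer is full */
		memcpy(cp, argv[i], n);
		cp += n;
		*cp++ = ' ';
		left -= n + 1;
	}
	if (cp != buf)
		*--cp = '\0';			/* drop the trailing space */
}

int main(int argc, char **argv)
{
	char cmdline[256];

	join_cmdline(argc, argv, cmdline, sizeof(cmdline));
	printf("\"%s\"\n", cmdline);
	return 0;
}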
- */ -#define prom_argv(index) ((char *)(long)_prom_argv[(index)]) - -char * __init prom_getcmdline(void) -{ - return &(arcs_cmdline[0]); -} - -void __init prom_init_cmdline(void) -{ - char *cp; - int actr; - - actr = 1; /* Always ignore argv[0] */ - - cp = &(arcs_cmdline[0]); - while (actr < prom_argc) { - strcpy(cp, prom_argv(actr)); - cp += strlen(prom_argv(actr)); - *cp++ = ' '; - actr++; - } - if (cp != &(arcs_cmdline[0])) { - /* get rid of trailing space */ - --cp; - *cp = '\0'; - } -} diff --git a/trunk/arch/mips/mti-sead3/sead3-console.c b/trunk/arch/mips/mti-sead3/sead3-console.c index 2ddef19a9adc..031f47d69770 100644 --- a/trunk/arch/mips/mti-sead3/sead3-console.c +++ b/trunk/arch/mips/mti-sead3/sead3-console.c @@ -26,7 +26,7 @@ static inline void serial_out(int offset, int value, unsigned int base_addr) __raw_writel(value, PORT(base_addr, offset)); } -void __init prom_init_early_console(char port) +void __init fw_init_early_console(char port) { console_port = port; } diff --git a/trunk/arch/mips/mti-sead3/sead3-display.c b/trunk/arch/mips/mti-sead3/sead3-display.c index e389326cfa42..94875991907b 100644 --- a/trunk/arch/mips/mti-sead3/sead3-display.c +++ b/trunk/arch/mips/mti-sead3/sead3-display.c @@ -8,7 +8,6 @@ #include #include #include -#include static unsigned int display_count; static unsigned int max_display_count; diff --git a/trunk/arch/mips/mti-sead3/sead3-init.c b/trunk/arch/mips/mti-sead3/sead3-init.c index f95abaa1aa5d..bfbd17b120a2 100644 --- a/trunk/arch/mips/mti-sead3/sead3-init.c +++ b/trunk/arch/mips/mti-sead3/sead3-init.c @@ -12,38 +12,51 @@ #include #include #include -#include - -extern void prom_init_early_console(char port); +#include extern char except_vec_nmi; extern char except_vec_ejtag_debug; -int prom_argc; -int *_prom_argv, *_prom_envp; - -#define prom_envp(index) ((char *)(long)_prom_envp[(index)]) - -char *prom_getenv(char *envname) +#ifdef CONFIG_SERIAL_8250_CONSOLE +static void __init console_config(void) { - /* - * Return a pointer to the given environment variable. - * In 64-bit mode: we're using 64-bit pointers, but all pointers - * in the PROM structures are only 32-bit, so we need some - * workarounds, if we are running in 64-bit mode. - */ - int i, index = 0; - - i = strlen(envname); - - while (prom_envp(index)) { - if (strncmp(envname, prom_envp(index), i) == 0) - return prom_envp(index+1); - index += 2; + char console_string[40]; + int baud = 0; + char parity = '\0', bits = '\0', flow = '\0'; + char *s; + + if ((strstr(fw_getcmdline(), "console=")) == NULL) { + s = fw_getenv("modetty0"); + if (s) { + while (*s >= '0' && *s <= '9') + baud = baud*10 + *s++ - '0'; + if (*s == ',') + s++; + if (*s) + parity = *s++; + if (*s == ',') + s++; + if (*s) + bits = *s++; + if (*s == ',') + s++; + if (*s == 'h') + flow = 'r'; + } + if (baud == 0) + baud = 38400; + if (parity != 'n' && parity != 'o' && parity != 'e') + parity = 'n'; + if (bits != '7' && bits != '8') + bits = '8'; + if (flow == '\0') + flow = 'r'; + sprintf(console_string, " console=ttyS0,%d%c%c%c", baud, + parity, bits, flow); + strcat(fw_getcmdline(), console_string); } - - return NULL; } +#endif static void __init mips_nmi_setup(void) { @@ -52,7 +65,41 @@ static void __init mips_nmi_setup(void) base = cpu_has_veic ? (void *)(CAC_BASE + 0xa80) : (void *)(CAC_BASE + 0x380); +#ifdef CONFIG_CPU_MICROMIPS + /* + * Decrement the exception vector address by one for microMIPS. + */ + memcpy(base, (&except_vec_nmi - 1), 0x80); + + /* + * This is a hack. 
We do not know if the boot loader was built with + * microMIPS instructions or not. If it was not, the NMI exception + * code at 0x80000a80 will be taken in MIPS32 mode. The hand coded + * assembly below forces us into microMIPS mode if we are a pure + * microMIPS kernel. The assembly instructions are: + * + * 3C1A8000 lui k0,0x8000 + * 375A0381 ori k0,k0,0x381 + * 03400008 jr k0 + * 00000000 nop + * + * The mode switch occurs by jumping to the unaligned exception + * vector address at 0x80000381 which would have been 0x80000380 + * in MIPS32 mode. The jump to the unaligned address transitions + * us into microMIPS mode. + */ + if (!cpu_has_veic) { + void *base2 = (void *)(CAC_BASE + 0xa80); + *((unsigned int *)base2) = 0x3c1a8000; + *((unsigned int *)base2 + 1) = 0x375a0381; + *((unsigned int *)base2 + 2) = 0x03400008; + *((unsigned int *)base2 + 3) = 0x00000000; + flush_icache_range((unsigned long)base2, + (unsigned long)base2 + 0x10); + } +#else memcpy(base, &except_vec_nmi, 0x80); +#endif flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); } @@ -63,29 +110,40 @@ static void __init mips_ejtag_setup(void) base = cpu_has_veic ? (void *)(CAC_BASE + 0xa00) : (void *)(CAC_BASE + 0x300); +#ifdef CONFIG_CPU_MICROMIPS + /* Deja vu... */ + memcpy(base, (&except_vec_ejtag_debug - 1), 0x80); + if (!cpu_has_veic) { + void *base2 = (void *)(CAC_BASE + 0xa00); + *((unsigned int *)base2) = 0x3c1a8000; + *((unsigned int *)base2 + 1) = 0x375a0301; + *((unsigned int *)base2 + 2) = 0x03400008; + *((unsigned int *)base2 + 3) = 0x00000000; + flush_icache_range((unsigned long)base2, + (unsigned long)base2 + 0x10); + } +#else memcpy(base, &except_vec_ejtag_debug, 0x80); +#endif flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); } void __init prom_init(void) { - prom_argc = fw_arg0; - _prom_argv = (int *) fw_arg1; - _prom_envp = (int *) fw_arg2; - board_nmi_handler_setup = mips_nmi_setup; board_ejtag_handler_setup = mips_ejtag_setup; - prom_init_cmdline(); + fw_init_cmdline(); #ifdef CONFIG_EARLY_PRINTK - if ((strstr(prom_getcmdline(), "console=ttyS0")) != NULL) - prom_init_early_console(0); - else if ((strstr(prom_getcmdline(), "console=ttyS1")) != NULL) - prom_init_early_console(1); + if ((strstr(fw_getcmdline(), "console=ttyS0")) != NULL) + fw_init_early_console(0); + else if ((strstr(fw_getcmdline(), "console=ttyS1")) != NULL) + fw_init_early_console(1); #endif #ifdef CONFIG_SERIAL_8250_CONSOLE - if ((strstr(prom_getcmdline(), "console=")) == NULL) - strcat(prom_getcmdline(), " console=ttyS0,38400n8r"); + if ((strstr(fw_getcmdline(), "console=")) == NULL) + strcat(fw_getcmdline(), " console=ttyS0,38400n8r"); + console_config(); #endif } diff --git a/trunk/arch/mips/mti-sead3/sead3-int.c b/trunk/arch/mips/mti-sead3/sead3-int.c index e26e08274fc5..6a560ac03def 100644 --- a/trunk/arch/mips/mti-sead3/sead3-int.c +++ b/trunk/arch/mips/mti-sead3/sead3-int.c @@ -20,7 +20,6 @@ #define SEAD_CONFIG_BASE 0x1b100110 #define SEAD_CONFIG_SIZE 4 -int gic_present; static unsigned long sead3_config_reg; /* diff --git a/trunk/arch/mips/mti-sead3/sead3-setup.c b/trunk/arch/mips/mti-sead3/sead3-setup.c index f012fd164cee..b5059dc899f4 100644 --- a/trunk/arch/mips/mti-sead3/sead3-setup.c +++ b/trunk/arch/mips/mti-sead3/sead3-setup.c @@ -11,10 +11,6 @@ #include #include -#include - -int coherentio; /* 0 => no DMA cache coherency (may be set by user) */ -int hw_coherentio; /* 0 => no HW DMA cache coherency (reflects real HW) */ const char *get_system_type(void) { diff --git 
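The microMIPS NMI/EJTAG setup above plants a four-word MIPS32 trampoline at the architectural vector so a boot loader running in classic MIPS mode still reaches a pure microMIPS kernel: lui/ori builds an odd (ISA-bit-set) target address in k0 and jr switches mode. The sketch below is only a compressed restatement of that step using the instruction words quoted in the patch; the kernel code writes the words inline rather than through a helper like this, and 0x381 is the NMI case while EJTAG uses 0x301.

#include <linux/init.h>
#include <asm/cacheflush.h>

/* Sketch only: same words the patch stores at CAC_BASE + 0xa80 / 0xa00. */
static void __init plant_mips32_trampoline(void *vec, unsigned int lo16)
{
	unsigned int *p = vec;

	p[0] = 0x3c1a8000;		/* lui  k0, 0x8000                     */
	p[1] = 0x375a0000 | lo16;	/* ori  k0, k0, lo16 (0x381 or 0x301)  */
	p[2] = 0x03400008;		/* jr   k0 -- odd target => microMIPS   */
	p[3] = 0x00000000;		/* nop in the delay slot               */
	flush_icache_range((unsigned long)p, (unsigned long)p + 16);
}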
a/trunk/arch/mips/mti-sead3/sead3-time.c b/trunk/arch/mips/mti-sead3/sead3-time.c index 239e4e32757f..96b42eb9b5e2 100644 --- a/trunk/arch/mips/mti-sead3/sead3-time.c +++ b/trunk/arch/mips/mti-sead3/sead3-time.c @@ -11,7 +11,6 @@ #include #include #include -#include unsigned long cpu_khz; diff --git a/trunk/arch/mips/netlogic/Kconfig b/trunk/arch/mips/netlogic/Kconfig index 3c05bf9e280a..e0873a31ebaa 100644 --- a/trunk/arch/mips/netlogic/Kconfig +++ b/trunk/arch/mips/netlogic/Kconfig @@ -2,13 +2,22 @@ if NLM_XLP_BOARD || NLM_XLR_BOARD if NLM_XLP_BOARD config DT_XLP_EVP - bool "Built-in device tree for XLP EVP/SVP boards" + bool "Built-in device tree for XLP EVP boards" default y help - Add an FDT blob for XLP EVP and SVP boards into the kernel. + Add an FDT blob for XLP EVP boards into the kernel. This DTB will be used if the firmware does not pass in a DTB - pointer to the kernel. The corresponding DTS file is at - arch/mips/netlogic/dts/xlp_evp.dts + pointer to the kernel. The corresponding DTS file is at + arch/mips/netlogic/dts/xlp_evp.dts + +config DT_XLP_SVP + bool "Built-in device tree for XLP SVP boards" + default y + help + Add an FDT blob for XLP VP boards into the kernel. + This DTB will be used if the firmware does not pass in a DTB + pointer to the kernel. The corresponding DTS file is at + arch/mips/netlogic/dts/xlp_svp.dts config NLM_MULTINODE bool "Support for multi-chip boards" diff --git a/trunk/arch/mips/netlogic/common/smp.c b/trunk/arch/mips/netlogic/common/smp.c index 2bb95dcfe20a..ffba52489bef 100644 --- a/trunk/arch/mips/netlogic/common/smp.c +++ b/trunk/arch/mips/netlogic/common/smp.c @@ -148,8 +148,7 @@ void nlm_cpus_done(void) int nlm_cpu_ready[NR_CPUS]; unsigned long nlm_next_gp; unsigned long nlm_next_sp; - -cpumask_t phys_cpu_present_map; +static cpumask_t phys_cpu_present_mask; void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) { @@ -169,11 +168,12 @@ void __init nlm_smp_setup(void) { unsigned int boot_cpu; int num_cpus, i, ncore; + char buf[64]; boot_cpu = hard_smp_processor_id(); - cpumask_clear(&phys_cpu_present_map); + cpumask_clear(&phys_cpu_present_mask); - cpumask_set_cpu(boot_cpu, &phys_cpu_present_map); + cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask); __cpu_number_map[boot_cpu] = 0; __cpu_logical_map[0] = boot_cpu; set_cpu_possible(0, true); @@ -185,7 +185,7 @@ void __init nlm_smp_setup(void) * it is only set for ASPs (see smpboot.S) */ if (nlm_cpu_ready[i]) { - cpumask_set_cpu(i, &phys_cpu_present_map); + cpumask_set_cpu(i, &phys_cpu_present_mask); __cpu_number_map[i] = num_cpus; __cpu_logical_map[num_cpus] = i; set_cpu_possible(num_cpus, true); @@ -193,16 +193,19 @@ void __init nlm_smp_setup(void) } } + cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask); + pr_info("Physical CPU mask: %s\n", buf); + cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask); + pr_info("Possible CPU mask: %s\n", buf); + /* check with the cores we have worken up */ for (ncore = 0, i = 0; i < NLM_NR_NODES; i++) ncore += hweight32(nlm_get_node(i)->coremask); - pr_info("Phys CPU present map: %lx, possible map %lx\n", - (unsigned long)cpumask_bits(&phys_cpu_present_map)[0], - (unsigned long)cpumask_bits(cpu_possible_mask)[0]); - pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore, nlm_threads_per_core, num_cpus); + + /* switch NMI handler to boot CPUs */ nlm_set_nmi_handler(nlm_boot_secondary_cpus); } diff --git a/trunk/arch/mips/netlogic/dts/Makefile b/trunk/arch/mips/netlogic/dts/Makefile index d117d46413aa..aecb6fa9a9c3 100644 --- 
a/trunk/arch/mips/netlogic/dts/Makefile +++ b/trunk/arch/mips/netlogic/dts/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_DT_XLP_EVP) := xlp_evp.dtb.o +obj-$(CONFIG_DT_XLP_SVP) += xlp_svp.dtb.o diff --git a/trunk/arch/mips/netlogic/dts/xlp_evp.dts b/trunk/arch/mips/netlogic/dts/xlp_evp.dts index 7628b5464fc7..e14f42308064 100644 --- a/trunk/arch/mips/netlogic/dts/xlp_evp.dts +++ b/trunk/arch/mips/netlogic/dts/xlp_evp.dts @@ -20,7 +20,7 @@ #address-cells = <2>; #size-cells = <1>; compatible = "simple-bus"; - ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG + ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG 1 0 0 0x16000000 0x01000000>; // GBU chipselects serial0: serial@30000 { diff --git a/trunk/arch/mips/netlogic/dts/xlp_svp.dts b/trunk/arch/mips/netlogic/dts/xlp_svp.dts new file mode 100644 index 000000000000..8af4bdbe5d99 --- /dev/null +++ b/trunk/arch/mips/netlogic/dts/xlp_svp.dts @@ -0,0 +1,124 @@ +/* + * XLP3XX Device Tree Source for SVP boards + */ + +/dts-v1/; +/ { + model = "netlogic,XLP-SVP"; + compatible = "netlogic,xlp"; + #address-cells = <2>; + #size-cells = <2>; + + memory { + device_type = "memory"; + reg = <0 0x00100000 0 0x0FF00000 // 255M at 1M + 0 0x20000000 0 0xa0000000 // 2560M at 512M + 0 0xe0000000 0 0x40000000>; + }; + + soc { + #address-cells = <2>; + #size-cells = <1>; + compatible = "simple-bus"; + ranges = <0 0 0 0x18000000 0x04000000 // PCIe CFG + 1 0 0 0x16000000 0x01000000>; // GBU chipselects + + serial0: serial@30000 { + device_type = "serial"; + compatible = "ns16550"; + reg = <0 0x30100 0xa00>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <133333333>; + interrupt-parent = <&pic>; + interrupts = <17>; + }; + serial1: serial@31000 { + device_type = "serial"; + compatible = "ns16550"; + reg = <0 0x31100 0xa00>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <133333333>; + interrupt-parent = <&pic>; + interrupts = <18>; + }; + i2c0: ocores@32000 { + compatible = "opencores,i2c-ocores"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x32100 0xa00>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <32000000>; + interrupt-parent = <&pic>; + interrupts = <30>; + }; + i2c1: ocores@33000 { + compatible = "opencores,i2c-ocores"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0 0x33100 0xa00>; + reg-shift = <2>; + reg-io-width = <4>; + clock-frequency = <32000000>; + interrupt-parent = <&pic>; + interrupts = <31>; + + rtc@68 { + compatible = "dallas,ds1374"; + reg = <0x68>; + }; + + dtt@4c { + compatible = "national,lm90"; + reg = <0x4c>; + }; + }; + pic: pic@4000 { + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <1>; + reg = <0 0x4000 0x200>; + }; + + nor_flash@1,0 { + compatible = "cfi-flash"; + #address-cells = <1>; + #size-cells = <1>; + bank-width = <2>; + reg = <1 0 0x1000000>; + + partition@0 { + label = "x-loader"; + reg = <0x0 0x100000>; /* 1M */ + read-only; + }; + + partition@100000 { + label = "u-boot"; + reg = <0x100000 0x100000>; /* 1M */ + }; + + partition@200000 { + label = "kernel"; + reg = <0x200000 0x500000>; /* 5M */ + }; + + partition@700000 { + label = "rootfs"; + reg = <0x700000 0x800000>; /* 8M */ + }; + + partition@f00000 { + label = "env"; + reg = <0xf00000 0x100000>; /* 1M */ + read-only; + }; + }; + }; + + chosen { + bootargs = "console=ttyS0,115200 rdinit=/sbin/init"; + }; +}; diff --git a/trunk/arch/mips/netlogic/xlp/nlm_hal.c b/trunk/arch/mips/netlogic/xlp/nlm_hal.c index c68fd4026104..87560e4db35f 100644 --- a/trunk/arch/mips/netlogic/xlp/nlm_hal.c +++ 
b/trunk/arch/mips/netlogic/xlp/nlm_hal.c @@ -61,43 +61,61 @@ void nlm_node_init(int node) int nlm_irq_to_irt(int irq) { - if (!PIC_IRQ_IS_IRT(irq)) - return -1; + uint64_t pcibase; + int devoff, irt; switch (irq) { case PIC_UART_0_IRQ: - return PIC_IRT_UART_0_INDEX; + devoff = XLP_IO_UART0_OFFSET(0); + break; case PIC_UART_1_IRQ: - return PIC_IRT_UART_1_INDEX; - case PIC_PCIE_LINK_0_IRQ: - return PIC_IRT_PCIE_LINK_0_INDEX; - case PIC_PCIE_LINK_1_IRQ: - return PIC_IRT_PCIE_LINK_1_INDEX; - case PIC_PCIE_LINK_2_IRQ: - return PIC_IRT_PCIE_LINK_2_INDEX; - case PIC_PCIE_LINK_3_IRQ: - return PIC_IRT_PCIE_LINK_3_INDEX; + devoff = XLP_IO_UART1_OFFSET(0); + break; case PIC_EHCI_0_IRQ: - return PIC_IRT_EHCI_0_INDEX; + devoff = XLP_IO_USB_EHCI0_OFFSET(0); + break; case PIC_EHCI_1_IRQ: - return PIC_IRT_EHCI_1_INDEX; + devoff = XLP_IO_USB_EHCI1_OFFSET(0); + break; case PIC_OHCI_0_IRQ: - return PIC_IRT_OHCI_0_INDEX; + devoff = XLP_IO_USB_OHCI0_OFFSET(0); + break; case PIC_OHCI_1_IRQ: - return PIC_IRT_OHCI_1_INDEX; + devoff = XLP_IO_USB_OHCI1_OFFSET(0); + break; case PIC_OHCI_2_IRQ: - return PIC_IRT_OHCI_2_INDEX; + devoff = XLP_IO_USB_OHCI2_OFFSET(0); + break; case PIC_OHCI_3_IRQ: - return PIC_IRT_OHCI_3_INDEX; + devoff = XLP_IO_USB_OHCI3_OFFSET(0); + break; case PIC_MMC_IRQ: - return PIC_IRT_MMC_INDEX; + devoff = XLP_IO_SD_OFFSET(0); + break; case PIC_I2C_0_IRQ: - return PIC_IRT_I2C_0_INDEX; + devoff = XLP_IO_I2C0_OFFSET(0); + break; case PIC_I2C_1_IRQ: - return PIC_IRT_I2C_1_INDEX; + devoff = XLP_IO_I2C1_OFFSET(0); + break; default: - return -1; + devoff = 0; + break; } + + if (devoff != 0) { + pcibase = nlm_pcicfg_base(devoff); + irt = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG) & 0xffff; + /* HW bug, I2C 1 irt entry is off by one */ + if (irq == PIC_I2C_1_IRQ) + irt = irt + 1; + } else if (irq >= PIC_PCIE_LINK_0_IRQ && irq <= PIC_PCIE_LINK_3_IRQ) { + /* HW bug, PCI IRT entries are bad on early silicon, fix */ + irt = PIC_IRT_PCIE_LINK_INDEX(irq - PIC_PCIE_LINK_0_IRQ); + } else { + irt = -1; + } + return irt; } unsigned int nlm_get_core_frequency(int node, int core) diff --git a/trunk/arch/mips/netlogic/xlp/setup.c b/trunk/arch/mips/netlogic/xlp/setup.c index 4894d62043ac..eaa99d28cb8e 100644 --- a/trunk/arch/mips/netlogic/xlp/setup.c +++ b/trunk/arch/mips/netlogic/xlp/setup.c @@ -37,6 +37,7 @@ #include #include +#include #include #include #include @@ -56,7 +57,7 @@ uint64_t nlm_io_base; struct nlm_soc_info nlm_nodes[NLM_NR_NODES]; cpumask_t nlm_cpumask = CPU_MASK_CPU0; unsigned int nlm_threads_per_core; -extern u32 __dtb_start[]; +extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_start[]; static void nlm_linux_exit(void) { @@ -82,8 +83,24 @@ void __init plat_mem_setup(void) * 64-bit, so convert pointer. 
*/ fdtp = (void *)(long)fw_arg0; - if (!fdtp) - fdtp = __dtb_start; + if (!fdtp) { + switch (current_cpu_data.processor_id & 0xff00) { +#ifdef CONFIG_DT_XLP_SVP + case PRID_IMP_NETLOGIC_XLP3XX: + fdtp = __dtb_xlp_svp_begin; + break; +#endif +#ifdef CONFIG_DT_XLP_EVP + case PRID_IMP_NETLOGIC_XLP8XX: + fdtp = __dtb_xlp_evp_begin; + break; +#endif + default: + /* Pick a built-in if any, and hope for the best */ + fdtp = __dtb_start; + break; + } + } fdtp = phys_to_virt(__pa(fdtp)); early_init_devtree(fdtp); } diff --git a/trunk/arch/mips/netlogic/xlp/usb-init.c b/trunk/arch/mips/netlogic/xlp/usb-init.c index 1d0b66c62fd1..9c401dd78337 100644 --- a/trunk/arch/mips/netlogic/xlp/usb-init.c +++ b/trunk/arch/mips/netlogic/xlp/usb-init.c @@ -42,7 +42,30 @@ #include #include #include -#include + +/* + * USB glue logic registers, used only during initialization + */ +#define USB_CTL_0 0x01 +#define USB_PHY_0 0x0A +#define USB_PHY_RESET 0x01 +#define USB_PHY_PORT_RESET_0 0x10 +#define USB_PHY_PORT_RESET_1 0x20 +#define USB_CONTROLLER_RESET 0x01 +#define USB_INT_STATUS 0x0E +#define USB_INT_EN 0x0F +#define USB_PHY_INTERRUPT_EN 0x01 +#define USB_OHCI_INTERRUPT_EN 0x02 +#define USB_OHCI_INTERRUPT1_EN 0x04 +#define USB_OHCI_INTERRUPT2_EN 0x08 +#define USB_CTRL_INTERRUPT_EN 0x10 + +#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r) +#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v) +#define nlm_get_usb_pcibase(node, inst) \ + nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst)) +#define nlm_get_usb_regbase(node, inst) \ + (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ) static void nlm_usb_intr_en(int node, int port) { @@ -99,23 +122,23 @@ static void nlm_usb_fixup_final(struct pci_dev *dev) dev->dev.coherent_dma_mask = DMA_BIT_MASK(64); switch (dev->devfn) { case 0x10: - dev->irq = PIC_EHCI_0_IRQ; - break; + dev->irq = PIC_EHCI_0_IRQ; + break; case 0x11: - dev->irq = PIC_OHCI_0_IRQ; - break; + dev->irq = PIC_OHCI_0_IRQ; + break; case 0x12: - dev->irq = PIC_OHCI_1_IRQ; - break; + dev->irq = PIC_OHCI_1_IRQ; + break; case 0x13: - dev->irq = PIC_EHCI_1_IRQ; - break; + dev->irq = PIC_EHCI_1_IRQ; + break; case 0x14: - dev->irq = PIC_OHCI_2_IRQ; - break; + dev->irq = PIC_OHCI_2_IRQ; + break; case 0x15: - dev->irq = PIC_OHCI_3_IRQ; - break; + dev->irq = PIC_OHCI_3_IRQ; + break; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI, diff --git a/trunk/arch/mips/netlogic/xlr/setup.c b/trunk/arch/mips/netlogic/xlr/setup.c index e3e094100e3e..89c8c1066632 100644 --- a/trunk/arch/mips/netlogic/xlr/setup.c +++ b/trunk/arch/mips/netlogic/xlr/setup.c @@ -36,6 +36,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/mips/oprofile/op_model_mipsxx.c b/trunk/arch/mips/oprofile/op_model_mipsxx.c index 1fd361462c03..e4b1140cdae0 100644 --- a/trunk/arch/mips/oprofile/op_model_mipsxx.c +++ b/trunk/arch/mips/oprofile/op_model_mipsxx.c @@ -41,7 +41,7 @@ static int (*save_perf_irq)(void); * first hardware thread in the core for setup and init. 
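plat_mem_setup() above now falls back to a built-in DTB chosen by the processor ID when the firmware did not hand over an FDT pointer. The standalone sketch below models only that selection; the PRID implementation values are illustrative stand-ins and the returned strings stand in for the linked-in blob symbols.

#include <stdio.h>

/* Illustrative PRID implementation fields; the kernel masks
 * processor_id with 0xff00 before comparing. */
#define PRID_IMP_NETLOGIC_XLP3XX 0x1100
#define PRID_IMP_NETLOGIC_XLP8XX 0x1000

static const char *pick_builtin_dtb(unsigned int processor_id)
{
	switch (processor_id & 0xff00) {
	case PRID_IMP_NETLOGIC_XLP3XX:
		return "__dtb_xlp_svp_begin";	/* SVP board blob */
	case PRID_IMP_NETLOGIC_XLP8XX:
		return "__dtb_xlp_evp_begin";	/* EVP board blob */
	default:
		return "__dtb_start";		/* whatever else was linked in */
	}
}

int main(void)
{
	printf("%s\n", pick_builtin_dtb(0x1103));	/* -> SVP blob */
	printf("%s\n", pick_builtin_dtb(0x1007));	/* -> EVP blob */
	return 0;
}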
* Skip CPUs with non-zero hardware thread id (4 hwt per core) */ -#ifdef CONFIG_CPU_XLR +#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP) #define oprofile_skip_cpu(c) ((cpu_logical_map(c) & 0x3) != 0) #else #define oprofile_skip_cpu(c) 0 diff --git a/trunk/arch/mips/pci/pci-ar71xx.c b/trunk/arch/mips/pci/pci-ar71xx.c index 412ec025cf55..18517dd0f709 100644 --- a/trunk/arch/mips/pci/pci-ar71xx.c +++ b/trunk/arch/mips/pci/pci-ar71xx.c @@ -366,9 +366,9 @@ static int ar71xx_pci_probe(struct platform_device *pdev) if (!res) return -EINVAL; - apc->cfg_base = devm_request_and_ioremap(&pdev->dev, res); - if (!apc->cfg_base) - return -ENOMEM; + apc->cfg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(apc->cfg_base)) + return PTR_ERR(apc->cfg_base); apc->irq = platform_get_irq(pdev, 0); if (apc->irq < 0) diff --git a/trunk/arch/mips/pci/pci-ar724x.c b/trunk/arch/mips/pci/pci-ar724x.c index 8a0700d448fe..65ec032fa0b4 100644 --- a/trunk/arch/mips/pci/pci-ar724x.c +++ b/trunk/arch/mips/pci/pci-ar724x.c @@ -365,25 +365,25 @@ static int ar724x_pci_probe(struct platform_device *pdev) if (!res) return -EINVAL; - apc->ctrl_base = devm_request_and_ioremap(&pdev->dev, res); - if (apc->ctrl_base == NULL) - return -EBUSY; + apc->ctrl_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(apc->ctrl_base)) + return PTR_ERR(apc->ctrl_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); if (!res) return -EINVAL; - apc->devcfg_base = devm_request_and_ioremap(&pdev->dev, res); - if (!apc->devcfg_base) - return -EBUSY; + apc->devcfg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(apc->devcfg_base)) + return PTR_ERR(apc->devcfg_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "crp_base"); if (!res) return -EINVAL; - apc->crp_base = devm_request_and_ioremap(&pdev->dev, res); - if (apc->crp_base == NULL) - return -EBUSY; + apc->crp_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(apc->crp_base)) + return PTR_ERR(apc->crp_base); apc->irq = platform_get_irq(pdev, 0); if (apc->irq < 0) diff --git a/trunk/arch/mips/pci/pci-bcm63xx.c b/trunk/arch/mips/pci/pci-bcm63xx.c index 88e781c6b5ba..2eb954239bc5 100644 --- a/trunk/arch/mips/pci/pci-bcm63xx.c +++ b/trunk/arch/mips/pci/pci-bcm63xx.c @@ -121,11 +121,17 @@ void __iomem *pci_iospace_start; static void __init bcm63xx_reset_pcie(void) { u32 val; + u32 reg; /* enable SERDES */ - val = bcm_misc_readl(MISC_SERDES_CTRL_REG); + if (BCMCPU_IS_6328()) + reg = MISC_SERDES_CTRL_6328_REG; + else + reg = MISC_SERDES_CTRL_6362_REG; + + val = bcm_misc_readl(reg); val |= SERDES_PCIE_EN | SERDES_PCIE_EXD_EN; - bcm_misc_writel(val, MISC_SERDES_CTRL_REG); + bcm_misc_writel(val, reg); /* reset the PCIe core */ bcm63xx_core_set_reset(BCM63XX_RESET_PCIE, 1); @@ -330,6 +336,7 @@ static int __init bcm63xx_pci_init(void) switch (bcm63xx_get_cpu_id()) { case BCM6328_CPU_ID: + case BCM6362_CPU_ID: return bcm63xx_register_pcie(); case BCM6348_CPU_ID: case BCM6358_CPU_ID: diff --git a/trunk/arch/mips/pmcs-msp71xx/msp_prom.c b/trunk/arch/mips/pmcs-msp71xx/msp_prom.c index 0edb89a63516..1c9897531660 100644 --- a/trunk/arch/mips/pmcs-msp71xx/msp_prom.c +++ b/trunk/arch/mips/pmcs-msp71xx/msp_prom.c @@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c) return 0; /* foo */ } -static inline int str2eaddr(unsigned char *ea, unsigned char *str) +int str2eaddr(unsigned char *ea, unsigned char *str) { int index = 0; unsigned char num = 0; diff --git a/trunk/arch/mips/pmcs-msp71xx/msp_setup.c 
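The pci-ar71xx/ar724x hunks above replace devm_request_and_ioremap(), which only reported failure as NULL, with devm_ioremap_resource(), which returns an ERR_PTR and therefore propagates a precise error code. A minimal probe-function sketch of that idiom; the function name is a placeholder.

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);	/* -EBUSY, -EINVAL, -ENOMEM as appropriate */

	/* use 'base' for register access; devm unmaps it automatically */
	return 0;
}

Because devm_ioremap_resource() also reports a missing resource itself, the error handling collapses to the single IS_ERR() check shown in the patch.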
b/trunk/arch/mips/pmcs-msp71xx/msp_setup.c index 1651cfdbfe7b..396b2967ad85 100644 --- a/trunk/arch/mips/pmcs-msp71xx/msp_setup.c +++ b/trunk/arch/mips/pmcs-msp71xx/msp_setup.c @@ -12,6 +12,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/mips/powertv/init.c b/trunk/arch/mips/powertv/init.c index 5bd9d8f468cc..a01baff52cae 100644 --- a/trunk/arch/mips/powertv/init.c +++ b/trunk/arch/mips/powertv/init.c @@ -29,10 +29,11 @@ #include #include -#include #include #include +#include "init.h" + static int *_prom_envp; unsigned long _prom_memsize; diff --git a/trunk/arch/mips/powertv/init.h b/trunk/arch/mips/powertv/init.h index b194c34ca966..c1a8bd0dbe4b 100644 --- a/trunk/arch/mips/powertv/init.h +++ b/trunk/arch/mips/powertv/init.h @@ -23,4 +23,6 @@ #ifndef _POWERTV_INIT_H #define _POWERTV_INIT_H extern unsigned long _prom_memsize; +extern void prom_meminit(void); +extern char *prom_getenv(char *name); #endif diff --git a/trunk/arch/mips/powertv/memory.c b/trunk/arch/mips/powertv/memory.c index 6e5f1bdc59b5..bc2f3ca22b41 100644 --- a/trunk/arch/mips/powertv/memory.c +++ b/trunk/arch/mips/powertv/memory.c @@ -29,7 +29,6 @@ #include #include -#include #include #include diff --git a/trunk/arch/mips/powertv/powertv_setup.c b/trunk/arch/mips/powertv/powertv_setup.c index 820b8480f222..24689bff1039 100644 --- a/trunk/arch/mips/powertv/powertv_setup.c +++ b/trunk/arch/mips/powertv/powertv_setup.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include #include diff --git a/trunk/arch/mips/ralink/Kconfig b/trunk/arch/mips/ralink/Kconfig index a0b0197cab0a..026e823d871d 100644 --- a/trunk/arch/mips/ralink/Kconfig +++ b/trunk/arch/mips/ralink/Kconfig @@ -6,12 +6,23 @@ choice help Select Ralink MIPS SoC type. + config SOC_RT288X + bool "RT288x" + config SOC_RT305X bool "RT305x" select USB_ARCH_HAS_HCD select USB_ARCH_HAS_OHCI select USB_ARCH_HAS_EHCI + config SOC_RT3883 + bool "RT3883" + select USB_ARCH_HAS_OHCI + select USB_ARCH_HAS_EHCI + + config SOC_MT7620 + bool "MT7620" + endchoice choice @@ -23,10 +34,22 @@ choice config DTB_RT_NONE bool "None" + config DTB_RT2880_EVAL + bool "RT2880 eval kit" + depends on SOC_RT288X + config DTB_RT305X_EVAL bool "RT305x eval kit" depends on SOC_RT305X + config DTB_RT3883_EVAL + bool "RT3883 eval kit" + depends on SOC_RT3883 + + config DTB_MT7620A_EVAL + bool "MT7620A eval kit" + depends on SOC_MT7620 + endchoice endif diff --git a/trunk/arch/mips/ralink/Makefile b/trunk/arch/mips/ralink/Makefile index 939757f0e71f..38cf1a880aaa 100644 --- a/trunk/arch/mips/ralink/Makefile +++ b/trunk/arch/mips/ralink/Makefile @@ -8,7 +8,10 @@ obj-y := prom.o of.o reset.o clk.o irq.o +obj-$(CONFIG_SOC_RT288X) += rt288x.o obj-$(CONFIG_SOC_RT305X) += rt305x.o +obj-$(CONFIG_SOC_RT3883) += rt3883.o +obj-$(CONFIG_SOC_MT7620) += mt7620.o obj-$(CONFIG_EARLY_PRINTK) += early_printk.o diff --git a/trunk/arch/mips/ralink/Platform b/trunk/arch/mips/ralink/Platform index 6babd65765e6..cda4b6645c50 100644 --- a/trunk/arch/mips/ralink/Platform +++ b/trunk/arch/mips/ralink/Platform @@ -4,7 +4,25 @@ core-$(CONFIG_RALINK) += arch/mips/ralink/ cflags-$(CONFIG_RALINK) += -I$(srctree)/arch/mips/include/asm/mach-ralink +# +# Ralink RT288x +# +load-$(CONFIG_SOC_RT288X) += 0xffffffff88000000 +cflags-$(CONFIG_SOC_RT288X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt288x + # # Ralink RT305x # load-$(CONFIG_SOC_RT305X) += 0xffffffff80000000 +cflags-$(CONFIG_SOC_RT305X) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt305x + +# +# Ralink RT3883 +# 
+load-$(CONFIG_SOC_RT3883) += 0xffffffff80000000 +cflags-$(CONFIG_SOC_RT3883) += -I$(srctree)/arch/mips/include/asm/mach-ralink/rt3883 + +# +# Ralink MT7620 +# +load-$(CONFIG_SOC_MT7620) += 0xffffffff80000000 diff --git a/trunk/arch/mips/ralink/common.h b/trunk/arch/mips/ralink/common.h index 300990313e1b..83144c3fc5ac 100644 --- a/trunk/arch/mips/ralink/common.h +++ b/trunk/arch/mips/ralink/common.h @@ -22,13 +22,22 @@ struct ralink_pinmux { struct ralink_pinmux_grp *mode; struct ralink_pinmux_grp *uart; int uart_shift; + u32 uart_mask; void (*wdt_reset)(void); + struct ralink_pinmux_grp *pci; + int pci_shift; + u32 pci_mask; }; -extern struct ralink_pinmux gpio_pinmux; +extern struct ralink_pinmux rt_gpio_pinmux; struct ralink_soc_info { unsigned char sys_type[RAMIPS_SYS_TYPE_LEN]; unsigned char *compatible; + + unsigned long mem_base; + unsigned long mem_size; + unsigned long mem_size_min; + unsigned long mem_size_max; }; extern struct ralink_soc_info soc_info; diff --git a/trunk/arch/mips/ralink/dts/Makefile b/trunk/arch/mips/ralink/dts/Makefile index 1a69fb300955..18194fa93e80 100644 --- a/trunk/arch/mips/ralink/dts/Makefile +++ b/trunk/arch/mips/ralink/dts/Makefile @@ -1 +1,4 @@ +obj-$(CONFIG_DTB_RT2880_EVAL) := rt2880_eval.dtb.o obj-$(CONFIG_DTB_RT305X_EVAL) := rt3052_eval.dtb.o +obj-$(CONFIG_DTB_RT3883_EVAL) := rt3883_eval.dtb.o +obj-$(CONFIG_DTB_MT7620A_EVAL) := mt7620a_eval.dtb.o diff --git a/trunk/arch/mips/ralink/dts/mt7620a.dtsi b/trunk/arch/mips/ralink/dts/mt7620a.dtsi new file mode 100644 index 000000000000..08bf24fefe9f --- /dev/null +++ b/trunk/arch/mips/ralink/dts/mt7620a.dtsi @@ -0,0 +1,58 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ralink,mtk7620a-soc"; + + cpus { + cpu@0 { + compatible = "mips,mips24KEc"; + }; + }; + + cpuintc: cpuintc@0 { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + palmbus@10000000 { + compatible = "palmbus"; + reg = <0x10000000 0x200000>; + ranges = <0x0 0x10000000 0x1FFFFF>; + + #address-cells = <1>; + #size-cells = <1>; + + sysc@0 { + compatible = "ralink,mt7620a-sysc"; + reg = <0x0 0x100>; + }; + + intc: intc@200 { + compatible = "ralink,mt7620a-intc", "ralink,rt2880-intc"; + reg = <0x200 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + }; + + memc@300 { + compatible = "ralink,mt7620a-memc", "ralink,rt3050-memc"; + reg = <0x300 0x100>; + }; + + uartlite@c00 { + compatible = "ralink,mt7620a-uart", "ralink,rt2880-uart", "ns16550a"; + reg = <0xc00 0x100>; + + interrupt-parent = <&intc>; + interrupts = <12>; + + reg-shift = <2>; + }; + }; +}; diff --git a/trunk/arch/mips/ralink/dts/mt7620a_eval.dts b/trunk/arch/mips/ralink/dts/mt7620a_eval.dts new file mode 100644 index 000000000000..35eb874ab7f1 --- /dev/null +++ b/trunk/arch/mips/ralink/dts/mt7620a_eval.dts @@ -0,0 +1,16 @@ +/dts-v1/; + +/include/ "mt7620a.dtsi" + +/ { + compatible = "ralink,mt7620a-eval-board", "ralink,mt7620a-soc"; + model = "Ralink MT7620A evaluation board"; + + memory@0 { + reg = <0x0 0x2000000>; + }; + + chosen { + bootargs = "console=ttyS0,57600"; + }; +}; diff --git a/trunk/arch/mips/ralink/dts/rt2880.dtsi b/trunk/arch/mips/ralink/dts/rt2880.dtsi new file mode 100644 index 000000000000..182afde2f2e1 --- /dev/null +++ b/trunk/arch/mips/ralink/dts/rt2880.dtsi @@ -0,0 +1,58 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ralink,rt2880-soc"; + + cpus { + cpu@0 { + compatible = 
"mips,mips4KEc"; + }; + }; + + cpuintc: cpuintc@0 { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + palmbus@300000 { + compatible = "palmbus"; + reg = <0x300000 0x200000>; + ranges = <0x0 0x300000 0x1FFFFF>; + + #address-cells = <1>; + #size-cells = <1>; + + sysc@0 { + compatible = "ralink,rt2880-sysc"; + reg = <0x0 0x100>; + }; + + intc: intc@200 { + compatible = "ralink,rt2880-intc"; + reg = <0x200 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + }; + + memc@300 { + compatible = "ralink,rt2880-memc"; + reg = <0x300 0x100>; + }; + + uartlite@c00 { + compatible = "ralink,rt2880-uart", "ns16550a"; + reg = <0xc00 0x100>; + + interrupt-parent = <&intc>; + interrupts = <8>; + + reg-shift = <2>; + }; + }; +}; diff --git a/trunk/arch/mips/ralink/dts/rt2880_eval.dts b/trunk/arch/mips/ralink/dts/rt2880_eval.dts new file mode 100644 index 000000000000..322d7002595b --- /dev/null +++ b/trunk/arch/mips/ralink/dts/rt2880_eval.dts @@ -0,0 +1,46 @@ +/dts-v1/; + +/include/ "rt2880.dtsi" + +/ { + compatible = "ralink,rt2880-eval-board", "ralink,rt2880-soc"; + model = "Ralink RT2880 evaluation board"; + + memory@0 { + reg = <0x8000000 0x2000000>; + }; + + chosen { + bootargs = "console=ttyS0,57600"; + }; + + cfi@1f000000 { + compatible = "cfi-flash"; + reg = <0x1f000000 0x400000>; + + bank-width = <2>; + device-width = <2>; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "uboot"; + reg = <0x0 0x30000>; + read-only; + }; + partition@30000 { + label = "uboot-env"; + reg = <0x30000 0x10000>; + read-only; + }; + partition@40000 { + label = "calibration"; + reg = <0x40000 0x10000>; + read-only; + }; + partition@50000 { + label = "linux"; + reg = <0x50000 0x3b0000>; + }; + }; +}; diff --git a/trunk/arch/mips/ralink/dts/rt3050.dtsi b/trunk/arch/mips/ralink/dts/rt3050.dtsi index 069d0660e1dd..e3203d414fee 100644 --- a/trunk/arch/mips/ralink/dts/rt3050.dtsi +++ b/trunk/arch/mips/ralink/dts/rt3050.dtsi @@ -1,7 +1,7 @@ / { #address-cells = <1>; #size-cells = <1>; - compatible = "ralink,rt3050-soc", "ralink,rt3052-soc"; + compatible = "ralink,rt3050-soc", "ralink,rt3052-soc", "ralink,rt3350-soc"; cpus { cpu@0 { @@ -9,10 +9,6 @@ }; }; - chosen { - bootargs = "console=ttyS0,57600 init=/init"; - }; - cpuintc: cpuintc@0 { #address-cells = <0>; #interrupt-cells = <1>; @@ -23,7 +19,7 @@ palmbus@10000000 { compatible = "palmbus"; reg = <0x10000000 0x200000>; - ranges = <0x0 0x10000000 0x1FFFFF>; + ranges = <0x0 0x10000000 0x1FFFFF>; #address-cells = <1>; #size-cells = <1>; @@ -33,11 +29,6 @@ reg = <0x0 0x100>; }; - timer@100 { - compatible = "ralink,rt3052-wdt", "ralink,rt2880-wdt"; - reg = <0x100 0x100>; - }; - intc: intc@200 { compatible = "ralink,rt3052-intc", "ralink,rt2880-intc"; reg = <0x200 0x100>; @@ -54,45 +45,6 @@ reg = <0x300 0x100>; }; - gpio0: gpio@600 { - compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; - reg = <0x600 0x34>; - - gpio-controller; - #gpio-cells = <2>; - - ralink,ngpio = <24>; - ralink,regs = [ 00 04 08 0c - 20 24 28 2c - 30 34 ]; - }; - - gpio1: gpio@638 { - compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; - reg = <0x638 0x24>; - - gpio-controller; - #gpio-cells = <2>; - - ralink,ngpio = <16>; - ralink,regs = [ 00 04 08 0c - 10 14 18 1c - 20 24 ]; - }; - - gpio2: gpio@660 { - compatible = "ralink,rt3052-gpio", "ralink,rt2880-gpio"; - reg = <0x660 0x24>; - - gpio-controller; - #gpio-cells = <2>; - - 
ralink,ngpio = <12>; - ralink,regs = [ 00 04 08 0c - 10 14 18 1c - 20 24 ]; - }; - uartlite@c00 { compatible = "ralink,rt3052-uart", "ralink,rt2880-uart", "ns16550a"; reg = <0xc00 0x100>; @@ -103,4 +55,14 @@ reg-shift = <2>; }; }; + + usb@101c0000 { + compatible = "ralink,rt3050-usb", "snps,dwc2"; + reg = <0x101c0000 40000>; + + interrupt-parent = <&intc>; + interrupts = <18>; + + status = "disabled"; + }; }; diff --git a/trunk/arch/mips/ralink/dts/rt3052_eval.dts b/trunk/arch/mips/ralink/dts/rt3052_eval.dts index 148a590bc419..0ac73ea28198 100644 --- a/trunk/arch/mips/ralink/dts/rt3052_eval.dts +++ b/trunk/arch/mips/ralink/dts/rt3052_eval.dts @@ -1,10 +1,8 @@ /dts-v1/; -/include/ "rt3050.dtsi" +#include "rt3050.dtsi" / { - #address-cells = <1>; - #size-cells = <1>; compatible = "ralink,rt3052-eval-board", "ralink,rt3052-soc"; model = "Ralink RT3052 evaluation board"; @@ -12,12 +10,8 @@ reg = <0x0 0x2000000>; }; - palmbus@10000000 { - sysc@0 { - ralink,pinmmux = "uartlite", "spi"; - ralink,uartmux = "gpio"; - ralink,wdtmux = <0>; - }; + chosen { + bootargs = "console=ttyS0,57600"; }; cfi@1f000000 { @@ -49,4 +43,8 @@ reg = <0x50000 0x7b0000>; }; }; + + usb@101c0000 { + status = "ok"; + }; }; diff --git a/trunk/arch/mips/ralink/dts/rt3883.dtsi b/trunk/arch/mips/ralink/dts/rt3883.dtsi new file mode 100644 index 000000000000..3b131dd0d5ac --- /dev/null +++ b/trunk/arch/mips/ralink/dts/rt3883.dtsi @@ -0,0 +1,58 @@ +/ { + #address-cells = <1>; + #size-cells = <1>; + compatible = "ralink,rt3883-soc"; + + cpus { + cpu@0 { + compatible = "mips,mips74Kc"; + }; + }; + + cpuintc: cpuintc@0 { + #address-cells = <0>; + #interrupt-cells = <1>; + interrupt-controller; + compatible = "mti,cpu-interrupt-controller"; + }; + + palmbus@10000000 { + compatible = "palmbus"; + reg = <0x10000000 0x200000>; + ranges = <0x0 0x10000000 0x1FFFFF>; + + #address-cells = <1>; + #size-cells = <1>; + + sysc@0 { + compatible = "ralink,rt3883-sysc", "ralink,rt3050-sysc"; + reg = <0x0 0x100>; + }; + + intc: intc@200 { + compatible = "ralink,rt3883-intc", "ralink,rt2880-intc"; + reg = <0x200 0x100>; + + interrupt-controller; + #interrupt-cells = <1>; + + interrupt-parent = <&cpuintc>; + interrupts = <2>; + }; + + memc@300 { + compatible = "ralink,rt3883-memc", "ralink,rt3050-memc"; + reg = <0x300 0x100>; + }; + + uartlite@c00 { + compatible = "ralink,rt3883-uart", "ralink,rt2880-uart", "ns16550a"; + reg = <0xc00 0x100>; + + interrupt-parent = <&intc>; + interrupts = <12>; + + reg-shift = <2>; + }; + }; +}; diff --git a/trunk/arch/mips/ralink/dts/rt3883_eval.dts b/trunk/arch/mips/ralink/dts/rt3883_eval.dts new file mode 100644 index 000000000000..2fa6b330bf4f --- /dev/null +++ b/trunk/arch/mips/ralink/dts/rt3883_eval.dts @@ -0,0 +1,16 @@ +/dts-v1/; + +/include/ "rt3883.dtsi" + +/ { + compatible = "ralink,rt3883-eval-board", "ralink,rt3883-soc"; + model = "Ralink RT3883 evaluation board"; + + memory@0 { + reg = <0x0 0x2000000>; + }; + + chosen { + bootargs = "console=ttyS0,57600"; + }; +}; diff --git a/trunk/arch/mips/ralink/early_printk.c b/trunk/arch/mips/ralink/early_printk.c index c4ae47eb24ab..b46d0419d09b 100644 --- a/trunk/arch/mips/ralink/early_printk.c +++ b/trunk/arch/mips/ralink/early_printk.c @@ -11,7 +11,11 @@ #include +#ifdef CONFIG_SOC_RT288X +#define EARLY_UART_BASE 0x300c00 +#else #define EARLY_UART_BASE 0x10000c00 +#endif #define UART_REG_RX 0x00 #define UART_REG_TX 0x04 diff --git a/trunk/arch/mips/ralink/irq.c b/trunk/arch/mips/ralink/irq.c index 6d054c5ec9ab..320b1f1043ff 100644 --- 
a/trunk/arch/mips/ralink/irq.c +++ b/trunk/arch/mips/ralink/irq.c @@ -31,6 +31,7 @@ #define INTC_INT_GLOBAL BIT(31) #define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2) +#define RALINK_CPU_IRQ_PCI (MIPS_CPU_IRQ_BASE + 4) #define RALINK_CPU_IRQ_FE (MIPS_CPU_IRQ_BASE + 5) #define RALINK_CPU_IRQ_WIFI (MIPS_CPU_IRQ_BASE + 6) #define RALINK_CPU_IRQ_COUNTER (MIPS_CPU_IRQ_BASE + 7) @@ -104,6 +105,9 @@ asmlinkage void plat_irq_dispatch(void) else if (pending & STATUSF_IP6) do_IRQ(RALINK_CPU_IRQ_WIFI); + else if (pending & STATUSF_IP4) + do_IRQ(RALINK_CPU_IRQ_PCI); + else if (pending & STATUSF_IP2) do_IRQ(RALINK_CPU_IRQ_INTC); @@ -162,6 +166,7 @@ static int __init intc_of_init(struct device_node *node, irq_set_chained_handler(irq, ralink_intc_irq_handler); irq_set_handler_data(irq, domain); + /* tell the kernel which irq is used for performance monitoring */ cp0_perfcount_irq = irq_create_mapping(domain, 9); return 0; diff --git a/trunk/arch/mips/ralink/mt7620.c b/trunk/arch/mips/ralink/mt7620.c new file mode 100644 index 000000000000..0018b1a661f6 --- /dev/null +++ b/trunk/arch/mips/ralink/mt7620.c @@ -0,0 +1,234 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * Copyright (C) 2013 John Crispin + */ + +#include +#include +#include + +#include +#include +#include + +#include "common.h" + +/* does the board have sdram or ddram */ +static int dram_type; + +/* the pll dividers */ +static u32 mt7620_clk_divider[] = { 2, 3, 4, 8 }; + +static struct ralink_pinmux_grp mode_mux[] = { + { + .name = "i2c", + .mask = MT7620_GPIO_MODE_I2C, + .gpio_first = 1, + .gpio_last = 2, + }, { + .name = "spi", + .mask = MT7620_GPIO_MODE_SPI, + .gpio_first = 3, + .gpio_last = 6, + }, { + .name = "uartlite", + .mask = MT7620_GPIO_MODE_UART1, + .gpio_first = 15, + .gpio_last = 16, + }, { + .name = "wdt", + .mask = MT7620_GPIO_MODE_WDT, + .gpio_first = 17, + .gpio_last = 17, + }, { + .name = "mdio", + .mask = MT7620_GPIO_MODE_MDIO, + .gpio_first = 22, + .gpio_last = 23, + }, { + .name = "rgmii1", + .mask = MT7620_GPIO_MODE_RGMII1, + .gpio_first = 24, + .gpio_last = 35, + }, { + .name = "spi refclk", + .mask = MT7620_GPIO_MODE_SPI_REF_CLK, + .gpio_first = 37, + .gpio_last = 39, + }, { + .name = "jtag", + .mask = MT7620_GPIO_MODE_JTAG, + .gpio_first = 40, + .gpio_last = 44, + }, { + /* shared lines with jtag */ + .name = "ephy", + .mask = MT7620_GPIO_MODE_EPHY, + .gpio_first = 40, + .gpio_last = 44, + }, { + .name = "nand", + .mask = MT7620_GPIO_MODE_JTAG, + .gpio_first = 45, + .gpio_last = 59, + }, { + .name = "rgmii2", + .mask = MT7620_GPIO_MODE_RGMII2, + .gpio_first = 60, + .gpio_last = 71, + }, { + .name = "wled", + .mask = MT7620_GPIO_MODE_WLED, + .gpio_first = 72, + .gpio_last = 72, + }, {0} +}; + +static struct ralink_pinmux_grp uart_mux[] = { + { + .name = "uartf", + .mask = MT7620_GPIO_MODE_UARTF, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "pcm uartf", + .mask = MT7620_GPIO_MODE_PCM_UARTF, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "pcm i2s", + .mask = MT7620_GPIO_MODE_PCM_I2S, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "i2s uartf", + .mask = MT7620_GPIO_MODE_I2S_UARTF, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "pcm gpio", + .mask = MT7620_GPIO_MODE_PCM_GPIO, + .gpio_first = 11, + 
.gpio_last = 14, + }, { + .name = "gpio uartf", + .mask = MT7620_GPIO_MODE_GPIO_UARTF, + .gpio_first = 7, + .gpio_last = 10, + }, { + .name = "gpio i2s", + .mask = MT7620_GPIO_MODE_GPIO_I2S, + .gpio_first = 7, + .gpio_last = 10, + }, { + .name = "gpio", + .mask = MT7620_GPIO_MODE_GPIO, + }, {0} +}; + +struct ralink_pinmux rt_gpio_pinmux = { + .mode = mode_mux, + .uart = uart_mux, + .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT, + .uart_mask = MT7620_GPIO_MODE_UART0_MASK, +}; + +void __init ralink_clk_init(void) +{ + unsigned long cpu_rate, sys_rate; + u32 c0 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG0); + u32 c1 = rt_sysc_r32(SYSC_REG_CPLL_CONFIG1); + u32 swconfig = (c0 >> CPLL_SW_CONFIG_SHIFT) & CPLL_SW_CONFIG_MASK; + u32 cpu_clk = (c1 >> CPLL_CPU_CLK_SHIFT) & CPLL_CPU_CLK_MASK; + + if (cpu_clk) { + cpu_rate = 480000000; + } else if (!swconfig) { + cpu_rate = 600000000; + } else { + u32 m = (c0 >> CPLL_MULT_RATIO_SHIFT) & CPLL_MULT_RATIO; + u32 d = (c0 >> CPLL_DIV_RATIO_SHIFT) & CPLL_DIV_RATIO; + + cpu_rate = ((40 * (m + 24)) / mt7620_clk_divider[d]) * 1000000; + } + + if (dram_type == SYSCFG0_DRAM_TYPE_SDRAM) + sys_rate = cpu_rate / 4; + else + sys_rate = cpu_rate / 3; + + ralink_clk_add("cpu", cpu_rate); + ralink_clk_add("10000100.timer", 40000000); + ralink_clk_add("10000500.uart", 40000000); + ralink_clk_add("10000c00.uartlite", 40000000); +} + +void __init ralink_of_remap(void) +{ + rt_sysc_membase = plat_of_remap_node("ralink,mt7620a-sysc"); + rt_memc_membase = plat_of_remap_node("ralink,mt7620a-memc"); + + if (!rt_sysc_membase || !rt_memc_membase) + panic("Failed to remap core resources"); +} + +void prom_soc_init(struct ralink_soc_info *soc_info) +{ + void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE); + unsigned char *name = NULL; + u32 n0; + u32 n1; + u32 rev; + u32 cfg0; + + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); + n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); + + if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) { + name = "MT7620N"; + soc_info->compatible = "ralink,mt7620n-soc"; + } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) { + name = "MT7620A"; + soc_info->compatible = "ralink,mt7620a-soc"; + } else { + panic("mt7620: unknown SoC, n0:%08x n1:%08x\n", n0, n1); + } + + rev = __raw_readl(sysc + SYSC_REG_CHIP_REV); + + snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, + "Ralink %s ver:%u eco:%u", + name, + (rev >> CHIP_REV_VER_SHIFT) & CHIP_REV_VER_MASK, + (rev & CHIP_REV_ECO_MASK)); + + cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0); + dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK; + + switch (dram_type) { + case SYSCFG0_DRAM_TYPE_SDRAM: + soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN; + soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX; + break; + + case SYSCFG0_DRAM_TYPE_DDR1: + soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN; + soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX; + break; + + case SYSCFG0_DRAM_TYPE_DDR2: + soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN; + soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX; + break; + default: + BUG(); + } + soc_info->mem_base = MT7620_DRAM_BASE; +} diff --git a/trunk/arch/mips/ralink/of.c b/trunk/arch/mips/ralink/of.c index 4165e70775be..fb1569580def 100644 --- a/trunk/arch/mips/ralink/of.c +++ b/trunk/arch/mips/ralink/of.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -85,6 +86,14 @@ void __init plat_mem_setup(void) * parsed resulting in our memory appearing */ __dt_setup_arch(&__dtb_start); + + if (soc_info.mem_size) + 
add_memory_region(soc_info.mem_base, soc_info.mem_size, + BOOT_MEM_RAM); + else + detect_memory_region(soc_info.mem_base, + soc_info.mem_size_min * SZ_1M, + soc_info.mem_size_max * SZ_1M); } static int __init plat_of_setup(void) diff --git a/trunk/arch/mips/ralink/rt288x.c b/trunk/arch/mips/ralink/rt288x.c new file mode 100644 index 000000000000..f87de1ab2198 --- /dev/null +++ b/trunk/arch/mips/ralink/rt288x.c @@ -0,0 +1,143 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2008 Imre Kaloz + * Copyright (C) 2013 John Crispin + */ + +#include +#include +#include + +#include +#include +#include + +#include "common.h" + +static struct ralink_pinmux_grp mode_mux[] = { + { + .name = "i2c", + .mask = RT2880_GPIO_MODE_I2C, + .gpio_first = 1, + .gpio_last = 2, + }, { + .name = "spi", + .mask = RT2880_GPIO_MODE_SPI, + .gpio_first = 3, + .gpio_last = 6, + }, { + .name = "uartlite", + .mask = RT2880_GPIO_MODE_UART0, + .gpio_first = 7, + .gpio_last = 14, + }, { + .name = "jtag", + .mask = RT2880_GPIO_MODE_JTAG, + .gpio_first = 17, + .gpio_last = 21, + }, { + .name = "mdio", + .mask = RT2880_GPIO_MODE_MDIO, + .gpio_first = 22, + .gpio_last = 23, + }, { + .name = "sdram", + .mask = RT2880_GPIO_MODE_SDRAM, + .gpio_first = 24, + .gpio_last = 39, + }, { + .name = "pci", + .mask = RT2880_GPIO_MODE_PCI, + .gpio_first = 40, + .gpio_last = 71, + }, {0} +}; + +static void rt288x_wdt_reset(void) +{ + u32 t; + + /* enable WDT reset output on pin SRAM_CS_N */ + t = rt_sysc_r32(SYSC_REG_CLKCFG); + t |= CLKCFG_SRAM_CS_N_WDT; + rt_sysc_w32(t, SYSC_REG_CLKCFG); +} + +struct ralink_pinmux rt_gpio_pinmux = { + .mode = mode_mux, + .wdt_reset = rt288x_wdt_reset, +}; + +void __init ralink_clk_init(void) +{ + unsigned long cpu_rate; + u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); + t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK); + + switch (t) { + case SYSTEM_CONFIG_CPUCLK_250: + cpu_rate = 250000000; + break; + case SYSTEM_CONFIG_CPUCLK_266: + cpu_rate = 266666667; + break; + case SYSTEM_CONFIG_CPUCLK_280: + cpu_rate = 280000000; + break; + case SYSTEM_CONFIG_CPUCLK_300: + cpu_rate = 300000000; + break; + } + + ralink_clk_add("cpu", cpu_rate); + ralink_clk_add("300100.timer", cpu_rate / 2); + ralink_clk_add("300120.watchdog", cpu_rate / 2); + ralink_clk_add("300500.uart", cpu_rate / 2); + ralink_clk_add("300c00.uartlite", cpu_rate / 2); + ralink_clk_add("400000.ethernet", cpu_rate / 2); +} + +void __init ralink_of_remap(void) +{ + rt_sysc_membase = plat_of_remap_node("ralink,rt2880-sysc"); + rt_memc_membase = plat_of_remap_node("ralink,rt2880-memc"); + + if (!rt_sysc_membase || !rt_memc_membase) + panic("Failed to remap core resources"); +} + +void prom_soc_init(struct ralink_soc_info *soc_info) +{ + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT2880_SYSC_BASE); + const char *name; + u32 n0; + u32 n1; + u32 id; + + n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); + n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); + id = __raw_readl(sysc + SYSC_REG_CHIP_ID); + + if (n0 == RT2880_CHIP_NAME0 && n1 == RT2880_CHIP_NAME1) { + soc_info->compatible = "ralink,r2880-soc"; + name = "RT2880"; + } else { + panic("rt288x: unknown SoC, n0:%08x n1:%08x", n0, n1); + } + + snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, + "Ralink %s id:%u rev:%u", + name, + 
(id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK, + (id & CHIP_ID_REV_MASK)); + + soc_info->mem_base = RT2880_SDRAM_BASE; + soc_info->mem_size_min = RT2880_MEM_SIZE_MIN; + soc_info->mem_size_max = RT2880_MEM_SIZE_MAX; +} diff --git a/trunk/arch/mips/ralink/rt305x.c b/trunk/arch/mips/ralink/rt305x.c index 0a4bbdcf59d9..ca7ee3a33790 100644 --- a/trunk/arch/mips/ralink/rt305x.c +++ b/trunk/arch/mips/ralink/rt305x.c @@ -22,7 +22,7 @@ enum rt305x_soc_type rt305x_soc; -struct ralink_pinmux_grp mode_mux[] = { +static struct ralink_pinmux_grp mode_mux[] = { { .name = "i2c", .mask = RT305X_GPIO_MODE_I2C, @@ -61,7 +61,7 @@ struct ralink_pinmux_grp mode_mux[] = { }, {0} }; -struct ralink_pinmux_grp uart_mux[] = { +static struct ralink_pinmux_grp uart_mux[] = { { .name = "uartf", .mask = RT305X_GPIO_MODE_UARTF, @@ -91,19 +91,19 @@ struct ralink_pinmux_grp uart_mux[] = { .name = "gpio uartf", .mask = RT305X_GPIO_MODE_GPIO_UARTF, .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_14, + .gpio_last = RT305X_GPIO_10, }, { .name = "gpio i2s", .mask = RT305X_GPIO_MODE_GPIO_I2S, .gpio_first = RT305X_GPIO_7, - .gpio_last = RT305X_GPIO_14, + .gpio_last = RT305X_GPIO_10, }, { .name = "gpio", .mask = RT305X_GPIO_MODE_GPIO, }, {0} }; -void rt305x_wdt_reset(void) +static void rt305x_wdt_reset(void) { u32 t; @@ -114,16 +114,53 @@ void rt305x_wdt_reset(void) rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); } -struct ralink_pinmux gpio_pinmux = { +struct ralink_pinmux rt_gpio_pinmux = { .mode = mode_mux, .uart = uart_mux, .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT, + .uart_mask = RT305X_GPIO_MODE_UART0_MASK, .wdt_reset = rt305x_wdt_reset, }; +static unsigned long rt5350_get_mem_size(void) +{ + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); + unsigned long ret; + u32 t; + + t = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG); + t = (t >> RT5350_SYSCFG0_DRAM_SIZE_SHIFT) & + RT5350_SYSCFG0_DRAM_SIZE_MASK; + + switch (t) { + case RT5350_SYSCFG0_DRAM_SIZE_2M: + ret = 2; + break; + case RT5350_SYSCFG0_DRAM_SIZE_8M: + ret = 8; + break; + case RT5350_SYSCFG0_DRAM_SIZE_16M: + ret = 16; + break; + case RT5350_SYSCFG0_DRAM_SIZE_32M: + ret = 32; + break; + case RT5350_SYSCFG0_DRAM_SIZE_64M: + ret = 64; + break; + default: + panic("rt5350: invalid DRAM size: %u", t); + break; + } + + return ret; +} + void __init ralink_clk_init(void) { unsigned long cpu_rate, sys_rate, wdt_rate, uart_rate; + unsigned long wmac_rate = 40000000; + u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); if (soc_is_rt305x() || soc_is_rt3350()) { @@ -176,11 +213,21 @@ void __init ralink_clk_init(void) BUG(); } + if (soc_is_rt3352() || soc_is_rt5350()) { + u32 val = rt_sysc_r32(RT3352_SYSC_REG_SYSCFG0); + + if (!(val & RT3352_CLKCFG0_XTAL_SEL)) + wmac_rate = 20000000; + } + ralink_clk_add("cpu", cpu_rate); ralink_clk_add("10000b00.spi", sys_rate); ralink_clk_add("10000100.timer", wdt_rate); + ralink_clk_add("10000120.watchdog", wdt_rate); ralink_clk_add("10000500.uart", uart_rate); ralink_clk_add("10000c00.uartlite", uart_rate); + ralink_clk_add("10100000.ethernet", sys_rate); + ralink_clk_add("10180000.wmac", wmac_rate); } void __init ralink_of_remap(void) @@ -239,4 +286,15 @@ void prom_soc_init(struct ralink_soc_info *soc_info) name, (id >> CHIP_ID_ID_SHIFT) & CHIP_ID_ID_MASK, (id & CHIP_ID_REV_MASK)); + + soc_info->mem_base = RT305X_SDRAM_BASE; + if (soc_is_rt5350()) { + soc_info->mem_size = rt5350_get_mem_size(); + } else if (soc_is_rt305x() || soc_is_rt3350()) { + soc_info->mem_size_min = RT305X_MEM_SIZE_MIN; + soc_info->mem_size_max = 
RT305X_MEM_SIZE_MAX; + } else if (soc_is_rt3352()) { + soc_info->mem_size_min = RT3352_MEM_SIZE_MIN; + soc_info->mem_size_max = RT3352_MEM_SIZE_MAX; + } } diff --git a/trunk/arch/mips/ralink/rt3883.c b/trunk/arch/mips/ralink/rt3883.c new file mode 100644 index 000000000000..b474ac284b83 --- /dev/null +++ b/trunk/arch/mips/ralink/rt3883.c @@ -0,0 +1,246 @@ +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * Parts of this file are based on Ralink's 2.6.21 BSP + * + * Copyright (C) 2008 Imre Kaloz + * Copyright (C) 2008-2011 Gabor Juhos + * Copyright (C) 2013 John Crispin + */ + +#include +#include +#include + +#include +#include +#include + +#include "common.h" + +static struct ralink_pinmux_grp mode_mux[] = { + { + .name = "i2c", + .mask = RT3883_GPIO_MODE_I2C, + .gpio_first = RT3883_GPIO_I2C_SD, + .gpio_last = RT3883_GPIO_I2C_SCLK, + }, { + .name = "spi", + .mask = RT3883_GPIO_MODE_SPI, + .gpio_first = RT3883_GPIO_SPI_CS0, + .gpio_last = RT3883_GPIO_SPI_MISO, + }, { + .name = "uartlite", + .mask = RT3883_GPIO_MODE_UART1, + .gpio_first = RT3883_GPIO_UART1_TXD, + .gpio_last = RT3883_GPIO_UART1_RXD, + }, { + .name = "jtag", + .mask = RT3883_GPIO_MODE_JTAG, + .gpio_first = RT3883_GPIO_JTAG_TDO, + .gpio_last = RT3883_GPIO_JTAG_TCLK, + }, { + .name = "mdio", + .mask = RT3883_GPIO_MODE_MDIO, + .gpio_first = RT3883_GPIO_MDIO_MDC, + .gpio_last = RT3883_GPIO_MDIO_MDIO, + }, { + .name = "ge1", + .mask = RT3883_GPIO_MODE_GE1, + .gpio_first = RT3883_GPIO_GE1_TXD0, + .gpio_last = RT3883_GPIO_GE1_RXCLK, + }, { + .name = "ge2", + .mask = RT3883_GPIO_MODE_GE2, + .gpio_first = RT3883_GPIO_GE2_TXD0, + .gpio_last = RT3883_GPIO_GE2_RXCLK, + }, { + .name = "pci", + .mask = RT3883_GPIO_MODE_PCI, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "lna a", + .mask = RT3883_GPIO_MODE_LNA_A, + .gpio_first = RT3883_GPIO_LNA_PE_A0, + .gpio_last = RT3883_GPIO_LNA_PE_A2, + }, { + .name = "lna g", + .mask = RT3883_GPIO_MODE_LNA_G, + .gpio_first = RT3883_GPIO_LNA_PE_G0, + .gpio_last = RT3883_GPIO_LNA_PE_G2, + }, {0} +}; + +static struct ralink_pinmux_grp uart_mux[] = { + { + .name = "uartf", + .mask = RT3883_GPIO_MODE_UARTF, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "pcm uartf", + .mask = RT3883_GPIO_MODE_PCM_UARTF, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "pcm i2s", + .mask = RT3883_GPIO_MODE_PCM_I2S, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "i2s uartf", + .mask = RT3883_GPIO_MODE_I2S_UARTF, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "pcm gpio", + .mask = RT3883_GPIO_MODE_PCM_GPIO, + .gpio_first = RT3883_GPIO_11, + .gpio_last = RT3883_GPIO_14, + }, { + .name = "gpio uartf", + .mask = RT3883_GPIO_MODE_GPIO_UARTF, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_10, + }, { + .name = "gpio i2s", + .mask = RT3883_GPIO_MODE_GPIO_I2S, + .gpio_first = RT3883_GPIO_7, + .gpio_last = RT3883_GPIO_10, + }, { + .name = "gpio", + .mask = RT3883_GPIO_MODE_GPIO, + }, {0} +}; + +static struct ralink_pinmux_grp pci_mux[] = { + { + .name = "pci-dev", + .mask = 0, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "pci-host2", + .mask = 1, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "pci-host1", + .mask = 2, + 
.gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "pci-fnc", + .mask = 3, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, { + .name = "pci-gpio", + .mask = 7, + .gpio_first = RT3883_GPIO_PCI_AD0, + .gpio_last = RT3883_GPIO_PCI_AD31, + }, {0} +}; + +static void rt3883_wdt_reset(void) +{ + u32 t; + + /* enable WDT reset output on GPIO 2 */ + t = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG1); + t |= RT3883_SYSCFG1_GPIO2_AS_WDT_OUT; + rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1); +} + +struct ralink_pinmux rt_gpio_pinmux = { + .mode = mode_mux, + .uart = uart_mux, + .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT, + .uart_mask = RT3883_GPIO_MODE_UART0_MASK, + .wdt_reset = rt3883_wdt_reset, + .pci = pci_mux, + .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT, + .pci_mask = RT3883_GPIO_MODE_PCI_MASK, +}; + +void __init ralink_clk_init(void) +{ + unsigned long cpu_rate, sys_rate; + u32 syscfg0; + u32 clksel; + u32 ddr2; + + syscfg0 = rt_sysc_r32(RT3883_SYSC_REG_SYSCFG0); + clksel = ((syscfg0 >> RT3883_SYSCFG0_CPUCLK_SHIFT) & + RT3883_SYSCFG0_CPUCLK_MASK); + ddr2 = syscfg0 & RT3883_SYSCFG0_DRAM_TYPE_DDR2; + + switch (clksel) { + case RT3883_SYSCFG0_CPUCLK_250: + cpu_rate = 250000000; + sys_rate = (ddr2) ? 125000000 : 83000000; + break; + case RT3883_SYSCFG0_CPUCLK_384: + cpu_rate = 384000000; + sys_rate = (ddr2) ? 128000000 : 96000000; + break; + case RT3883_SYSCFG0_CPUCLK_480: + cpu_rate = 480000000; + sys_rate = (ddr2) ? 160000000 : 120000000; + break; + case RT3883_SYSCFG0_CPUCLK_500: + cpu_rate = 500000000; + sys_rate = (ddr2) ? 166000000 : 125000000; + break; + } + + ralink_clk_add("cpu", cpu_rate); + ralink_clk_add("10000100.timer", sys_rate); + ralink_clk_add("10000120.watchdog", sys_rate); + ralink_clk_add("10000500.uart", 40000000); + ralink_clk_add("10000b00.spi", sys_rate); + ralink_clk_add("10000c00.uartlite", 40000000); + ralink_clk_add("10100000.ethernet", sys_rate); +} + +void __init ralink_of_remap(void) +{ + rt_sysc_membase = plat_of_remap_node("ralink,rt3883-sysc"); + rt_memc_membase = plat_of_remap_node("ralink,rt3883-memc"); + + if (!rt_sysc_membase || !rt_memc_membase) + panic("Failed to remap core resources"); +} + +void prom_soc_init(struct ralink_soc_info *soc_info) +{ + void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT3883_SYSC_BASE); + const char *name; + u32 n0; + u32 n1; + u32 id; + + n0 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID0_3); + n1 = __raw_readl(sysc + RT3883_SYSC_REG_CHIPID4_7); + id = __raw_readl(sysc + RT3883_SYSC_REG_REVID); + + if (n0 == RT3883_CHIP_NAME0 && n1 == RT3883_CHIP_NAME1) { + soc_info->compatible = "ralink,rt3883-soc"; + name = "RT3883"; + } else { + panic("rt3883: unknown SoC, n0:%08x n1:%08x", n0, n1); + } + + snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, + "Ralink %s ver:%u eco:%u", + name, + (id >> RT3883_REVID_VER_ID_SHIFT) & RT3883_REVID_VER_ID_MASK, + (id & RT3883_REVID_ECO_ID_MASK)); + + soc_info->mem_base = RT3883_SDRAM_BASE; + soc_info->mem_size_min = RT3883_MEM_SIZE_MIN; + soc_info->mem_size_max = RT3883_MEM_SIZE_MAX; +} diff --git a/trunk/arch/mips/sgi-ip27/ip27-klnuma.c b/trunk/arch/mips/sgi-ip27/ip27-klnuma.c index 1d1919a44e88..7a53b1e28a93 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-klnuma.c +++ b/trunk/arch/mips/sgi-ip27/ip27-klnuma.c @@ -114,7 +114,7 @@ void __init replicate_kernel_text() * data structures on the first couple of pages of the first slot of each * node. If this is the case, getfirstfree(node) > getslotstart(node, 0). 
*/ -pfn_t node_getfirstfree(cnodeid_t cnode) +unsigned long node_getfirstfree(cnodeid_t cnode) { unsigned long loadbase = REP_BASE; nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); diff --git a/trunk/arch/mips/sgi-ip27/ip27-memory.c b/trunk/arch/mips/sgi-ip27/ip27-memory.c index 5f2bddb1860e..1230f56429d7 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-memory.c +++ b/trunk/arch/mips/sgi-ip27/ip27-memory.c @@ -255,14 +255,14 @@ static void __init dump_topology(void) } } -static pfn_t __init slot_getbasepfn(cnodeid_t cnode, int slot) +static unsigned long __init slot_getbasepfn(cnodeid_t cnode, int slot) { nasid_t nasid = COMPACT_TO_NASID_NODEID(cnode); - return ((pfn_t)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT); + return ((unsigned long)nasid << PFN_NASIDSHFT) | (slot << SLOT_PFNSHIFT); } -static pfn_t __init slot_psize_compute(cnodeid_t node, int slot) +static unsigned long __init slot_psize_compute(cnodeid_t node, int slot) { nasid_t nasid; lboard_t *brd; @@ -353,7 +353,7 @@ static void __init mlreset(void) static void __init szmem(void) { - pfn_t slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */ + unsigned long slot_psize, slot0sz = 0, nodebytes; /* Hack to detect problem configs */ int slot; cnodeid_t node; @@ -390,10 +390,10 @@ static void __init szmem(void) static void __init node_mem_init(cnodeid_t node) { - pfn_t slot_firstpfn = slot_getbasepfn(node, 0); - pfn_t slot_freepfn = node_getfirstfree(node); + unsigned long slot_firstpfn = slot_getbasepfn(node, 0); + unsigned long slot_freepfn = node_getfirstfree(node); unsigned long bootmap_size; - pfn_t start_pfn, end_pfn; + unsigned long start_pfn, end_pfn; get_pfn_range_for_nid(node, &start_pfn, &end_pfn); @@ -467,7 +467,7 @@ void __init paging_init(void) pagetable_init(); for_each_online_node(node) { - pfn_t start_pfn, end_pfn; + unsigned long start_pfn, end_pfn; get_pfn_range_for_nid(node, &start_pfn, &end_pfn); diff --git a/trunk/arch/mips/sgi-ip27/ip27-timer.c b/trunk/arch/mips/sgi-ip27/ip27-timer.c index fff58ac176f3..2e21b761cb9c 100644 --- a/trunk/arch/mips/sgi-ip27/ip27-timer.c +++ b/trunk/arch/mips/sgi-ip27/ip27-timer.c @@ -69,7 +69,7 @@ static void rt_set_mode(enum clock_event_mode mode, /* Nothing to do ... 
*/ } -int rt_timer_irq; +unsigned int rt_timer_irq; static DEFINE_PER_CPU(struct clock_event_device, hub_rt_clockevent); static DEFINE_PER_CPU(char [11], hub_rt_name); diff --git a/trunk/arch/mips/txx9/generic/setup.c b/trunk/arch/mips/txx9/generic/setup.c index 5524f2c7b05c..681e7f86c080 100644 --- a/trunk/arch/mips/txx9/generic/setup.c +++ b/trunk/arch/mips/txx9/generic/setup.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -118,7 +119,7 @@ EXPORT_SYMBOL(clk_put); /* GPIO support */ -#ifdef CONFIG_GENERIC_GPIO +#ifdef CONFIG_GPIOLIB int gpio_to_irq(unsigned gpio) { return -EINVAL; diff --git a/trunk/arch/mips/vr41xx/common/pmu.c b/trunk/arch/mips/vr41xx/common/pmu.c index 70a3f90131d8..d7f755833c3f 100644 --- a/trunk/arch/mips/vr41xx/common/pmu.c +++ b/trunk/arch/mips/vr41xx/common/pmu.c @@ -27,6 +27,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/mips/wrppmc/reset.c b/trunk/arch/mips/wrppmc/reset.c index cc5474b24f06..80beb188ed47 100644 --- a/trunk/arch/mips/wrppmc/reset.c +++ b/trunk/arch/mips/wrppmc/reset.c @@ -9,6 +9,7 @@ #include #include +#include #include #include diff --git a/trunk/arch/openrisc/Kconfig b/trunk/arch/openrisc/Kconfig index 81b9ddbc9166..1072bfd18c50 100644 --- a/trunk/arch/openrisc/Kconfig +++ b/trunk/arch/openrisc/Kconfig @@ -44,9 +44,6 @@ config GENERIC_HWEIGHT config NO_IOPORT def_bool y -config GENERIC_GPIO - def_bool y - config TRACE_IRQFLAGS_SUPPORT def_bool y diff --git a/trunk/arch/parisc/Kconfig b/trunk/arch/parisc/Kconfig index cad060f288cf..6507dabdd5dd 100644 --- a/trunk/arch/parisc/Kconfig +++ b/trunk/arch/parisc/Kconfig @@ -245,7 +245,7 @@ config SMP config IRQSTACKS bool "Use separate kernel stacks when processing interrupts" - default n + default y help If you say Y here the kernel will use separate kernel stacks for handling hard and soft interrupts. 
This can help avoid diff --git a/trunk/arch/parisc/Makefile b/trunk/arch/parisc/Makefile index 2f967cc6649e..197690068f88 100644 --- a/trunk/arch/parisc/Makefile +++ b/trunk/arch/parisc/Makefile @@ -23,24 +23,21 @@ NM = sh $(srctree)/arch/parisc/nm CHECKFLAGS += -D__hppa__=1 LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) -MACHINE := $(shell uname -m) -NATIVE := $(if $(filter parisc%,$(MACHINE)),1,0) - ifdef CONFIG_64BIT UTS_MACHINE := parisc64 CHECKFLAGS += -D__LP64__=1 -m64 -WIDTH := 64 +CC_ARCHES = hppa64 else # 32-bit -WIDTH := +CC_ARCHES = hppa hppa2.0 hppa1.1 endif -# attempt to help out folks who are cross-compiling -ifeq ($(NATIVE),1) -CROSS_COMPILE := hppa$(WIDTH)-linux- -else - ifeq ($(CROSS_COMPILE),) - CROSS_COMPILE := hppa$(WIDTH)-linux-gnu- - endif +ifneq ($(SUBARCH),$(UTS_MACHINE)) + ifeq ($(CROSS_COMPILE),) + CC_SUFFIXES = linux linux-gnu unknown-linux-gnu + CROSS_COMPILE := $(call cc-cross-prefix, \ + $(foreach a,$(CC_ARCHES), \ + $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) + endif endif OBJCOPY_FLAGS =-O binary -R .note -R .comment -S diff --git a/trunk/arch/parisc/include/asm/hardirq.h b/trunk/arch/parisc/include/asm/hardirq.h index 12373c4dabab..c19f7138ba48 100644 --- a/trunk/arch/parisc/include/asm/hardirq.h +++ b/trunk/arch/parisc/include/asm/hardirq.h @@ -11,10 +11,18 @@ #include #include +#ifdef CONFIG_IRQSTACKS +#define __ARCH_HAS_DO_SOFTIRQ +#endif + typedef struct { unsigned int __softirq_pending; #ifdef CONFIG_DEBUG_STACKOVERFLOW unsigned int kernel_stack_usage; +#ifdef CONFIG_IRQSTACKS + unsigned int irq_stack_usage; + unsigned int irq_stack_counter; +#endif #endif #ifdef CONFIG_SMP unsigned int irq_resched_count; @@ -28,6 +36,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); #define __ARCH_IRQ_STAT #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) +#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member) #define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) #define __ARCH_SET_SOFTIRQ_PENDING diff --git a/trunk/arch/parisc/include/asm/processor.h b/trunk/arch/parisc/include/asm/processor.h index 064015547d1e..cfbc43929cf6 100644 --- a/trunk/arch/parisc/include/asm/processor.h +++ b/trunk/arch/parisc/include/asm/processor.h @@ -63,10 +63,13 @@ */ #ifdef __KERNEL__ +#include + #define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ union irq_stack_union { unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; + raw_spinlock_t lock; }; DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); diff --git a/trunk/arch/parisc/kernel/entry.S b/trunk/arch/parisc/kernel/entry.S index 4bb96ad9b0b1..ae27cb6ce19a 100644 --- a/trunk/arch/parisc/kernel/entry.S +++ b/trunk/arch/parisc/kernel/entry.S @@ -452,9 +452,41 @@ L2_ptep \pgd,\pte,\index,\va,\fault .endm + /* Acquire pa_dbit_lock lock. */ + .macro dbit_lock spc,tmp,tmp1 +#ifdef CONFIG_SMP + cmpib,COND(=),n 0,\spc,2f + load32 PA(pa_dbit_lock),\tmp +1: LDCW 0(\tmp),\tmp1 + cmpib,COND(=) 0,\tmp1,1b + nop +2: +#endif + .endm + + /* Release pa_dbit_lock lock without reloading lock address. */ + .macro dbit_unlock0 spc,tmp +#ifdef CONFIG_SMP + or,COND(=) %r0,\spc,%r0 + stw \spc,0(\tmp) +#endif + .endm + + /* Release pa_dbit_lock lock. */ + .macro dbit_unlock1 spc,tmp +#ifdef CONFIG_SMP + load32 PA(pa_dbit_lock),\tmp + dbit_unlock0 \spc,\tmp +#endif + .endm + /* Set the _PAGE_ACCESSED bit of the PTE. 
Be clever and * don't needlessly dirty the cache line if it was already set */ - .macro update_ptep ptep,pte,tmp,tmp1 + .macro update_ptep spc,ptep,pte,tmp,tmp1 +#ifdef CONFIG_SMP + or,COND(=) %r0,\spc,%r0 + LDREG 0(\ptep),\pte +#endif ldi _PAGE_ACCESSED,\tmp1 or \tmp1,\pte,\tmp and,COND(<>) \tmp1,\pte,%r0 @@ -463,7 +495,11 @@ /* Set the dirty bit (and accessed bit). No need to be * clever, this is only used from the dirty fault */ - .macro update_dirty ptep,pte,tmp + .macro update_dirty spc,ptep,pte,tmp +#ifdef CONFIG_SMP + or,COND(=) %r0,\spc,%r0 + LDREG 0(\ptep),\pte +#endif ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp or \tmp,\pte,\pte STREG \pte,0(\ptep) @@ -1111,11 +1147,13 @@ dtlb_miss_20w: L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot + dbit_unlock1 spc,t0 rfir nop @@ -1135,11 +1173,13 @@ nadtlb_miss_20w: L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot + dbit_unlock1 spc,t0 rfir nop @@ -1161,7 +1201,8 @@ dtlb_miss_11: L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1172,6 +1213,7 @@ dtlb_miss_11: idtlbp prot,(%sr1,va) mtsp t0, %sr1 /* Restore sr1 */ + dbit_unlock1 spc,t0 rfir nop @@ -1192,7 +1234,8 @@ nadtlb_miss_11: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1204,6 +1247,7 @@ nadtlb_miss_11: idtlbp prot,(%sr1,va) mtsp t0, %sr1 /* Restore sr1 */ + dbit_unlock1 spc,t0 rfir nop @@ -1224,13 +1268,15 @@ dtlb_miss_20: L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb spc,pte,prot f_extend pte,t0 idtlbt pte,prot + dbit_unlock1 spc,t0 rfir nop @@ -1250,13 +1296,15 @@ nadtlb_miss_20: L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb spc,pte,prot f_extend pte,t0 idtlbt pte,prot + dbit_unlock1 spc,t0 rfir nop @@ -1357,11 +1405,13 @@ itlb_miss_20w: L3_ptep ptp,pte,t0,va,itlb_fault - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb spc,pte,prot iitlbt pte,prot + dbit_unlock1 spc,t0 rfir nop @@ -1379,11 +1429,13 @@ naitlb_miss_20w: L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb spc,pte,prot iitlbt pte,prot + dbit_unlock1 spc,t0 rfir nop @@ -1405,7 +1457,8 @@ itlb_miss_11: L2_ptep ptp,pte,t0,va,itlb_fault - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1416,6 +1469,7 @@ itlb_miss_11: iitlbp prot,(%sr1,va) mtsp t0, %sr1 /* Restore sr1 */ + dbit_unlock1 spc,t0 rfir nop @@ -1427,7 +1481,8 @@ naitlb_miss_11: L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb_11 spc,pte,prot @@ -1438,6 +1493,7 @@ naitlb_miss_11: iitlbp prot,(%sr1,va) mtsp t0, %sr1 /* Restore sr1 */ + dbit_unlock1 spc,t0 rfir nop @@ -1459,13 +1515,15 @@ itlb_miss_20: L2_ptep ptp,pte,t0,va,itlb_fault - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb 
spc,pte,prot f_extend pte,t0 iitlbt pte,prot + dbit_unlock1 spc,t0 rfir nop @@ -1477,13 +1535,15 @@ naitlb_miss_20: L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 - update_ptep ptp,pte,t0,t1 + dbit_lock spc,t0,t1 + update_ptep spc,ptp,pte,t0,t1 make_insert_tlb spc,pte,prot f_extend pte,t0 iitlbt pte,prot + dbit_unlock1 spc,t0 rfir nop @@ -1507,29 +1567,13 @@ dbit_trap_20w: L3_ptep ptp,pte,t0,va,dbit_fault -#ifdef CONFIG_SMP - cmpib,COND(=),n 0,spc,dbit_nolock_20w - load32 PA(pa_dbit_lock),t0 - -dbit_spin_20w: - LDCW 0(t0),t1 - cmpib,COND(=) 0,t1,dbit_spin_20w - nop - -dbit_nolock_20w: -#endif - update_dirty ptp,pte,t1 + dbit_lock spc,t0,t1 + update_dirty spc,ptp,pte,t1 make_insert_tlb spc,pte,prot idtlbt pte,prot -#ifdef CONFIG_SMP - cmpib,COND(=),n 0,spc,dbit_nounlock_20w - ldi 1,t1 - stw t1,0(t0) - -dbit_nounlock_20w: -#endif + dbit_unlock0 spc,t0 rfir nop @@ -1543,18 +1587,8 @@ dbit_trap_11: L2_ptep ptp,pte,t0,va,dbit_fault -#ifdef CONFIG_SMP - cmpib,COND(=),n 0,spc,dbit_nolock_11 - load32 PA(pa_dbit_lock),t0 - -dbit_spin_11: - LDCW 0(t0),t1 - cmpib,= 0,t1,dbit_spin_11 - nop - -dbit_nolock_11: -#endif - update_dirty ptp,pte,t1 + dbit_lock spc,t0,t1 + update_dirty spc,ptp,pte,t1 make_insert_tlb_11 spc,pte,prot @@ -1565,13 +1599,7 @@ dbit_nolock_11: idtlbp prot,(%sr1,va) mtsp t1, %sr1 /* Restore sr1 */ -#ifdef CONFIG_SMP - cmpib,COND(=),n 0,spc,dbit_nounlock_11 - ldi 1,t1 - stw t1,0(t0) - -dbit_nounlock_11: -#endif + dbit_unlock0 spc,t0 rfir nop @@ -1583,32 +1611,15 @@ dbit_trap_20: L2_ptep ptp,pte,t0,va,dbit_fault -#ifdef CONFIG_SMP - cmpib,COND(=),n 0,spc,dbit_nolock_20 - load32 PA(pa_dbit_lock),t0 - -dbit_spin_20: - LDCW 0(t0),t1 - cmpib,= 0,t1,dbit_spin_20 - nop - -dbit_nolock_20: -#endif - update_dirty ptp,pte,t1 + dbit_lock spc,t0,t1 + update_dirty spc,ptp,pte,t1 make_insert_tlb spc,pte,prot f_extend pte,t1 idtlbt pte,prot - -#ifdef CONFIG_SMP - cmpib,COND(=),n 0,spc,dbit_nounlock_20 - ldi 1,t1 - stw t1,0(t0) - -dbit_nounlock_20: -#endif + dbit_unlock0 spc,t0 rfir nop diff --git a/trunk/arch/parisc/kernel/irq.c b/trunk/arch/parisc/kernel/irq.c index e255db0bb761..55237a70e197 100644 --- a/trunk/arch/parisc/kernel/irq.c +++ b/trunk/arch/parisc/kernel/irq.c @@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec) seq_printf(p, "%*s: ", prec, "STK"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage); - seq_printf(p, " Kernel stack usage\n"); + seq_puts(p, " Kernel stack usage\n"); +# ifdef CONFIG_IRQSTACKS + seq_printf(p, "%*s: ", prec, "IST"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage); + seq_puts(p, " Interrupt stack usage\n"); + seq_printf(p, "%*s: ", prec, "ISC"); + for_each_online_cpu(j) + seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter); + seq_puts(p, " Interrupt stack usage counter\n"); +# endif #endif #ifdef CONFIG_SMP seq_printf(p, "%*s: ", prec, "RES"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); - seq_printf(p, " Rescheduling interrupts\n"); + seq_puts(p, " Rescheduling interrupts\n"); seq_printf(p, "%*s: ", prec, "CAL"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); - seq_printf(p, " Function call interrupts\n"); + seq_puts(p, " Function call interrupts\n"); #endif seq_printf(p, "%*s: ", prec, "TLB"); for_each_online_cpu(j) seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); - seq_printf(p, " TLB shootdowns\n"); + seq_puts(p, " TLB shootdowns\n"); return 0; } @@ -378,6 +388,7 @@ static inline void 
stack_overflow_check(struct pt_regs *regs) unsigned long sp = regs->gr[30]; unsigned long stack_usage; unsigned int *last_usage; + int cpu = smp_processor_id(); /* if sr7 != 0, we interrupted a userspace process which we do not want * to check for stack overflow. We will only check the kernel stack. */ @@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs) /* calculate kernel stack usage */ stack_usage = sp - stack_start; - last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id()); +#ifdef CONFIG_IRQSTACKS + if (likely(stack_usage <= THREAD_SIZE)) + goto check_kernel_stack; /* found kernel stack */ + + /* check irq stack usage */ + stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; + stack_usage = sp - stack_start; + + last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); + if (unlikely(stack_usage > *last_usage)) + *last_usage = stack_usage; + + if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN))) + return; + + pr_emerg("stackcheck: %s will most likely overflow irq stack " + "(sp:%lx, stk bottom-top:%lx-%lx)\n", + current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE); + goto panic_check; + +check_kernel_stack: +#endif + + /* check kernel stack usage */ + last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); if (unlikely(stack_usage > *last_usage)) *last_usage = stack_usage; @@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs) "(sp:%lx, stk bottom-top:%lx-%lx)\n", current->comm, sp, stack_start, stack_start + THREAD_SIZE); +#ifdef CONFIG_IRQSTACKS +panic_check: +#endif if (sysctl_panic_on_stackoverflow) panic("low stack detected by irq handler - check messages\n"); #endif } #ifdef CONFIG_IRQSTACKS -DEFINE_PER_CPU(union irq_stack_union, irq_stack_union); +DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { + .lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock) + }; static void execute_on_irq_stack(void *func, unsigned long param1) { - unsigned long *irq_stack_start; + union irq_stack_union *union_ptr; unsigned long irq_stack; - int cpu = smp_processor_id(); + raw_spinlock_t *irq_stack_in_use; - irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0]; - irq_stack = (unsigned long) irq_stack_start; - irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */ + union_ptr = &per_cpu(irq_stack_union, smp_processor_id()); + irq_stack = (unsigned long) &union_ptr->stack; + irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock), + 64); /* align for stack frame usage */ - BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */ - *irq_stack_start = 1; + /* We may be called recursive. If we are already using the irq stack, + * just continue to use it. Use spinlocks to serialize + * the irq stack usage. + */ + irq_stack_in_use = &union_ptr->lock; + if (!raw_spin_trylock(irq_stack_in_use)) { + void (*direct_call)(unsigned long p1) = func; + + /* We are using the IRQ stack already. + * Do direct call on current stack. */ + direct_call(param1); + return; + } /* This is where we switch to the IRQ stack. */ call_on_stack(param1, func, irq_stack); - *irq_stack_start = 0; + __inc_irq_stat(irq_stack_counter); + + /* free up irq stack usage. 
*/ + do_raw_spin_unlock(irq_stack_in_use); +} + +asmlinkage void do_softirq(void) +{ + __u32 pending; + unsigned long flags; + + if (in_interrupt()) + return; + + local_irq_save(flags); + + pending = local_softirq_pending(); + + if (pending) + execute_on_irq_stack(__do_softirq, 0); + + local_irq_restore(flags); } #endif /* CONFIG_IRQSTACKS */ diff --git a/trunk/arch/parisc/kernel/sys_parisc32.c b/trunk/arch/parisc/kernel/sys_parisc32.c index f517e08e7f0d..a134ff4da12e 100644 --- a/trunk/arch/parisc/kernel/sys_parisc32.c +++ b/trunk/arch/parisc/kernel/sys_parisc32.c @@ -59,11 +59,3 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, current->comm, current->pid, r20); return -ENOSYS; } - -asmlinkage long compat_sys_fanotify_mark(int fan_fd, int flags, u32 mask_hi, - u32 mask_lo, int fd, - const char __user *pathname) -{ - return sys_fanotify_mark(fan_fd, flags, ((u64)mask_hi << 32) | mask_lo, - fd, pathname); -} diff --git a/trunk/arch/parisc/mm/init.c b/trunk/arch/parisc/mm/init.c index ce939ac8622b..1c965642068b 100644 --- a/trunk/arch/parisc/mm/init.c +++ b/trunk/arch/parisc/mm/init.c @@ -1069,7 +1069,7 @@ void flush_tlb_all(void) { int do_recycle; - inc_irq_stat(irq_tlb_count); + __inc_irq_stat(irq_tlb_count); do_recycle = 0; spin_lock(&sid_lock); if (dirty_space_ids > RECYCLE_THRESHOLD) { @@ -1090,7 +1090,7 @@ void flush_tlb_all(void) #else void flush_tlb_all(void) { - inc_irq_stat(irq_tlb_count); + __inc_irq_stat(irq_tlb_count); spin_lock(&sid_lock); flush_tlb_all_local(NULL); recycle_sids(); diff --git a/trunk/arch/powerpc/Kconfig b/trunk/arch/powerpc/Kconfig index bbbe02197afb..c33e3ad2c8fd 100644 --- a/trunk/arch/powerpc/Kconfig +++ b/trunk/arch/powerpc/Kconfig @@ -82,11 +82,6 @@ config GENERIC_HWEIGHT bool default y -config GENERIC_GPIO - bool - help - Generic GPIO API support - config PPC bool default y diff --git a/trunk/arch/powerpc/Kconfig.debug b/trunk/arch/powerpc/Kconfig.debug index 5416e28a7538..863d877e0b5f 100644 --- a/trunk/arch/powerpc/Kconfig.debug +++ b/trunk/arch/powerpc/Kconfig.debug @@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI Select this to enable early debugging for the PowerNV platform using an "hvsi" console +config PPC_EARLY_DEBUG_MEMCONS + bool "In memory console" + help + Select this to enable early debugging using an in memory console. + This console provides input and output buffers stored within the + kernel BSS and should be safe to select on any system. A debugger + can then be used to read kernel output or send input to the console. endchoice +config PPC_MEMCONS_OUTPUT_SIZE + int "In memory console output buffer size" + depends on PPC_EARLY_DEBUG_MEMCONS + default 4096 + help + Selects the size of the output buffer (in bytes) of the in memory + console. + +config PPC_MEMCONS_INPUT_SIZE + int "In memory console input buffer size" + depends on PPC_EARLY_DEBUG_MEMCONS + default 128 + help + Selects the size of the input buffer (in bytes) of the in memory + console. 
+ config PPC_EARLY_DEBUG_OPAL def_bool y depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI diff --git a/trunk/arch/powerpc/configs/ps3_defconfig b/trunk/arch/powerpc/configs/ps3_defconfig index f79196232917..139a8308070c 100644 --- a/trunk/arch/powerpc/configs/ps3_defconfig +++ b/trunk/arch/powerpc/configs/ps3_defconfig @@ -136,7 +136,6 @@ CONFIG_HID_SMARTJOYPLUS=m CONFIG_USB_HIDDEV=y CONFIG_USB=m CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_SUSPEND=y CONFIG_USB_MON=m CONFIG_USB_EHCI_HCD=m # CONFIG_USB_EHCI_HCD_PPC_OF is not set diff --git a/trunk/arch/powerpc/include/asm/context_tracking.h b/trunk/arch/powerpc/include/asm/context_tracking.h new file mode 100644 index 000000000000..b6f5a33b8ee2 --- /dev/null +++ b/trunk/arch/powerpc/include/asm/context_tracking.h @@ -0,0 +1,10 @@ +#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H +#define _ASM_POWERPC_CONTEXT_TRACKING_H + +#ifdef CONFIG_CONTEXT_TRACKING +#define SCHEDULE_USER bl .schedule_user +#else +#define SCHEDULE_USER bl .schedule +#endif + +#endif diff --git a/trunk/arch/powerpc/include/asm/firmware.h b/trunk/arch/powerpc/include/asm/firmware.h index 0df54646f968..681bc0314b6b 100644 --- a/trunk/arch/powerpc/include/asm/firmware.h +++ b/trunk/arch/powerpc/include/asm/firmware.h @@ -52,6 +52,7 @@ #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) +#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000) #ifndef __ASSEMBLY__ @@ -69,7 +70,8 @@ enum { FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, FW_FEATURE_PSERIES_ALWAYS = 0, - FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, + FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 | + FW_FEATURE_OPALv3, FW_FEATURE_POWERNV_ALWAYS = 0, FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, diff --git a/trunk/arch/powerpc/include/asm/hw_irq.h b/trunk/arch/powerpc/include/asm/hw_irq.h index d615b28dda82..ba713f166fa5 100644 --- a/trunk/arch/powerpc/include/asm/hw_irq.h +++ b/trunk/arch/powerpc/include/asm/hw_irq.h @@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void) #endif #define hard_irq_disable() do { \ + u8 _was_enabled = get_paca()->soft_enabled; \ __hard_irq_disable(); \ - if (local_paca->soft_enabled) \ - trace_hardirqs_off(); \ get_paca()->soft_enabled = 0; \ get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ + if (_was_enabled) \ + trace_hardirqs_off(); \ } while(0) static inline bool lazy_irq_pending(void) diff --git a/trunk/arch/powerpc/include/asm/opal.h b/trunk/arch/powerpc/include/asm/opal.h index b6c8b58b1d76..cbb9305ab15a 100644 --- a/trunk/arch/powerpc/include/asm/opal.h +++ b/trunk/arch/powerpc/include/asm/opal.h @@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType { enum OpalThreadStatus { OPAL_THREAD_INACTIVE = 0x0, - OPAL_THREAD_STARTED = 0x1 + OPAL_THREAD_STARTED = 0x1, + OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */ }; enum OpalPciBusCompare { @@ -563,6 +564,8 @@ extern void opal_nvram_init(void); extern int opal_machine_check(struct pt_regs *regs); +extern void opal_shutdown(void); + #endif /* __ASSEMBLY__ */ #endif /* __OPAL_H */ diff --git a/trunk/arch/powerpc/include/asm/pci-bridge.h b/trunk/arch/powerpc/include/asm/pci-bridge.h index 8b11b5bd9938..2c1d8cb9b265 100644 --- a/trunk/arch/powerpc/include/asm/pci-bridge.h +++ b/trunk/arch/powerpc/include/asm/pci-bridge.h @@ -174,6 +174,8 @@ 
struct pci_dn { /* Get the pointer to a device_node's pci_dn */ #define PCI_DN(dn) ((struct pci_dn *) (dn)->data) +extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev); + extern void * update_dn_pci_info(struct device_node *dn, void *data); static inline int pci_device_from_OF_node(struct device_node *np, diff --git a/trunk/arch/powerpc/include/asm/pgalloc-64.h b/trunk/arch/powerpc/include/asm/pgalloc-64.h index 91acb12bac92..b66ae722a8e9 100644 --- a/trunk/arch/powerpc/include/asm/pgalloc-64.h +++ b/trunk/arch/powerpc/include/asm/pgalloc-64.h @@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, static inline pgtable_t pmd_pgtable(pmd_t pmd) { - return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE); + return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS); } static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, diff --git a/trunk/arch/powerpc/include/asm/processor.h b/trunk/arch/powerpc/include/asm/processor.h index d7e67ca8b4a6..594db6bc093c 100644 --- a/trunk/arch/powerpc/include/asm/processor.h +++ b/trunk/arch/powerpc/include/asm/processor.h @@ -284,6 +284,12 @@ struct thread_struct { unsigned long ebbrr; unsigned long ebbhr; unsigned long bescr; + unsigned long siar; + unsigned long sdar; + unsigned long sier; + unsigned long mmcr0; + unsigned long mmcr2; + unsigned long mmcra; #endif }; diff --git a/trunk/arch/powerpc/include/asm/pte-hash64-64k.h b/trunk/arch/powerpc/include/asm/pte-hash64-64k.h index 3e13e23e4fdf..d836d945068d 100644 --- a/trunk/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/trunk/arch/powerpc/include/asm/pte-hash64-64k.h @@ -47,7 +47,7 @@ * generic accessors and iterators here */ #define __real_pte(e,p) ((real_pte_t) { \ - (e), ((e) & _PAGE_COMBO) ? \ + (e), (pte_val(e) & _PAGE_COMBO) ? \ (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? 
\ (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) diff --git a/trunk/arch/powerpc/include/asm/rtas.h b/trunk/arch/powerpc/include/asm/rtas.h index a8bc2bb4adc9..34fd70488d83 100644 --- a/trunk/arch/powerpc/include/asm/rtas.h +++ b/trunk/arch/powerpc/include/asm/rtas.h @@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex); extern void rtas_initialize(void); extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); +extern int rtas_online_cpus_mask(cpumask_var_t cpus); +extern int rtas_offline_cpus_mask(cpumask_var_t cpus); extern int rtas_ibm_suspend_me(struct rtas_args *); struct rtc_time; diff --git a/trunk/arch/powerpc/include/asm/thread_info.h b/trunk/arch/powerpc/include/asm/thread_info.h index 8ceea14d6fe4..ba7b1973866e 100644 --- a/trunk/arch/powerpc/include/asm/thread_info.h +++ b/trunk/arch/powerpc/include/asm/thread_info.h @@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SINGLESTEP 8 /* singlestepping active */ -#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ +#define TIF_NOHZ 9 /* in adaptive nohz mode */ #define TIF_SECCOMP 10 /* secure computing */ #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ #define TIF_NOERROR 12 /* Force successful syscall return */ @@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation for stack store? */ +#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1< #include #include +#include /* * System calls. @@ -150,7 +151,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) CURRENT_THREAD_INFO(r11, r1) ld r10,TI_FLAGS(r11) andi. r11,r10,_TIF_SYSCALL_T_OR_A - bne- syscall_dotrace + bne syscall_dotrace .Lsyscall_dotrace_cont: cmpldi 0,r0,NR_syscalls bge- syscall_enosys @@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork) _GLOBAL(ret_from_kernel_thread) bl .schedule_tail REST_NVGPRS(r1) - li r3,0 - std r3,0(r1) ld r14, 0(r14) mtlr r14 mr r3,r15 @@ -466,6 +465,20 @@ BEGIN_FTR_SECTION std r0, THREAD_EBBHR(r3) mfspr r0, SPRN_EBBRR std r0, THREAD_EBBRR(r3) + + /* PMU registers made user read/(write) by EBB */ + mfspr r0, SPRN_SIAR + std r0, THREAD_SIAR(r3) + mfspr r0, SPRN_SDAR + std r0, THREAD_SDAR(r3) + mfspr r0, SPRN_SIER + std r0, THREAD_SIER(r3) + mfspr r0, SPRN_MMCR0 + std r0, THREAD_MMCR0(r3) + mfspr r0, SPRN_MMCR2 + std r0, THREAD_MMCR2(r3) + mfspr r0, SPRN_MMCRA + std r0, THREAD_MMCRA(r3) END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) #endif @@ -561,6 +574,20 @@ BEGIN_FTR_SECTION ld r0, THREAD_EBBRR(r4) mtspr SPRN_EBBRR, r0 + /* PMU registers made user read/(write) by EBB */ + ld r0, THREAD_SIAR(r4) + mtspr SPRN_SIAR, r0 + ld r0, THREAD_SDAR(r4) + mtspr SPRN_SDAR, r0 + ld r0, THREAD_SIER(r4) + mtspr SPRN_SIER, r0 + ld r0, THREAD_MMCR0(r4) + mtspr SPRN_MMCR0, r0 + ld r0, THREAD_MMCR2(r4) + mtspr SPRN_MMCR2, r0 + ld r0, THREAD_MMCRA(r4) + mtspr SPRN_MMCRA, r0 + ld r0,THREAD_TAR(r4) mtspr SPRN_TAR,r0 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) @@ -634,7 +661,7 @@ _GLOBAL(ret_from_except_lite) andi. 
r0,r4,_TIF_NEED_RESCHED beq 1f bl .restore_interrupts - bl .schedule + SCHEDULE_USER b .ret_from_except_lite 1: bl .save_nvgprs diff --git a/trunk/arch/powerpc/kernel/exceptions-64e.S b/trunk/arch/powerpc/kernel/exceptions-64e.S index 42a756eec9ff..645170a07ada 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64e.S +++ b/trunk/arch/powerpc/kernel/exceptions-64e.S @@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) */ mfspr r14,SPRN_DBSR /* check single-step/branch taken */ - andis. r15,r14,DBSR_IC@h + andis. r15,r14,(DBSR_IC|DBSR_BT)@h beq+ 1f LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) @@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) bge+ cr1,1f /* here it looks like we got an inappropriate debug exception. */ - lis r14,DBSR_IC@h /* clear the IC event */ + lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ mtspr SPRN_DBSR,r14 mtspr SPRN_CSRR1,r11 @@ -555,7 +555,7 @@ kernel_dbg_exc: */ mfspr r14,SPRN_DBSR /* check single-step/branch taken */ - andis. r15,r14,DBSR_IC@h + andis. r15,r14,(DBSR_IC|DBSR_BT)@h beq+ 1f LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) @@ -566,7 +566,7 @@ kernel_dbg_exc: bge+ cr1,1f /* here it looks like we got an inappropriate debug exception. */ - lis r14,DBSR_IC@h /* clear the IC event */ + lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ mtspr SPRN_DBSR,r14 mtspr SPRN_DSRR1,r11 diff --git a/trunk/arch/powerpc/kernel/machine_kexec_64.c b/trunk/arch/powerpc/kernel/machine_kexec_64.c index 466a2908bb63..611acdf30096 100644 --- a/trunk/arch/powerpc/kernel/machine_kexec_64.c +++ b/trunk/arch/powerpc/kernel/machine_kexec_64.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image) pr_debug("kexec: Starting switchover sequence.\n"); /* switch to a staticly allocated stack. Based on irq stack code. + * We setup preempt_count to avoid using VMX in memcpy. * XXX: the task struct will likely be invalid once we do the copy! */ kexec_stack.thread_info.task = current_thread_info()->task; kexec_stack.thread_info.flags = 0; + kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET; + kexec_stack.thread_info.cpu = current_thread_info()->cpu; /* We need a static PACA, too; copy this CPU's PACA over and switch to * it. 
Also poison per_cpu_offset to catch anyone using non-static diff --git a/trunk/arch/powerpc/kernel/misc_32.S b/trunk/arch/powerpc/kernel/misc_32.S index 19e096bd0e73..e469f30e6eeb 100644 --- a/trunk/arch/powerpc/kernel/misc_32.S +++ b/trunk/arch/powerpc/kernel/misc_32.S @@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2) li r3,2 blr +_GLOBAL(__bswapdi2) + rotlwi r9,r4,8 + rotlwi r10,r3,8 + rlwimi r9,r4,24,0,7 + rlwimi r10,r3,24,0,7 + rlwimi r9,r4,24,16,23 + rlwimi r10,r3,24,16,23 + mr r3,r9 + mr r4,r10 + blr + _GLOBAL(abs) srawi r4,r3,31 xor r3,r3,r4 diff --git a/trunk/arch/powerpc/kernel/misc_64.S b/trunk/arch/powerpc/kernel/misc_64.S index 5cfa8008693b..6820e45f557b 100644 --- a/trunk/arch/powerpc/kernel/misc_64.S +++ b/trunk/arch/powerpc/kernel/misc_64.S @@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache) isync blr +_GLOBAL(__bswapdi2) + srdi r8,r3,32 + rlwinm r7,r3,8,0xffffffff + rlwimi r7,r3,24,0,7 + rlwinm r9,r8,8,0xffffffff + rlwimi r7,r3,24,16,23 + rlwimi r9,r8,24,0,7 + rlwimi r9,r8,24,16,23 + sldi r7,r7,32 + or r3,r7,r9 + blr #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) /* diff --git a/trunk/arch/powerpc/kernel/pci-common.c b/trunk/arch/powerpc/kernel/pci-common.c index f5c5c90799a7..e9acf50dd5b2 100644 --- a/trunk/arch/powerpc/kernel/pci-common.c +++ b/trunk/arch/powerpc/kernel/pci-common.c @@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, enum pci_mmap_state mmap_state, int write_combine) { - unsigned long prot = pgprot_val(protection); /* Write combine is always 0 on non-memory space mappings. On * memory space, if the user didn't pass 1, we check for a @@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, /* XXX would be nice to have a way to ask for write-through */ if (write_combine) - return pgprot_noncached_wc(prot); + return pgprot_noncached_wc(protection); else - return pgprot_noncached(prot); + return pgprot_noncached(protection); } /* @@ -1521,9 +1520,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose, for (i = 0; i < 3; ++i) { res = &hose->mem_resources[i]; if (!res->flags) { - printk(KERN_ERR "PCI: Memory resource 0 not set for " - "host bridge %s (domain %d)\n", - hose->dn->full_name, hose->global_number); + if (i == 0) + printk(KERN_ERR "PCI: Memory resource 0 not set for " + "host bridge %s (domain %d)\n", + hose->dn->full_name, hose->global_number); continue; } offset = hose->mem_offset[i]; diff --git a/trunk/arch/powerpc/kernel/pci_64.c b/trunk/arch/powerpc/kernel/pci_64.c index 873050d26840..2e8629654ca8 100644 --- a/trunk/arch/powerpc/kernel/pci_64.c +++ b/trunk/arch/powerpc/kernel/pci_64.c @@ -266,3 +266,13 @@ int pcibus_to_node(struct pci_bus *bus) } EXPORT_SYMBOL(pcibus_to_node); #endif + +static void quirk_radeon_32bit_msi(struct pci_dev *dev) +{ + struct pci_dn *pdn = pci_get_pdn(dev); + + if (pdn) + pdn->force_32bit_msi = 1; +} +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi); +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi); diff --git a/trunk/arch/powerpc/kernel/pci_dn.c b/trunk/arch/powerpc/kernel/pci_dn.c index e7af165f8b9d..df038442548a 100644 --- a/trunk/arch/powerpc/kernel/pci_dn.c +++ b/trunk/arch/powerpc/kernel/pci_dn.c @@ -32,6 +32,14 @@ #include #include +struct pci_dn *pci_get_pdn(struct pci_dev *pdev) +{ + struct device_node *dn = pci_device_to_OF_node(pdev); + if (!dn) + return NULL; + return PCI_DN(dn); +} + /* * Traverse_func that inits the PCI fields of the device 
node. * NOTE: this *must* be done before read/write config to the device. diff --git a/trunk/arch/powerpc/kernel/ppc_ksyms.c b/trunk/arch/powerpc/kernel/ppc_ksyms.c index 78b8766fd79e..c29666586998 100644 --- a/trunk/arch/powerpc/kernel/ppc_ksyms.c +++ b/trunk/arch/powerpc/kernel/ppc_ksyms.c @@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3); int __ucmpdi2(unsigned long long, unsigned long long); EXPORT_SYMBOL(__ucmpdi2); #endif - +long long __bswapdi2(long long); +EXPORT_SYMBOL(__bswapdi2); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memmove); diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c index ceb4e7b62cf4..a902723fdc69 100644 --- a/trunk/arch/powerpc/kernel/process.c +++ b/trunk/arch/powerpc/kernel/process.c @@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread) static void prime_debug_regs(struct thread_struct *thread) { + /* + * We could have inherited MSR_DE from userspace, since + * it doesn't get cleared on exception entry. Make sure + * MSR_DE is clear before we enable any debug events. + */ + mtmsr(mfmsr() & ~MSR_DE); + mtspr(SPRN_IAC1, thread->iac1); mtspr(SPRN_IAC2, thread->iac2); #if CONFIG_PPC_ADV_DEBUG_IACS > 2 @@ -971,6 +978,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, * do some house keeping and then return from the fork or clone * system call, using the stack frame created above. */ + ((unsigned long *)sp)[0] = 0; sp -= sizeof(struct pt_regs); kregs = (struct pt_regs *) sp; sp -= STACK_FRAME_OVERHEAD; diff --git a/trunk/arch/powerpc/kernel/ptrace.c b/trunk/arch/powerpc/kernel/ptrace.c index 3b14d320e69f..98c2fc198712 100644 --- a/trunk/arch/powerpc/kernel/ptrace.c +++ b/trunk/arch/powerpc/kernel/ptrace.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs) { long ret = 0; + user_exit(); + secure_computing_strict(regs->gpr[0]); if (test_thread_flag(TIF_SYSCALL_TRACE) && @@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs) step = test_thread_flag(TIF_SINGLESTEP); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); + + user_enter(); } diff --git a/trunk/arch/powerpc/kernel/rtas.c b/trunk/arch/powerpc/kernel/rtas.c index 1fd6e7b2f390..52add6f3e201 100644 --- a/trunk/arch/powerpc/kernel/rtas.c +++ b/trunk/arch/powerpc/kernel/rtas.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info) __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); } +enum rtas_cpu_state { + DOWN, + UP, +}; + +#ifndef CONFIG_SMP +static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, + cpumask_var_t cpus) +{ + if (!cpumask_empty(cpus)) { + cpumask_clear(cpus); + return -EINVAL; + } else + return 0; +} +#else +/* On return cpumask will be altered to indicate CPUs changed. + * CPUs with states changed will be set in the mask, + * CPUs with status unchanged will be unset in the mask. */ +static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, + cpumask_var_t cpus) +{ + int cpu; + int cpuret = 0; + int ret = 0; + + if (cpumask_empty(cpus)) + return 0; + + for_each_cpu(cpu, cpus) { + switch (state) { + case DOWN: + cpuret = cpu_down(cpu); + break; + case UP: + cpuret = cpu_up(cpu); + break; + } + if (cpuret) { + pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", + __func__, + ((state == UP) ? 
"up" : "down"), + cpu, cpuret); + if (!ret) + ret = cpuret; + if (state == UP) { + /* clear bits for unchanged cpus, return */ + cpumask_shift_right(cpus, cpus, cpu); + cpumask_shift_left(cpus, cpus, cpu); + break; + } else { + /* clear bit for unchanged cpu, continue */ + cpumask_clear_cpu(cpu, cpus); + } + } + } + + return ret; +} +#endif + +int rtas_online_cpus_mask(cpumask_var_t cpus) +{ + int ret; + + ret = rtas_cpu_state_change_mask(UP, cpus); + + if (ret) { + cpumask_var_t tmp_mask; + + if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY)) + return ret; + + /* Use tmp_mask to preserve cpus mask from first failure */ + cpumask_copy(tmp_mask, cpus); + rtas_offline_cpus_mask(tmp_mask); + free_cpumask_var(tmp_mask); + } + + return ret; +} +EXPORT_SYMBOL(rtas_online_cpus_mask); + +int rtas_offline_cpus_mask(cpumask_var_t cpus) +{ + return rtas_cpu_state_change_mask(DOWN, cpus); +} +EXPORT_SYMBOL(rtas_offline_cpus_mask); + int rtas_ibm_suspend_me(struct rtas_args *args) { long state; @@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args) unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; struct rtas_suspend_me_data data; DECLARE_COMPLETION_ONSTACK(done); + cpumask_var_t offline_mask; + int cpuret; if (!rtas_service_present("ibm,suspend-me")) return -ENOSYS; @@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args) return 0; } + if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) + return -ENOMEM; + atomic_set(&data.working, 0); atomic_set(&data.done, 0); atomic_set(&data.error, 0); data.token = rtas_token("ibm,suspend-me"); data.complete = &done; + + /* All present CPUs must be online */ + cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); + cpuret = rtas_online_cpus_mask(offline_mask); + if (cpuret) { + pr_err("%s: Could not bring present CPUs online.\n", __func__); + atomic_set(&data.error, cpuret); + goto out; + } + stop_topology_update(); /* Call function on all CPUs. 
One of us will make the @@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args) start_topology_update(); + /* Take down CPUs not online prior to suspend */ + cpuret = rtas_offline_cpus_mask(offline_mask); + if (cpuret) + pr_warn("%s: Could not restore CPUs to offline state.\n", + __func__); + +out: + free_cpumask_var(offline_mask); return atomic_read(&data.error); } #else /* CONFIG_PPC_PSERIES */ diff --git a/trunk/arch/powerpc/kernel/rtas_flash.c b/trunk/arch/powerpc/kernel/rtas_flash.c index 5b3022470126..2f3cdb01506d 100644 --- a/trunk/arch/powerpc/kernel/rtas_flash.c +++ b/trunk/arch/powerpc/kernel/rtas_flash.c @@ -89,6 +89,7 @@ /* Array sizes */ #define VALIDATE_BUF_SIZE 4096 +#define VALIDATE_MSG_LEN 256 #define RTAS_MSG_MAXLEN 64 /* Quirk - RTAS requires 4k list length and block size */ @@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf) } static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, - char *msg) + char *msg, int msglen) { int n; @@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, n = sprintf(msg, "%d\n", args_buf->update_results); if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || (args_buf->update_results == VALIDATE_TMP_UPDATE)) - n += sprintf(msg + n, "%s\n", args_buf->buf); + n += snprintf(msg + n, msglen - n, "%s\n", + args_buf->buf); } else { n = sprintf(msg, "%d\n", args_buf->status); } @@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf, { struct rtas_validate_flash_t *const args_buf = &rtas_validate_flash_data; - char msg[RTAS_MSG_MAXLEN]; + char msg[VALIDATE_MSG_LEN]; int msglen; mutex_lock(&rtas_validate_flash_mutex); - msglen = get_validate_flash_msg(args_buf, msg); + msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN); mutex_unlock(&rtas_validate_flash_mutex); return simple_read_from_buffer(buf, count, ppos, msg, msglen); diff --git a/trunk/arch/powerpc/kernel/signal.c b/trunk/arch/powerpc/kernel/signal.c index cf12eae02de5..577a8aa69c6e 100644 --- a/trunk/arch/powerpc/kernel/signal.c +++ b/trunk/arch/powerpc/kernel/signal.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -24,7 +25,7 @@ * through debug.exception-trace sysctl. 
*/ -int show_unhandled_signals = 0; +int show_unhandled_signals = 1; /* * Allocate space for the signal frame @@ -159,6 +160,8 @@ static int do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { + user_exit(); + if (thread_info_flags & _TIF_UPROBE) uprobe_notify_resume(regs); @@ -169,4 +172,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } + + user_enter(); } diff --git a/trunk/arch/powerpc/kernel/sys_ppc32.c b/trunk/arch/powerpc/kernel/sys_ppc32.c index cd6e19d263b3..8a285876aef8 100644 --- a/trunk/arch/powerpc/kernel/sys_ppc32.c +++ b/trunk/arch/powerpc/kernel/sys_ppc32.c @@ -126,11 +126,3 @@ asmlinkage long compat_sys_sync_file_range2(int fd, unsigned int flags, return sys_sync_file_range(fd, offset, nbytes, flags); } - -asmlinkage long compat_sys_fanotify_mark(int fanotify_fd, unsigned int flags, - unsigned mask_hi, unsigned mask_lo, - int dfd, const char __user *pathname) -{ - u64 mask = ((u64)mask_hi << 32) | mask_lo; - return sys_fanotify_mark(fanotify_fd, flags, mask, dfd, pathname); -} diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c index 83efa2f7d926..a7a648f6b750 100644 --- a/trunk/arch/powerpc/kernel/traps.c +++ b/trunk/arch/powerpc/kernel/traps.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include @@ -667,6 +668,7 @@ int machine_check_generic(struct pt_regs *regs) void machine_check_exception(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); int recover = 0; __get_cpu_var(irq_stat).mce_exceptions++; @@ -683,7 +685,7 @@ void machine_check_exception(struct pt_regs *regs) recover = cur_cpu_spec->machine_check(regs); if (recover > 0) - return; + goto bail; #if defined(CONFIG_8xx) && defined(CONFIG_PCI) /* the qspan pci read routines can cause machine checks -- Cort @@ -693,20 +695,23 @@ void machine_check_exception(struct pt_regs *regs) * -- BenH */ bad_page_fault(regs, regs->dar, SIGBUS); - return; + goto bail; #endif if (debugger_fault_handler(regs)) - return; + goto bail; if (check_io_access(regs)) - return; + goto bail; die("Machine check", regs, SIGBUS); /* Must die if the interrupt is not recoverable */ if (!(regs->msr & MSR_RI)) panic("Unrecoverable Machine check"); + +bail: + exception_exit(prev_state); } void SMIException(struct pt_regs *regs) @@ -716,20 +721,29 @@ void SMIException(struct pt_regs *regs) void unknown_exception(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", regs->nip, regs->msr, regs->trap); _exception(SIGTRAP, regs, 0, 0); + + exception_exit(prev_state); } void instruction_breakpoint_exception(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - return; + goto bail; if (debugger_iabr_match(regs)) - return; + goto bail; _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); + +bail: + exception_exit(prev_state); } void RunModeException(struct pt_regs *regs) @@ -739,15 +753,20 @@ void RunModeException(struct pt_regs *regs) void __kprobes single_step_exception(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + clear_single_step(regs); if (notify_die(DIE_SSTEP, "single_step", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - return; + goto bail; if (debugger_sstep(regs)) - return; + goto bail; _exception(SIGTRAP, regs, TRAP_TRACE, 
regs->nip); + +bail: + exception_exit(prev_state); } /* @@ -1005,6 +1024,7 @@ int is_valid_bugaddr(unsigned long addr) void __kprobes program_check_exception(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); unsigned int reason = get_reason(regs); extern int do_mathemu(struct pt_regs *regs); @@ -1014,26 +1034,26 @@ void __kprobes program_check_exception(struct pt_regs *regs) if (reason & REASON_FP) { /* IEEE FP exception */ parse_fpe(regs); - return; + goto bail; } if (reason & REASON_TRAP) { /* Debugger is first in line to stop recursive faults in * rcu_lock, notify_die, or atomic_notifier_call_chain */ if (debugger_bpt(regs)) - return; + goto bail; /* trap exception */ if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) == NOTIFY_STOP) - return; + goto bail; if (!(regs->msr & MSR_PR) && /* not user-mode */ report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { regs->nip += 4; - return; + goto bail; } _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); - return; + goto bail; } #ifdef CONFIG_PPC_TRANSACTIONAL_MEM if (reason & REASON_TM) { @@ -1049,7 +1069,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) if (!user_mode(regs) && report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { regs->nip += 4; - return; + goto bail; } /* If usermode caused this, it's done something illegal and * gets a SIGILL slap on the wrist. We call it an illegal @@ -1059,7 +1079,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) */ if (user_mode(regs)) { _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); - return; + goto bail; } else { printk(KERN_EMERG "Unexpected TM Bad Thing exception " "at %lx (msr 0x%x)\n", regs->nip, reason); @@ -1083,16 +1103,16 @@ void __kprobes program_check_exception(struct pt_regs *regs) switch (do_mathemu(regs)) { case 0: emulate_single_step(regs); - return; + goto bail; case 1: { int code = 0; code = __parse_fpscr(current->thread.fpscr.val); _exception(SIGFPE, regs, code, regs->nip); - return; + goto bail; } case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); - return; + goto bail; } /* fall through on any other errors */ #endif /* CONFIG_MATH_EMULATION */ @@ -1103,10 +1123,10 @@ void __kprobes program_check_exception(struct pt_regs *regs) case 0: regs->nip += 4; emulate_single_step(regs); - return; + goto bail; case -EFAULT: _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); - return; + goto bail; } } @@ -1114,10 +1134,14 @@ void __kprobes program_check_exception(struct pt_regs *regs) _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); else _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); + +bail: + exception_exit(prev_state); } void alignment_exception(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); int sig, code, fixed = 0; /* We restore the interrupt state now */ @@ -1131,7 +1155,7 @@ void alignment_exception(struct pt_regs *regs) if (fixed == 1) { regs->nip += 4; /* skip over emulated instruction */ emulate_single_step(regs); - return; + goto bail; } /* Operand address was bad */ @@ -1146,6 +1170,9 @@ void alignment_exception(struct pt_regs *regs) _exception(sig, regs, code, regs->dar); else bad_page_fault(regs, regs->dar, sig); + +bail: + exception_exit(prev_state); } void StackOverflow(struct pt_regs *regs) @@ -1174,23 +1201,32 @@ void trace_syscall(struct pt_regs *regs) void kernel_fp_unavailable_exception(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); 
die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); + + exception_exit(prev_state); } void altivec_unavailable_exception(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + if (user_mode(regs)) { /* A user program has executed an altivec instruction, but this kernel doesn't support altivec. */ _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); - return; + goto bail; } printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " "%lx at %lx\n", regs->trap, regs->nip); die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); + +bail: + exception_exit(prev_state); } void vsx_unavailable_exception(struct pt_regs *regs) diff --git a/trunk/arch/powerpc/kernel/udbg.c b/trunk/arch/powerpc/kernel/udbg.c index 13b867093499..9d3fdcd66290 100644 --- a/trunk/arch/powerpc/kernel/udbg.c +++ b/trunk/arch/powerpc/kernel/udbg.c @@ -64,6 +64,9 @@ void __init udbg_early_init(void) udbg_init_usbgecko(); #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) udbg_init_wsp(); +#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS) + /* In memory console */ + udbg_init_memcons(); #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) udbg_init_ehv_bc(); #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) diff --git a/trunk/arch/powerpc/mm/fault.c b/trunk/arch/powerpc/mm/fault.c index 229951ffc351..8726779e1409 100644 --- a/trunk/arch/powerpc/mm/fault.c +++ b/trunk/arch/powerpc/mm/fault.c @@ -32,6 +32,7 @@ #include #include #include +#include #include #include @@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, unsigned long error_code) { + enum ctx_state prev_state = exception_enter(); struct vm_area_struct * vma; struct mm_struct *mm = current->mm; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; @@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, int trap = TRAP(regs); int is_exec = trap == 0x400; int fault; + int rc = 0; #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) /* @@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, * look at it */ if (error_code & ICSWX_DSI_UCT) { - int rc = acop_handle_fault(regs, address, error_code); + rc = acop_handle_fault(regs, address, error_code); if (rc) - return rc; + goto bail; } #endif /* CONFIG_PPC_ICSWX */ if (notify_page_fault(regs)) - return 0; + goto bail; if (unlikely(debugger_fault_handler(regs))) - return 0; + goto bail; /* On a kernel SLB miss we can only check for a valid exception entry */ - if (!user_mode(regs) && (address >= TASK_SIZE)) - return SIGSEGV; + if (!user_mode(regs) && (address >= TASK_SIZE)) { + rc = SIGSEGV; + goto bail; + } #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ defined(CONFIG_PPC_BOOK3S_64)) if (error_code & DSISR_DABRMATCH) { /* breakpoint match */ do_break(regs, address, error_code); - return 0; + goto bail; } #endif @@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, local_irq_enable(); if (in_atomic() || mm == NULL) { - if (!user_mode(regs)) - return SIGSEGV; + if (!user_mode(regs)) { + rc = SIGSEGV; + goto bail; + } /* in_atomic() in user mode is really bad, as is current->mm == NULL. 
*/ printk(KERN_EMERG "Page fault in user mode with " @@ -417,9 +424,11 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { - int rc = mm_fault_error(regs, address, fault); + rc = mm_fault_error(regs, address, fault); if (rc >= MM_FAULT_RETURN) - return rc; + goto bail; + else + rc = 0; } /* @@ -454,7 +463,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, } up_read(&mm->mmap_sem); - return 0; + goto bail; bad_area: up_read(&mm->mmap_sem); @@ -463,7 +472,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, /* User mode accesses cause a SIGSEGV */ if (user_mode(regs)) { _exception(SIGSEGV, regs, code, address); - return 0; + goto bail; } if (is_exec && (error_code & DSISR_PROTFAULT)) @@ -471,7 +480,11 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, " page (%lx) - exploit attempt? (uid: %d)\n", address, from_kuid(&init_user_ns, current_uid())); - return SIGSEGV; + rc = SIGSEGV; + +bail: + exception_exit(prev_state); + return rc; } diff --git a/trunk/arch/powerpc/mm/hash_utils_64.c b/trunk/arch/powerpc/mm/hash_utils_64.c index 88ac0eeaadde..e303a6d74e3a 100644 --- a/trunk/arch/powerpc/mm/hash_utils_64.c +++ b/trunk/arch/powerpc/mm/hash_utils_64.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access, */ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) { + enum ctx_state prev_state = exception_enter(); pgd_t *pgdir; unsigned long vsid; struct mm_struct *mm; @@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) mm = current->mm; if (! mm) { DBG_LOW(" user region with no mm !\n"); - return 1; + rc = 1; + goto bail; } psize = get_slice_psize(mm, ea); ssize = user_segment_size(ea); @@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) /* Not a valid range * Send the problem up to do_page_fault */ - return 1; + rc = 1; + goto bail; } DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); /* Bad address. 
*/ if (!vsid) { DBG_LOW("Bad address!\n"); - return 1; + rc = 1; + goto bail; } /* Get pgdir */ pgdir = mm->pgd; - if (pgdir == NULL) - return 1; + if (pgdir == NULL) { + rc = 1; + goto bail; + } /* Check CPU locality */ tmp = cpumask_of(smp_processor_id()); @@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); if (ptep == NULL || !pte_present(*ptep)) { DBG_LOW(" no PTE !\n"); - return 1; + rc = 1; + goto bail; } /* Add _PAGE_PRESENT to the required access perm */ @@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) */ if (access & ~pte_val(*ptep)) { DBG_LOW(" no access !\n"); - return 1; + rc = 1; + goto bail; } #ifdef CONFIG_HUGETLB_PAGE - if (hugeshift) - return __hash_page_huge(ea, access, vsid, ptep, trap, local, + if (hugeshift) { + rc = __hash_page_huge(ea, access, vsid, ptep, trap, local, ssize, hugeshift, psize); + goto bail; + } #endif /* CONFIG_HUGETLB_PAGE */ #ifndef CONFIG_PPC_64K_PAGES @@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) pte_val(*(ptep + PTRS_PER_PTE))); #endif DBG_LOW(" -> rc=%d\n", rc); + +bail: + exception_exit(prev_state); return rc; } EXPORT_SYMBOL_GPL(hash_page); @@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local) */ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) { + enum ctx_state prev_state = exception_enter(); + if (user_mode(regs)) { #ifdef CONFIG_PPC_SUBPAGE_PROT if (rc == -2) @@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) _exception(SIGBUS, regs, BUS_ADRERR, address); } else bad_page_fault(regs, address, SIGBUS); + + exception_exit(prev_state); } long hpte_insert_repeating(unsigned long hash, unsigned long vpn, diff --git a/trunk/arch/powerpc/mm/init_64.c b/trunk/arch/powerpc/mm/init_64.c index c2787bf779ca..a90b9c458990 100644 --- a/trunk/arch/powerpc/mm/init_64.c +++ b/trunk/arch/powerpc/mm/init_64.c @@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start, unsigned long phys) { int mapped = htab_bolt_mapping(start, start + page_size, phys, - PAGE_KERNEL, mmu_vmemmap_psize, + pgprot_val(PAGE_KERNEL), + mmu_vmemmap_psize, mmu_kernel_ssize); BUG_ON(mapped < 0); } diff --git a/trunk/arch/powerpc/perf/core-book3s.c b/trunk/arch/powerpc/perf/core-book3s.c index c627843c5b2e..426180b84978 100644 --- a/trunk/arch/powerpc/perf/core-book3s.c +++ b/trunk/arch/powerpc/perf/core-book3s.c @@ -13,11 +13,13 @@ #include #include #include +#include #include #include #include #include #include +#include #define BHRB_MAX_ENTRIES 32 #define BHRB_TARGET 0x0000000000000002 @@ -100,6 +102,10 @@ static inline int siar_valid(struct pt_regs *regs) return 1; } +static inline void power_pmu_bhrb_enable(struct perf_event *event) {} +static inline void power_pmu_bhrb_disable(struct perf_event *event) {} +void power_pmu_flush_branch_stack(void) {} +static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} #endif /* CONFIG_PPC32 */ static bool regs_use_siar(struct pt_regs *regs) @@ -308,6 +314,159 @@ static inline int siar_valid(struct pt_regs *regs) return 1; } + +/* Reset all possible BHRB entries */ +static void power_pmu_bhrb_reset(void) +{ + asm volatile(PPC_CLRBHRB); +} + +static void power_pmu_bhrb_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + + if (!ppmu->bhrb_nr) + return; + + /* Clear BHRB if 
we changed task context to avoid data leaks */ + if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { + power_pmu_bhrb_reset(); + cpuhw->bhrb_context = event->ctx; + } + cpuhw->bhrb_users++; +} + +static void power_pmu_bhrb_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); + + if (!ppmu->bhrb_nr) + return; + + cpuhw->bhrb_users--; + WARN_ON_ONCE(cpuhw->bhrb_users < 0); + + if (!cpuhw->disabled && !cpuhw->bhrb_users) { + /* BHRB cannot be turned off when other + * events are active on the PMU. + */ + + /* avoid stale pointer */ + cpuhw->bhrb_context = NULL; + } +} + +/* Called from ctxsw to prevent one process's branch entries to + * mingle with the other process's entries during context switch. + */ +void power_pmu_flush_branch_stack(void) +{ + if (ppmu->bhrb_nr) + power_pmu_bhrb_reset(); +} +/* Calculate the to address for a branch */ +static __u64 power_pmu_bhrb_to(u64 addr) +{ + unsigned int instr; + int ret; + __u64 target; + + if (is_kernel_addr(addr)) + return branch_target((unsigned int *)addr); + + /* Userspace: need copy instruction here then translate it */ + pagefault_disable(); + ret = __get_user_inatomic(instr, (unsigned int __user *)addr); + if (ret) { + pagefault_enable(); + return 0; + } + pagefault_enable(); + + target = branch_target(&instr); + if ((!target) || (instr & BRANCH_ABSOLUTE)) + return target; + + /* Translate relative branch target from kernel to user address */ + return target - (unsigned long)&instr + addr; +} + +/* Processing BHRB entries */ +void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) +{ + u64 val; + u64 addr; + int r_index, u_index, pred; + + r_index = 0; + u_index = 0; + while (r_index < ppmu->bhrb_nr) { + /* Assembly read function */ + val = read_bhrb(r_index++); + if (!val) + /* Terminal marker: End of valid BHRB entries */ + break; + else { + addr = val & BHRB_EA; + pred = val & BHRB_PREDICTION; + + if (!addr) + /* invalid entry */ + continue; + + /* Branches are read most recent first (ie. mfbhrb 0 is + * the most recent branch). + * There are two types of valid entries: + * 1) a target entry which is the to address of a + * computed goto like a blr,bctr,btar. The next + * entry read from the bhrb will be branch + * corresponding to this target (ie. the actual + * blr/bctr/btar instruction). + * 2) a from address which is an actual branch. If a + * target entry proceeds this, then this is the + * matching branch for that target. If this is not + * following a target entry, then this is a branch + * where the target is given as an immediate field + * in the instruction (ie. an i or b form branch). + * In this case we need to read the instruction from + * memory to determine the target/to address. + */ + + if (val & BHRB_TARGET) { + /* Target branches use two entries + * (ie. computed gotos/XL form) + */ + cpuhw->bhrb_entries[u_index].to = addr; + cpuhw->bhrb_entries[u_index].mispred = pred; + cpuhw->bhrb_entries[u_index].predicted = ~pred; + + /* Get from address in next entry */ + val = read_bhrb(r_index++); + addr = val & BHRB_EA; + if (val & BHRB_TARGET) { + /* Shouldn't have two targets in a + row.. 
Reset index and try again */ + r_index--; + addr = 0; + } + cpuhw->bhrb_entries[u_index].from = addr; + } else { + /* Branches to immediate field + (ie I or B form) */ + cpuhw->bhrb_entries[u_index].from = addr; + cpuhw->bhrb_entries[u_index].to = + power_pmu_bhrb_to(addr); + cpuhw->bhrb_entries[u_index].mispred = pred; + cpuhw->bhrb_entries[u_index].predicted = ~pred; + } + u_index++; + + } + } + cpuhw->bhrb_stack.nr = u_index; + return; +} + #endif /* CONFIG_PPC64 */ static void perf_event_interrupt(struct pt_regs *regs); @@ -904,47 +1063,6 @@ static int collect_events(struct perf_event *group, int max_count, return n; } -/* Reset all possible BHRB entries */ -static void power_pmu_bhrb_reset(void) -{ - asm volatile(PPC_CLRBHRB); -} - -void power_pmu_bhrb_enable(struct perf_event *event) -{ - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); - - if (!ppmu->bhrb_nr) - return; - - /* Clear BHRB if we changed task context to avoid data leaks */ - if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { - power_pmu_bhrb_reset(); - cpuhw->bhrb_context = event->ctx; - } - cpuhw->bhrb_users++; -} - -void power_pmu_bhrb_disable(struct perf_event *event) -{ - struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); - - if (!ppmu->bhrb_nr) - return; - - cpuhw->bhrb_users--; - WARN_ON_ONCE(cpuhw->bhrb_users < 0); - - if (!cpuhw->disabled && !cpuhw->bhrb_users) { - /* BHRB cannot be turned off when other - * events are active on the PMU. - */ - - /* avoid stale pointer */ - cpuhw->bhrb_context = NULL; - } -} - /* * Add a event to the PMU. * If all events are not already frozen, then we disable and @@ -1180,15 +1298,6 @@ int power_pmu_commit_txn(struct pmu *pmu) return 0; } -/* Called from ctxsw to prevent one process's branch entries to - * mingle with the other process's entries during context switch. - */ -void power_pmu_flush_branch_stack(void) -{ - if (ppmu->bhrb_nr) - power_pmu_bhrb_reset(); -} - /* * Return 1 if we might be able to put event on a limited PMC, * or 0 if not. 
@@ -1458,77 +1567,6 @@ struct pmu power_pmu = { .flush_branch_stack = power_pmu_flush_branch_stack, }; -/* Processing BHRB entries */ -void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) -{ - u64 val; - u64 addr; - int r_index, u_index, target, pred; - - r_index = 0; - u_index = 0; - while (r_index < ppmu->bhrb_nr) { - /* Assembly read function */ - val = read_bhrb(r_index); - - /* Terminal marker: End of valid BHRB entries */ - if (val == 0) { - break; - } else { - /* BHRB field break up */ - addr = val & BHRB_EA; - pred = val & BHRB_PREDICTION; - target = val & BHRB_TARGET; - - /* Probable Missed entry: Not applicable for POWER8 */ - if ((addr == 0) && (target == 0) && (pred == 1)) { - r_index++; - continue; - } - - /* Real Missed entry: Power8 based missed entry */ - if ((addr == 0) && (target == 1) && (pred == 1)) { - r_index++; - continue; - } - - /* Reserved condition: Not a valid entry */ - if ((addr == 0) && (target == 1) && (pred == 0)) { - r_index++; - continue; - } - - /* Is a target address */ - if (val & BHRB_TARGET) { - /* First address cannot be a target address */ - if (r_index == 0) { - r_index++; - continue; - } - - /* Update target address for the previous entry */ - cpuhw->bhrb_entries[u_index - 1].to = addr; - cpuhw->bhrb_entries[u_index - 1].mispred = pred; - cpuhw->bhrb_entries[u_index - 1].predicted = ~pred; - - /* Dont increment u_index */ - r_index++; - } else { - /* Update address, flags for current entry */ - cpuhw->bhrb_entries[u_index].from = addr; - cpuhw->bhrb_entries[u_index].mispred = pred; - cpuhw->bhrb_entries[u_index].predicted = ~pred; - - /* Successfully popullated one entry */ - u_index++; - r_index++; - } - } - } - cpuhw->bhrb_stack.nr = u_index; - return; -} - /* * A counter has overflowed; update its count and record * things if requested. 
Note that interrupts are hard-disabled diff --git a/trunk/arch/powerpc/platforms/40x/Kconfig b/trunk/arch/powerpc/platforms/40x/Kconfig index bd40bbb15e14..6e287f1294fa 100644 --- a/trunk/arch/powerpc/platforms/40x/Kconfig +++ b/trunk/arch/powerpc/platforms/40x/Kconfig @@ -138,7 +138,6 @@ config PPC4xx_GPIO bool "PPC4xx GPIO support" depends on 40x select ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO help Enable gpiolib support for ppc40x based boards diff --git a/trunk/arch/powerpc/platforms/44x/Kconfig b/trunk/arch/powerpc/platforms/44x/Kconfig index 7be93367d92f..d6c7506ec7d9 100644 --- a/trunk/arch/powerpc/platforms/44x/Kconfig +++ b/trunk/arch/powerpc/platforms/44x/Kconfig @@ -248,7 +248,6 @@ config PPC4xx_GPIO bool "PPC4xx GPIO support" depends on 44x select ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO help Enable gpiolib support for ppc440 based boards diff --git a/trunk/arch/powerpc/platforms/85xx/Kconfig b/trunk/arch/powerpc/platforms/85xx/Kconfig index 8f02b05f4c96..efdd37c775ad 100644 --- a/trunk/arch/powerpc/platforms/85xx/Kconfig +++ b/trunk/arch/powerpc/platforms/85xx/Kconfig @@ -203,7 +203,6 @@ config GE_IMP3A select DEFAULT_UIMAGE select SWIOTLB select MMIO_NVRAM - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select GE_FPGA help @@ -328,7 +327,7 @@ config B4_QDS select PPC_E500MC select PHYS_64BIT select SWIOTLB - select GENERIC_GPIO + select GPIOLIB select ARCH_REQUIRE_GPIOLIB select HAS_RAPIDIO select PPC_EPAPR_HV_PIC diff --git a/trunk/arch/powerpc/platforms/86xx/Kconfig b/trunk/arch/powerpc/platforms/86xx/Kconfig index 7a6279e38213..1afd1e4a2dd2 100644 --- a/trunk/arch/powerpc/platforms/86xx/Kconfig +++ b/trunk/arch/powerpc/platforms/86xx/Kconfig @@ -37,7 +37,6 @@ config GEF_PPC9A bool "GE PPC9A" select DEFAULT_UIMAGE select MMIO_NVRAM - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select GE_FPGA help @@ -47,7 +46,6 @@ config GEF_SBC310 bool "GE SBC310" select DEFAULT_UIMAGE select MMIO_NVRAM - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select GE_FPGA help @@ -57,7 +55,6 @@ config GEF_SBC610 bool "GE SBC610" select DEFAULT_UIMAGE select MMIO_NVRAM - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB select GE_FPGA select HAS_RAPIDIO diff --git a/trunk/arch/powerpc/platforms/8xx/Kconfig b/trunk/arch/powerpc/platforms/8xx/Kconfig index 1fb0b3cddeb3..8dec3c0911ad 100644 --- a/trunk/arch/powerpc/platforms/8xx/Kconfig +++ b/trunk/arch/powerpc/platforms/8xx/Kconfig @@ -114,7 +114,6 @@ config 8xx_COPYBACK config 8xx_GPIO bool "GPIO API Support" - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help Saying Y here will cause the ports on an MPC8xx processor to be used diff --git a/trunk/arch/powerpc/platforms/Kconfig b/trunk/arch/powerpc/platforms/Kconfig index 34d224be93ba..b62aab3e22ec 100644 --- a/trunk/arch/powerpc/platforms/Kconfig +++ b/trunk/arch/powerpc/platforms/Kconfig @@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON config RTAS_PROC bool "Proc interface to RTAS" - depends on PPC_RTAS + depends on PPC_RTAS && PROC_FS default y config RTAS_FLASH @@ -302,7 +302,6 @@ config QUICC_ENGINE config QE_GPIO bool "QE GPIO support" depends on QUICC_ENGINE - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help Say Y here if you're going to use hardware that connects to the @@ -315,7 +314,6 @@ config CPM2 select PPC_LIB_RHEAP select PPC_PCI_CHOICE select ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO help The CPM2 (Communications Processor Module) is a coprocessor on embedded CPUs made by Freescale. 
Selecting this option means that @@ -353,7 +351,6 @@ config OF_RTC config SIMPLE_GPIO bool "Support for simple, memory-mapped GPIO controllers" depends on PPC - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help Say Y here to support simple, memory-mapped GPIO controllers. @@ -364,7 +361,6 @@ config SIMPLE_GPIO config MCU_MPC8349EMITX bool "MPC8349E-mITX MCU driver" depends on I2C=y && PPC_83xx - select GENERIC_GPIO select ARCH_REQUIRE_GPIOLIB help Say Y here to enable soft power-off functionality on the Freescale diff --git a/trunk/arch/powerpc/platforms/powernv/Kconfig b/trunk/arch/powerpc/platforms/powernv/Kconfig index d3e840d643af..c24684c818ab 100644 --- a/trunk/arch/powerpc/platforms/powernv/Kconfig +++ b/trunk/arch/powerpc/platforms/powernv/Kconfig @@ -6,6 +6,7 @@ config PPC_POWERNV select PPC_ICP_NATIVE select PPC_P7_NAP select PPC_PCI_CHOICE if EMBEDDED + select EPAPR_BOOT default y config POWERNV_MSI diff --git a/trunk/arch/powerpc/platforms/powernv/opal.c b/trunk/arch/powerpc/platforms/powernv/opal.c index ade4463226c6..628c564ceadb 100644 --- a/trunk/arch/powerpc/platforms/powernv/opal.c +++ b/trunk/arch/powerpc/platforms/powernv/opal.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -28,6 +29,8 @@ struct opal { static struct device_node *opal_node; static DEFINE_SPINLOCK(opal_write_lock); extern u64 opal_mc_secondary_handler[]; +static unsigned int *opal_irqs; +static unsigned int opal_irq_count; int __init early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data) @@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node, opal.entry, entryp, entrysz); powerpc_firmware_features |= FW_FEATURE_OPAL; - if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { + if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { + powerpc_firmware_features |= FW_FEATURE_OPALv2; + powerpc_firmware_features |= FW_FEATURE_OPALv3; + printk("OPAL V3 detected !\n"); + } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { powerpc_firmware_features |= FW_FEATURE_OPALv2; printk("OPAL V2 detected !\n"); } else { @@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { len = total_len; rc = opal_console_write(vtermno, &len, data); + + /* Closed or other error drop */ + if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && + rc != OPAL_BUSY_EVENT) { + written = total_len; + break; + } if (rc == OPAL_SUCCESS) { total_len -= len; data += len; @@ -316,6 +330,8 @@ static int __init opal_init(void) irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); pr_debug("opal: Found %d interrupts reserved for OPAL\n", irqs ? 
(irqlen / 4) : 0); + opal_irq_count = irqlen / 4; + opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL); for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { unsigned int hwirq = be32_to_cpup(irqs); unsigned int irq = irq_create_mapping(NULL, hwirq); @@ -327,7 +343,19 @@ static int __init opal_init(void) if (rc) pr_warning("opal: Error %d requesting irq %d" " (0x%x)\n", rc, irq, hwirq); + opal_irqs[i] = irq; } return 0; } subsys_initcall(opal_init); + +void opal_shutdown(void) +{ + unsigned int i; + + for (i = 0; i < opal_irq_count; i++) { + if (opal_irqs[i]) + free_irq(opal_irqs[i], 0); + opal_irqs[i] = 0; + } +} diff --git a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c index 1da578b7c1bf..9c9d15e4cdf2 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c @@ -68,16 +68,6 @@ define_pe_printk_level(pe_err, KERN_ERR); define_pe_printk_level(pe_warn, KERN_WARNING); define_pe_printk_level(pe_info, KERN_INFO); -static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev) -{ - struct device_node *np; - - np = pci_device_to_OF_node(dev); - if (!np) - return NULL; - return PCI_DN(np); -} - static int pnv_ioda_alloc_pe(struct pnv_phb *phb) { unsigned long pe; @@ -110,7 +100,7 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); + struct pci_dn *pdn = pci_get_pdn(dev); if (!pdn) return NULL; @@ -173,7 +163,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe) /* Add to all parents PELT-V */ while (parent) { - struct pci_dn *pdn = pnv_ioda_get_pdn(parent); + struct pci_dn *pdn = pci_get_pdn(parent); if (pdn && pdn->pe_number != IODA_INVALID_PE) { rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); @@ -252,7 +242,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev) { struct pci_controller *hose = pci_bus_to_host(dev->bus); struct pnv_phb *phb = hose->private_data; - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); + struct pci_dn *pdn = pci_get_pdn(dev); struct pnv_ioda_pe *pe; int pe_num; @@ -323,7 +313,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe) struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { - struct pci_dn *pdn = pnv_ioda_get_pdn(dev); + struct pci_dn *pdn = pci_get_pdn(dev); if (pdn == NULL) { pr_warn("%s: No device node associated with device !\n", @@ -436,7 +426,7 @@ static void pnv_pci_ioda_setup_PEs(void) static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) { - struct pci_dn *pdn = pnv_ioda_get_pdn(pdev); + struct pci_dn *pdn = pci_get_pdn(pdev); struct pnv_ioda_pe *pe; /* @@ -768,6 +758,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, unsigned int is_64, struct msi_msg *msg) { struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); + struct pci_dn *pdn = pci_get_pdn(dev); struct irq_data *idata; struct irq_chip *ichip; unsigned int xive_num = hwirq - phb->msi_base; @@ -783,6 +774,10 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev, if (pe->mve_number < 0) return -ENXIO; + /* Force 32-bit MSI on some broken devices */ + if (pdn && pdn->force_32bit_msi) + is_64 = 0; + /* Assign XIVE to PE */ rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); if (rc) { @@ -1035,7 
+1030,7 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev) if (!phb->initialized) return 0; - pdn = pnv_ioda_get_pdn(dev); + pdn = pci_get_pdn(dev); if (!pdn || pdn->pe_number == IODA_INVALID_PE) return -EINVAL; @@ -1048,6 +1043,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus, return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; } +static void pnv_pci_ioda_shutdown(struct pnv_phb *phb) +{ + opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET, + OPAL_ASSERT_RESET); +} + void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) { struct pci_controller *hose; @@ -1178,6 +1179,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) /* Setup TCEs */ phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; + /* Setup shutdown function for kexec */ + phb->shutdown = pnv_pci_ioda_shutdown; + /* Setup MSI support */ pnv_pci_init_ioda_msis(phb); diff --git a/trunk/arch/powerpc/platforms/powernv/pci.c b/trunk/arch/powerpc/platforms/powernv/pci.c index 55dfca844ddf..277343cc6a3d 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci.c +++ b/trunk/arch/powerpc/platforms/powernv/pci.c @@ -47,6 +47,10 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type) { struct pci_controller *hose = pci_bus_to_host(pdev->bus); struct pnv_phb *phb = hose->private_data; + struct pci_dn *pdn = pci_get_pdn(pdev); + + if (pdn && pdn->force_32bit_msi && !phb->msi32_support) + return -ENODEV; return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV; } @@ -367,7 +371,7 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages) while (npages--) *(tcep++) = 0; - if (tbl->it_type & TCE_PCI_SWINV_CREATE) + if (tbl->it_type & TCE_PCI_SWINV_FREE) pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); } @@ -450,6 +454,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) pnv_pci_dma_fallback_setup(hose, pdev); } +void pnv_pci_shutdown(void) +{ + struct pci_controller *hose; + + list_for_each_entry(hose, &hose_list, list_node) { + struct pnv_phb *phb = hose->private_data; + + if (phb && phb->shutdown) + phb->shutdown(phb); + } +} + /* Fixup wrong class code in p7ioc and p8 root complex */ static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) { diff --git a/trunk/arch/powerpc/platforms/powernv/pci.h b/trunk/arch/powerpc/platforms/powernv/pci.h index 48dc4bb856a1..25d76c4df50b 100644 --- a/trunk/arch/powerpc/platforms/powernv/pci.h +++ b/trunk/arch/powerpc/platforms/powernv/pci.h @@ -86,6 +86,7 @@ struct pnv_phb { void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); void (*fixup_phb)(struct pci_controller *hose); u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); + void (*shutdown)(struct pnv_phb *phb); union { struct { @@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np); extern void pnv_pci_init_ioda2_phb(struct device_node *np); extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, u64 *startp, u64 *endp); + #endif /* __POWERNV_PCI_H */ diff --git a/trunk/arch/powerpc/platforms/powernv/powernv.h b/trunk/arch/powerpc/platforms/powernv/powernv.h index 8a9df7f9667e..a1c6f83fc391 100644 --- a/trunk/arch/powerpc/platforms/powernv/powernv.h +++ b/trunk/arch/powerpc/platforms/powernv/powernv.h @@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { } #ifdef CONFIG_PCI extern void pnv_pci_init(void); +extern void pnv_pci_shutdown(void); #else static inline void pnv_pci_init(void) { } +static inline void pnv_pci_shutdown(void) { } #endif #endif /* _POWERNV_H */ 
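For readers tracing the new kexec shutdown path added in the hunks above, here is a minimal sketch (not part of the patch) of how the pieces chain together: struct pnv_phb's new shutdown hook, pnv_pci_ioda_shutdown(), pnv_pci_shutdown(), opal_shutdown() and pnv_shutdown() are the names introduced by this series, while the stub types, printf placeholders and the explicit PHB array are simplified stand-ins — the kernel walks hose_list and issues real OPAL calls instead.

#include <stdio.h>

/* Simplified stand-in for the kernel's PHB structure, showing the new hook. */
struct pnv_phb {
	const char *name;
	void (*shutdown)(struct pnv_phb *phb);	/* hook added to struct pnv_phb */
};

/* pci-ioda.c: per-PHB hook; the real code issues OPAL_PCI_IODA_TABLE_RESET. */
static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
{
	printf("resetting IODA tables on %s\n", phb->name);
}

/* pci.c: walk every PHB and invoke its shutdown hook, if it has one.
 * (The kernel version takes no arguments and iterates hose_list.) */
static void pnv_pci_shutdown(struct pnv_phb *phbs[], int n)
{
	for (int i = 0; i < n; i++)
		if (phbs[i] && phbs[i]->shutdown)
			phbs[i]->shutdown(phbs[i]);
}

/* opal.c: the real code frees the OPAL interrupts registered at boot. */
static void opal_shutdown(void)
{
	printf("releasing OPAL interrupts\n");
}

/* setup.c: machine_shutdown hook run before kexec. */
static void pnv_shutdown(struct pnv_phb *phbs[], int n)
{
	pnv_pci_shutdown(phbs, n);	/* let the PCI code clear up IODA tables */
	opal_shutdown();		/* unregister OPAL irqs so none fire during kexec */
}

int main(void)
{
	struct pnv_phb phb0 = { "PHB0", pnv_pci_ioda_shutdown };
	struct pnv_phb *phbs[] = { &phb0 };

	pnv_shutdown(phbs, 1);
	return 0;
}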
diff --git a/trunk/arch/powerpc/platforms/powernv/setup.c b/trunk/arch/powerpc/platforms/powernv/setup.c index db1ad1c8f68f..d4459bfc92f7 100644 --- a/trunk/arch/powerpc/platforms/powernv/setup.c +++ b/trunk/arch/powerpc/platforms/powernv/setup.c @@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m) if (root) model = of_get_property(root, "model", NULL); seq_printf(m, "machine\t\t: PowerNV %s\n", model); - if (firmware_has_feature(FW_FEATURE_OPALv2)) + if (firmware_has_feature(FW_FEATURE_OPALv3)) + seq_printf(m, "firmware\t: OPAL v3\n"); + else if (firmware_has_feature(FW_FEATURE_OPALv2)) seq_printf(m, "firmware\t: OPAL v2\n"); else if (firmware_has_feature(FW_FEATURE_OPAL)) seq_printf(m, "firmware\t: OPAL v1\n"); @@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex) { } +static void pnv_shutdown(void) +{ + /* Let the PCI code clear up IODA tables */ + pnv_pci_shutdown(); + + /* And unregister all OPAL interrupts so they don't fire + * up while we kexec + */ + opal_shutdown(); +} + #ifdef CONFIG_KEXEC static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) { @@ -187,6 +200,7 @@ define_machine(powernv) { .init_IRQ = pnv_init_IRQ, .show_cpuinfo = pnv_show_cpuinfo, .progress = pnv_progress, + .machine_shutdown = pnv_shutdown, .power_save = power7_idle, .calibrate_decr = generic_calibrate_decr, #ifdef CONFIG_KEXEC diff --git a/trunk/arch/powerpc/platforms/powernv/smp.c b/trunk/arch/powerpc/platforms/powernv/smp.c index 6a3ecca5b725..88c9459c3e07 100644 --- a/trunk/arch/powerpc/platforms/powernv/smp.c +++ b/trunk/arch/powerpc/platforms/powernv/smp.c @@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr) BUG_ON(nr < 0 || nr >= NR_CPUS); - /* On OPAL v2 the CPU are still spinning inside OPAL itself, - * get them back now + /* + * If we already started or OPALv2 is not supported, we just + * kick the CPU via the PACA */ - if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) { - pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); - rc = opal_start_cpu(pcpu, start_here); + if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2)) + goto kick; + + /* + * At this point, the CPU can either be spinning on the way in + * from kexec or be inside OPAL waiting to be started for the + * first time. OPAL v3 allows us to query OPAL to know if it + * has the CPUs, so we do that + */ + if (firmware_has_feature(FW_FEATURE_OPALv3)) { + uint8_t status; + + rc = opal_query_cpu_status(pcpu, &status); if (rc != OPAL_SUCCESS) { - pr_warn("OPAL Error %ld starting CPU %d\n", + pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr); return -ENODEV; } + + /* + * Already started, just kick it, probably coming from + * kexec and spinning + */ + if (status == OPAL_THREAD_STARTED) + goto kick; + + /* + * Available/inactive, let's kick it + */ + if (status == OPAL_THREAD_INACTIVE) { + pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", + nr, pcpu); + rc = opal_start_cpu(pcpu, start_here); + if (rc != OPAL_SUCCESS) { + pr_warn("OPAL Error %ld starting CPU %d\n", + rc, nr); + return -ENODEV; + } + } else { + /* + * An unavailable CPU (or any other unknown status) + * shouldn't be started. It should also + * not be in the possible map but currently it can + * happen + */ + pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable" + " (status %d)...\n", nr, pcpu, status); + return -ENODEV; + } + } else { + /* + * On OPAL v2, we just kick it and hope for the best, + * we must not test the error from opal_start_cpu() or + * we would fail to get CPUs from kexec. 
+ */ + opal_start_cpu(pcpu, start_here); } + kick: return smp_generic_kick_cpu(nr); } diff --git a/trunk/arch/powerpc/platforms/pseries/Kconfig b/trunk/arch/powerpc/platforms/pseries/Kconfig index 9a0941bc4d31..023b288f895b 100644 --- a/trunk/arch/powerpc/platforms/pseries/Kconfig +++ b/trunk/arch/powerpc/platforms/pseries/Kconfig @@ -18,6 +18,7 @@ config PPC_PSERIES select PPC_PCI_CHOICE if EXPERT select ZLIB_DEFLATE select PPC_DOORBELL + select HAVE_CONTEXT_TRACKING default y config PPC_SPLPAR diff --git a/trunk/arch/powerpc/platforms/pseries/msi.c b/trunk/arch/powerpc/platforms/pseries/msi.c index 420524e6f8c9..6d2f0abce6fa 100644 --- a/trunk/arch/powerpc/platforms/pseries/msi.c +++ b/trunk/arch/powerpc/platforms/pseries/msi.c @@ -26,26 +26,6 @@ static int query_token, change_token; #define RTAS_CHANGE_MSIX_FN 4 #define RTAS_CHANGE_32MSI_FN 5 -static struct pci_dn *get_pdn(struct pci_dev *pdev) -{ - struct device_node *dn; - struct pci_dn *pdn; - - dn = pci_device_to_OF_node(pdev); - if (!dn) { - dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n"); - return NULL; - } - - pdn = PCI_DN(dn); - if (!pdn) { - dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n"); - return NULL; - } - - return pdn; -} - /* RTAS Helpers */ static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs) @@ -91,7 +71,7 @@ static void rtas_disable_msi(struct pci_dev *pdev) { struct pci_dn *pdn; - pdn = get_pdn(pdev); + pdn = pci_get_pdn(pdev); if (!pdn) return; @@ -152,7 +132,7 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) struct pci_dn *pdn; const u32 *req_msi; - pdn = get_pdn(pdev); + pdn = pci_get_pdn(pdev); if (!pdn) return -ENODEV; @@ -394,6 +374,23 @@ static int check_msix_entries(struct pci_dev *pdev) return 0; } +static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev) +{ + u32 addr_hi, addr_lo; + + /* + * We should only get in here for IODA1 configs. This is based on the + * fact that we using RTAS for MSIs, we don't have the 32 bit MSI RTAS + * support, and we are in a PCIe Gen2 slot. 
+ */ + dev_info(&pdev->dev, + "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n"); + pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi); + addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4); + pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo); + pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0); +} + static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) { struct pci_dn *pdn; @@ -401,8 +398,9 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) struct msi_desc *entry; struct msi_msg msg; int nvec = nvec_in; + int use_32bit_msi_hack = 0; - pdn = get_pdn(pdev); + pdn = pci_get_pdn(pdev); if (!pdn) return -ENODEV; @@ -428,15 +426,31 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) */ again: if (type == PCI_CAP_ID_MSI) { - if (pdn->force_32bit_msi) + if (pdn->force_32bit_msi) { rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); - else + if (rc < 0) { + /* + * We only want to run the 32 bit MSI hack below if + * the max bus speed is Gen2 speed + */ + if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT) + return rc; + + use_32bit_msi_hack = 1; + } + } else + rc = -1; + + if (rc < 0) rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); - if (rc < 0 && !pdn->force_32bit_msi) { + if (rc < 0) { pr_debug("rtas_msi: trying the old firmware call.\n"); rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec); } + + if (use_32bit_msi_hack && rc > 0) + rtas_hack_32bit_msi_gen2(pdev); } else rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); @@ -518,12 +532,3 @@ static int rtas_msi_init(void) } arch_initcall(rtas_msi_init); -static void quirk_radeon(struct pci_dev *dev) -{ - struct pci_dn *pdn = get_pdn(dev); - - if (pdn) - pdn->force_32bit_msi = 1; -} -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon); -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon); diff --git a/trunk/arch/powerpc/platforms/pseries/suspend.c b/trunk/arch/powerpc/platforms/pseries/suspend.c index 47226e04126d..5f997e79d570 100644 --- a/trunk/arch/powerpc/platforms/pseries/suspend.c +++ b/trunk/arch/powerpc/platforms/pseries/suspend.c @@ -16,6 +16,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include #include @@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { + cpumask_var_t offline_mask; int rc; if (!capable(CAP_SYS_ADMIN)) return -EPERM; + if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) + return -ENOMEM; + stream_id = simple_strtoul(buf, NULL, 16); do { @@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev, } while (rc == -EAGAIN); if (!rc) { + /* All present CPUs must be online */ + cpumask_andnot(offline_mask, cpu_present_mask, + cpu_online_mask); + rc = rtas_online_cpus_mask(offline_mask); + if (rc) { + pr_err("%s: Could not bring present CPUs online.\n", + __func__); + goto out; + } + stop_topology_update(); rc = pm_suspend(PM_SUSPEND_MEM); start_topology_update(); + + /* Take down CPUs not online prior to suspend */ + if (!rtas_offline_cpus_mask(offline_mask)) + pr_warn("%s: Could not restore CPUs to offline " + "state.\n", __func__); } stream_id = 0; if (!rc) rc = count; +out: + free_cpumask_var(offline_mask); return rc; } diff --git a/trunk/arch/powerpc/platforms/wsp/ics.c b/trunk/arch/powerpc/platforms/wsp/ics.c index 97fe82ee8633..2d3b1dd9571d 100644 --- 
a/trunk/arch/powerpc/platforms/wsp/ics.c +++ b/trunk/arch/powerpc/platforms/wsp/ics.c @@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d, xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); wsp_ics_set_xive(ics, hw_irq, xive); - return 0; + return IRQ_SET_MASK_OK; } static struct irq_chip wsp_irq_chip = { diff --git a/trunk/arch/powerpc/sysdev/Makefile b/trunk/arch/powerpc/sysdev/Makefile index b0a518e97599..99464a7bdb3b 100644 --- a/trunk/arch/powerpc/sysdev/Makefile +++ b/trunk/arch/powerpc/sysdev/Makefile @@ -64,6 +64,8 @@ endif obj-$(CONFIG_PPC_SCOM) += scom.o +obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o + subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror obj-$(CONFIG_PPC_XICS) += xics/ diff --git a/trunk/arch/powerpc/sysdev/ehv_pic.c b/trunk/arch/powerpc/sysdev/ehv_pic.c index 6e0e1005227f..9cd0e60716fe 100644 --- a/trunk/arch/powerpc/sysdev/ehv_pic.c +++ b/trunk/arch/powerpc/sysdev/ehv_pic.c @@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest, ev_int_set_config(src, config, prio, cpuid); spin_unlock_irqrestore(&ehv_pic_lock, flags); - return 0; + return IRQ_SET_MASK_OK; } static unsigned int ehv_pic_type_to_vecpri(unsigned int type) diff --git a/trunk/arch/powerpc/sysdev/mpic.c b/trunk/arch/powerpc/sysdev/mpic.c index ee21b5e71aec..0a13ecb270c7 100644 --- a/trunk/arch/powerpc/sysdev/mpic.c +++ b/trunk/arch/powerpc/sysdev/mpic.c @@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, mpic_physmask(mask)); } - return 0; + return IRQ_SET_MASK_OK; } static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) diff --git a/trunk/arch/powerpc/sysdev/udbg_memcons.c b/trunk/arch/powerpc/sysdev/udbg_memcons.c new file mode 100644 index 000000000000..ce5a7b489e4b --- /dev/null +++ b/trunk/arch/powerpc/sysdev/udbg_memcons.c @@ -0,0 +1,105 @@ +/* + * A udbg backend which logs messages and reads input from in memory + * buffers. + * + * The console output can be read from memcons_output which is a + * circular buffer whose next write position is stored in memcons.output_pos. + * + * Input may be passed by writing into the memcons_input buffer when it is + * empty. The input buffer is empty when both input_pos == input_start and + * *input_start == '\0'. + * + * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp + * Copyright (C) 2013 Alistair Popple, IBM Corp + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include +#include +#include +#include +#include + +struct memcons { + char *output_start; + char *output_pos; + char *output_end; + char *input_start; + char *input_pos; + char *input_end; +}; + +static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE]; +static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE]; + +struct memcons memcons = { + .output_start = memcons_output, + .output_pos = memcons_output, + .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE], + .input_start = memcons_input, + .input_pos = memcons_input, + .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE], +}; + +void memcons_putc(char c) +{ + char *new_output_pos; + + *memcons.output_pos = c; + wmb(); + new_output_pos = memcons.output_pos + 1; + if (new_output_pos >= memcons.output_end) + new_output_pos = memcons.output_start; + + memcons.output_pos = new_output_pos; +} + +int memcons_getc_poll(void) +{ + char c; + char *new_input_pos; + + if (*memcons.input_pos) { + c = *memcons.input_pos; + + new_input_pos = memcons.input_pos + 1; + if (new_input_pos >= memcons.input_end) + new_input_pos = memcons.input_start; + else if (*new_input_pos == '\0') + new_input_pos = memcons.input_start; + + *memcons.input_pos = '\0'; + wmb(); + memcons.input_pos = new_input_pos; + return c; + } + + return -1; +} + +int memcons_getc(void) +{ + int c; + + while (1) { + c = memcons_getc_poll(); + if (c == -1) + cpu_relax(); + else + break; + } + + return c; +} + +void udbg_init_memcons(void) +{ + udbg_putc = memcons_putc; + udbg_getc = memcons_getc; + udbg_getc_poll = memcons_getc_poll; +} diff --git a/trunk/arch/powerpc/sysdev/xics/ics-opal.c b/trunk/arch/powerpc/sysdev/xics/ics-opal.c index f7e8609df0d5..39d72212655e 100644 --- a/trunk/arch/powerpc/sysdev/xics/ics-opal.c +++ b/trunk/arch/powerpc/sysdev/xics/ics-opal.c @@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d, __func__, d->irq, hw_irq, server, rc); return -1; } - return 0; + return IRQ_SET_MASK_OK; } static struct irq_chip ics_opal_irq_chip = { diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 2c9789da0e24..da183c5a103c 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -98,7 +98,6 @@ config S390 select CLONE_BACKWARDS2 select GENERIC_CLOCKEVENTS select GENERIC_CPU_DEVICES if !SMP - select GENERIC_KERNEL_THREAD select GENERIC_SMP_IDLE_THREAD select GENERIC_TIME_VSYSCALL_OLD select HAVE_ALIGNED_STRUCT_PAGE if SLUB diff --git a/trunk/arch/s390/include/asm/ftrace.h b/trunk/arch/s390/include/asm/ftrace.h index b7931faaef6d..bf246dae1367 100644 --- a/trunk/arch/s390/include/asm/ftrace.h +++ b/trunk/arch/s390/include/asm/ftrace.h @@ -9,11 +9,6 @@ struct dyn_arch_ftrace { }; #define MCOUNT_ADDR ((long)_mcount) -#ifdef CONFIG_64BIT -#define MCOUNT_INSN_SIZE 12 -#else -#define MCOUNT_INSN_SIZE 20 -#endif static inline unsigned long ftrace_call_adjust(unsigned long addr) { @@ -21,4 +16,11 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr) } #endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_64BIT +#define MCOUNT_INSN_SIZE 12 +#else +#define MCOUNT_INSN_SIZE 22 +#endif + #endif /* _ASM_S390_FTRACE_H */ diff --git a/trunk/arch/s390/include/asm/page.h b/trunk/arch/s390/include/asm/page.h index 75ce9b065f9f..5d64fb7619cc 100644 --- a/trunk/arch/s390/include/asm/page.h +++ b/trunk/arch/s390/include/asm/page.h @@ -32,7 +32,7 @@ void storage_key_init_range(unsigned long start, unsigned long end); -static unsigned long pfmf(unsigned long function, unsigned long address) +static inline unsigned 
long pfmf(unsigned long function, unsigned long address) { asm volatile( " .insn rre,0xb9af0000,%[function],%[address]" @@ -44,17 +44,13 @@ static unsigned long pfmf(unsigned long function, unsigned long address) static inline void clear_page(void *page) { - if (MACHINE_HAS_PFMF) { - pfmf(0x10000, (unsigned long)page); - } else { - register unsigned long reg1 asm ("1") = 0; - register void *reg2 asm ("2") = page; - register unsigned long reg3 asm ("3") = 4096; - asm volatile( - " mvcl 2,0" - : "+d" (reg2), "+d" (reg3) : "d" (reg1) - : "memory", "cc"); - } + register unsigned long reg1 asm ("1") = 0; + register void *reg2 asm ("2") = page; + register unsigned long reg3 asm ("3") = 4096; + asm volatile( + " mvcl 2,0" + : "+d" (reg2), "+d" (reg3) : "d" (reg1) + : "memory", "cc"); } static inline void copy_page(void *to, void *from) diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h index 4105b8221fdd..0f0de30e3e3f 100644 --- a/trunk/arch/s390/include/asm/pgtable.h +++ b/trunk/arch/s390/include/asm/pgtable.h @@ -306,7 +306,7 @@ extern unsigned long MODULES_END; #define RCP_HC_BIT 0x00200000UL #define RCP_GR_BIT 0x00040000UL #define RCP_GC_BIT 0x00020000UL -#define RCP_IN_BIT 0x00008000UL /* IPTE notify bit */ +#define RCP_IN_BIT 0x00002000UL /* IPTE notify bit */ /* User dirty / referenced bit for KVM's migration feature */ #define KVM_UR_BIT 0x00008000UL @@ -374,7 +374,7 @@ extern unsigned long MODULES_END; #define RCP_HC_BIT 0x0020000000000000UL #define RCP_GR_BIT 0x0004000000000000UL #define RCP_GC_BIT 0x0002000000000000UL -#define RCP_IN_BIT 0x0000800000000000UL /* IPTE notify bit */ +#define RCP_IN_BIT 0x0000200000000000UL /* IPTE notify bit */ /* User dirty / referenced bit for KVM's migration feature */ #define KVM_UR_BIT 0x0000800000000000UL diff --git a/trunk/arch/s390/kernel/compat_wrapper.S b/trunk/arch/s390/kernel/compat_wrapper.S index 2d72d9e96c15..9cb1b975b353 100644 --- a/trunk/arch/s390/kernel/compat_wrapper.S +++ b/trunk/arch/s390/kernel/compat_wrapper.S @@ -793,10 +793,6 @@ ENTRY(sys32_stime_wrapper) llgtr %r2,%r2 # long * jg compat_sys_stime # branch to system call -ENTRY(sys32_sysctl_wrapper) - llgtr %r2,%r2 # struct compat_sysctl_args * - jg compat_sys_sysctl - ENTRY(sys32_fstat64_wrapper) llgfr %r2,%r2 # unsigned long llgtr %r3,%r3 # struct stat64 * @@ -1349,15 +1345,6 @@ ENTRY(sys_fanotify_init_wrapper) llgfr %r3,%r3 # unsigned int jg sys_fanotify_init # branch to system call -ENTRY(sys_fanotify_mark_wrapper) - lgfr %r2,%r2 # int - llgfr %r3,%r3 # unsigned int - sllg %r4,%r4,32 # get high word of 64bit mask - lr %r4,%r5 # get low word of 64bit mask - llgfr %r5,%r6 # unsigned int - llgt %r6,164(%r15) # char * - jg sys_fanotify_mark # branch to system call - ENTRY(sys_prlimit64_wrapper) lgfr %r2,%r2 # pid_t llgfr %r3,%r3 # unsigned int diff --git a/trunk/arch/s390/kernel/dis.c b/trunk/arch/s390/kernel/dis.c index 7f4a4a8c847c..be87d3e05a5b 100644 --- a/trunk/arch/s390/kernel/dis.c +++ b/trunk/arch/s390/kernel/dis.c @@ -1862,6 +1862,8 @@ void print_fn_code(unsigned char *code, unsigned long len) while (len) { ptr = buffer; opsize = insn_length(*code); + if (opsize > len) + break; ptr += sprintf(ptr, "%p: ", code); for (i = 0; i < opsize; i++) ptr += sprintf(ptr, "%02x", code[i]); diff --git a/trunk/arch/s390/kernel/ftrace.c b/trunk/arch/s390/kernel/ftrace.c index 78bdf0e5dff7..e3043aef87a9 100644 --- a/trunk/arch/s390/kernel/ftrace.c +++ b/trunk/arch/s390/kernel/ftrace.c @@ -16,12 +16,6 @@ #include #include -#ifdef CONFIG_64BIT 
-#define MCOUNT_OFFSET_RET 12 -#else -#define MCOUNT_OFFSET_RET 22 -#endif - #ifdef CONFIG_DYNAMIC_FTRACE void ftrace_disable_code(void); @@ -155,9 +149,10 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent, if (unlikely(atomic_read(¤t->tracing_graph_pause))) goto out; + ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY) goto out; - trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET; + trace.func = ip; /* Only trace if the calling function expects to. */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; diff --git a/trunk/arch/s390/kernel/mcount.S b/trunk/arch/s390/kernel/mcount.S index 4567ce20d900..08dcf21cb8df 100644 --- a/trunk/arch/s390/kernel/mcount.S +++ b/trunk/arch/s390/kernel/mcount.S @@ -7,6 +7,7 @@ #include #include +#include .section .kprobes.text, "ax" @@ -33,6 +34,7 @@ ENTRY(ftrace_caller) la %r2,0(%r14) st %r0,__SF_BACKCHAIN(%r15) la %r3,0(%r3) + ahi %r2,-MCOUNT_INSN_SIZE l %r14,0b-0b(%r1) l %r14,0(%r14) basr %r14,%r14 diff --git a/trunk/arch/s390/kernel/mcount64.S b/trunk/arch/s390/kernel/mcount64.S index 11332193db30..1c52eae3396a 100644 --- a/trunk/arch/s390/kernel/mcount64.S +++ b/trunk/arch/s390/kernel/mcount64.S @@ -7,6 +7,7 @@ #include #include +#include .section .kprobes.text, "ax" @@ -29,6 +30,7 @@ ENTRY(ftrace_caller) stg %r1,__SF_BACKCHAIN(%r15) lgr %r2,%r14 lg %r3,168(%r15) + aghi %r2,-MCOUNT_INSN_SIZE larl %r14,ftrace_trace_function lg %r14,0(%r14) basr %r14,%r14 diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c index 8074cb4b7cbf..05674b669001 100644 --- a/trunk/arch/s390/kernel/smp.c +++ b/trunk/arch/s390/kernel/smp.c @@ -645,7 +645,7 @@ static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info, continue; pcpu = pcpu_devices + cpu; pcpu->address = info->cpu[i].address; - pcpu->state = (cpu >= info->configured) ? + pcpu->state = (i >= info->configured) ? 
CPU_STATE_STANDBY : CPU_STATE_CONFIGURED; smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); set_cpu_present(cpu, true); diff --git a/trunk/arch/s390/kernel/syscalls.S b/trunk/arch/s390/kernel/syscalls.S index 9f214e992eed..913410bd74a3 100644 --- a/trunk/arch/s390/kernel/syscalls.S +++ b/trunk/arch/s390/kernel/syscalls.S @@ -157,7 +157,7 @@ SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper) /* 145 */ SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper) SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper) SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper) -SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper) +SYSCALL(sys_sysctl,sys_sysctl,compat_sys_sysctl) SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */ SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper) SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper) @@ -341,7 +341,7 @@ SYSCALL(sys_pwritev,sys_pwritev,compat_sys_pwritev) SYSCALL(sys_rt_tgsigqueueinfo,sys_rt_tgsigqueueinfo,compat_sys_rt_tgsigqueueinfo) /* 330 */ SYSCALL(sys_perf_event_open,sys_perf_event_open,sys_perf_event_open_wrapper) SYSCALL(sys_fanotify_init,sys_fanotify_init,sys_fanotify_init_wrapper) -SYSCALL(sys_fanotify_mark,sys_fanotify_mark,sys_fanotify_mark_wrapper) +SYSCALL(sys_fanotify_mark,sys_fanotify_mark,compat_sys_fanotify_mark) SYSCALL(sys_prlimit64,sys_prlimit64,sys_prlimit64_wrapper) SYSCALL(sys_name_to_handle_at,sys_name_to_handle_at,sys_name_to_handle_at_wrapper) /* 335 */ SYSCALL(sys_open_by_handle_at,sys_open_by_handle_at,compat_sys_open_by_handle_at) diff --git a/trunk/arch/s390/mm/pgtable.c b/trunk/arch/s390/mm/pgtable.c index 7805ddca833d..18dc417aaf79 100644 --- a/trunk/arch/s390/mm/pgtable.c +++ b/trunk/arch/s390/mm/pgtable.c @@ -677,8 +677,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len) break; } /* Get the page mapped */ - if (get_user_pages(current, gmap->mm, addr, 1, 1, 0, - NULL, NULL) != 1) { + if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) { rc = -EFAULT; break; } diff --git a/trunk/arch/sh/Kconfig b/trunk/arch/sh/Kconfig index 78d8ace57272..8c868cf2cf93 100644 --- a/trunk/arch/sh/Kconfig +++ b/trunk/arch/sh/Kconfig @@ -93,9 +93,6 @@ config GENERIC_CSUM config GENERIC_HWEIGHT def_bool y -config GENERIC_GPIO - def_bool n - config GENERIC_CALIBRATE_DELAY bool diff --git a/trunk/arch/sh/boards/mach-sdk7786/Makefile b/trunk/arch/sh/boards/mach-sdk7786/Makefile index 8ae56e9560ac..45d32e3590b9 100644 --- a/trunk/arch/sh/boards/mach-sdk7786/Makefile +++ b/trunk/arch/sh/boards/mach-sdk7786/Makefile @@ -1,4 +1,4 @@ obj-y := fpga.o irq.o nmi.o setup.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_GPIOLIB) += gpio.o obj-$(CONFIG_HAVE_SRAM_POOL) += sram.o diff --git a/trunk/arch/sh/boards/mach-x3proto/Makefile b/trunk/arch/sh/boards/mach-x3proto/Makefile index 708c21c919ff..0cbe3d02dea3 100644 --- a/trunk/arch/sh/boards/mach-x3proto/Makefile +++ b/trunk/arch/sh/boards/mach-x3proto/Makefile @@ -1,3 +1,3 @@ obj-y += setup.o ilsel.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_GPIOLIB) += gpio.o diff --git a/trunk/arch/sh/kernel/cpu/sh2a/Makefile b/trunk/arch/sh/kernel/cpu/sh2a/Makefile index 7fdc102d0dd6..990195d98456 100644 --- a/trunk/arch/sh/kernel/cpu/sh2a/Makefile +++ b/trunk/arch/sh/kernel/cpu/sh2a/Makefile @@ -21,4 +21,4 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SH7203) := pinmux-sh7203.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7264) := pinmux-sh7264.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7269) := pinmux-sh7269.o -obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) +obj-$(CONFIG_GPIOLIB) 
+= $(pinmux-y) diff --git a/trunk/arch/sh/kernel/cpu/sh3/Makefile b/trunk/arch/sh/kernel/cpu/sh3/Makefile index 6f13f33a35ff..d3634ae7b71a 100644 --- a/trunk/arch/sh/kernel/cpu/sh3/Makefile +++ b/trunk/arch/sh/kernel/cpu/sh3/Makefile @@ -30,4 +30,4 @@ clock-$(CONFIG_CPU_SUBTYPE_SH7712) := clock-sh7712.o pinmux-$(CONFIG_CPU_SUBTYPE_SH7720) := pinmux-sh7720.o obj-y += $(clock-y) -obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) +obj-$(CONFIG_GPIOLIB) += $(pinmux-y) diff --git a/trunk/arch/sh/kernel/cpu/sh4a/Makefile b/trunk/arch/sh/kernel/cpu/sh4a/Makefile index 8fc6ec2be2fa..0705df775208 100644 --- a/trunk/arch/sh/kernel/cpu/sh4a/Makefile +++ b/trunk/arch/sh/kernel/cpu/sh4a/Makefile @@ -47,6 +47,6 @@ pinmux-$(CONFIG_CPU_SUBTYPE_SHX3) := pinmux-shx3.o obj-y += $(clock-y) obj-$(CONFIG_SMP) += $(smp-y) -obj-$(CONFIG_GENERIC_GPIO) += $(pinmux-y) +obj-$(CONFIG_GPIOLIB) += $(pinmux-y) obj-$(CONFIG_PERF_EVENTS) += perf_event.o obj-$(CONFIG_HAVE_HW_BREAKPOINT) += ubc.o diff --git a/trunk/arch/sparc/Kconfig b/trunk/arch/sparc/Kconfig index a639c0d07b8b..9ac9f1666339 100644 --- a/trunk/arch/sparc/Kconfig +++ b/trunk/arch/sparc/Kconfig @@ -137,11 +137,6 @@ config GENERIC_ISA_DMA bool default y if SPARC32 -config GENERIC_GPIO - bool - help - Generic GPIO API support - config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y if SPARC64 diff --git a/trunk/arch/sparc/kernel/sys32.S b/trunk/arch/sparc/kernel/sys32.S index 2e680b5245c9..f7c72b6efc27 100644 --- a/trunk/arch/sparc/kernel/sys32.S +++ b/trunk/arch/sparc/kernel/sys32.S @@ -239,15 +239,6 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */ nop nop - .globl sys32_fanotify_mark -sys32_fanotify_mark: - sethi %hi(sys_fanotify_mark), %g1 - sllx %o2, 32, %o2 - or %o2, %o3, %o2 - mov %o4, %o3 - jmpl %g1 + %lo(sys_fanotify_mark), %g0 - mov %o5, %o4 - .section __ex_table,"a" .align 4 .word 1b, __retl_efault, 2b, __retl_efault diff --git a/trunk/arch/sparc/kernel/systbls_64.S b/trunk/arch/sparc/kernel/systbls_64.S index 8fd932080215..6d81597064b6 100644 --- a/trunk/arch/sparc/kernel/systbls_64.S +++ b/trunk/arch/sparc/kernel/systbls_64.S @@ -84,7 +84,7 @@ sys_call_table32: .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init -/*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime +/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module diff --git a/trunk/arch/tile/Kconfig b/trunk/arch/tile/Kconfig index 5b6a40dd5556..3aa37669ff8c 100644 --- a/trunk/arch/tile/Kconfig +++ b/trunk/arch/tile/Kconfig @@ -355,11 +355,17 @@ config HARDWALL config KERNEL_PL int "Processor protection level for kernel" range 1 2 - default "1" + default 2 if TILEGX + default 1 if !TILEGX ---help--- - This setting determines the processor protection level the - kernel will be built to run at. Generally you should use - the default value here. + Since MDE 4.2, the Tilera hypervisor runs the kernel + at PL2 by default. If running under an older hypervisor, + or as a KVM guest, you must run at PL1. 
(The current + hypervisor may also be recompiled with "make HV_PL=2" to + allow it to run a kernel at PL1, but clients running at PL1 + are not expected to be supported indefinitely.) + + If you're not sure, don't change the default. source "arch/tile/gxio/Kconfig" diff --git a/trunk/arch/tile/include/hv/hypervisor.h b/trunk/arch/tile/include/hv/hypervisor.h index ccd847e2347f..837dca5328c2 100644 --- a/trunk/arch/tile/include/hv/hypervisor.h +++ b/trunk/arch/tile/include/hv/hypervisor.h @@ -107,7 +107,22 @@ #define HV_DISPATCH_ENTRY_SIZE 32 /** Version of the hypervisor interface defined by this file */ -#define _HV_VERSION 11 +#define _HV_VERSION 13 + +/** Last version of the hypervisor interface with old hv_init() ABI. + * + * The change from version 12 to version 13 corresponds to launching + * the client by default at PL2 instead of PL1 (corresponding to the + * hv itself running at PL3 instead of PL2). To make this explicit, + * the hv_init() API was also extended so the client can report its + * desired PL, resulting in a more helpful failure diagnostic. If you + * call hv_init() with _HV_VERSION_OLD_HV_INIT and omit the client_pl + * argument, the hypervisor will assume client_pl = 1. + * + * Note that this is a deprecated solution and we do not expect to + * support clients of the Tilera hypervisor running at PL1 indefinitely. + */ +#define _HV_VERSION_OLD_HV_INIT 12 /* Index into hypervisor interface dispatch code blocks. * @@ -377,7 +392,11 @@ typedef int HV_Errno; #ifndef __ASSEMBLER__ /** Pass HV_VERSION to hv_init to request this version of the interface. */ -typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber; +typedef enum { + HV_VERSION = _HV_VERSION, + HV_VERSION_OLD_HV_INIT = _HV_VERSION_OLD_HV_INIT, + +} HV_VersionNumber; /** Initializes the hypervisor. * @@ -385,9 +404,11 @@ typedef enum { HV_VERSION = _HV_VERSION } HV_VersionNumber; * that this program expects, typically HV_VERSION. * @param chip_num Architecture number of the chip the client was built for. * @param chip_rev_num Revision number of the chip the client was built for. + * @param client_pl Privilege level the client is built for + * (not required if interface_version_number == HV_VERSION_OLD_HV_INIT). */ void hv_init(HV_VersionNumber interface_version_number, - int chip_num, int chip_rev_num); + int chip_num, int chip_rev_num, int client_pl); /** Queries we can make for hv_sysconf(). diff --git a/trunk/arch/tile/kernel/head_32.S b/trunk/arch/tile/kernel/head_32.S index f71bfeeaf1a9..ac115307e5e4 100644 --- a/trunk/arch/tile/kernel/head_32.S +++ b/trunk/arch/tile/kernel/head_32.S @@ -38,7 +38,7 @@ ENTRY(_start) movei r2, TILE_CHIP_REV } { - moveli r0, _HV_VERSION + moveli r0, _HV_VERSION_OLD_HV_INIT jal hv_init } /* Get a reasonable default ASID in r0 */ diff --git a/trunk/arch/tile/kernel/head_64.S b/trunk/arch/tile/kernel/head_64.S index f9a2734f7b82..6093964fa5c7 100644 --- a/trunk/arch/tile/kernel/head_64.S +++ b/trunk/arch/tile/kernel/head_64.S @@ -34,13 +34,19 @@ ENTRY(_start) /* Notify the hypervisor of what version of the API we want */ { +#if KERNEL_PL == 1 && _HV_VERSION == 13 + /* Support older hypervisors by asking for API version 12. 
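+	 * With _HV_VERSION_OLD_HV_INIT the hypervisor assumes client_pl == 1
+	 * (see hypervisor.h); newer hypervisors instead take the desired
+	 * privilege level from the KERNEL_PL value loaded into r3 below.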
*/ + movei r0, _HV_VERSION_OLD_HV_INIT +#else + movei r0, _HV_VERSION +#endif movei r1, TILE_CHIP - movei r2, TILE_CHIP_REV } { - moveli r0, _HV_VERSION - jal hv_init + movei r2, TILE_CHIP_REV + movei r3, KERNEL_PL } + jal hv_init /* Get a reasonable default ASID in r0 */ { move r0, zero diff --git a/trunk/arch/tile/lib/spinlock_32.c b/trunk/arch/tile/lib/spinlock_32.c index b16ac49a968e..b34f79aada48 100644 --- a/trunk/arch/tile/lib/spinlock_32.c +++ b/trunk/arch/tile/lib/spinlock_32.c @@ -101,7 +101,7 @@ EXPORT_SYMBOL(arch_spin_unlock_wait); * preserve the semantic that the same read lock can be acquired in an * interrupt context. */ -inline int arch_read_trylock(arch_rwlock_t *rwlock) +int arch_read_trylock(arch_rwlock_t *rwlock) { u32 val; __insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 1); diff --git a/trunk/arch/unicore32/Kconfig b/trunk/arch/unicore32/Kconfig index 2943e3acdf0c..41bcc0013442 100644 --- a/trunk/arch/unicore32/Kconfig +++ b/trunk/arch/unicore32/Kconfig @@ -23,9 +23,6 @@ config UNICORE32 designs licensed by PKUnity Ltd. Please see web page at . -config GENERIC_GPIO - def_bool y - config GENERIC_CSUM def_bool y @@ -156,7 +153,7 @@ source "mm/Kconfig" config LEDS def_bool y - depends on GENERIC_GPIO + depends on GPIOLIB config ALIGNMENT_TRAP def_bool y @@ -219,7 +216,6 @@ if ARCH_PUV3 config PUV3_GPIO bool depends on !ARCH_FPGA - select GENERIC_GPIO select GPIO_SYSFS default y diff --git a/trunk/arch/unicore32/kernel/sys.c b/trunk/arch/unicore32/kernel/sys.c index cfe79c9529b3..f9e862539314 100644 --- a/trunk/arch/unicore32/kernel/sys.c +++ b/trunk/arch/unicore32/kernel/sys.c @@ -28,19 +28,11 @@ #include #include -/* Note: used by the compat code even in 64-bit Linux. */ -SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len, - unsigned long, prot, unsigned long, flags, - unsigned long, fd, unsigned long, off_4k) -{ - return sys_mmap_pgoff(addr, len, prot, flags, fd, - off_4k); -} - /* Provide the actual syscall number to call mapping. */ #undef __SYSCALL #define __SYSCALL(nr, call) [nr] = (call), +#define sys_mmap2 sys_mmap_pgoff /* Note that we don't include but */ void *sys_call_table[__NR_syscalls] = { [0 ... 
__NR_syscalls-1] = sys_ni_syscall, diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 5db2117ae288..685692c94f05 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -108,7 +108,6 @@ config X86 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) select GENERIC_TIME_VSYSCALL if X86_64 select KTIME_SCALAR if X86_32 - select ALWAYS_USE_PERSISTENT_CLOCK select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER select HAVE_CONTEXT_TRACKING if X86_64 @@ -174,9 +173,6 @@ config GENERIC_BUG_RELATIVE_POINTERS config GENERIC_HWEIGHT def_bool y -config GENERIC_GPIO - bool - config ARCH_MAY_HAVE_PC_FDC def_bool y depends on ISA_DMA_API diff --git a/trunk/arch/x86/ia32/sys_ia32.c b/trunk/arch/x86/ia32/sys_ia32.c index 4e4907c67d92..8e0ceecdc957 100644 --- a/trunk/arch/x86/ia32/sys_ia32.c +++ b/trunk/arch/x86/ia32/sys_ia32.c @@ -243,12 +243,3 @@ asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo, return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo, ((u64)len_hi << 32) | len_lo); } - -asmlinkage long sys32_fanotify_mark(int fanotify_fd, unsigned int flags, - u32 mask_lo, u32 mask_hi, - int fd, const char __user *pathname) -{ - return sys_fanotify_mark(fanotify_fd, flags, - ((u64)mask_hi << 32) | mask_lo, - fd, pathname); -} diff --git a/trunk/arch/x86/include/asm/sys_ia32.h b/trunk/arch/x86/include/asm/sys_ia32.h index 0ef202e232d6..82c34ee25a65 100644 --- a/trunk/arch/x86/include/asm/sys_ia32.h +++ b/trunk/arch/x86/include/asm/sys_ia32.h @@ -50,9 +50,6 @@ asmlinkage long sys32_fallocate(int, int, unsigned, asmlinkage long sys32_sigreturn(void); asmlinkage long sys32_rt_sigreturn(void); -asmlinkage long sys32_fanotify_mark(int, unsigned int, u32, u32, int, - const char __user *); - #endif /* CONFIG_COMPAT */ #endif /* _ASM_X86_SYS_IA32_H */ diff --git a/trunk/arch/x86/include/asm/syscalls.h b/trunk/arch/x86/include/asm/syscalls.h index 5f87b35fd2ef..2917a6452c49 100644 --- a/trunk/arch/x86/include/asm/syscalls.h +++ b/trunk/arch/x86/include/asm/syscalls.h @@ -37,8 +37,8 @@ asmlinkage long sys_get_thread_area(struct user_desc __user *); unsigned long sys_sigreturn(void); /* kernel/vm86_32.c */ -int sys_vm86old(struct vm86_struct __user *); -int sys_vm86(unsigned long, unsigned long); +asmlinkage long sys_vm86old(struct vm86_struct __user *); +asmlinkage long sys_vm86(unsigned long, unsigned long); #else /* CONFIG_X86_32 */ diff --git a/trunk/arch/x86/include/uapi/asm/msr-index.h b/trunk/arch/x86/include/uapi/asm/msr-index.h index b3a4866661c5..2af848dfa754 100644 --- a/trunk/arch/x86/include/uapi/asm/msr-index.h +++ b/trunk/arch/x86/include/uapi/asm/msr-index.h @@ -120,6 +120,9 @@ #define MSR_CORE_C6_RESIDENCY 0x000003fd #define MSR_CORE_C7_RESIDENCY 0x000003fe #define MSR_PKG_C2_RESIDENCY 0x0000060d +#define MSR_PKG_C8_RESIDENCY 0x00000630 +#define MSR_PKG_C9_RESIDENCY 0x00000631 +#define MSR_PKG_C10_RESIDENCY 0x00000632 /* Run Time Average Power Limiting (RAPL) Interface */ diff --git a/trunk/arch/x86/kernel/head64.c b/trunk/arch/x86/kernel/head64.c index dab95a85f7f8..55b67614ed94 100644 --- a/trunk/arch/x86/kernel/head64.c +++ b/trunk/arch/x86/kernel/head64.c @@ -34,7 +34,7 @@ extern pgd_t early_level4_pgt[PTRS_PER_PGD]; extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; static unsigned int __initdata next_early_pgt = 2; -pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); +pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); /* 
Wipe all early page tables except for the kernel symbol map */ static void __init reset_early_page_tables(void) diff --git a/trunk/arch/x86/kernel/microcode_intel_early.c b/trunk/arch/x86/kernel/microcode_intel_early.c index d893e8ed8ac9..2e9e12871c2b 100644 --- a/trunk/arch/x86/kernel/microcode_intel_early.c +++ b/trunk/arch/x86/kernel/microcode_intel_early.c @@ -487,6 +487,7 @@ static inline void show_saved_mc(void) #endif #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) +static DEFINE_MUTEX(x86_cpu_microcode_mutex); /* * Save this mc into mc_saved_data. So it will be loaded early when a CPU is * hot added or resumes. @@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc) * Hold hotplug lock so mc_saved_data is not accessed by a CPU in * hotplug. */ - cpu_hotplug_driver_lock(); + mutex_lock(&x86_cpu_microcode_mutex); mc_saved_count_init = mc_saved_data.mc_saved_count; mc_saved_count = mc_saved_data.mc_saved_count; @@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc) } out: - cpu_hotplug_driver_unlock(); + mutex_unlock(&x86_cpu_microcode_mutex); return ret; } diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index 607af0d4d5ef..4e7a37ff03ab 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -312,6 +312,8 @@ void arch_cpu_idle(void) { if (cpuidle_idle_call()) x86_idle(); + else + local_irq_enable(); } /* @@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu) */ static void amd_e400_idle(void) { - if (need_resched()) - return; - if (!amd_e400_c1e_detected) { u32 lo, hi; diff --git a/trunk/arch/x86/kernel/vm86_32.c b/trunk/arch/x86/kernel/vm86_32.c index 1cf5766dde16..e8edcf52e069 100644 --- a/trunk/arch/x86/kernel/vm86_32.c +++ b/trunk/arch/x86/kernel/vm86_32.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -48,7 +49,6 @@ #include #include #include -#include /* * Known problems: @@ -202,36 +202,32 @@ static void mark_screen_rdonly(struct mm_struct *mm) static int do_vm86_irq_handling(int subfunction, int irqnumber); static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk); -int sys_vm86old(struct vm86_struct __user *v86) +SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. * This remains on the stack until we * return to 32 bit user space. */ - struct task_struct *tsk; - int tmp, ret = -EPERM; + struct task_struct *tsk = current; + int tmp; - tsk = current; if (tsk->thread.saved_sp0) - goto out; + return -EPERM; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, vm86plus) - sizeof(info.regs)); - ret = -EFAULT; if (tmp) - goto out; + return -EFAULT; memset(&info.vm86plus, 0, (int)&info.regs32 - (int)&info.vm86plus); info.regs32 = current_pt_regs(); tsk->thread.vm86_info = v86; do_sys_vm86(&info, tsk); - ret = 0; /* we never return here */ -out: - return ret; + return 0; /* we never return here */ } -int sys_vm86(unsigned long cmd, unsigned long arg) +SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg) { struct kernel_vm86_struct info; /* declare this _on top_, * this avoids wasting of stack space. @@ -239,7 +235,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg) * return to 32 bit user space. 
*/ struct task_struct *tsk; - int tmp, ret; + int tmp; struct vm86plus_struct __user *v86; tsk = current; @@ -248,8 +244,7 @@ int sys_vm86(unsigned long cmd, unsigned long arg) case VM86_FREE_IRQ: case VM86_GET_IRQ_BITS: case VM86_GET_AND_RESET_IRQ: - ret = do_vm86_irq_handling(cmd, (int)arg); - goto out; + return do_vm86_irq_handling(cmd, (int)arg); case VM86_PLUS_INSTALL_CHECK: /* * NOTE: on old vm86 stuff this will return the error @@ -257,28 +252,23 @@ int sys_vm86(unsigned long cmd, unsigned long arg) * interpreted as (invalid) address to vm86_struct. * So the installation check works. */ - ret = 0; - goto out; + return 0; } /* we come here only for functions VM86_ENTER, VM86_ENTER_NO_BYPASS */ - ret = -EPERM; if (tsk->thread.saved_sp0) - goto out; + return -EPERM; v86 = (struct vm86plus_struct __user *)arg; tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs, offsetof(struct kernel_vm86_struct, regs32) - sizeof(info.regs)); - ret = -EFAULT; if (tmp) - goto out; + return -EFAULT; info.regs32 = current_pt_regs(); info.vm86plus.is_vm86pus = 1; tsk->thread.vm86_info = (struct vm86_struct __user *)v86; do_sys_vm86(&info, tsk); - ret = 0; /* we never return here */ -out: - return ret; + return 0; /* we never return here */ } diff --git a/trunk/arch/x86/kvm/emulate.c b/trunk/arch/x86/kvm/emulate.c index 8e517bba6a7c..8db0010ed150 100644 --- a/trunk/arch/x86/kvm/emulate.c +++ b/trunk/arch/x86/kvm/emulate.c @@ -60,6 +60,7 @@ #define OpGS 25ull /* GS */ #define OpMem8 26ull /* 8-bit zero extended memory operand */ #define OpImm64 27ull /* Sign extended 16/32/64-bit immediate */ +#define OpXLat 28ull /* memory at BX/EBX/RBX + zero-extended AL */ #define OpBits 5 /* Width of operand field */ #define OpMask ((1ull << OpBits) - 1) @@ -99,6 +100,7 @@ #define SrcImmUByte (OpImmUByte << SrcShift) #define SrcImmU (OpImmU << SrcShift) #define SrcSI (OpSI << SrcShift) +#define SrcXLat (OpXLat << SrcShift) #define SrcImmFAddr (OpImmFAddr << SrcShift) #define SrcMemFAddr (OpMemFAddr << SrcShift) #define SrcAcc (OpAcc << SrcShift) @@ -533,6 +535,9 @@ FOP_SETCC(setle) FOP_SETCC(setnle) FOP_END; +FOP_START(salc) "pushf; sbb %al, %al; popf \n\t" FOP_RET +FOP_END; + #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \ do { \ unsigned long _tmp; \ @@ -2996,6 +3001,28 @@ static int em_das(struct x86_emulate_ctxt *ctxt) return X86EMUL_CONTINUE; } +static int em_aam(struct x86_emulate_ctxt *ctxt) +{ + u8 al, ah; + + if (ctxt->src.val == 0) + return emulate_de(ctxt); + + al = ctxt->dst.val & 0xff; + ah = al / ctxt->src.val; + al %= ctxt->src.val; + + ctxt->dst.val = (ctxt->dst.val & 0xffff0000) | al | (ah << 8); + + /* Set PF, ZF, SF */ + ctxt->src.type = OP_IMM; + ctxt->src.val = 0; + ctxt->src.bytes = 1; + fastop(ctxt, em_or); + + return X86EMUL_CONTINUE; +} + static int em_aad(struct x86_emulate_ctxt *ctxt) { u8 al = ctxt->dst.val & 0xff; @@ -3936,7 +3963,10 @@ static const struct opcode opcode_table[256] = { /* 0xD0 - 0xD7 */ G(Src2One | ByteOp, group2), G(Src2One, group2), G(Src2CL | ByteOp, group2), G(Src2CL, group2), - N, I(DstAcc | SrcImmByte | No64, em_aad), N, N, + I(DstAcc | SrcImmUByte | No64, em_aam), + I(DstAcc | SrcImmUByte | No64, em_aad), + F(DstAcc | ByteOp | No64, em_salc), + I(DstAcc | SrcXLat | ByteOp, em_mov), /* 0xD8 - 0xDF */ N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N, /* 0xE0 - 0xE7 */ @@ -4198,6 +4228,16 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, op->val = 0; op->count = 1; break; + case OpXLat: + op->type = OP_MEM; + 
op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; + op->addr.mem.ea = + register_address(ctxt, + reg_read(ctxt, VCPU_REGS_RBX) + + (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); + op->addr.mem.seg = seg_override(ctxt); + op->val = 0; + break; case OpImmFAddr: op->type = OP_IMM; op->addr.mem.ea = ctxt->_eip; diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c index 25a791ed21c8..260a91939555 100644 --- a/trunk/arch/x86/kvm/vmx.c +++ b/trunk/arch/x86/kvm/vmx.c @@ -5434,6 +5434,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) return 0; } + if (vcpu->arch.halt_request) { + vcpu->arch.halt_request = 0; + ret = kvm_emulate_halt(vcpu); + goto out; + } + if (signal_pending(current)) goto out; if (need_resched()) diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 05a8b1a2300d..094b5d96ab14 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -555,6 +555,25 @@ void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) } EXPORT_SYMBOL_GPL(kvm_lmsw); +static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) +{ + if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && + !vcpu->guest_xcr0_loaded) { + /* kvm_set_xcr() also depends on this */ + xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); + vcpu->guest_xcr0_loaded = 1; + } +} + +static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) +{ + if (vcpu->guest_xcr0_loaded) { + if (vcpu->arch.xcr0 != host_xcr0) + xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); + vcpu->guest_xcr0_loaded = 0; + } +} + int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { u64 xcr0; @@ -571,8 +590,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) return 1; if (xcr0 & ~host_xcr0) return 1; + kvm_put_guest_xcr0(vcpu); vcpu->arch.xcr0 = xcr0; - vcpu->guest_xcr0_loaded = 0; return 0; } @@ -5614,25 +5633,6 @@ static void inject_pending_event(struct kvm_vcpu *vcpu) } } -static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu) -{ - if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) && - !vcpu->guest_xcr0_loaded) { - /* kvm_set_xcr() also depends on this */ - xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0); - vcpu->guest_xcr0_loaded = 1; - } -} - -static void kvm_put_guest_xcr0(struct kvm_vcpu *vcpu) -{ - if (vcpu->guest_xcr0_loaded) { - if (vcpu->arch.xcr0 != host_xcr0) - xsetbv(XCR_XFEATURE_ENABLED_MASK, host_xcr0); - vcpu->guest_xcr0_loaded = 0; - } -} - static void process_nmi(struct kvm_vcpu *vcpu) { unsigned limit = 2; diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index fdc5dca14fb3..eaac1743def7 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, } /* - * would have hole in the middle or ends, and only ram parts will be mapped. + * We need to iterate through the E820 memory map and create direct mappings + * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply + * create direct mappings for all pfns from [0 to max_low_pfn) and + * [4GB to max_pfn) because of possible memory holes in high addresses + * that cannot be marked as UC by fixed/variable range MTRRs. + * Depending on the alignment of E820 ranges, this may possibly result + * in using smaller size (i.e. 4K instead of 2M or 1G) page tables. + * + * init_mem_mapping() calls init_range_memory_mapping() with big range. + * That range would have hole in the middle or ends, and only ram parts + * will be mapped in init_range_memory_mapping(). 
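+ *
+ * For example (hypothetical layout): with usable RAM at 0-0x9f000 and
+ * 0x100000-0x80000000, only those two ranges get direct mappings; the
+ * legacy hole between them stays unmapped, and any head/tail that is not
+ * 2M/1G aligned is covered with smaller pages.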
*/ static unsigned long __init init_range_memory_mapping( unsigned long r_start, @@ -419,6 +429,13 @@ void __init init_mem_mapping(void) max_pfn_mapped = 0; /* will get exact value next */ min_pfn_mapped = real_end >> PAGE_SHIFT; last_start = start = real_end; + + /* + * We start from the top (end of memory) and go to the bottom. + * The memblock_find_in_range() gets us a block of RAM from the + * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages + * for page table. + */ while (last_start > ISA_END_ADDRESS) { if (last_start > step_size) { start = round_down(last_start - 1, step_size); diff --git a/trunk/arch/x86/pci/xen.c b/trunk/arch/x86/pci/xen.c index 4a9be6ddf054..48e8461057ba 100644 --- a/trunk/arch/x86/pci/xen.c +++ b/trunk/arch/x86/pci/xen.c @@ -295,11 +295,10 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) int pos; u32 table_offset, bir; - pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); - + pos = dev->msix_cap; pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, &table_offset); - bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); + bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); map_irq.table_base = pci_resource_start(dev, bir); map_irq.entry_nr = msidesc->msi_attrib.entry_nr; diff --git a/trunk/arch/x86/syscalls/syscall_32.tbl b/trunk/arch/x86/syscalls/syscall_32.tbl index d0d59bfbccce..aabfb8380a1c 100644 --- a/trunk/arch/x86/syscalls/syscall_32.tbl +++ b/trunk/arch/x86/syscalls/syscall_32.tbl @@ -345,7 +345,7 @@ 336 i386 perf_event_open sys_perf_event_open 337 i386 recvmmsg sys_recvmmsg compat_sys_recvmmsg 338 i386 fanotify_init sys_fanotify_init -339 i386 fanotify_mark sys_fanotify_mark sys32_fanotify_mark +339 i386 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark 340 i386 prlimit64 sys_prlimit64 341 i386 name_to_handle_at sys_name_to_handle_at 342 i386 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index 53d4f680c9b5..a492be2635ac 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -85,7 +85,29 @@ EXPORT_SYMBOL_GPL(hypercall_page); +/* + * Pointer to the xen_vcpu_info structure or + * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info + * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info + * but if the hypervisor supports VCPUOP_register_vcpu_info then it can point + * to xen_vcpu_info. The pointer is used in __xen_evtchn_do_upcall to + * acknowledge pending events. + * Also more subtly it is used by the patched version of irq enable/disable + * e.g. xen_irq_enable_direct and xen_iret in PV mode. + * + * The desire to be able to do those mask/unmask operations as a single + * instruction by using the per-cpu offset held in %gs is the real reason + * vcpu info is in a per-cpu pointer and the original reason for this + * hypercall. + * + */ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); + +/* + * Per CPU pages used if hypervisor supports VCPUOP_register_vcpu_info + * hypercall. This can be used both in PV and PVHVM mode. The structure + * overrides the default per_cpu(xen_vcpu, cpu) value. 
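+ * xen_vcpu_setup() performs the switch: once the
+ * VCPUOP_register_vcpu_info hypercall succeeds for a CPU, it points
+ * per_cpu(xen_vcpu, cpu) at this structure instead of the
+ * shared_info->vcpu_info slot.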
+ */ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info); enum xen_domain_type xen_domain_type = XEN_NATIVE; @@ -157,6 +179,21 @@ static void xen_vcpu_setup(int cpu) BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); + /* + * This path is called twice on PVHVM - first during bootup via + * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being + * hotplugged: cpu_up -> xen_hvm_cpu_notify. + * As we can only do the VCPUOP_register_vcpu_info once lets + * not over-write its result. + * + * For PV it is called during restore (xen_vcpu_restore) and bootup + * (xen_setup_vcpu_info_placement). The hotplug mechanism does not + * use this function. + */ + if (xen_hvm_domain()) { + if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) + return; + } if (cpu < MAX_VIRT_CPUS) per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; @@ -172,7 +209,12 @@ static void xen_vcpu_setup(int cpu) /* Check to see if the hypervisor will put the vcpu_info structure where we want it, which allows direct access via - a percpu-variable. */ + a percpu-variable. + N.B. This hypercall can _only_ be called once per CPU. Subsequent + calls will error out with -EINVAL. This is due to the fact that + hypervisor has no unregister variant and this hypercall does not + allow to over-write info.mfn and info.offset. + */ err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); if (err) { @@ -387,6 +429,9 @@ static void __init xen_init_cpuid_mask(void) cpuid_leaf1_edx_mask &= ~((1 << X86_FEATURE_APIC) | /* disable local APIC */ (1 << X86_FEATURE_ACPI)); /* disable ACPI */ + + cpuid_leaf1_ecx_mask &= ~(1 << (X86_FEATURE_X2APIC % 32)); + ax = 1; cx = 0; xen_cpuid(&ax, &bx, &cx, &dx); @@ -1603,6 +1648,9 @@ void __ref xen_hvm_init_shared_info(void) * online but xen_hvm_init_shared_info is run at resume time too and * in that case multiple vcpus might be online. */ for_each_online_cpu(cpu) { + /* Leave it to be NULL. */ + if (cpu >= MAX_VIRT_CPUS) + continue; per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; } } diff --git a/trunk/arch/x86/xen/spinlock.c b/trunk/arch/x86/xen/spinlock.c index 8b54603ce816..3002ec1bb71a 100644 --- a/trunk/arch/x86/xen/spinlock.c +++ b/trunk/arch/x86/xen/spinlock.c @@ -364,7 +364,7 @@ void __cpuinit xen_init_lock_cpu(int cpu) int irq; const char *name; - WARN(per_cpu(lock_kicker_irq, cpu) > 0, "spinlock on CPU%d exists on IRQ%d!\n", + WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", cpu, per_cpu(lock_kicker_irq, cpu)); /* diff --git a/trunk/arch/xtensa/Kconfig b/trunk/arch/xtensa/Kconfig index b09de49dbec5..0a1b95f81a32 100644 --- a/trunk/arch/xtensa/Kconfig +++ b/trunk/arch/xtensa/Kconfig @@ -1,11 +1,9 @@ -config FRAME_POINTER - def_bool n - config ZONE_DMA def_bool y config XTENSA def_bool y + select ARCH_WANT_FRAME_POINTERS select HAVE_IDE select GENERIC_ATOMIC64 select HAVE_GENERIC_HARDIRQS @@ -33,9 +31,6 @@ config RWSEM_XCHGADD_ALGORITHM config GENERIC_HWEIGHT def_bool y -config GENERIC_GPIO - bool - config ARCH_HAS_ILOG2_U32 def_bool n @@ -52,6 +47,15 @@ config HZ source "init/Kconfig" source "kernel/Kconfig.freezer" +config LOCKDEP_SUPPORT + def_bool y + +config STACKTRACE_SUPPORT + def_bool y + +config TRACE_IRQFLAGS_SUPPORT + def_bool y + config MMU def_bool n @@ -103,6 +107,35 @@ config MATH_EMULATION help Can we use information of configuration file? 
+config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + bool "Initialize Xtensa MMU inside the Linux kernel code" + default y + help + Earlier version initialized the MMU in the exception vector + before jumping to _startup in head.S and had an advantage that + it was possible to place a software breakpoint at 'reset' and + then enter your normal kernel breakpoints once the MMU was mapped + to the kernel mappings (0XC0000000). + + This unfortunately doesn't work for U-Boot and likley also wont + work for using KEXEC to have a hot kernel ready for doing a + KDUMP. + + So now the MMU is initialized in head.S but it's necessary to + use hardware breakpoints (gdb 'hbreak' cmd) to break at _startup. + xt-gdb can't place a Software Breakpoint in the 0XD region prior + to mapping the MMU and after mapping even if the area of low memory + was mapped gdb wouldn't remove the breakpoint on hitting it as the + PC wouldn't match. Since Hardware Breakpoints are recommended for + Linux configurations it seems reasonable to just assume they exist + and leave this older mechanism for unfortunate souls that choose + not to follow Tensilica's recommendation. + + Selecting this will cause U-Boot to set the KERNEL Load and Entry + address at 0x00003000 instead of the mapped std of 0xD0003000. + + If in doubt, say Y. + endmenu config XTENSA_CALIBRATE_CCOUNT @@ -252,21 +285,6 @@ endmenu menu "Executable file formats" -# only elf supported -config KCORE_ELF - def_bool y - depends on PROC_FS - help - If you enabled support for /proc file system then the file - /proc/kcore will contain the kernel core image in ELF format. This - can be used in gdb: - - $ cd /usr/src/linux ; gdb vmlinux /proc/kcore - - This is especially useful if you have compiled the kernel with the - "-g" option to preserve debugging information. It is mainly used - for examining kernel data structures on the live kernel. - source "fs/Kconfig.binfmt" endmenu diff --git a/trunk/arch/xtensa/boot/boot-elf/Makefile b/trunk/arch/xtensa/boot/boot-elf/Makefile index 1fe01b78c124..89db089f5a12 100644 --- a/trunk/arch/xtensa/boot/boot-elf/Makefile +++ b/trunk/arch/xtensa/boot/boot-elf/Makefile @@ -12,6 +12,7 @@ endif export OBJCOPY_ARGS export CPPFLAGS_boot.lds += -P -C +export KBUILD_AFLAGS += -mtext-section-literals boot-y := bootstrap.o diff --git a/trunk/arch/xtensa/boot/boot-elf/boot.lds.S b/trunk/arch/xtensa/boot/boot-elf/boot.lds.S index 7b646e0a6486..932b58ef33d4 100644 --- a/trunk/arch/xtensa/boot/boot-elf/boot.lds.S +++ b/trunk/arch/xtensa/boot/boot-elf/boot.lds.S @@ -1,41 +1,29 @@ -#include +/* + * linux/arch/xtensa/boot/boot-elf/boot.lds.S + * + * Copyright (C) 2008 - 2013 by Tensilica Inc. + * + * Chris Zankel + * Marc Gauthier + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include OUTPUT_ARCH(xtensa) ENTRY(_ResetVector) SECTIONS { - .start 0xD0000000 : { *(.start) } - - .text 0xD0000000: - { - __reloc_start = . ; - _text_start = . ; - *(.literal .text.literal .text) - _text_end = . ; - } - - .rodata ALIGN(0x04): - { - *(.rodata) - *(.rodata1) - } - - .data ALIGN(0x04): + .ResetVector.text XCHAL_RESET_VECTOR_VADDR : { - *(.data) - *(.data1) - *(.sdata) - *(.sdata2) - *(.got.plt) - *(.got) - *(.dynamic) + *(.ResetVector.text) } - __reloc_end = . ; - - . = ALIGN(0x10); - __image_load = . 
; - .image 0xd0001000: + .image KERNELOFFSET: AT (LOAD_MEMORY_ADDRESS) { _image_start = .; *(image) @@ -43,7 +31,6 @@ SECTIONS _image_end = . ; } - .bss ((LOADADDR(.image) + SIZEOF(.image) + 3) & ~ 3): { __bss_start = .; @@ -53,14 +40,15 @@ SECTIONS *(.bss) __bss_end = .; } - _end = .; - _param_start = .; - .ResetVector.text XCHAL_RESET_VECTOR_VADDR : + /* + * This is a remapped copy of the Reset Vector Code. + * It keeps gdb in sync with the PC after switching + * to the temporary mapping used while setting up + * the V2 MMU mappings for Linux. + */ + .ResetVector.remapped_text 0x46000000 (INFO): { - *(.ResetVector.text) + *(.ResetVector.remapped_text) } - - - PROVIDE (end = .); } diff --git a/trunk/arch/xtensa/boot/boot-elf/bootstrap.S b/trunk/arch/xtensa/boot/boot-elf/bootstrap.S index 464298bc348b..1388a499753b 100644 --- a/trunk/arch/xtensa/boot/boot-elf/bootstrap.S +++ b/trunk/arch/xtensa/boot/boot-elf/bootstrap.S @@ -1,18 +1,77 @@ +/* + * arch/xtensa/boot/boot-elf/bootstrap.S + * + * Low-level exception handling + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2004 - 2013 by Tensilica Inc. + * + * Chris Zankel + * Marc Gauthier + * Piet Delaney + */ #include +#include +#include +#include +#include +#include +#include - -/* ResetVector - */ - .section .ResetVector.text, "ax" + .section .ResetVector.text, "ax" .global _ResetVector + .global reset + _ResetVector: - _j reset + _j _SetupMMU + + .begin no-absolute-literals + .literal_position + .align 4 RomInitAddr: - .word 0xd0001000 +#if defined(CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) && \ + XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY + .word 0x00003000 +#else + .word 0xd0003000 +#endif RomBootParam: .word _bootparam +_bootparam: + .short BP_TAG_FIRST + .short 4 + .long BP_VERSION + .short BP_TAG_LAST + .short 0 + .long 0 + + .align 4 +_SetupMMU: + movi a0, 0 + wsr a0, windowbase + rsync + movi a0, 1 + wsr a0, windowstart + rsync + movi a0, 0x1F + wsr a0, ps + rsync + + Offset = _SetupMMU - _ResetVector + +#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#endif + + .end no-absolute-literals + + rsil a0, XCHAL_DEBUGLEVEL-1 + rsync reset: l32r a0, RomInitAddr l32r a2, RomBootParam @@ -21,13 +80,25 @@ reset: jx a0 .align 4 - .section .bootstrap.data, "aw" - .globl _bootparam -_bootparam: - .short BP_TAG_FIRST - .short 4 - .long BP_VERSION - .short BP_TAG_LAST - .short 0 - .long 0 + .section .ResetVector.remapped_text, "x" + .global _RemappedResetVector + + /* Do org before literals */ + .org 0 + +_RemappedResetVector: + .begin no-absolute-literals + .literal_position + + _j _RemappedSetupMMU + + /* Position Remapped code at the same location as the original code */ + . = _RemappedResetVector + Offset + +_RemappedSetupMMU: +#ifndef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#endif + + .end no-absolute-literals diff --git a/trunk/arch/xtensa/boot/boot-redboot/boot.ld b/trunk/arch/xtensa/boot/boot-redboot/boot.ld index 5bbcaf9e830d..b0b9e95b58bd 100644 --- a/trunk/arch/xtensa/boot/boot-redboot/boot.ld +++ b/trunk/arch/xtensa/boot/boot-redboot/boot.ld @@ -33,7 +33,7 @@ SECTIONS . = ALIGN(0x10); __image_load = . 
; - .image 0xd0001000: AT(__image_load) + .image 0xd0003000: AT(__image_load) { _image_start = .; *(image) diff --git a/trunk/arch/xtensa/boot/boot-uboot/Makefile b/trunk/arch/xtensa/boot/boot-uboot/Makefile index bfbf8af582f1..545759819ef9 100644 --- a/trunk/arch/xtensa/boot/boot-uboot/Makefile +++ b/trunk/arch/xtensa/boot/boot-uboot/Makefile @@ -4,7 +4,11 @@ # for more details. # -UIMAGE_LOADADDR = 0xd0001000 +ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX +UIMAGE_LOADADDR = 0x00003000 +else +UIMAGE_LOADADDR = 0xd0003000 +endif UIMAGE_COMPRESSION = gzip $(obj)/../uImage: vmlinux.bin.gz FORCE diff --git a/trunk/arch/xtensa/configs/iss_defconfig b/trunk/arch/xtensa/configs/iss_defconfig index ddab37b24741..77c52f80187a 100644 --- a/trunk/arch/xtensa/configs/iss_defconfig +++ b/trunk/arch/xtensa/configs/iss_defconfig @@ -10,7 +10,6 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_GPIO=y # CONFIG_ARCH_HAS_ILOG2_U32 is not set # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_NO_IOPORT=y diff --git a/trunk/arch/xtensa/configs/s6105_defconfig b/trunk/arch/xtensa/configs/s6105_defconfig index eaf1b8fc6556..4799c6a526b5 100644 --- a/trunk/arch/xtensa/configs/s6105_defconfig +++ b/trunk/arch/xtensa/configs/s6105_defconfig @@ -10,7 +10,6 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y CONFIG_GENERIC_FIND_NEXT_BIT=y CONFIG_GENERIC_HWEIGHT=y CONFIG_GENERIC_HARDIRQS=y -CONFIG_GENERIC_GPIO=y # CONFIG_ARCH_HAS_ILOG2_U32 is not set # CONFIG_ARCH_HAS_ILOG2_U64 is not set CONFIG_NO_IOPORT=y diff --git a/trunk/arch/xtensa/include/asm/Kbuild b/trunk/arch/xtensa/include/asm/Kbuild index 095f0a2244f7..1b982641ec35 100644 --- a/trunk/arch/xtensa/include/asm/Kbuild +++ b/trunk/arch/xtensa/include/asm/Kbuild @@ -15,6 +15,7 @@ generic-y += irq_regs.h generic-y += kdebug.h generic-y += kmap_types.h generic-y += kvm_para.h +generic-y += linkage.h generic-y += local.h generic-y += local64.h generic-y += percpu.h diff --git a/trunk/arch/xtensa/include/asm/ftrace.h b/trunk/arch/xtensa/include/asm/ftrace.h index 40a8c178f10d..36dc7a684397 100644 --- a/trunk/arch/xtensa/include/asm/ftrace.h +++ b/trunk/arch/xtensa/include/asm/ftrace.h @@ -1 +1,33 @@ -/* empty */ +/* + * arch/xtensa/include/asm/ftrace.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2013 Tensilica Inc. 
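+ *
+ * CALLER_ADDR0 below recovers the current return address from the a0/a1
+ * register pair via MAKE_PC_FROM_RA(); the deeper CALLER_ADDRn levels go
+ * through return_address(), which relies on the frame-pointer based stack
+ * walker, hence the CONFIG_FRAME_POINTER guard and the 0 fallbacks.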
+ */ +#ifndef _XTENSA_FTRACE_H +#define _XTENSA_FTRACE_H + +#include + +#define HAVE_ARCH_CALLER_ADDR +#define CALLER_ADDR0 ({ unsigned long a0, a1; \ + __asm__ __volatile__ ( \ + "mov %0, a0\n" \ + "mov %1, a1\n" \ + : "=r"(a0), "=r"(a1) : : ); \ + MAKE_PC_FROM_RA(a0, a1); }) +#ifdef CONFIG_FRAME_POINTER +extern unsigned long return_address(unsigned level); +#define CALLER_ADDR1 return_address(1) +#define CALLER_ADDR2 return_address(2) +#define CALLER_ADDR3 return_address(3) +#else +#define CALLER_ADDR1 (0) +#define CALLER_ADDR2 (0) +#define CALLER_ADDR3 (0) +#endif + +#endif /* _XTENSA_FTRACE_H */ diff --git a/trunk/arch/xtensa/include/asm/initialize_mmu.h b/trunk/arch/xtensa/include/asm/initialize_mmu.h index e1f8ba4061ed..722553f17db3 100644 --- a/trunk/arch/xtensa/include/asm/initialize_mmu.h +++ b/trunk/arch/xtensa/include/asm/initialize_mmu.h @@ -23,6 +23,9 @@ #ifndef _XTENSA_INITIALIZE_MMU_H #define _XTENSA_INITIALIZE_MMU_H +#include +#include + #ifdef __ASSEMBLY__ #define XTENSA_HWVERSION_RC_2009_0 230000 @@ -48,6 +51,110 @@ * (XCHAL_HW_MIN_VERSION >= XTENSA_HWVERSION_RC_2009_0) */ +#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY +/* + * Have MMU v3 + */ + +#if !XCHAL_HAVE_VECBASE +# error "MMU v3 requires reloc vectors" +#endif + + movi a1, 0 + _call0 1f + _j 2f + + .align 4 +1: movi a2, 0x10000000 + movi a3, 0x18000000 + add a2, a2, a0 +9: bgeu a2, a3, 9b /* PC is out of the expected range */ + + /* Step 1: invalidate mapping at 0x40000000..0x5FFFFFFF. */ + + movi a2, 0x40000006 + idtlb a2 + iitlb a2 + isync + + /* Step 2: map 0x40000000..0x47FFFFFF to paddr containing this code + * and jump to the new mapping. + */ +#define CA_BYPASS (_PAGE_CA_BYPASS | _PAGE_HW_WRITE | _PAGE_HW_EXEC) +#define CA_WRITEBACK (_PAGE_CA_WB | _PAGE_HW_WRITE | _PAGE_HW_EXEC) + + srli a3, a0, 27 + slli a3, a3, 27 + addi a3, a3, CA_BYPASS + addi a7, a2, -1 + wdtlb a3, a7 + witlb a3, a7 + isync + + slli a4, a0, 5 + srli a4, a4, 5 + addi a5, a2, -6 + add a4, a4, a5 + jx a4 + + /* Step 3: unmap everything other than current area. + * Start at 0x60000000, wrap around, and end with 0x20000000 + */ +2: movi a4, 0x20000000 + add a5, a2, a4 +3: idtlb a5 + iitlb a5 + add a5, a5, a4 + bne a5, a2, 3b + + /* Step 4: Setup MMU with the old V2 mappings. */ + movi a6, 0x01000000 + wsr a6, ITLBCFG + wsr a6, DTLBCFG + isync + + movi a5, 0xd0000005 + movi a4, CA_WRITEBACK + wdtlb a4, a5 + witlb a4, a5 + + movi a5, 0xd8000005 + movi a4, CA_BYPASS + wdtlb a4, a5 + witlb a4, a5 + + movi a5, 0xe0000006 + movi a4, 0xf0000000 + CA_WRITEBACK + wdtlb a4, a5 + witlb a4, a5 + + movi a5, 0xf0000006 + movi a4, 0xf0000000 + CA_BYPASS + wdtlb a4, a5 + witlb a4, a5 + + isync + + /* Jump to self, using MMU v2 mappings. */ + movi a4, 1f + jx a4 + +1: + movi a2, VECBASE_RESET_VADDR + wsr a2, vecbase + + /* Step 5: remove temporary mapping. 
*/ + idtlb a7 + iitlb a7 + isync + + movi a0, 0 + wsr a0, ptevaddr + rsync + +#endif /* defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && + XCHAL_HAVE_SPANNING_WAY */ + .endm #endif /*__ASSEMBLY__*/ diff --git a/trunk/arch/xtensa/include/asm/irqflags.h b/trunk/arch/xtensa/include/asm/irqflags.h index f865b1c1eae4..ea36674c6ec5 100644 --- a/trunk/arch/xtensa/include/asm/irqflags.h +++ b/trunk/arch/xtensa/include/asm/irqflags.h @@ -47,7 +47,10 @@ static inline void arch_local_irq_restore(unsigned long flags) static inline bool arch_irqs_disabled_flags(unsigned long flags) { - return (flags & 0xf) != 0; +#if XCHAL_EXCM_LEVEL < LOCKLEVEL || (1 << PS_EXCM_BIT) < LOCKLEVEL +#error "XCHAL_EXCM_LEVEL and 1<= LOCKLEVEL; } static inline bool arch_irqs_disabled(void) diff --git a/trunk/arch/xtensa/include/asm/linkage.h b/trunk/arch/xtensa/include/asm/linkage.h deleted file mode 100644 index bf2128a99d79..000000000000 --- a/trunk/arch/xtensa/include/asm/linkage.h +++ /dev/null @@ -1,16 +0,0 @@ -/* - * include/asm-xtensa/linkage.h - * - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 2001 - 2005 Tensilica Inc. - */ - -#ifndef _XTENSA_LINKAGE_H -#define _XTENSA_LINKAGE_H - -/* Nothing to do here ... */ - -#endif /* _XTENSA_LINKAGE_H */ diff --git a/trunk/arch/xtensa/include/asm/stacktrace.h b/trunk/arch/xtensa/include/asm/stacktrace.h new file mode 100644 index 000000000000..6a05fcb0a20d --- /dev/null +++ b/trunk/arch/xtensa/include/asm/stacktrace.h @@ -0,0 +1,36 @@ +/* + * arch/xtensa/include/asm/stacktrace.h + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2013 Tensilica Inc. 
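+ *
+ * Minimal usage sketch (illustrative only; it assumes that a non-zero
+ * return from the callback stops the walk, with walk_stackframe()
+ * implemented in kernel/stacktrace.c):
+ *
+ *	static int show_frame(struct stackframe *frame, void *data)
+ *	{
+ *		pr_info(" pc: %pS\n", (void *)frame->pc);
+ *		return 0;	/* keep walking */
+ *	}
+ *
+ *	walk_stackframe(stack_pointer(NULL), show_frame, NULL);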
+ */ +#ifndef _XTENSA_STACKTRACE_H +#define _XTENSA_STACKTRACE_H + +#include + +struct stackframe { + unsigned long pc; + unsigned long sp; +}; + +static __always_inline unsigned long *stack_pointer(struct task_struct *task) +{ + unsigned long *sp; + + if (!task || task == current) + __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp)); + else + sp = (unsigned long *)task->thread.sp; + + return sp; +} + +void walk_stackframe(unsigned long *sp, + int (*fn)(struct stackframe *frame, void *data), + void *data); + +#endif /* _XTENSA_STACKTRACE_H */ diff --git a/trunk/arch/xtensa/include/asm/timex.h b/trunk/arch/xtensa/include/asm/timex.h index 9e85ce8bd8dd..3d35e5d0367e 100644 --- a/trunk/arch/xtensa/include/asm/timex.h +++ b/trunk/arch/xtensa/include/asm/timex.h @@ -19,13 +19,16 @@ #define _INTLEVEL(x) XCHAL_INT ## x ## _LEVEL #define INTLEVEL(x) _INTLEVEL(x) -#if INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL +#if XCHAL_NUM_TIMERS > 0 && \ + INTLEVEL(XCHAL_TIMER0_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 0 # define LINUX_TIMER_INT XCHAL_TIMER0_INTERRUPT -#elif INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL +#elif XCHAL_NUM_TIMERS > 1 && \ + INTLEVEL(XCHAL_TIMER1_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 1 # define LINUX_TIMER_INT XCHAL_TIMER1_INTERRUPT -#elif INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL +#elif XCHAL_NUM_TIMERS > 2 && \ + INTLEVEL(XCHAL_TIMER2_INTERRUPT) <= XCHAL_EXCM_LEVEL # define LINUX_TIMER 2 # define LINUX_TIMER_INT XCHAL_TIMER2_INTERRUPT #else diff --git a/trunk/arch/xtensa/include/asm/traps.h b/trunk/arch/xtensa/include/asm/traps.h index b5464ef3cf66..917488a0ab00 100644 --- a/trunk/arch/xtensa/include/asm/traps.h +++ b/trunk/arch/xtensa/include/asm/traps.h @@ -22,10 +22,9 @@ extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); static inline void spill_registers(void) { - unsigned int a0, ps; __asm__ __volatile__ ( - "movi a14, " __stringify(PS_EXCM_BIT | LOCKLEVEL) "\n\t" + "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t" "mov a12, a0\n\t" "rsr a13, sar\n\t" "xsr a14, ps\n\t" @@ -35,7 +34,7 @@ static inline void spill_registers(void) "mov a0, a12\n\t" "wsr a13, sar\n\t" "wsr a14, ps\n\t" - : : "a" (&a0), "a" (&ps) + : : #if defined(CONFIG_FRAME_POINTER) : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15", #else diff --git a/trunk/arch/xtensa/include/asm/vectors.h b/trunk/arch/xtensa/include/asm/vectors.h new file mode 100644 index 000000000000..c52b656d0310 --- /dev/null +++ b/trunk/arch/xtensa/include/asm/vectors.h @@ -0,0 +1,125 @@ +/* + * arch/xtensa/include/asm/xchal_vaddr_remap.h + * + * Xtensa macros for MMU V3 Support. Deals with re-mapping the Virtual + * Memory Addresses from "Virtual == Physical" to their prevvious V2 MMU + * mappings (KSEG at 0xD0000000 and KIO at 0XF0000000). + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 - 2012 Tensilica Inc. 
+ * + * Pete Delaney + * Marc Gauthier + +#if defined(CONFIG_MMU) + +/* Will Become VECBASE */ +#define VIRTUAL_MEMORY_ADDRESS 0xD0000000 + +/* Image Virtual Start Address */ +#define KERNELOFFSET 0xD0003000 + +#if defined(XCHAL_HAVE_PTP_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY + /* MMU v3 - XCHAL_HAVE_PTP_MMU == 1 */ + #define PHYSICAL_MEMORY_ADDRESS 0x00000000 + #define LOAD_MEMORY_ADDRESS 0x00003000 +#else + /* MMU V2 - XCHAL_HAVE_PTP_MMU == 0 */ + #define PHYSICAL_MEMORY_ADDRESS 0xD0000000 + #define LOAD_MEMORY_ADDRESS 0xD0003000 +#endif + +#else /* !defined(CONFIG_MMU) */ + /* MMU Not being used - Virtual == Physical */ + + /* VECBASE */ + #define VIRTUAL_MEMORY_ADDRESS 0x00002000 + + /* Location of the start of the kernel text, _start */ + #define KERNELOFFSET 0x00003000 + #define PHYSICAL_MEMORY_ADDRESS 0x00000000 + + /* Loaded just above possibly live vectors */ + #define LOAD_MEMORY_ADDRESS 0x00003000 + +#endif /* CONFIG_MMU */ + +#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + offset) +#define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + offset) + +/* Used to set VECBASE register */ +#define VECBASE_RESET_VADDR VIRTUAL_MEMORY_ADDRESS + +#define RESET_VECTOR_VECOFS (XCHAL_RESET_VECTOR_VADDR - \ + VECBASE_RESET_VADDR) +#define RESET_VECTOR_VADDR XC_VADDR(RESET_VECTOR_VECOFS) + +#define RESET_VECTOR1_VECOFS (XCHAL_RESET_VECTOR1_VADDR - \ + VECBASE_RESET_VADDR) +#define RESET_VECTOR1_VADDR XC_VADDR(RESET_VECTOR1_VECOFS) + +#if XCHAL_HAVE_VECBASE + +#define USER_VECTOR_VADDR XC_VADDR(XCHAL_USER_VECOFS) +#define KERNEL_VECTOR_VADDR XC_VADDR(XCHAL_KERNEL_VECOFS) +#define DOUBLEEXC_VECTOR_VADDR XC_VADDR(XCHAL_DOUBLEEXC_VECOFS) +#define WINDOW_VECTORS_VADDR XC_VADDR(XCHAL_WINDOW_OF4_VECOFS) +#define INTLEVEL2_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL2_VECOFS) +#define INTLEVEL3_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL3_VECOFS) +#define INTLEVEL4_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL4_VECOFS) +#define INTLEVEL5_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL5_VECOFS) +#define INTLEVEL6_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL6_VECOFS) + +#define DEBUG_VECTOR_VADDR XC_VADDR(XCHAL_DEBUG_VECOFS) + +#undef XCHAL_NMI_VECTOR_VADDR +#define XCHAL_NMI_VECTOR_VADDR XC_VADDR(XCHAL_NMI_VECOFS) + +#undef XCHAL_INTLEVEL7_VECTOR_VADDR +#define XCHAL_INTLEVEL7_VECTOR_VADDR XC_VADDR(XCHAL_INTLEVEL7_VECOFS) + +/* + * These XCHAL_* #defines from varian/core.h + * are not valid to use with V3 MMU. Non-XCHAL + * constants are defined above and should be used. 
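To see what the remapping macros above produce, here is a small stand-alone sketch that rebases a few hypothetical XCHAL_*_VECOFS values (the real offsets come from the variant's core.h) onto the MMUv3 virtual window at 0xD0000000 and onto the physical load image at 0x00000000:

#include <stdio.h>

/* Hypothetical vector offsets; real values come from the variant core header. */
#define XCHAL_USER_VECOFS      0x00000340
#define XCHAL_KERNEL_VECOFS    0x00000300
#define XCHAL_DOUBLEEXC_VECOFS 0x000003c0

#define VIRTUAL_MEMORY_ADDRESS  0xD0000000u
#define PHYSICAL_MEMORY_ADDRESS 0x00000000u

#define XC_VADDR(offset) (VIRTUAL_MEMORY_ADDRESS + (offset))
#define XC_PADDR(offset) (PHYSICAL_MEMORY_ADDRESS + (offset))

int main(void)
{
	/* The same VECBASE-relative offset yields both views of each vector. */
	printf("user exception vector:   V=0x%08x  P=0x%08x\n",
	       XC_VADDR(XCHAL_USER_VECOFS), XC_PADDR(XCHAL_USER_VECOFS));
	printf("kernel exception vector: V=0x%08x  P=0x%08x\n",
	       XC_VADDR(XCHAL_KERNEL_VECOFS), XC_PADDR(XCHAL_KERNEL_VECOFS));
	printf("double exception vector: V=0x%08x  P=0x%08x\n",
	       XC_VADDR(XCHAL_DOUBLEEXC_VECOFS), XC_PADDR(XCHAL_DOUBLEEXC_VECOFS));
	return 0;
}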
+ */ +#undef XCHAL_VECBASE_RESET_VADDR +#undef XCHAL_RESET_VECTOR0_VADDR +#undef XCHAL_USER_VECTOR_VADDR +#undef XCHAL_KERNEL_VECTOR_VADDR +#undef XCHAL_DOUBLEEXC_VECTOR_VADDR +#undef XCHAL_WINDOW_VECTORS_VADDR +#undef XCHAL_INTLEVEL2_VECTOR_VADDR +#undef XCHAL_INTLEVEL3_VECTOR_VADDR +#undef XCHAL_INTLEVEL4_VECTOR_VADDR +#undef XCHAL_INTLEVEL5_VECTOR_VADDR +#undef XCHAL_INTLEVEL6_VECTOR_VADDR +#undef XCHAL_DEBUG_VECTOR_VADDR +#undef XCHAL_NMI_VECTOR_VADDR +#undef XCHAL_INTLEVEL7_VECTOR_VADDR + +#else + +#define USER_VECTOR_VADDR XCHAL_USER_VECTOR_VADDR +#define KERNEL_VECTOR_VADDR XCHAL_KERNEL_VECTOR_VADDR +#define DOUBLEEXC_VECTOR_VADDR XCHAL_DOUBLEEXC_VECTOR_VADDR +#define WINDOW_VECTORS_VADDR XCHAL_WINDOW_VECTORS_VADDR +#define INTLEVEL2_VECTOR_VADDR XCHAL_INTLEVEL2_VECTOR_VADDR +#define INTLEVEL3_VECTOR_VADDR XCHAL_INTLEVEL3_VECTOR_VADDR +#define INTLEVEL4_VECTOR_VADDR XCHAL_INTLEVEL4_VECTOR_VADDR +#define INTLEVEL5_VECTOR_VADDR XCHAL_INTLEVEL5_VECTOR_VADDR +#define INTLEVEL6_VECTOR_VADDR XCHAL_INTLEVEL6_VECTOR_VADDR +#define DEBUG_VECTOR_VADDR XCHAL_DEBUG_VECTOR_VADDR + +#endif + +#endif /* _XTENSA_VECTORS_H */ diff --git a/trunk/arch/xtensa/kernel/Makefile b/trunk/arch/xtensa/kernel/Makefile index c3a59d992ac0..1e7fc87a94bb 100644 --- a/trunk/arch/xtensa/kernel/Makefile +++ b/trunk/arch/xtensa/kernel/Makefile @@ -4,14 +4,16 @@ extra-y := head.o vmlinux.lds -obj-y := align.o entry.o irq.o coprocessor.o process.o ptrace.o \ - setup.o signal.o syscall.o time.o traps.o vectors.o platform.o \ - pci-dma.o +obj-y := align.o coprocessor.o entry.o irq.o pci-dma.o platform.o process.o \ + ptrace.o setup.o signal.o stacktrace.o syscall.o time.o traps.o \ + vectors.o obj-$(CONFIG_KGDB) += xtensa-stub.o obj-$(CONFIG_PCI) += pci.o obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o +AFLAGS_head.o += -mtext-section-literals + # In the Xtensa architecture, assembly generates literals which must always # precede the L32R instruction with a relative offset less than 256 kB. # Therefore, the .text and .literal section must be combined in parenthesis diff --git a/trunk/arch/xtensa/kernel/entry.S b/trunk/arch/xtensa/kernel/entry.S index 63845f950792..5082507d5631 100644 --- a/trunk/arch/xtensa/kernel/entry.S +++ b/trunk/arch/xtensa/kernel/entry.S @@ -354,16 +354,16 @@ common_exception: * so we can allow exceptions and interrupts (*) again. * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X) * - * (*) We only allow interrupts of higher priority than current IRQ + * (*) We only allow interrupts if they were previously enabled and + * we're not handling an IRQ */ rsr a3, ps - addi a0, a0, -4 - movi a2, 1 + addi a0, a0, -EXCCAUSE_LEVEL1_INTERRUPT + movi a2, LOCKLEVEL extui a3, a3, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH # a3 = PS.INTLEVEL - movnez a2, a3, a3 # a2 = 1: level-1, > 1: high priority - moveqz a3, a2, a0 # a3 = IRQ level iff interrupt + moveqz a3, a2, a0 # a3 = LOCKLEVEL iff interrupt movi a2, 1 << PS_WOE_BIT or a3, a3, a2 rsr a0, exccause @@ -389,6 +389,22 @@ common_exception: save_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT +#ifdef CONFIG_TRACE_IRQFLAGS + l32i a4, a1, PT_DEPC + /* Double exception means we came here with an exception + * while PS.EXCM was set, i.e. interrupts disabled. + */ + bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f + l32i a4, a1, PT_EXCCAUSE + bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f + /* We came here with an interrupt means interrupts were enabled + * and we've just disabled them. 
+ */ + movi a4, trace_hardirqs_off + callx4 a4 +1: +#endif + /* Go to second-level dispatcher. Set up parameters to pass to the * exception handler and call the exception handler. */ @@ -407,11 +423,29 @@ common_exception: .global common_exception_return common_exception_return: +#ifdef CONFIG_TRACE_IRQFLAGS + l32i a4, a1, PT_DEPC + /* Double exception means we came here with an exception + * while PS.EXCM was set, i.e. interrupts disabled. + */ + bgeui a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f + l32i a4, a1, PT_EXCCAUSE + bnei a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f + /* We came here with an interrupt means interrupts were enabled + * and we'll reenable them on return. + */ + movi a4, trace_hardirqs_on + callx4 a4 +1: +#endif + /* Jump if we are returning from kernel exceptions. */ 1: l32i a3, a1, PT_PS _bbci.l a3, PS_UM_BIT, 4f + rsil a2, 0 + /* Specific to a user exception exit: * We need to check some flags for signal handling and rescheduling, * and have to restore WB and WS, extra states, and all registers @@ -652,51 +686,19 @@ common_exception_exit: l32i a0, a1, PT_DEPC l32i a3, a1, PT_AREG3 - _bltui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f - - wsr a0, depc l32i a2, a1, PT_AREG2 - l32i a0, a1, PT_AREG0 - l32i a1, a1, PT_AREG1 - rfde + _bgeui a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f -1: /* Restore a0...a3 and return */ - rsr a0, ps - extui a2, a0, PS_INTLEVEL_SHIFT, PS_INTLEVEL_WIDTH - movi a0, 2f - slli a2, a2, 4 - add a0, a2, a0 - l32i a2, a1, PT_AREG2 - jx a0 - - .macro irq_exit_level level - .align 16 - .if XCHAL_EXCM_LEVEL >= \level - l32i a0, a1, PT_PC - wsr a0, epc\level l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 - rfi \level - .endif - .endm + rfe - .align 16 -2: +1: wsr a0, depc l32i a0, a1, PT_AREG0 l32i a1, a1, PT_AREG1 - rfe - - .align 16 - /* no rfi for level-1 irq, handled by rfe above*/ - nop - - irq_exit_level 2 - irq_exit_level 3 - irq_exit_level 4 - irq_exit_level 5 - irq_exit_level 6 + rfde ENDPROC(kernel_exception) diff --git a/trunk/arch/xtensa/kernel/head.S b/trunk/arch/xtensa/kernel/head.S index df88f98737f4..ef12c0e6fa25 100644 --- a/trunk/arch/xtensa/kernel/head.S +++ b/trunk/arch/xtensa/kernel/head.S @@ -48,17 +48,36 @@ */ __HEAD + .begin no-absolute-literals + ENTRY(_start) - _j 2f + /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ + wsr a2, excsave1 + _j _SetupMMU + + .align 4 + .literal_position +.Lstartup: + .word _startup + .align 4 -1: .word _startup -2: l32r a0, 1b + .global _SetupMMU +_SetupMMU: + Offset = _SetupMMU - _start + +#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX + initialize_mmu +#endif + .end no-absolute-literals + + l32r a0, .Lstartup jx a0 ENDPROC(_start) - .section .init.text, "ax" + __INIT + .literal_position ENTRY(_startup) @@ -67,10 +86,6 @@ ENTRY(_startup) movi a0, LOCKLEVEL wsr a0, ps - /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ - - wsr a2, excsave1 - /* Start with a fresh windowbase and windowstart. */ movi a1, 1 @@ -86,7 +101,9 @@ ENTRY(_startup) /* Clear debugging registers. 
*/ #if XCHAL_HAVE_DEBUG +#if XCHAL_NUM_IBREAK > 0 wsr a0, ibreakenable +#endif wsr a0, icount movi a1, 15 wsr a0, icountlevel @@ -156,8 +173,6 @@ ENTRY(_startup) isync - initialize_mmu - /* Unpack data sections * * The linker script used to build the Linux kernel image @@ -205,6 +220,10 @@ ENTRY(_startup) ___flush_dcache_all a2 a3 #endif + memw + isync + ___invalidate_icache_all a2 a3 + isync /* Setup stack and enable window exceptions (keep irqs disabled) */ diff --git a/trunk/arch/xtensa/kernel/platform.c b/trunk/arch/xtensa/kernel/platform.c index 44bf21c3769a..2bd6c351f37c 100644 --- a/trunk/arch/xtensa/kernel/platform.c +++ b/trunk/arch/xtensa/kernel/platform.c @@ -36,6 +36,7 @@ _F(void, power_off, (void), { while(1); }); _F(void, idle, (void), { __asm__ __volatile__ ("waiti 0" ::: "memory"); }); _F(void, heartbeat, (void), { }); _F(int, pcibios_fixup, (void), { return 0; }); +_F(void, pcibios_init, (void), { }); #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT _F(void, calibrate_ccount, (void), diff --git a/trunk/arch/xtensa/kernel/stacktrace.c b/trunk/arch/xtensa/kernel/stacktrace.c new file mode 100644 index 000000000000..7d2c317bd98b --- /dev/null +++ b/trunk/arch/xtensa/kernel/stacktrace.c @@ -0,0 +1,120 @@ +/* + * arch/xtensa/kernel/stacktrace.c + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2001 - 2013 Tensilica Inc. + */ +#include +#include +#include + +#include +#include + +void walk_stackframe(unsigned long *sp, + int (*fn)(struct stackframe *frame, void *data), + void *data) +{ + unsigned long a0, a1; + unsigned long sp_end; + + a1 = (unsigned long)sp; + sp_end = ALIGN(a1, THREAD_SIZE); + + spill_registers(); + + while (a1 < sp_end) { + struct stackframe frame; + + sp = (unsigned long *)a1; + + a0 = *(sp - 4); + a1 = *(sp - 3); + + if (a1 <= (unsigned long)sp) + break; + + frame.pc = MAKE_PC_FROM_RA(a0, a1); + frame.sp = a1; + + if (fn(&frame, data)) + return; + } +} + +#ifdef CONFIG_STACKTRACE + +struct stack_trace_data { + struct stack_trace *trace; + unsigned skip; +}; + +static int stack_trace_cb(struct stackframe *frame, void *data) +{ + struct stack_trace_data *trace_data = data; + struct stack_trace *trace = trace_data->trace; + + if (trace_data->skip) { + --trace_data->skip; + return 0; + } + if (!kernel_text_address(frame->pc)) + return 0; + + trace->entries[trace->nr_entries++] = frame->pc; + return trace->nr_entries >= trace->max_entries; +} + +void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace) +{ + struct stack_trace_data trace_data = { + .trace = trace, + .skip = trace->skip, + }; + walk_stackframe(stack_pointer(task), stack_trace_cb, &trace_data); +} +EXPORT_SYMBOL_GPL(save_stack_trace_tsk); + +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(current, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); + +#endif + +#ifdef CONFIG_FRAME_POINTER + +struct return_addr_data { + unsigned long addr; + unsigned skip; +}; + +static int return_address_cb(struct stackframe *frame, void *data) +{ + struct return_addr_data *r = data; + + if (r->skip) { + --r->skip; + return 0; + } + if (!kernel_text_address(frame->pc)) + return 0; + r->addr = frame->pc; + return 1; +} + +unsigned long return_address(unsigned level) +{ + struct return_addr_data r = { + .skip = level + 1, + }; + walk_stackframe(stack_pointer(NULL), return_address_cb, &r); + return r.addr; +} 
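walk_stackframe() above walks the current (or a given task's) kernel stack and hands each frame to a callback, which returns 0 to continue or non-zero to stop; stack_trace_cb() and return_address_cb() are two such callbacks. A sketch of a third, hypothetical user of the same interface that counts the frames on the current stack (count_frame_cb and count_stack_frames are illustrative names, not part of the patch):

#include <linux/sched.h>
#include <asm/stacktrace.h>

/* Hypothetical user of the walk_stackframe() interface. */
struct frame_count {
	unsigned int frames;
};

static int count_frame_cb(struct stackframe *frame, void *data)
{
	struct frame_count *fc = data;

	fc->frames++;
	return 0;	/* 0 keeps walking; a non-zero return stops the walk */
}

static unsigned int count_stack_frames(void)
{
	struct frame_count fc = { .frames = 0 };

	/* stack_pointer(NULL) reads a1, i.e. the current stack pointer. */
	walk_stackframe(stack_pointer(NULL), count_frame_cb, &fc);
	return fc.frames;
}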
+EXPORT_SYMBOL(return_address); + +#endif diff --git a/trunk/arch/xtensa/kernel/traps.c b/trunk/arch/xtensa/kernel/traps.c index 458186dab5dc..3e8a05c874cd 100644 --- a/trunk/arch/xtensa/kernel/traps.c +++ b/trunk/arch/xtensa/kernel/traps.c @@ -11,7 +11,7 @@ * * Essentially rewritten for the Xtensa architecture port. * - * Copyright (C) 2001 - 2005 Tensilica Inc. + * Copyright (C) 2001 - 2013 Tensilica Inc. * * Joe Taylor * Chris Zankel @@ -32,6 +32,7 @@ #include #include +#include #include #include #include @@ -195,7 +196,6 @@ void do_multihit(struct pt_regs *regs, unsigned long exccause) /* * IRQ handler. - * PS.INTLEVEL is the current IRQ priority level. */ extern void do_IRQ(int, struct pt_regs *); @@ -212,18 +212,21 @@ void do_interrupt(struct pt_regs *regs) XCHAL_INTLEVEL6_MASK, XCHAL_INTLEVEL7_MASK, }; - unsigned level = get_sr(ps) & PS_INTLEVEL_MASK; - - if (WARN_ON_ONCE(level >= ARRAY_SIZE(int_level_mask))) - return; for (;;) { unsigned intread = get_sr(interrupt); unsigned intenable = get_sr(intenable); - unsigned int_at_level = intread & intenable & - int_level_mask[level]; + unsigned int_at_level = intread & intenable; + unsigned level; + + for (level = LOCKLEVEL; level > 0; --level) { + if (int_at_level & int_level_mask[level]) { + int_at_level &= int_level_mask[level]; + break; + } + } - if (!int_at_level) + if (level == 0) return; /* @@ -404,53 +407,25 @@ void show_regs(struct pt_regs * regs) regs->syscall); } -static __always_inline unsigned long *stack_pointer(struct task_struct *task) +static int show_trace_cb(struct stackframe *frame, void *data) { - unsigned long *sp; - - if (!task || task == current) - __asm__ __volatile__ ("mov %0, a1\n" : "=a"(sp)); - else - sp = (unsigned long *)task->thread.sp; - - return sp; + if (kernel_text_address(frame->pc)) { + printk(" [<%08lx>] ", frame->pc); + print_symbol("%s\n", frame->pc); + } + return 0; } void show_trace(struct task_struct *task, unsigned long *sp) { - unsigned long a0, a1, pc; - unsigned long sp_start, sp_end; - - if (sp) - a1 = (unsigned long)sp; - else - a1 = (unsigned long)stack_pointer(task); - - sp_start = a1 & ~(THREAD_SIZE-1); - sp_end = sp_start + THREAD_SIZE; + if (!sp) + sp = stack_pointer(task); printk("Call Trace:"); #ifdef CONFIG_KALLSYMS printk("\n"); #endif - spill_registers(); - - while (a1 > sp_start && a1 < sp_end) { - sp = (unsigned long*)a1; - - a0 = *(sp - 4); - a1 = *(sp - 3); - - if (a1 <= (unsigned long) sp) - break; - - pc = MAKE_PC_FROM_RA(a0, a1); - - if (kernel_text_address(pc)) { - printk(" [<%08lx>] ", pc); - print_symbol("%s\n", pc); - } - } + walk_stackframe(sp, show_trace_cb, NULL); printk("\n"); } diff --git a/trunk/arch/xtensa/kernel/vectors.S b/trunk/arch/xtensa/kernel/vectors.S index 82109b42e240..f9e175382aa9 100644 --- a/trunk/arch/xtensa/kernel/vectors.S +++ b/trunk/arch/xtensa/kernel/vectors.S @@ -50,6 +50,7 @@ #include #include #include +#include #define WINDOW_VECTORS_SIZE 0x180 @@ -220,7 +221,7 @@ ENTRY(_DoubleExceptionVector) xsr a0, depc # get DEPC, save a0 - movi a3, XCHAL_WINDOW_VECTORS_VADDR + movi a3, WINDOW_VECTORS_VADDR _bltu a0, a3, .Lfixup addi a3, a3, WINDOW_VECTORS_SIZE _bgeu a0, a3, .Lfixup @@ -385,9 +386,12 @@ ENDPROC(_DebugInterruptVector) .if XCHAL_EXCM_LEVEL >= \level .section .Level\level\()InterruptVector.text, "ax" ENTRY(_Level\level\()InterruptVector) - wsr a0, epc1 + wsr a0, excsave2 rsr a0, epc\level - xsr a0, epc1 + wsr a0, epc1 + movi a0, EXCCAUSE_LEVEL1_INTERRUPT + wsr a0, exccause + rsr a0, eps\level # branch to user or kernel vector j 
_SimulateUserKernelVectorException .endif @@ -439,10 +443,8 @@ ENDPROC(_WindowOverflow4) */ .align 4 _SimulateUserKernelVectorException: - wsr a0, excsave2 - movi a0, 4 # LEVEL1_INTERRUPT cause - wsr a0, exccause - rsr a0, ps + addi a0, a0, (1 << PS_EXCM_BIT) + wsr a0, ps bbsi.l a0, PS_UM_BIT, 1f # branch if user mode rsr a0, excsave2 # restore a0 j _KernelExceptionVector # simulate kernel vector exception diff --git a/trunk/arch/xtensa/kernel/vmlinux.lds.S b/trunk/arch/xtensa/kernel/vmlinux.lds.S index 14695240536d..21acd11b5df2 100644 --- a/trunk/arch/xtensa/kernel/vmlinux.lds.S +++ b/trunk/arch/xtensa/kernel/vmlinux.lds.S @@ -18,6 +18,7 @@ #include #include +#include #include #include OUTPUT_ARCH(xtensa) @@ -30,7 +31,7 @@ jiffies = jiffies_64; #endif #ifndef KERNELOFFSET -#define KERNELOFFSET 0xd0001000 +#define KERNELOFFSET 0xd0003000 #endif /* Note: In the following macros, it would be nice to specify only the @@ -185,16 +186,16 @@ SECTIONS SECTION_VECTOR (_WindowVectors_text, .WindowVectors.text, - XCHAL_WINDOW_VECTORS_VADDR, 4, + WINDOW_VECTORS_VADDR, 4, .dummy) SECTION_VECTOR (_DebugInterruptVector_literal, .DebugInterruptVector.literal, - XCHAL_DEBUG_VECTOR_VADDR - 4, + DEBUG_VECTOR_VADDR - 4, SIZEOF(.WindowVectors.text), .WindowVectors.text) SECTION_VECTOR (_DebugInterruptVector_text, .DebugInterruptVector.text, - XCHAL_DEBUG_VECTOR_VADDR, + DEBUG_VECTOR_VADDR, 4, .DebugInterruptVector.literal) #undef LAST @@ -202,7 +203,7 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 2 SECTION_VECTOR (_Level2InterruptVector_text, .Level2InterruptVector.text, - XCHAL_INTLEVEL2_VECTOR_VADDR, + INTLEVEL2_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level2InterruptVector.text @@ -210,7 +211,7 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 3 SECTION_VECTOR (_Level3InterruptVector_text, .Level3InterruptVector.text, - XCHAL_INTLEVEL3_VECTOR_VADDR, + INTLEVEL3_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level3InterruptVector.text @@ -218,7 +219,7 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 4 SECTION_VECTOR (_Level4InterruptVector_text, .Level4InterruptVector.text, - XCHAL_INTLEVEL4_VECTOR_VADDR, + INTLEVEL4_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level4InterruptVector.text @@ -226,7 +227,7 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 5 SECTION_VECTOR (_Level5InterruptVector_text, .Level5InterruptVector.text, - XCHAL_INTLEVEL5_VECTOR_VADDR, + INTLEVEL5_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level5InterruptVector.text @@ -234,39 +235,39 @@ SECTIONS #if XCHAL_EXCM_LEVEL >= 6 SECTION_VECTOR (_Level6InterruptVector_text, .Level6InterruptVector.text, - XCHAL_INTLEVEL6_VECTOR_VADDR, + INTLEVEL6_VECTOR_VADDR, SIZEOF(LAST), LAST) # undef LAST # define LAST .Level6InterruptVector.text #endif SECTION_VECTOR (_KernelExceptionVector_literal, .KernelExceptionVector.literal, - XCHAL_KERNEL_VECTOR_VADDR - 4, + KERNEL_VECTOR_VADDR - 4, SIZEOF(LAST), LAST) #undef LAST SECTION_VECTOR (_KernelExceptionVector_text, .KernelExceptionVector.text, - XCHAL_KERNEL_VECTOR_VADDR, + KERNEL_VECTOR_VADDR, 4, .KernelExceptionVector.literal) SECTION_VECTOR (_UserExceptionVector_literal, .UserExceptionVector.literal, - XCHAL_USER_VECTOR_VADDR - 4, + USER_VECTOR_VADDR - 4, SIZEOF(.KernelExceptionVector.text), .KernelExceptionVector.text) SECTION_VECTOR (_UserExceptionVector_text, .UserExceptionVector.text, - XCHAL_USER_VECTOR_VADDR, + USER_VECTOR_VADDR, 4, .UserExceptionVector.literal) SECTION_VECTOR (_DoubleExceptionVector_literal, .DoubleExceptionVector.literal, - XCHAL_DOUBLEEXC_VECTOR_VADDR - 
16, + DOUBLEEXC_VECTOR_VADDR - 16, SIZEOF(.UserExceptionVector.text), .UserExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, - XCHAL_DOUBLEEXC_VECTOR_VADDR, + DOUBLEEXC_VECTOR_VADDR, 32, .DoubleExceptionVector.literal) @@ -284,11 +285,26 @@ SECTIONS . = ALIGN(0x10); .bootstrap : { *(.bootstrap.literal .bootstrap.text .bootstrap.data) } - .ResetVector.text XCHAL_RESET_VECTOR_VADDR : + .ResetVector.text RESET_VECTOR_VADDR : { *(.ResetVector.text) } + + /* + * This is a remapped copy of the Secondary Reset Vector Code. + * It keeps gdb in sync with the PC after switching + * to the temporary mapping used while setting up + * the V2 MMU mappings for Linux. + * + * Only debug information about this section is put in the kernel image. + */ + .SecondaryResetVector.remapped_text 0x46000000 (INFO): + { + *(.SecondaryResetVector.remapped_text) + } + + .xt.lit : { *(.xt.lit) } .xt.prop : { *(.xt.prop) } diff --git a/trunk/arch/xtensa/kernel/xtensa_ksyms.c b/trunk/arch/xtensa/kernel/xtensa_ksyms.c index afe058b24e6e..42c53c87c204 100644 --- a/trunk/arch/xtensa/kernel/xtensa_ksyms.c +++ b/trunk/arch/xtensa/kernel/xtensa_ksyms.c @@ -119,3 +119,8 @@ EXPORT_SYMBOL(outsl); EXPORT_SYMBOL(insb); EXPORT_SYMBOL(insw); EXPORT_SYMBOL(insl); + +extern long common_exception_return; +extern long _spill_registers; +EXPORT_SYMBOL(common_exception_return); +EXPORT_SYMBOL(_spill_registers); diff --git a/trunk/arch/xtensa/mm/mmu.c b/trunk/arch/xtensa/mm/mmu.c index 0f77f9d3bb8b..a1077570e383 100644 --- a/trunk/arch/xtensa/mm/mmu.c +++ b/trunk/arch/xtensa/mm/mmu.c @@ -24,15 +24,19 @@ void __init paging_init(void) */ void __init init_mmu(void) { - /* Writing zeros to the TLBCFG special registers ensure - * that valid values exist in the register. For existing - * PGSZID fields, zero selects the first element of the - * page-size array. For nonexistent PGSZID fields, zero is - * the best value to write. Also, when changing PGSZID +#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) + /* + * Writing zeros to the instruction and data TLBCFG special + * registers ensure that valid values exist in the register. + * + * For existing PGSZID fields, zero selects the first element + * of the page-size array. For nonexistent PGSZID fields, + * zero is the best value to write. Also, when changing PGSZID * fields, the corresponding TLB must be flushed. */ set_itlbcfg_register(0); set_dtlbcfg_register(0); +#endif flush_tlb_all(); /* Set rasid register to a known value. */ diff --git a/trunk/arch/xtensa/oprofile/backtrace.c b/trunk/arch/xtensa/oprofile/backtrace.c index 66f32ee2c982..5f03a593d84f 100644 --- a/trunk/arch/xtensa/oprofile/backtrace.c +++ b/trunk/arch/xtensa/oprofile/backtrace.c @@ -132,9 +132,7 @@ static void xtensa_backtrace_kernel(struct pt_regs *regs, unsigned int depth) pc = MAKE_PC_FROM_RA(a0, pc); /* Add the PC to the trace. 
*/ - if (kernel_text_address(pc)) - oprofile_add_trace(pc); - + oprofile_add_trace(pc); if (pc == (unsigned long) &common_exception_return) { regs = (struct pt_regs *)a1; if (user_mode(regs)) { diff --git a/trunk/arch/xtensa/platforms/iss/console.c b/trunk/arch/xtensa/platforms/iss/console.c index da9866f7fecf..70cb408bc20d 100644 --- a/trunk/arch/xtensa/platforms/iss/console.c +++ b/trunk/arch/xtensa/platforms/iss/console.c @@ -56,13 +56,13 @@ static void rs_poll(unsigned long); static int rs_open(struct tty_struct *tty, struct file * filp) { tty->port = &serial_port; - spin_lock(&timer_lock); + spin_lock_bh(&timer_lock); if (tty->count == 1) { setup_timer(&serial_timer, rs_poll, (unsigned long)&serial_port); mod_timer(&serial_timer, jiffies + SERIAL_TIMER_VALUE); } - spin_unlock(&timer_lock); + spin_unlock_bh(&timer_lock); return 0; } @@ -99,14 +99,13 @@ static int rs_write(struct tty_struct * tty, static void rs_poll(unsigned long priv) { struct tty_port *port = (struct tty_port *)priv; - struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; int i = 0; unsigned char c; spin_lock(&timer_lock); - while (__simc(SYS_select_one, 0, XTISS_SELECT_ONE_READ, (int)&tv,0,0)){ - __simc (SYS_read, 0, (unsigned long)&c, 1, 0, 0); + while (simc_poll(0)) { + simc_read(0, &c, 1); tty_insert_flip_char(port, c, TTY_NORMAL); i++; } @@ -244,8 +243,7 @@ static void iss_console_write(struct console *co, const char *s, unsigned count) int len = strlen(s); if (s != 0 && *s != 0) - __simc (SYS_write, 1, (unsigned long)s, - count < len ? count : len,0,0); + simc_write(1, s, count < len ? count : len); } static struct tty_driver* iss_console_device(struct console *c, int *index) diff --git a/trunk/arch/xtensa/platforms/iss/include/platform/simcall.h b/trunk/arch/xtensa/platforms/iss/include/platform/simcall.h index b5a4edf02d76..12b15ad1e586 100644 --- a/trunk/arch/xtensa/platforms/iss/include/platform/simcall.h +++ b/trunk/arch/xtensa/platforms/iss/include/platform/simcall.h @@ -59,56 +59,58 @@ static int errno; -static inline int __simc(int a, int b, int c, int d, int e, int f) +static inline int __simc(int a, int b, int c, int d) { int ret; register int a1 asm("a2") = a; register int b1 asm("a3") = b; register int c1 asm("a4") = c; register int d1 asm("a5") = d; - register int e1 asm("a6") = e; - register int f1 asm("a7") = f; __asm__ __volatile__ ( "simcall\n" "mov %0, a2\n" "mov %1, a3\n" : "=a" (ret), "=a" (errno), "+r"(a1), "+r"(b1) - : "r"(c1), "r"(d1), "r"(e1), "r"(f1) + : "r"(c1), "r"(d1) : "memory"); return ret; } static inline int simc_open(const char *file, int flags, int mode) { - return __simc(SYS_open, (int) file, flags, mode, 0, 0); + return __simc(SYS_open, (int) file, flags, mode); } static inline int simc_close(int fd) { - return __simc(SYS_close, fd, 0, 0, 0, 0); + return __simc(SYS_close, fd, 0, 0); } static inline int simc_ioctl(int fd, int request, void *arg) { - return __simc(SYS_ioctl, fd, request, (int) arg, 0, 0); + return __simc(SYS_ioctl, fd, request, (int) arg); } static inline int simc_read(int fd, void *buf, size_t count) { - return __simc(SYS_read, fd, (int) buf, count, 0, 0); + return __simc(SYS_read, fd, (int) buf, count); } static inline int simc_write(int fd, const void *buf, size_t count) { - return __simc(SYS_write, fd, (int) buf, count, 0, 0); + return __simc(SYS_write, fd, (int) buf, count); } static inline int simc_poll(int fd) { struct timeval tv = { .tv_sec = 0, .tv_usec = 0 }; - return __simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv, - 0, 0); + return 
__simc(SYS_select_one, fd, XTISS_SELECT_ONE_READ, (int)&tv); +} + +static inline int simc_lseek(int fd, uint32_t off, int whence) +{ + return __simc(SYS_lseek, fd, off, whence); } #endif /* _XTENSA_PLATFORM_ISS_SIMCALL_H */ diff --git a/trunk/arch/xtensa/platforms/iss/setup.c b/trunk/arch/xtensa/platforms/iss/setup.c index e1700102f35e..da7d18240866 100644 --- a/trunk/arch/xtensa/platforms/iss/setup.c +++ b/trunk/arch/xtensa/platforms/iss/setup.c @@ -38,12 +38,6 @@ void __init platform_init(bp_tag_t* bootparam) } -#ifdef CONFIG_PCI -void platform_pcibios_init(void) -{ -} -#endif - void platform_halt(void) { pr_info(" ** Called platform_halt() **\n"); @@ -64,7 +58,9 @@ void platform_restart(void) "wsr a2, icountlevel\n\t" "movi a2, 0\n\t" "wsr a2, icount\n\t" +#if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" +#endif "wsr a2, lcount\n\t" "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" diff --git a/trunk/arch/xtensa/platforms/iss/simdisk.c b/trunk/arch/xtensa/platforms/iss/simdisk.c index 0345f43d34f3..c0edb35424ce 100644 --- a/trunk/arch/xtensa/platforms/iss/simdisk.c +++ b/trunk/arch/xtensa/platforms/iss/simdisk.c @@ -85,7 +85,7 @@ static void simdisk_transfer(struct simdisk *dev, unsigned long sector, while (nbytes > 0) { unsigned long io; - __simc(SYS_lseek, dev->fd, offset, SEEK_SET, 0, 0); + simc_lseek(dev->fd, offset, SEEK_SET); if (write) io = simc_write(dev->fd, buffer, nbytes); else @@ -176,7 +176,7 @@ static int simdisk_attach(struct simdisk *dev, const char *filename) err = -ENODEV; goto out; } - dev->size = __simc(SYS_lseek, dev->fd, 0, SEEK_END, 0, 0); + dev->size = simc_lseek(dev->fd, 0, SEEK_END); set_capacity(dev->gd, dev->size >> SECTOR_SHIFT); dev->filename = filename; pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename); @@ -217,7 +217,7 @@ static ssize_t proc_read_simdisk(struct file *file, char __user *buf, size_t size, loff_t *ppos) { struct simdisk *dev = PDE_DATA(file_inode(file)); - char *s = dev->filename; + const char *s = dev->filename; if (s) { ssize_t n = simple_read_from_buffer(buf, size, ppos, s, strlen(s)); @@ -238,7 +238,7 @@ static ssize_t proc_write_simdisk(struct file *file, const char __user *buf, if (tmp == NULL) return -ENOMEM; - if (copy_from_user(tmp, buffer, count)) { + if (copy_from_user(tmp, buf, count)) { err = -EFAULT; goto out_free; } diff --git a/trunk/arch/xtensa/platforms/xt2000/setup.c b/trunk/arch/xtensa/platforms/xt2000/setup.c index c7d90f17886e..f9bc87966290 100644 --- a/trunk/arch/xtensa/platforms/xt2000/setup.c +++ b/trunk/arch/xtensa/platforms/xt2000/setup.c @@ -69,7 +69,9 @@ void platform_restart(void) "wsr a2, icountlevel\n\t" "movi a2, 0\n\t" "wsr a2, icount\n\t" +#if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" +#endif "wsr a2, lcount\n\t" "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" diff --git a/trunk/arch/xtensa/platforms/xtfpga/setup.c b/trunk/arch/xtensa/platforms/xtfpga/setup.c index 9d888a2a5755..96ef8eeb064e 100644 --- a/trunk/arch/xtensa/platforms/xtfpga/setup.c +++ b/trunk/arch/xtensa/platforms/xtfpga/setup.c @@ -60,7 +60,9 @@ void platform_restart(void) "wsr a2, icountlevel\n\t" "movi a2, 0\n\t" "wsr a2, icount\n\t" +#if XCHAL_NUM_IBREAK > 0 "wsr a2, ibreakenable\n\t" +#endif "wsr a2, lcount\n\t" "movi a2, 0x1f\n\t" "wsr a2, ps\n\t" diff --git a/trunk/drivers/acpi/ac.c b/trunk/drivers/acpi/ac.c index 00d2efd674df..4f4e741d34b2 100644 --- a/trunk/drivers/acpi/ac.c +++ b/trunk/drivers/acpi/ac.c @@ -28,6 +28,8 @@ #include #include #include +#include +#include #ifdef CONFIG_ACPI_PROCFS_POWER #include #include @@ -74,6 +76,8 @@ 
static int acpi_ac_resume(struct device *dev); #endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); +static int ac_sleep_before_get_state_ms; + static struct acpi_driver acpi_ac_driver = { .name = "ac", .class = ACPI_AC_CLASS, @@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) case ACPI_AC_NOTIFY_STATUS: case ACPI_NOTIFY_BUS_CHECK: case ACPI_NOTIFY_DEVICE_CHECK: + /* + * A buggy BIOS may notify AC first and then sleep for + * a specific time before doing actual operations in the + * EC event handler (_Qxx). This will cause the AC state + * reported by the ACPI event to be incorrect, so wait for a + * specific time for the EC event handler to make progress. + */ + if (ac_sleep_before_get_state_ms > 0) + msleep(ac_sleep_before_get_state_ms); + acpi_ac_get_state(ac); acpi_bus_generate_proc_event(device, event, (u32) ac->state); acpi_bus_generate_netlink_event(device->pnp.device_class, @@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event) return; } +static int thinkpad_e530_quirk(const struct dmi_system_id *d) +{ + ac_sleep_before_get_state_ms = 1000; + return 0; +} + +static struct dmi_system_id ac_dmi_table[] = { + { + .callback = thinkpad_e530_quirk, + .ident = "thinkpad e530", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"), + }, + }, + {}, +}; + static int acpi_ac_add(struct acpi_device *device) { int result = 0; @@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device) kfree(ac); } + dmi_check_system(ac_dmi_table); return result; } diff --git a/trunk/drivers/acpi/acpica/exfldio.c b/trunk/drivers/acpi/acpica/exfldio.c index ec7f5690031b..c84ee956fa4c 100644 --- a/trunk/drivers/acpi/acpica/exfldio.c +++ b/trunk/drivers/acpi/acpica/exfldio.c @@ -720,7 +720,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, if ((obj_desc->common_field.start_field_bit_offset == 0) && (obj_desc->common_field.bit_length == access_bit_width)) { - status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ); + if (buffer_length >= sizeof(u64)) { + status = + acpi_ex_field_datum_io(obj_desc, 0, buffer, + ACPI_READ); + } else { + /* Use raw_datum (u64) to handle buffers < 64 bits */ + + status = + acpi_ex_field_datum_io(obj_desc, 0, &raw_datum, + ACPI_READ); + ACPI_MEMCPY(buffer, &raw_datum, buffer_length); + } + return_ACPI_STATUS(status); } diff --git a/trunk/drivers/acpi/acpica/nsinit.c b/trunk/drivers/acpi/acpica/nsinit.c index 2a431ec50a25..46f0f83417a1 100644 --- a/trunk/drivers/acpi/acpica/nsinit.c +++ b/trunk/drivers/acpi/acpica/nsinit.c @@ -558,6 +558,7 @@ acpi_ns_init_one_device(acpi_handle obj_handle, ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname (ACPI_TYPE_METHOD, device_node, METHOD_NAME__INI)); + ACPI_MEMSET(info, 0, sizeof(struct acpi_evaluate_info)); info->prefix_node = device_node; info->pathname = METHOD_NAME__INI; info->parameters = NULL; diff --git a/trunk/drivers/acpi/acpica/utosi.c b/trunk/drivers/acpi/acpica/utosi.c index b15acebb96a1..7e807725c636 100644 --- a/trunk/drivers/acpi/acpica/utosi.c +++ b/trunk/drivers/acpi/acpica/utosi.c @@ -349,7 +349,8 @@ acpi_status acpi_ut_osi_implementation(struct acpi_walk_state * walk_state) return_value = 0; status = acpi_os_acquire_mutex(acpi_gbl_osi_mutex, ACPI_WAIT_FOREVER); if (ACPI_FAILURE(status)) { - return (status); + acpi_ut_remove_reference(return_desc); + return_ACPI_STATUS(status); } /* Lookup the interface in the global _OSI list */ diff --git a/trunk/drivers/acpi/ec.c 
b/trunk/drivers/acpi/ec.c index d45b2871d33b..edc00818c803 100644 --- a/trunk/drivers/acpi/ec.c +++ b/trunk/drivers/acpi/ec.c @@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) static int ec_poll(struct acpi_ec *ec) { unsigned long flags; - int repeat = 2; /* number of command restarts */ + int repeat = 5; /* number of command restarts */ while (repeat--) { unsigned long delay = jiffies + msecs_to_jiffies(ec_delay); @@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec) } advance_transaction(ec, acpi_ec_read_status(ec)); } while (time_before(jiffies, delay)); - if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) - break; pr_debug(PREFIX "controller reset, restart transaction\n"); spin_lock_irqsave(&ec->lock, flags); start_transaction(ec); diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c index 1dd6f6c85874..e427dc516c76 100644 --- a/trunk/drivers/acpi/pci_root.c +++ b/trunk/drivers/acpi/pci_root.c @@ -641,7 +641,9 @@ static void _handle_hotplug_event_root(struct work_struct *work) /* bus enumerate */ printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__, (char *)buffer.pointer); - if (!root) + if (root) + acpiphp_check_host_bridge(handle); + else handle_root_bridge_insertion(handle); break; diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c index bec717ffd25f..c266cdc11784 100644 --- a/trunk/drivers/acpi/processor_driver.c +++ b/trunk/drivers/acpi/processor_driver.c @@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, processor_device_ids); -static SIMPLE_DEV_PM_OPS(acpi_processor_pm, - acpi_processor_suspend, acpi_processor_resume); - static struct acpi_driver acpi_processor_driver = { .name = "processor", .class = ACPI_PROCESSOR_CLASS, @@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = { .remove = acpi_processor_remove, .notify = acpi_processor_notify, }, - .drv.pm = &acpi_processor_pm, }; #define INSTALL_NOTIFY_HANDLER 1 @@ -934,6 +930,8 @@ static int __init acpi_processor_init(void) if (result < 0) return result; + acpi_processor_syscore_init(); + acpi_processor_install_hotplug_notify(); acpi_thermal_cpufreq_init(); @@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void) acpi_processor_uninstall_hotplug_notify(); + acpi_processor_syscore_exit(); + acpi_bus_unregister_driver(&acpi_processor_driver); return; diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c index f0df2c9434d2..eb133c77aadb 100644 --- a/trunk/drivers/acpi/processor_idle.c +++ b/trunk/drivers/acpi/processor_idle.c @@ -34,6 +34,7 @@ #include /* need_resched() */ #include #include +#include /* * Include the apic definitions for x86 to have the APIC timer related defines @@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, #endif +#ifdef CONFIG_PM_SLEEP static u32 saved_bm_rld; -static void acpi_idle_bm_rld_save(void) +int acpi_processor_suspend(void) { acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld); + return 0; } -static void acpi_idle_bm_rld_restore(void) + +void acpi_processor_resume(void) { u32 resumed_bm_rld; acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld); + if (resumed_bm_rld == saved_bm_rld) + return; - if (resumed_bm_rld != saved_bm_rld) - acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); + acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld); } -int acpi_processor_suspend(struct device *dev) 
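This hunk converts the BM_RLD save/restore from per-device dev_pm_ops callbacks into syscore operations (registered in acpi_processor_syscore_init() just below), so the register is handled exactly once per system sleep rather than once per processor device. As a generic illustration of the pattern, with all names hypothetical rather than taken from this patch: syscore callbacks take no struct device, run on the last active CPU with interrupts disabled, and are registered explicitly.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/syscore_ops.h>

static int example_syscore_suspend(void)
{
	/* One CPU, IRQs off, all devices already suspended; no struct device. */
	pr_debug("example: saving global hardware state\n");
	return 0;	/* a non-zero return aborts the system suspend */
}

static void example_syscore_resume(void)
{
	/* Runs before regular device resume callbacks; restore state here. */
	pr_debug("example: restoring global hardware state\n");
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_syscore_suspend,
	.resume  = example_syscore_resume,
};

static int __init example_syscore_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}
device_initcall(example_syscore_init);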
+static struct syscore_ops acpi_processor_syscore_ops = { + .suspend = acpi_processor_suspend, + .resume = acpi_processor_resume, +}; + +void acpi_processor_syscore_init(void) { - acpi_idle_bm_rld_save(); - return 0; + register_syscore_ops(&acpi_processor_syscore_ops); } -int acpi_processor_resume(struct device *dev) +void acpi_processor_syscore_exit(void) { - acpi_idle_bm_rld_restore(); - return 0; + unregister_syscore_ops(&acpi_processor_syscore_ops); } +#endif /* CONFIG_PM_SLEEP */ #if defined(CONFIG_X86) static void tsc_check_state(int state) diff --git a/trunk/drivers/acpi/scan.c b/trunk/drivers/acpi/scan.c index fe158fd4f1df..c1bc608339a6 100644 --- a/trunk/drivers/acpi/scan.c +++ b/trunk/drivers/acpi/scan.c @@ -1785,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) acpi_set_pnp_ids(handle, &pnp, type); if (!pnp.type.hardware_id) - return; + goto out; /* * This relies on the fact that acpi_install_notify_handler() will not @@ -1800,6 +1800,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type) } } +out: acpi_free_pnp_ids(&pnp); } diff --git a/trunk/drivers/acpi/video.c b/trunk/drivers/acpi/video.c index c3932d0876e0..5b32e15a65ce 100644 --- a/trunk/drivers/acpi/video.c +++ b/trunk/drivers/acpi/video.c @@ -456,6 +456,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"), }, }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP 1000 Notebook PC", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"), + }, + }, {} }; diff --git a/trunk/drivers/ata/pata_ep93xx.c b/trunk/drivers/ata/pata_ep93xx.c index c1bfaf43d109..980b88e109fc 100644 --- a/trunk/drivers/ata/pata_ep93xx.c +++ b/trunk/drivers/ata/pata_ep93xx.c @@ -933,11 +933,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev) } mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!mem_res) { - err = -ENXIO; - goto err_rel_gpio; - } - ide_base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(ide_base)) { err = PTR_ERR(ide_base); diff --git a/trunk/drivers/base/bus.c b/trunk/drivers/base/bus.c index 1a68f947ded8..d414331b480e 100644 --- a/trunk/drivers/base/bus.c +++ b/trunk/drivers/base/bus.c @@ -1295,6 +1295,7 @@ int subsys_virtual_register(struct bus_type *subsys, return subsys_register(subsys, groups, virtual_dir); } +EXPORT_SYMBOL_GPL(subsys_virtual_register); int __init buses_init(void) { diff --git a/trunk/drivers/base/core.c b/trunk/drivers/base/core.c index 016312437577..2499cefdcdf2 100644 --- a/trunk/drivers/base/core.c +++ b/trunk/drivers/base/core.c @@ -572,9 +572,11 @@ int device_create_file(struct device *dev, if (dev) { WARN(((attr->attr.mode & S_IWUGO) && !attr->store), - "Write permission without 'store'\n"); + "Attribute %s: write permission without 'store'\n", + attr->attr.name); WARN(((attr->attr.mode & S_IRUGO) && !attr->show), - "Read permission without 'show'\n"); + "Attribute %s: read permission without 'show'\n", + attr->attr.name); error = sysfs_create_file(&dev->kobj, &attr->attr); } diff --git a/trunk/drivers/base/power/common.c b/trunk/drivers/base/power/common.c index 39c32529b833..5da914041305 100644 --- a/trunk/drivers/base/power/common.c +++ b/trunk/drivers/base/power/common.c @@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); int dev_pm_put_subsys_data(struct device *dev) { struct pm_subsys_data *psd; - int ret = 0; + int ret = 1; spin_lock_irq(&dev->power.lock); psd = 
dev_to_psd(dev); - if (!psd) { - ret = -EINVAL; + if (!psd) goto out; - } if (--psd->refcount == 0) { dev->power.subsys_data = NULL; - kfree(psd); - ret = 1; + } else { + psd = NULL; + ret = 0; } out: spin_unlock_irq(&dev->power.lock); + kfree(psd); return ret; } diff --git a/trunk/drivers/bcma/driver_mips.c b/trunk/drivers/bcma/driver_mips.c index 9a7f0e3ab5a3..11115bbe115c 100644 --- a/trunk/drivers/bcma/driver_mips.c +++ b/trunk/drivers/bcma/driver_mips.c @@ -21,7 +21,7 @@ #include #include -static const char *part_probes[] = { "bcm47xxpart", NULL }; +static const char * const part_probes[] = { "bcm47xxpart", NULL }; static struct physmap_flash_data bcma_pflash_data = { .part_probe_types = part_probes, diff --git a/trunk/drivers/bcma/scan.c b/trunk/drivers/bcma/scan.c index bca9c80056fe..8bffa5c9818c 100644 --- a/trunk/drivers/bcma/scan.c +++ b/trunk/drivers/bcma/scan.c @@ -84,6 +84,8 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = { { BCMA_CORE_I2S, "I2S" }, { BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" }, { BCMA_CORE_SHIM, "SHIM" }, + { BCMA_CORE_PCIE2, "PCIe Gen2" }, + { BCMA_CORE_ARM_CR4, "ARM CR4" }, { BCMA_CORE_DEFAULT, "Default" }, }; diff --git a/trunk/drivers/block/Makefile b/trunk/drivers/block/Makefile index a3b40232c6ab..ca07399a8d99 100644 --- a/trunk/drivers/block/Makefile +++ b/trunk/drivers/block/Makefile @@ -42,4 +42,5 @@ obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/ obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/ +nvme-y := nvme-core.o nvme-scsi.o swim_mod-y := swim.o swim_asm.o diff --git a/trunk/drivers/block/nvme.c b/trunk/drivers/block/nvme-core.c similarity index 79% rename from trunk/drivers/block/nvme.c rename to trunk/drivers/block/nvme-core.c index 9dcefe40380b..8efdfaa44a59 100644 --- a/trunk/drivers/block/nvme.c +++ b/trunk/drivers/block/nvme-core.c @@ -39,14 +39,13 @@ #include #include #include - +#include #include #define NVME_Q_DEPTH 1024 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) #define NVME_MINORS 64 -#define NVME_IO_TIMEOUT (5 * HZ) #define ADMIN_TIMEOUT (60 * HZ) static int nvme_major; @@ -59,43 +58,6 @@ static DEFINE_SPINLOCK(dev_list_lock); static LIST_HEAD(dev_list); static struct task_struct *nvme_thread; -/* - * Represents an NVM Express device. Each nvme_dev is a PCI function. - */ -struct nvme_dev { - struct list_head node; - struct nvme_queue **queues; - u32 __iomem *dbs; - struct pci_dev *pci_dev; - struct dma_pool *prp_page_pool; - struct dma_pool *prp_small_pool; - int instance; - int queue_count; - int db_stride; - u32 ctrl_config; - struct msix_entry *entry; - struct nvme_bar __iomem *bar; - struct list_head namespaces; - char serial[20]; - char model[40]; - char firmware_rev[8]; - u32 max_hw_sectors; -}; - -/* - * An NVM Express namespace is equivalent to a SCSI LUN - */ -struct nvme_ns { - struct list_head list; - - struct nvme_dev *dev; - struct request_queue *queue; - struct gendisk *disk; - - int ns_id; - int lba_shift; -}; - /* * An NVM Express queue. Each device has at least two (one for admin * commands and one for I/O commands). 
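For scale, the SQ_SIZE()/CQ_SIZE() macros above translate into per-queue allocations as follows, assuming the entry sizes the NVMe specification mandates (64-byte submission entries, as asserted by the BUILD_BUG_ON() just below, and 16-byte completion entries): at the default depth of 1024 that is 64 KiB of submission queue plus 16 KiB of completion queue of DMA-coherent memory per queue pair.

#include <stdio.h>

/* Entry sizes fixed by the NVMe spec; the command size is asserted below. */
#define NVME_SQE_SIZE 64	/* sizeof(struct nvme_command)    */
#define NVME_CQE_SIZE 16	/* sizeof(struct nvme_completion) */

#define NVME_Q_DEPTH 1024

#define SQ_SIZE(depth) ((depth) * NVME_SQE_SIZE)
#define CQ_SIZE(depth) ((depth) * NVME_CQE_SIZE)

int main(void)
{
	printf("submission queue: %d bytes (%d KiB)\n",
	       SQ_SIZE(NVME_Q_DEPTH), SQ_SIZE(NVME_Q_DEPTH) / 1024);
	printf("completion queue: %d bytes (%d KiB)\n",
	       CQ_SIZE(NVME_Q_DEPTH), CQ_SIZE(NVME_Q_DEPTH) / 1024);
	return 0;
}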
@@ -131,6 +93,7 @@ static inline void _nvme_check_size(void) BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64); BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64); BUILD_BUG_ON(sizeof(struct nvme_features) != 64); + BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64); BUILD_BUG_ON(sizeof(struct nvme_command) != 64); BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096); BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096); @@ -261,12 +224,12 @@ static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid, return ctx; } -static struct nvme_queue *get_nvmeq(struct nvme_dev *dev) +struct nvme_queue *get_nvmeq(struct nvme_dev *dev) { return dev->queues[get_cpu() + 1]; } -static void put_nvmeq(struct nvme_queue *nvmeq) +void put_nvmeq(struct nvme_queue *nvmeq) { put_cpu(); } @@ -294,22 +257,6 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd) return 0; } -/* - * The nvme_iod describes the data in an I/O, including the list of PRP - * entries. You can't see it in this data structure because C doesn't let - * me express that. Use nvme_alloc_iod to ensure there's enough space - * allocated to store the PRP list. - */ -struct nvme_iod { - void *private; /* For the use of the submitter of the I/O */ - int npages; /* In the PRP list. 0 means small pool in use */ - int offset; /* Of PRP list */ - int nents; /* Used in scatterlist */ - int length; /* Of data, in bytes */ - dma_addr_t first_dma; - struct scatterlist sg[0]; -}; - static __le64 **iod_list(struct nvme_iod *iod) { return ((void *)iod) + iod->offset; @@ -343,7 +290,7 @@ nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp) return iod; } -static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) +void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) { const int last_prp = PAGE_SIZE / 8 - 1; int i; @@ -361,16 +308,6 @@ static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod) kfree(iod); } -static void requeue_bio(struct nvme_dev *dev, struct bio *bio) -{ - struct nvme_queue *nvmeq = get_nvmeq(dev); - if (bio_list_empty(&nvmeq->sq_cong)) - add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); - bio_list_add(&nvmeq->sq_cong, bio); - put_nvmeq(nvmeq); - wake_up_process(nvme_thread); -} - static void bio_completion(struct nvme_dev *dev, void *ctx, struct nvme_completion *cqe) { @@ -382,19 +319,15 @@ static void bio_completion(struct nvme_dev *dev, void *ctx, dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents, bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); nvme_free_iod(dev, iod); - if (status) { + if (status) bio_endio(bio, -EIO); - } else if (bio->bi_vcnt > bio->bi_idx) { - requeue_bio(dev, bio); - } else { + else bio_endio(bio, 0); - } } /* length is in bytes. gfp flags indicates whether we may sleep. 
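The function this comment introduces, nvme_setup_prps() below, turns a mapped scatterlist into the command's PRP data pointers: prp1 covers the first, possibly unaligned, page; if at most one further page remains it goes directly into prp2, otherwise prp2 points at a PRP list with one 8-byte entry per remaining page, allocated from the driver's DMA pools. A host-side sketch of just that sizing rule, assuming 4 KiB pages (list chaining for very large transfers is not modelled):

#include <stdio.h>

#define PAGE_SIZE 4096u

/*
 * For a transfer of 'len' bytes whose first byte sits at 'offset' within a
 * page: prp1 always points at the first page; if the remainder fits in one
 * more page it goes straight into prp2, otherwise prp2 points at a PRP list
 * with one entry per remaining page.
 */
static unsigned int prp_list_entries(unsigned int len, unsigned int offset)
{
	unsigned int remainder;

	if (len <= PAGE_SIZE - offset)
		return 0;			/* prp1 alone is enough */

	remainder = len - (PAGE_SIZE - offset);
	if (remainder <= PAGE_SIZE)
		return 0;			/* prp2 used as a direct pointer */

	return (remainder + PAGE_SIZE - 1) / PAGE_SIZE;	/* PRP list entries */
}

int main(void)
{
	printf("8 KiB aligned:       %u list entries\n", prp_list_entries(8192, 0));
	printf("8 KiB at offset 512: %u list entries\n", prp_list_entries(8192, 512));
	printf("128 KiB aligned:     %u list entries\n", prp_list_entries(131072, 0));
	return 0;
}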
*/ -static int nvme_setup_prps(struct nvme_dev *dev, - struct nvme_common_command *cmd, struct nvme_iod *iod, - int total_len, gfp_t gfp) +int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd, + struct nvme_iod *iod, int total_len, gfp_t gfp) { struct dma_pool *pool; int length = total_len; @@ -473,43 +406,193 @@ static int nvme_setup_prps(struct nvme_dev *dev, return total_len; } +struct nvme_bio_pair { + struct bio b1, b2, *parent; + struct bio_vec *bv1, *bv2; + int err; + atomic_t cnt; +}; + +static void nvme_bio_pair_endio(struct bio *bio, int err) +{ + struct nvme_bio_pair *bp = bio->bi_private; + + if (err) + bp->err = err; + + if (atomic_dec_and_test(&bp->cnt)) { + bio_endio(bp->parent, bp->err); + if (bp->bv1) + kfree(bp->bv1); + if (bp->bv2) + kfree(bp->bv2); + kfree(bp); + } +} + +static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx, + int len, int offset) +{ + struct nvme_bio_pair *bp; + + BUG_ON(len > bio->bi_size); + BUG_ON(idx > bio->bi_vcnt); + + bp = kmalloc(sizeof(*bp), GFP_ATOMIC); + if (!bp) + return NULL; + bp->err = 0; + + bp->b1 = *bio; + bp->b2 = *bio; + + bp->b1.bi_size = len; + bp->b2.bi_size -= len; + bp->b1.bi_vcnt = idx; + bp->b2.bi_idx = idx; + bp->b2.bi_sector += len >> 9; + + if (offset) { + bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), + GFP_ATOMIC); + if (!bp->bv1) + goto split_fail_1; + + bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec), + GFP_ATOMIC); + if (!bp->bv2) + goto split_fail_2; + + memcpy(bp->bv1, bio->bi_io_vec, + bio->bi_max_vecs * sizeof(struct bio_vec)); + memcpy(bp->bv2, bio->bi_io_vec, + bio->bi_max_vecs * sizeof(struct bio_vec)); + + bp->b1.bi_io_vec = bp->bv1; + bp->b2.bi_io_vec = bp->bv2; + bp->b2.bi_io_vec[idx].bv_offset += offset; + bp->b2.bi_io_vec[idx].bv_len -= offset; + bp->b1.bi_io_vec[idx].bv_len = offset; + bp->b1.bi_vcnt++; + } else + bp->bv1 = bp->bv2 = NULL; + + bp->b1.bi_private = bp; + bp->b2.bi_private = bp; + + bp->b1.bi_end_io = nvme_bio_pair_endio; + bp->b2.bi_end_io = nvme_bio_pair_endio; + + bp->parent = bio; + atomic_set(&bp->cnt, 2); + + return bp; + + split_fail_2: + kfree(bp->bv1); + split_fail_1: + kfree(bp); + return NULL; +} + +static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, + int idx, int len, int offset) +{ + struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset); + if (!bp) + return -ENOMEM; + + if (bio_list_empty(&nvmeq->sq_cong)) + add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); + bio_list_add(&nvmeq->sq_cong, &bp->b1); + bio_list_add(&nvmeq->sq_cong, &bp->b2); + + return 0; +} + /* NVMe scatterlists require no holes in the virtual address */ #define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2) ((vec2)->bv_offset || \ (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE)) -static int nvme_map_bio(struct device *dev, struct nvme_iod *iod, +static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, struct bio *bio, enum dma_data_direction dma_dir, int psegs) { struct bio_vec *bvec, *bvprv = NULL; struct scatterlist *sg = NULL; - int i, old_idx, length = 0, nsegs = 0; + int i, length = 0, nsegs = 0, split_len = bio->bi_size; + + if (nvmeq->dev->stripe_size) + split_len = nvmeq->dev->stripe_size - + ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); sg_init_table(iod->sg, psegs); - old_idx = bio->bi_idx; bio_for_each_segment(bvec, bio, i) { if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { sg->length += bvec->bv_len; } else { if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) - break; + 
return nvme_split_and_submit(bio, nvmeq, i, + length, 0); + sg = sg ? sg + 1 : iod->sg; sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset); nsegs++; } + + if (split_len - length < bvec->bv_len) + return nvme_split_and_submit(bio, nvmeq, i, split_len, + split_len - length); length += bvec->bv_len; bvprv = bvec; } - bio->bi_idx = i; iod->nents = nsegs; sg_mark_end(sg); - if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) { - bio->bi_idx = old_idx; + if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) return -ENOMEM; - } + + BUG_ON(length != bio->bi_size); return length; } +/* + * We reuse the small pool to allocate the 16-byte range here as it is not + * worth having a special pool for these or additional cases to handle freeing + * the iod. + */ +static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns, + struct bio *bio, struct nvme_iod *iod, int cmdid) +{ + struct nvme_dsm_range *range; + struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail]; + + range = dma_pool_alloc(nvmeq->dev->prp_small_pool, GFP_ATOMIC, + &iod->first_dma); + if (!range) + return -ENOMEM; + + iod_list(iod)[0] = (__le64 *)range; + iod->npages = 0; + + range->cattr = cpu_to_le32(0); + range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift); + range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); + + memset(cmnd, 0, sizeof(*cmnd)); + cmnd->dsm.opcode = nvme_cmd_dsm; + cmnd->dsm.command_id = cmdid; + cmnd->dsm.nsid = cpu_to_le32(ns->ns_id); + cmnd->dsm.prp1 = cpu_to_le64(iod->first_dma); + cmnd->dsm.nr = 0; + cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + + if (++nvmeq->sq_tail == nvmeq->q_depth) + nvmeq->sq_tail = 0; + writel(nvmeq->sq_tail, nvmeq->q_db); + + return 0; +} + static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, int cmdid) { @@ -527,7 +610,7 @@ static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns, return 0; } -static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns) +int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns) { int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH, special_completion, NVME_IO_TIMEOUT); @@ -567,6 +650,12 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, if (unlikely(cmdid < 0)) goto free_iod; + if (bio->bi_rw & REQ_DISCARD) { + result = nvme_submit_discard(nvmeq, ns, bio, iod, cmdid); + if (result) + goto free_cmdid; + return result; + } if ((bio->bi_rw & REQ_FLUSH) && !psegs) return nvme_submit_flush(nvmeq, ns, cmdid); @@ -591,8 +680,8 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, dma_dir = DMA_FROM_DEVICE; } - result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs); - if (result < 0) + result = nvme_map_bio(nvmeq, iod, bio, dma_dir, psegs); + if (result <= 0) goto free_cmdid; length = result; @@ -600,13 +689,11 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns, cmnd->rw.nsid = cpu_to_le32(ns->ns_id); length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, GFP_ATOMIC); - cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9)); + cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); cmnd->rw.control = cpu_to_le16(control); cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); - bio->bi_sector += length >> 9; - if (++nvmeq->sq_tail == nvmeq->q_depth) nvmeq->sq_tail = 0; writel(nvmeq->sq_tail, nvmeq->q_db); @@ -724,8 +811,8 @@ static void 
sync_completion(struct nvme_dev *dev, void *ctx, * Returns 0 on success. If the result is negative, it's a Linux error code; * if the result is positive, it's an NVM Express status code */ -static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, - struct nvme_command *cmd, u32 *result, unsigned timeout) +int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd, + u32 *result, unsigned timeout) { int cmdid; struct sync_cmd_info cmdinfo; @@ -741,7 +828,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, set_current_state(TASK_KILLABLE); nvme_submit_cmd(nvmeq, cmd); - schedule(); + schedule_timeout(timeout); if (cmdinfo.status == -EINTR) { nvme_abort_command(nvmeq, cmdid); @@ -754,7 +841,7 @@ static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, return cmdinfo.status; } -static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, +int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd, u32 *result) { return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT); @@ -827,7 +914,7 @@ static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid) return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid); } -static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, +int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, dma_addr_t dma_addr) { struct nvme_command c; @@ -841,7 +928,7 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, return nvme_submit_admin_cmd(dev, &c, NULL); } -static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, +int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, dma_addr_t dma_addr, u32 *result) { struct nvme_command c; @@ -855,8 +942,8 @@ static int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid, return nvme_submit_admin_cmd(dev, &c, result); } -static int nvme_set_features(struct nvme_dev *dev, unsigned fid, - unsigned dword11, dma_addr_t dma_addr, u32 *result) +int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11, + dma_addr_t dma_addr, u32 *result) { struct nvme_command c; @@ -885,7 +972,7 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) void *ctx; nvme_completion_fn fn; static struct nvme_completion cqe = { - .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, + .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), }; if (timeout && !time_after(now, info[cmdid].timeout)) @@ -966,7 +1053,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, return nvmeq; free_cqdma: - dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes, + dma_free_coherent(dmadev, CQ_SIZE(depth), (void *)nvmeq->cqes, nvmeq->cq_dma_addr); free_nvmeq: kfree(nvmeq); @@ -1021,15 +1108,60 @@ static struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, int qid, return ERR_PTR(result); } +static int nvme_wait_ready(struct nvme_dev *dev, u64 cap, bool enabled) +{ + unsigned long timeout; + u32 bit = enabled ? NVME_CSTS_RDY : 0; + + timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; + + while ((readl(&dev->bar->csts) & NVME_CSTS_RDY) != bit) { + msleep(100); + if (fatal_signal_pending(current)) + return -EINTR; + if (time_after(jiffies, timeout)) { + dev_err(&dev->pci_dev->dev, + "Device not ready; aborting initialisation\n"); + return -ENODEV; + } + } + + return 0; +} + +/* + * If the device has been passed off to us in an enabled state, just clear + * the enabled bit. 
The spec says we should set the 'shutdown notification + * bits', but doing so may cause the device to complete commands to the + * admin queue ... and we don't know what memory that might be pointing at! + */ +static int nvme_disable_ctrl(struct nvme_dev *dev, u64 cap) +{ + u32 cc = readl(&dev->bar->cc); + + if (cc & NVME_CC_ENABLE) + writel(cc & ~NVME_CC_ENABLE, &dev->bar->cc); + return nvme_wait_ready(dev, cap, false); +} + +static int nvme_enable_ctrl(struct nvme_dev *dev, u64 cap) +{ + return nvme_wait_ready(dev, cap, true); +} + static int nvme_configure_admin_queue(struct nvme_dev *dev) { - int result = 0; + int result; u32 aqa; - u64 cap; - unsigned long timeout; + u64 cap = readq(&dev->bar->cap); struct nvme_queue *nvmeq; dev->dbs = ((void __iomem *)dev->bar) + 4096; + dev->db_stride = NVME_CAP_STRIDE(cap); + + result = nvme_disable_ctrl(dev, cap); + if (result < 0) + return result; nvmeq = nvme_alloc_queue(dev, 0, 64, 0); if (!nvmeq) @@ -1043,38 +1175,28 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE; dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES; - writel(0, &dev->bar->cc); writel(aqa, &dev->bar->aqa); writeq(nvmeq->sq_dma_addr, &dev->bar->asq); writeq(nvmeq->cq_dma_addr, &dev->bar->acq); writel(dev->ctrl_config, &dev->bar->cc); - cap = readq(&dev->bar->cap); - timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; - dev->db_stride = NVME_CAP_STRIDE(cap); - - while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { - msleep(100); - if (fatal_signal_pending(current)) - result = -EINTR; - if (time_after(jiffies, timeout)) { - dev_err(&dev->pci_dev->dev, - "Device not ready; aborting initialisation\n"); - result = -ENODEV; - } - } - - if (result) { - nvme_free_queue_mem(nvmeq); - return result; - } + result = nvme_enable_ctrl(dev, cap); + if (result) + goto free_q; result = queue_request_irq(dev, nvmeq, "nvme admin"); + if (result) + goto free_q; + dev->queues[0] = nvmeq; return result; + + free_q: + nvme_free_queue_mem(nvmeq); + return result; } -static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, +struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, unsigned long addr, unsigned length) { int i, err, count, nents, offset; @@ -1130,7 +1252,7 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, return ERR_PTR(err); } -static void nvme_unmap_user_pages(struct nvme_dev *dev, int write, +void nvme_unmap_user_pages(struct nvme_dev *dev, int write, struct nvme_iod *iod) { int i; @@ -1148,13 +1270,19 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) struct nvme_queue *nvmeq; struct nvme_user_io io; struct nvme_command c; - unsigned length; - int status; - struct nvme_iod *iod; + unsigned length, meta_len; + int status, i; + struct nvme_iod *iod, *meta_iod = NULL; + dma_addr_t meta_dma_addr; + void *meta, *uninitialized_var(meta_mem); if (copy_from_user(&io, uio, sizeof(io))) return -EFAULT; length = (io.nblocks + 1) << ns->lba_shift; + meta_len = (io.nblocks + 1) * ns->ms; + + if (meta_len && ((io.metadata & 3) || !io.metadata)) + return -EINVAL; switch (io.opcode) { case nvme_cmd_write: @@ -1176,11 +1304,42 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) c.rw.slba = cpu_to_le64(io.slba); c.rw.length = cpu_to_le16(io.nblocks); c.rw.control = cpu_to_le16(io.control); - c.rw.dsmgmt = cpu_to_le16(io.dsmgmt); - c.rw.reftag = io.reftag; - c.rw.apptag = io.apptag; - 
c.rw.appmask = io.appmask; - /* XXX: metadata */ + c.rw.dsmgmt = cpu_to_le32(io.dsmgmt); + c.rw.reftag = cpu_to_le32(io.reftag); + c.rw.apptag = cpu_to_le16(io.apptag); + c.rw.appmask = cpu_to_le16(io.appmask); + + if (meta_len) { + meta_iod = nvme_map_user_pages(dev, io.opcode & 1, io.metadata, meta_len); + if (IS_ERR(meta_iod)) { + status = PTR_ERR(meta_iod); + meta_iod = NULL; + goto unmap; + } + + meta_mem = dma_alloc_coherent(&dev->pci_dev->dev, meta_len, + &meta_dma_addr, GFP_KERNEL); + if (!meta_mem) { + status = -ENOMEM; + goto unmap; + } + + if (io.opcode & 1) { + int meta_offset = 0; + + for (i = 0; i < meta_iod->nents; i++) { + meta = kmap_atomic(sg_page(&meta_iod->sg[i])) + + meta_iod->sg[i].offset; + memcpy(meta_mem + meta_offset, meta, + meta_iod->sg[i].length); + kunmap_atomic(meta); + meta_offset += meta_iod->sg[i].length; + } + } + + c.rw.metadata = cpu_to_le64(meta_dma_addr); + } + length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL); nvmeq = get_nvmeq(dev); @@ -1196,8 +1355,33 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) else status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + if (meta_len) { + if (status == NVME_SC_SUCCESS && !(io.opcode & 1)) { + int meta_offset = 0; + + for (i = 0; i < meta_iod->nents; i++) { + meta = kmap_atomic(sg_page(&meta_iod->sg[i])) + + meta_iod->sg[i].offset; + memcpy(meta, meta_mem + meta_offset, + meta_iod->sg[i].length); + kunmap_atomic(meta); + meta_offset += meta_iod->sg[i].length; + } + } + + dma_free_coherent(&dev->pci_dev->dev, meta_len, meta_mem, + meta_dma_addr); + } + + unmap: nvme_unmap_user_pages(dev, io.opcode & 1, iod); nvme_free_iod(dev, iod); + + if (meta_iod) { + nvme_unmap_user_pages(dev, io.opcode & 1, meta_iod); + nvme_free_iod(dev, meta_iod); + } + return status; } @@ -1208,6 +1392,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev, struct nvme_command c; int status, length; struct nvme_iod *uninitialized_var(iod); + unsigned timeout; if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -1237,10 +1422,13 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev, GFP_KERNEL); } + timeout = cmd.timeout_ms ? 
msecs_to_jiffies(cmd.timeout_ms) : + ADMIN_TIMEOUT; if (length != cmd.data_len) status = -ENOMEM; else - status = nvme_submit_admin_cmd(dev, &c, &cmd.result); + status = nvme_submit_sync_cmd(dev->queues[0], &c, &cmd.result, + timeout); if (cmd.data_len) { nvme_unmap_user_pages(dev, cmd.opcode & 1, iod); @@ -1266,6 +1454,10 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, return nvme_user_admin_cmd(ns->dev, (void __user *)arg); case NVME_IOCTL_SUBMIT_IO: return nvme_submit_io(ns, (void __user *)arg); + case SG_GET_VERSION_NUM: + return nvme_sg_get_version_num((void __user *)arg); + case SG_IO: + return nvme_sg_io(ns, (void __user *)arg); default: return -ENOTTY; } @@ -1282,13 +1474,17 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq) while (bio_list_peek(&nvmeq->sq_cong)) { struct bio *bio = bio_list_pop(&nvmeq->sq_cong); struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data; + + if (bio_list_empty(&nvmeq->sq_cong)) + remove_wait_queue(&nvmeq->sq_full, + &nvmeq->sq_cong_wait); if (nvme_submit_bio_queue(nvmeq, ns, bio)) { + if (bio_list_empty(&nvmeq->sq_cong)) + add_wait_queue(&nvmeq->sq_full, + &nvmeq->sq_cong_wait); bio_list_add_head(&nvmeq->sq_cong, bio); break; } - if (bio_list_empty(&nvmeq->sq_cong)) - remove_wait_queue(&nvmeq->sq_full, - &nvmeq->sq_cong_wait); } } @@ -1297,7 +1493,7 @@ static int nvme_kthread(void *data) struct nvme_dev *dev; while (!kthread_should_stop()) { - __set_current_state(TASK_RUNNING); + set_current_state(TASK_INTERRUPTIBLE); spin_lock(&dev_list_lock); list_for_each_entry(dev, &dev_list, node) { int i; @@ -1314,8 +1510,7 @@ static int nvme_kthread(void *data) } } spin_unlock(&dev_list_lock); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ); + schedule_timeout(round_jiffies_relative(HZ)); } return 0; } @@ -1347,6 +1542,16 @@ static void nvme_put_ns_idx(int index) spin_unlock(&dev_list_lock); } +static void nvme_config_discard(struct nvme_ns *ns) +{ + u32 logical_block_size = queue_logical_block_size(ns->queue); + ns->queue->limits.discard_zeroes_data = 0; + ns->queue->limits.discard_alignment = logical_block_size; + ns->queue->limits.discard_granularity = logical_block_size; + ns->queue->limits.max_discard_sectors = 0xffffffff; + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); +} + static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, struct nvme_id_ns *id, struct nvme_lba_range_type *rt) { @@ -1366,7 +1571,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, ns->queue->queue_flags = QUEUE_FLAG_DEFAULT; queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue); queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue); -/* queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */ blk_queue_make_request(ns->queue, nvme_make_request); ns->dev = dev; ns->queue->queuedata = ns; @@ -1378,6 +1582,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, ns->disk = disk; lbaf = id->flbas & 0xf; ns->lba_shift = id->lbaf[lbaf].ds; + ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); if (dev->max_hw_sectors) blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); @@ -1392,6 +1597,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); + if (dev->oncs & NVME_CTRL_ONCS_DSM) + nvme_config_discard(ns); + return ns; out_free_queue: @@ -1496,14 +1704,21 @@ static 
void nvme_free_queues(struct nvme_dev *dev) nvme_free_queue(dev, i); } +/* + * Return: error value if an error occurred setting up the queues or calling + * Identify Device. 0 if these succeeded, even if adding some of the + * namespaces failed. At the moment, these failures are silent. TBD which + * failures should be reported. + */ static int nvme_dev_add(struct nvme_dev *dev) { int res, nn, i; - struct nvme_ns *ns, *next; + struct nvme_ns *ns; struct nvme_id_ctrl *ctrl; struct nvme_id_ns *id_ns; void *mem; dma_addr_t dma_addr; + int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; res = nvme_setup_io_queues(dev); if (res) @@ -1511,22 +1726,26 @@ static int nvme_dev_add(struct nvme_dev *dev) mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr, GFP_KERNEL); + if (!mem) + return -ENOMEM; res = nvme_identify(dev, 0, 1, dma_addr); if (res) { res = -EIO; - goto out_free; + goto out; } ctrl = mem; nn = le32_to_cpup(&ctrl->nn); + dev->oncs = le16_to_cpup(&ctrl->oncs); memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn)); memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); - if (ctrl->mdts) { - int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; + if (ctrl->mdts) dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); - } + if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) && + (dev->pci_dev->device == 0x0953) && ctrl->vs[3]) + dev->stripe_size = 1 << (ctrl->vs[3] + shift); id_ns = mem; for (i = 1; i <= nn; i++) { @@ -1548,14 +1767,7 @@ static int nvme_dev_add(struct nvme_dev *dev) } list_for_each_entry(ns, &dev->namespaces, list) add_disk(ns->disk); - - goto out; - - out_free: - list_for_each_entry_safe(ns, next, &dev->namespaces, list) { - list_del(&ns->list); - nvme_ns_free(ns); - } + res = 0; out: dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr); @@ -1634,6 +1846,56 @@ static void nvme_release_instance(struct nvme_dev *dev) spin_unlock(&dev_list_lock); } +static void nvme_free_dev(struct kref *kref) +{ + struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); + nvme_dev_remove(dev); + pci_disable_msix(dev->pci_dev); + iounmap(dev->bar); + nvme_release_instance(dev); + nvme_release_prp_pools(dev); + pci_disable_device(dev->pci_dev); + pci_release_regions(dev->pci_dev); + kfree(dev->queues); + kfree(dev->entry); + kfree(dev); +} + +static int nvme_dev_open(struct inode *inode, struct file *f) +{ + struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev, + miscdev); + kref_get(&dev->kref); + f->private_data = dev; + return 0; +} + +static int nvme_dev_release(struct inode *inode, struct file *f) +{ + struct nvme_dev *dev = f->private_data; + kref_put(&dev->kref, nvme_free_dev); + return 0; +} + +static long nvme_dev_ioctl(struct file *f, unsigned int cmd, unsigned long arg) +{ + struct nvme_dev *dev = f->private_data; + switch (cmd) { + case NVME_IOCTL_ADMIN_CMD: + return nvme_user_admin_cmd(dev, (void __user *)arg); + default: + return -ENOTTY; + } +} + +static const struct file_operations nvme_dev_fops = { + .owner = THIS_MODULE, + .open = nvme_dev_open, + .release = nvme_dev_release, + .unlocked_ioctl = nvme_dev_ioctl, + .compat_ioctl = nvme_dev_ioctl, +}; + static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int bars, result = -ENOMEM; @@ -1692,8 +1954,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto delete; + scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance); + dev->miscdev.minor = MISC_DYNAMIC_MINOR; + 
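+	/* The misc char device (named nvme<instance>) accepts
+	 * NVME_IOCTL_ADMIN_CMD through nvme_dev_ioctl() without
+	 * requiring an open namespace block device. */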
dev->miscdev.parent = &pdev->dev; + dev->miscdev.name = dev->name; + dev->miscdev.fops = &nvme_dev_fops; + result = misc_register(&dev->miscdev); + if (result) + goto remove; + + kref_init(&dev->kref); return 0; + remove: + nvme_dev_remove(dev); delete: spin_lock(&dev_list_lock); list_del(&dev->node); @@ -1719,16 +1993,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) static void nvme_remove(struct pci_dev *pdev) { struct nvme_dev *dev = pci_get_drvdata(pdev); - nvme_dev_remove(dev); - pci_disable_msix(pdev); - iounmap(dev->bar); - nvme_release_instance(dev); - nvme_release_prp_pools(dev); - pci_disable_device(pdev); - pci_release_regions(pdev); - kfree(dev->queues); - kfree(dev->entry); - kfree(dev); + misc_deregister(&dev->miscdev); + kref_put(&dev->kref, nvme_free_dev); } /* These functions are yet to be implemented */ diff --git a/trunk/drivers/block/nvme-scsi.c b/trunk/drivers/block/nvme-scsi.c new file mode 100644 index 000000000000..fed54b039893 --- /dev/null +++ b/trunk/drivers/block/nvme-scsi.c @@ -0,0 +1,3053 @@ +/* + * NVM Express device driver + * Copyright (c) 2011, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + */ + +/* + * Refer to the SCSI-NVMe Translation spec for details on how + * each command is translated. 
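+ * The translation layer is reached from nvme_ioctl() through the SG_IO and
+ * SG_GET_VERSION_NUM block ioctls (nvme_sg_io() / nvme_sg_get_version_num()).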
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +static int sg_version_num = 30534; /* 2 digits for each component */ + +#define SNTI_TRANSLATION_SUCCESS 0 +#define SNTI_INTERNAL_ERROR 1 + +/* VPD Page Codes */ +#define VPD_SUPPORTED_PAGES 0x00 +#define VPD_SERIAL_NUMBER 0x80 +#define VPD_DEVICE_IDENTIFIERS 0x83 +#define VPD_EXTENDED_INQUIRY 0x86 +#define VPD_BLOCK_DEV_CHARACTERISTICS 0xB1 + +/* CDB offsets */ +#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET 6 +#define REPORT_LUNS_SR_OFFSET 2 +#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET 10 +#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET 4 +#define REQUEST_SENSE_DESC_OFFSET 1 +#define REQUEST_SENSE_DESC_MASK 0x01 +#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE 1 +#define INQUIRY_EVPD_BYTE_OFFSET 1 +#define INQUIRY_PAGE_CODE_BYTE_OFFSET 2 +#define INQUIRY_EVPD_BIT_MASK 1 +#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET 3 +#define START_STOP_UNIT_CDB_IMMED_OFFSET 1 +#define START_STOP_UNIT_CDB_IMMED_MASK 0x1 +#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET 3 +#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK 0xF +#define START_STOP_UNIT_CDB_POWER_COND_OFFSET 4 +#define START_STOP_UNIT_CDB_POWER_COND_MASK 0xF0 +#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET 4 +#define START_STOP_UNIT_CDB_NO_FLUSH_MASK 0x4 +#define START_STOP_UNIT_CDB_START_OFFSET 4 +#define START_STOP_UNIT_CDB_START_MASK 0x1 +#define WRITE_BUFFER_CDB_MODE_OFFSET 1 +#define WRITE_BUFFER_CDB_MODE_MASK 0x1F +#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET 2 +#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET 3 +#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET 6 +#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET 1 +#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK 0xC0 +#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT 6 +#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET 1 +#define FORMAT_UNIT_CDB_LONG_LIST_MASK 0x20 +#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET 1 +#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK 0x10 +#define FORMAT_UNIT_SHORT_PARM_LIST_LEN 4 +#define FORMAT_UNIT_LONG_PARM_LIST_LEN 8 +#define FORMAT_UNIT_PROT_INT_OFFSET 3 +#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET 0 +#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK 0x07 +#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET 7 + +/* Misc. 
defines */ +#define NIBBLE_SHIFT 4 +#define FIXED_SENSE_DATA 0x70 +#define DESC_FORMAT_SENSE_DATA 0x72 +#define FIXED_SENSE_DATA_ADD_LENGTH 10 +#define LUN_ENTRY_SIZE 8 +#define LUN_DATA_HEADER_SIZE 8 +#define ALL_LUNS_RETURNED 0x02 +#define ALL_WELL_KNOWN_LUNS_RETURNED 0x01 +#define RESTRICTED_LUNS_RETURNED 0x00 +#define NVME_POWER_STATE_START_VALID 0x00 +#define NVME_POWER_STATE_ACTIVE 0x01 +#define NVME_POWER_STATE_IDLE 0x02 +#define NVME_POWER_STATE_STANDBY 0x03 +#define NVME_POWER_STATE_LU_CONTROL 0x07 +#define POWER_STATE_0 0 +#define POWER_STATE_1 1 +#define POWER_STATE_2 2 +#define POWER_STATE_3 3 +#define DOWNLOAD_SAVE_ACTIVATE 0x05 +#define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E +#define ACTIVATE_DEFERRED_MICROCODE 0x0F +#define FORMAT_UNIT_IMMED_MASK 0x2 +#define FORMAT_UNIT_IMMED_OFFSET 1 +#define KELVIN_TEMP_FACTOR 273 +#define FIXED_FMT_SENSE_DATA_SIZE 18 +#define DESC_FMT_SENSE_DATA_SIZE 8 + +/* SCSI/NVMe defines and bit masks */ +#define INQ_STANDARD_INQUIRY_PAGE 0x00 +#define INQ_SUPPORTED_VPD_PAGES_PAGE 0x00 +#define INQ_UNIT_SERIAL_NUMBER_PAGE 0x80 +#define INQ_DEVICE_IDENTIFICATION_PAGE 0x83 +#define INQ_EXTENDED_INQUIRY_DATA_PAGE 0x86 +#define INQ_BDEV_CHARACTERISTICS_PAGE 0xB1 +#define INQ_SERIAL_NUMBER_LENGTH 0x14 +#define INQ_NUM_SUPPORTED_VPD_PAGES 5 +#define VERSION_SPC_4 0x06 +#define ACA_UNSUPPORTED 0 +#define STANDARD_INQUIRY_LENGTH 36 +#define ADDITIONAL_STD_INQ_LENGTH 31 +#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH 0x3C +#define RESERVED_FIELD 0 + +/* SCSI READ/WRITE Defines */ +#define IO_CDB_WP_MASK 0xE0 +#define IO_CDB_WP_SHIFT 5 +#define IO_CDB_FUA_MASK 0x8 +#define IO_6_CDB_LBA_OFFSET 0 +#define IO_6_CDB_LBA_MASK 0x001FFFFF +#define IO_6_CDB_TX_LEN_OFFSET 4 +#define IO_6_DEFAULT_TX_LEN 256 +#define IO_10_CDB_LBA_OFFSET 2 +#define IO_10_CDB_TX_LEN_OFFSET 7 +#define IO_10_CDB_WP_OFFSET 1 +#define IO_10_CDB_FUA_OFFSET 1 +#define IO_12_CDB_LBA_OFFSET 2 +#define IO_12_CDB_TX_LEN_OFFSET 6 +#define IO_12_CDB_WP_OFFSET 1 +#define IO_12_CDB_FUA_OFFSET 1 +#define IO_16_CDB_FUA_OFFSET 1 +#define IO_16_CDB_WP_OFFSET 1 +#define IO_16_CDB_LBA_OFFSET 2 +#define IO_16_CDB_TX_LEN_OFFSET 10 + +/* Mode Sense/Select defines */ +#define MODE_PAGE_INFO_EXCEP 0x1C +#define MODE_PAGE_CACHING 0x08 +#define MODE_PAGE_CONTROL 0x0A +#define MODE_PAGE_POWER_CONDITION 0x1A +#define MODE_PAGE_RETURN_ALL 0x3F +#define MODE_PAGE_BLK_DES_LEN 0x08 +#define MODE_PAGE_LLBAA_BLK_DES_LEN 0x10 +#define MODE_PAGE_CACHING_LEN 0x14 +#define MODE_PAGE_CONTROL_LEN 0x0C +#define MODE_PAGE_POW_CND_LEN 0x28 +#define MODE_PAGE_INF_EXC_LEN 0x0C +#define MODE_PAGE_ALL_LEN 0x54 +#define MODE_SENSE6_MPH_SIZE 4 +#define MODE_SENSE6_ALLOC_LEN_OFFSET 4 +#define MODE_SENSE_PAGE_CONTROL_OFFSET 2 +#define MODE_SENSE_PAGE_CONTROL_MASK 0xC0 +#define MODE_SENSE_PAGE_CODE_OFFSET 2 +#define MODE_SENSE_PAGE_CODE_MASK 0x3F +#define MODE_SENSE_LLBAA_OFFSET 1 +#define MODE_SENSE_LLBAA_MASK 0x10 +#define MODE_SENSE_LLBAA_SHIFT 4 +#define MODE_SENSE_DBD_OFFSET 1 +#define MODE_SENSE_DBD_MASK 8 +#define MODE_SENSE_DBD_SHIFT 3 +#define MODE_SENSE10_MPH_SIZE 8 +#define MODE_SENSE10_ALLOC_LEN_OFFSET 7 +#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET 1 +#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET 1 +#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET 4 +#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET 7 +#define MODE_SELECT_CDB_PAGE_FORMAT_MASK 0x10 +#define MODE_SELECT_CDB_SAVE_PAGES_MASK 0x1 +#define MODE_SELECT_6_BD_OFFSET 3 +#define MODE_SELECT_10_BD_OFFSET 6 +#define MODE_SELECT_10_LLBAA_OFFSET 4 +#define MODE_SELECT_10_LLBAA_MASK 1 
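+/* Mode parameter header sizes: 4 bytes for 6-byte CDBs, 8 bytes for 10-byte
+ * CDBs (matching MODE_SENSE6_MPH_SIZE / MODE_SENSE10_MPH_SIZE above). */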
+#define MODE_SELECT_6_MPH_SIZE 4 +#define MODE_SELECT_10_MPH_SIZE 8 +#define CACHING_MODE_PAGE_WCE_MASK 0x04 +#define MODE_SENSE_BLK_DESC_ENABLED 0 +#define MODE_SENSE_BLK_DESC_COUNT 1 +#define MODE_SELECT_PAGE_CODE_MASK 0x3F +#define SHORT_DESC_BLOCK 8 +#define LONG_DESC_BLOCK 16 +#define MODE_PAGE_POW_CND_LEN_FIELD 0x26 +#define MODE_PAGE_INF_EXC_LEN_FIELD 0x0A +#define MODE_PAGE_CACHING_LEN_FIELD 0x12 +#define MODE_PAGE_CONTROL_LEN_FIELD 0x0A +#define MODE_SENSE_PC_CURRENT_VALUES 0 + +/* Log Sense defines */ +#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE 0x00 +#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH 0x07 +#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE 0x2F +#define LOG_PAGE_TEMPERATURE_PAGE 0x0D +#define LOG_SENSE_CDB_SP_OFFSET 1 +#define LOG_SENSE_CDB_SP_NOT_ENABLED 0 +#define LOG_SENSE_CDB_PC_OFFSET 2 +#define LOG_SENSE_CDB_PC_MASK 0xC0 +#define LOG_SENSE_CDB_PC_SHIFT 6 +#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES 1 +#define LOG_SENSE_CDB_PAGE_CODE_MASK 0x3F +#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET 7 +#define REMAINING_INFO_EXCP_PAGE_LENGTH 0x8 +#define LOG_INFO_EXCP_PAGE_LENGTH 0xC +#define REMAINING_TEMP_PAGE_LENGTH 0xC +#define LOG_TEMP_PAGE_LENGTH 0x10 +#define LOG_TEMP_UNKNOWN 0xFF +#define SUPPORTED_LOG_PAGES_PAGE_LENGTH 0x3 + +/* Read Capacity defines */ +#define READ_CAP_10_RESP_SIZE 8 +#define READ_CAP_16_RESP_SIZE 32 + +/* NVMe Namespace and Command Defines */ +#define NVME_GET_SMART_LOG_PAGE 0x02 +#define NVME_GET_FEAT_TEMP_THRESH 0x04 +#define BYTES_TO_DWORDS 4 +#define NVME_MAX_FIRMWARE_SLOT 7 + +/* Report LUNs defines */ +#define REPORT_LUNS_FIRST_LUN_OFFSET 8 + +/* SCSI ADDITIONAL SENSE Codes */ + +#define SCSI_ASC_NO_SENSE 0x00 +#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT 0x03 +#define SCSI_ASC_LUN_NOT_READY 0x04 +#define SCSI_ASC_WARNING 0x0B +#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED 0x10 +#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED 0x10 +#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED 0x10 +#define SCSI_ASC_UNRECOVERED_READ_ERROR 0x11 +#define SCSI_ASC_MISCOMPARE_DURING_VERIFY 0x1D +#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID 0x20 +#define SCSI_ASC_ILLEGAL_COMMAND 0x20 +#define SCSI_ASC_ILLEGAL_BLOCK 0x21 +#define SCSI_ASC_INVALID_CDB 0x24 +#define SCSI_ASC_INVALID_LUN 0x25 +#define SCSI_ASC_INVALID_PARAMETER 0x26 +#define SCSI_ASC_FORMAT_COMMAND_FAILED 0x31 +#define SCSI_ASC_INTERNAL_TARGET_FAILURE 0x44 + +/* SCSI ADDITIONAL SENSE Code Qualifiers */ + +#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE 0x00 +#define SCSI_ASCQ_FORMAT_COMMAND_FAILED 0x01 +#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED 0x01 +#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED 0x02 +#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED 0x03 +#define SCSI_ASCQ_FORMAT_IN_PROGRESS 0x04 +#define SCSI_ASCQ_POWER_LOSS_EXPECTED 0x08 +#define SCSI_ASCQ_INVALID_LUN_ID 0x09 + +/** + * DEVICE_SPECIFIC_PARAMETER in mode parameter header (see sbc2r16) to + * enable DPOFUA support type 0x10 value. 
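+ * It is defined as zero below, i.e. DPOFUA is not advertised.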
+ */ +#define DEVICE_SPECIFIC_PARAMETER 0 +#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR) + +/* MACROs to extract information from CDBs */ + +#define GET_OPCODE(cdb) cdb[0] + +#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0) + +#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0)) + +#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \ +(cdb[index + 1] << 8) | \ +(cdb[index + 2] << 0)) + +#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \ +(cdb[index + 1] << 16) | \ +(cdb[index + 2] << 8) | \ +(cdb[index + 3] << 0)) + +#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \ +(((u64)cdb[index + 1]) << 48) | \ +(((u64)cdb[index + 2]) << 40) | \ +(((u64)cdb[index + 3]) << 32) | \ +(((u64)cdb[index + 4]) << 24) | \ +(((u64)cdb[index + 5]) << 16) | \ +(((u64)cdb[index + 6]) << 8) | \ +(((u64)cdb[index + 7]) << 0)) + +/* Inquiry Helper Macros */ +#define GET_INQ_EVPD_BIT(cdb) \ +((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) & \ +INQUIRY_EVPD_BIT_MASK) ? 1 : 0) + +#define GET_INQ_PAGE_CODE(cdb) \ +(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET)) + +#define GET_INQ_ALLOC_LENGTH(cdb) \ +(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET)) + +/* Report LUNs Helper Macros */ +#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb) \ +(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET)) + +/* Read Capacity Helper Macros */ +#define GET_READ_CAP_16_ALLOC_LENGTH(cdb) \ +(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET)) + +#define IS_READ_CAP_16(cdb) \ +((cdb[0] == SERVICE_ACTION_IN && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0) + +/* Request Sense Helper Macros */ +#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb) \ +(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET)) + +/* Mode Sense Helper Macros */ +#define GET_MODE_SENSE_DBD(cdb) \ +((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >> \ +MODE_SENSE_DBD_SHIFT) + +#define GET_MODE_SENSE_LLBAA(cdb) \ +((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) & \ +MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT) + +#define GET_MODE_SENSE_MPH_SIZE(cdb10) \ +(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE) + + +/* Struct to gather data that needs to be extracted from a SCSI CDB. + Not conforming to any particular CDB variant, but compatible with all. 
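+   LBA and transfer length are widened to the largest CDB forms (64-bit LBA,
+   32-bit length), as used by the 16-byte READ/WRITE CDBs.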
*/ + +struct nvme_trans_io_cdb { + u8 fua; + u8 prot_info; + u64 lba; + u32 xfer_len; +}; + + +/* Internal Helper Functions */ + + +/* Copy data to userspace memory */ + +static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from, + unsigned long n) +{ + int res = SNTI_TRANSLATION_SUCCESS; + unsigned long not_copied; + int i; + void *index = from; + size_t remaining = n; + size_t xfer_len; + + if (hdr->iovec_count > 0) { + struct sg_iovec sgl; + + for (i = 0; i < hdr->iovec_count; i++) { + not_copied = copy_from_user(&sgl, hdr->dxferp + + i * sizeof(struct sg_iovec), + sizeof(struct sg_iovec)); + if (not_copied) + return -EFAULT; + xfer_len = min(remaining, sgl.iov_len); + not_copied = copy_to_user(sgl.iov_base, index, + xfer_len); + if (not_copied) { + res = -EFAULT; + break; + } + index += xfer_len; + remaining -= xfer_len; + if (remaining == 0) + break; + } + return res; + } + not_copied = copy_to_user(hdr->dxferp, from, n); + if (not_copied) + res = -EFAULT; + return res; +} + +/* Copy data from userspace memory */ + +static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to, + unsigned long n) +{ + int res = SNTI_TRANSLATION_SUCCESS; + unsigned long not_copied; + int i; + void *index = to; + size_t remaining = n; + size_t xfer_len; + + if (hdr->iovec_count > 0) { + struct sg_iovec sgl; + + for (i = 0; i < hdr->iovec_count; i++) { + not_copied = copy_from_user(&sgl, hdr->dxferp + + i * sizeof(struct sg_iovec), + sizeof(struct sg_iovec)); + if (not_copied) + return -EFAULT; + xfer_len = min(remaining, sgl.iov_len); + not_copied = copy_from_user(index, sgl.iov_base, + xfer_len); + if (not_copied) { + res = -EFAULT; + break; + } + index += xfer_len; + remaining -= xfer_len; + if (remaining == 0) + break; + } + return res; + } + + not_copied = copy_from_user(to, hdr->dxferp, n); + if (not_copied) + res = -EFAULT; + return res; +} + +/* Status/Sense Buffer Writeback */ + +static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key, + u8 asc, u8 ascq) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 xfer_len; + u8 resp[DESC_FMT_SENSE_DATA_SIZE]; + + if (scsi_status_is_good(status)) { + hdr->status = SAM_STAT_GOOD; + hdr->masked_status = GOOD; + hdr->host_status = DID_OK; + hdr->driver_status = DRIVER_OK; + hdr->sb_len_wr = 0; + } else { + hdr->status = status; + hdr->masked_status = status >> 1; + hdr->host_status = DID_OK; + hdr->driver_status = DRIVER_OK; + + memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE); + resp[0] = DESC_FORMAT_SENSE_DATA; + resp[1] = sense_key; + resp[2] = asc; + resp[3] = ascq; + + xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE); + hdr->sb_len_wr = xfer_len; + if (copy_to_user(hdr->sbp, resp, xfer_len) > 0) + res = -EFAULT; + } + + return res; +} + +static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc) +{ + u8 status, sense_key, asc, ascq; + int res = SNTI_TRANSLATION_SUCCESS; + + /* For non-nvme (Linux) errors, simply return the error code */ + if (nvme_sc < 0) + return nvme_sc; + + /* Mask DNR, More, and reserved fields */ + nvme_sc &= 0x7FF; + + switch (nvme_sc) { + /* Generic Command Status */ + case NVME_SC_SUCCESS: + status = SAM_STAT_GOOD; + sense_key = NO_SENSE; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_OPCODE: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_ILLEGAL_COMMAND; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_FIELD: + status = SAM_STAT_CHECK_CONDITION; + sense_key = 
ILLEGAL_REQUEST; + asc = SCSI_ASC_INVALID_CDB; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_DATA_XFER_ERROR: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_POWER_LOSS: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_WARNING; + ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED; + break; + case NVME_SC_INTERNAL: + status = SAM_STAT_CHECK_CONDITION; + sense_key = HARDWARE_ERROR; + asc = SCSI_ASC_INTERNAL_TARGET_FAILURE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ABORT_REQ: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ABORT_QUEUE: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_FUSED_FAIL: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_FUSED_MISSING: + status = SAM_STAT_TASK_ABORTED; + sense_key = ABORTED_COMMAND; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_INVALID_NS: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; + ascq = SCSI_ASCQ_INVALID_LUN_ID; + break; + case NVME_SC_LBA_RANGE: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_ILLEGAL_BLOCK; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_CAP_EXCEEDED: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_NS_NOT_READY: + status = SAM_STAT_CHECK_CONDITION; + sense_key = NOT_READY; + asc = SCSI_ASC_LUN_NOT_READY; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + + /* Command Specific Status */ + case NVME_SC_INVALID_FORMAT: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_FORMAT_COMMAND_FAILED; + ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED; + break; + case NVME_SC_BAD_ATTRIBUTES: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_INVALID_CDB; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + + /* Media Errors */ + case NVME_SC_WRITE_FAULT: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_READ_ERROR: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_UNRECOVERED_READ_ERROR; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_GUARD_CHECK: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED; + ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED; + break; + case NVME_SC_APPTAG_CHECK: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED; + ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED; + break; + case NVME_SC_REFTAG_CHECK: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MEDIUM_ERROR; + asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED; + ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED; + break; + case NVME_SC_COMPARE_FAILED: + status = SAM_STAT_CHECK_CONDITION; + sense_key = MISCOMPARE; + asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY; + ascq = 
SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + case NVME_SC_ACCESS_DENIED: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID; + ascq = SCSI_ASCQ_INVALID_LUN_ID; + break; + + /* Unspecified/Default */ + case NVME_SC_CMDID_CONFLICT: + case NVME_SC_CMD_SEQ_ERROR: + case NVME_SC_CQ_INVALID: + case NVME_SC_QID_INVALID: + case NVME_SC_QUEUE_SIZE: + case NVME_SC_ABORT_LIMIT: + case NVME_SC_ABORT_MISSING: + case NVME_SC_ASYNC_LIMIT: + case NVME_SC_FIRMWARE_SLOT: + case NVME_SC_FIRMWARE_IMAGE: + case NVME_SC_INVALID_VECTOR: + case NVME_SC_INVALID_LOG_PAGE: + default: + status = SAM_STAT_CHECK_CONDITION; + sense_key = ILLEGAL_REQUEST; + asc = SCSI_ASC_NO_SENSE; + ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + break; + } + + res = nvme_trans_completion(hdr, status, sense_key, asc, ascq); + + return res; +} + +/* INQUIRY Helper Functions */ + +static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *inq_response, + int alloc_len) +{ + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + int xfer_len; + u8 resp_data_format = 0x02; + u8 protect; + u8 cmdque = 0x01 << 1; + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out_dma; + } + + /* nvme ns identify - use DPS value for PROTECT field */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + /* + * If nvme_sc was -ve, res will be -ve here. + * If nvme_sc was +ve, the status would bace been translated, and res + * can only be 0 or -ve. + * - If 0 && nvme_sc > 0, then go into next if where res gets nvme_sc + * - If -ve, return because its a Linux error. + */ + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + id_ns = mem; + (id_ns->dps) ? 
(protect = 0x01) : (protect = 0); + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[2] = VERSION_SPC_4; + inq_response[3] = resp_data_format; /*normaca=0 | hisup=0 */ + inq_response[4] = ADDITIONAL_STD_INQ_LENGTH; + inq_response[5] = protect; /* sccs=0 | acc=0 | tpgs=0 | pc3=0 */ + inq_response[7] = cmdque; /* wbus16=0 | sync=0 | vs=0 */ + strncpy(&inq_response[8], "NVMe ", 8); + strncpy(&inq_response[16], dev->model, 16); + strncpy(&inq_response[32], dev->firmware_rev, 4); + + xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + out_free: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out_dma: + return res; +} + +static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *inq_response, + int alloc_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE; /* Page Code */ + inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES; /* Page Length */ + inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE; + inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE; + inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE; + inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE; + inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE; + + xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + return res; +} + +static int nvme_trans_unit_serial_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *inq_response, + int alloc_len) +{ + struct nvme_dev *dev = ns->dev; + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */ + inq_response[3] = INQ_SERIAL_NUMBER_LENGTH; /* Page Length */ + strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH); + + xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + return res; +} + +static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *inq_response, int alloc_len) +{ + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ctrl *id_ctrl; + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + u8 ieee[4]; + int xfer_len; + __be32 tmp_id = cpu_to_be32(ns->ns_id); + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out_dma; + } + + /* nvme controller identify */ + nvme_sc = nvme_identify(dev, 0, 1, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + id_ctrl = mem; + + /* Since SCSI tried to save 4 bits... 
[SPC-4(r34) Table 591] */ + ieee[0] = id_ctrl->ieee[0] << 4; + ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4; + ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4; + ieee[3] = id_ctrl->ieee[2] >> 4; + + memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */ + inq_response[3] = 20; /* Page Length */ + /* Designation Descriptor start */ + inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */ + inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */ + inq_response[6] = 0x00; /* Rsvd */ + inq_response[7] = 16; /* Designator Length */ + /* Designator start */ + inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/ + inq_response[9] = ieee[2]; /* IEEE ID */ + inq_response[10] = ieee[1]; /* IEEE ID */ + inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */ + inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8; + inq_response[13] = (dev->pci_dev->vendor & 0x00FF); + inq_response[14] = dev->serial[0]; + inq_response[15] = dev->serial[1]; + inq_response[16] = dev->model[0]; + inq_response[17] = dev->model[1]; + memcpy(&inq_response[18], &tmp_id, sizeof(u32)); + /* Last 2 bytes are zero */ + + xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + out_free: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out_dma: + return res; +} + +static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, + int alloc_len) +{ + u8 *inq_response; + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ctrl *id_ctrl; + struct nvme_id_ns *id_ns; + int xfer_len; + u8 microcode = 0x80; + u8 spt; + u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7}; + u8 grd_chk, app_chk, ref_chk, protect; + u8 uask_sup = 0x20; + u8 v_sup; + u8 luiclr = 0x01; + + inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); + if (inq_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out_dma; + } + + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + id_ns = mem; + spt = spt_lut[(id_ns->dpc) & 0x07] << 3; + (id_ns->dps) ? 
(protect = 0x01) : (protect = 0); + grd_chk = protect << 2; + app_chk = protect << 1; + ref_chk = protect; + + /* nvme controller identify */ + nvme_sc = nvme_identify(dev, 0, 1, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + id_ctrl = mem; + v_sup = id_ctrl->vwc; + + memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); + inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE; /* Page Code */ + inq_response[2] = 0x00; /* Page Length MSB */ + inq_response[3] = 0x3C; /* Page Length LSB */ + inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk; + inq_response[5] = uask_sup; + inq_response[6] = v_sup; + inq_response[7] = luiclr; + inq_response[8] = 0; + inq_response[9] = 0; + + xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + out_free: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out_dma: + kfree(inq_response); + out_mem: + return res; +} + +static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, + int alloc_len) +{ + u8 *inq_response; + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + + inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL); + if (inq_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + + memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); + inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE; /* Page Code */ + inq_response[2] = 0x00; /* Page Length MSB */ + inq_response[3] = 0x3C; /* Page Length LSB */ + inq_response[4] = 0x00; /* Medium Rotation Rate MSB */ + inq_response[5] = 0x01; /* Medium Rotation Rate LSB */ + inq_response[6] = 0x00; /* Form Factor */ + + xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH); + res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); + + kfree(inq_response); + out_mem: + return res; +} + +/* LOG SENSE Helper Functions */ + +static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, + int alloc_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + u8 *log_response; + + log_response = kmalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL); + if (log_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + memset(log_response, 0, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH); + + log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; + /* Subpage=0x00, Page Length MSB=0 */ + log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH; + log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE; + log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE; + log_response[6] = LOG_PAGE_TEMPERATURE_PAGE; + + xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH); + res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); + + kfree(log_response); + out_mem: + return res; +} + +static int nvme_trans_log_info_exceptions(struct nvme_ns *ns, + struct sg_io_hdr *hdr, int alloc_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + u8 *log_response; + struct nvme_command c; + struct nvme_dev *dev = ns->dev; + struct nvme_smart_log *smart_log; + dma_addr_t dma_addr; + void *mem; + u8 temp_c; + u16 temp_k; + + log_response = kmalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL); + if (log_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + memset(log_response, 0, LOG_INFO_EXCP_PAGE_LENGTH); + + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_smart_log), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + 
goto out_dma; + } + + /* Get SMART Log Page */ + memset(&c, 0, sizeof(c)); + c.common.opcode = nvme_admin_get_log_page; + c.common.nsid = cpu_to_le32(0xFFFFFFFF); + c.common.prp1 = cpu_to_le64(dma_addr); + c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) / + BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE); + res = nvme_submit_admin_cmd(dev, &c, NULL); + if (res != NVME_SC_SUCCESS) { + temp_c = LOG_TEMP_UNKNOWN; + } else { + smart_log = mem; + temp_k = (smart_log->temperature[1] << 8) + + (smart_log->temperature[0]); + temp_c = temp_k - KELVIN_TEMP_FACTOR; + } + + log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE; + /* Subpage=0x00, Page Length MSB=0 */ + log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH; + /* Informational Exceptions Log Parameter 1 Start */ + /* Parameter Code=0x0000 bytes 4,5 */ + log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */ + log_response[7] = 0x04; /* PARAMETER LENGTH */ + /* Add sense Code and qualifier = 0x00 each */ + /* Use Temperature from NVMe Get Log Page, convert to C from K */ + log_response[10] = temp_c; + + xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH); + res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); + + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log), + mem, dma_addr); + out_dma: + kfree(log_response); + out_mem: + return res; +} + +static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, + int alloc_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + u8 *log_response; + struct nvme_command c; + struct nvme_dev *dev = ns->dev; + struct nvme_smart_log *smart_log; + dma_addr_t dma_addr; + void *mem; + u32 feature_resp; + u8 temp_c_cur, temp_c_thresh; + u16 temp_k; + + log_response = kmalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL); + if (log_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + memset(log_response, 0, LOG_TEMP_PAGE_LENGTH); + + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_smart_log), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out_dma; + } + + /* Get SMART Log Page */ + memset(&c, 0, sizeof(c)); + c.common.opcode = nvme_admin_get_log_page; + c.common.nsid = cpu_to_le32(0xFFFFFFFF); + c.common.prp1 = cpu_to_le64(dma_addr); + c.common.cdw10[0] = cpu_to_le32(((sizeof(struct nvme_smart_log) / + BYTES_TO_DWORDS) << 16) | NVME_GET_SMART_LOG_PAGE); + res = nvme_submit_admin_cmd(dev, &c, NULL); + if (res != NVME_SC_SUCCESS) { + temp_c_cur = LOG_TEMP_UNKNOWN; + } else { + smart_log = mem; + temp_k = (smart_log->temperature[1] << 8) + + (smart_log->temperature[0]); + temp_c_cur = temp_k - KELVIN_TEMP_FACTOR; + } + + /* Get Features for Temp Threshold */ + res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0, + &feature_resp); + if (res != NVME_SC_SUCCESS) + temp_c_thresh = LOG_TEMP_UNKNOWN; + else + temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR; + + log_response[0] = LOG_PAGE_TEMPERATURE_PAGE; + /* Subpage=0x00, Page Length MSB=0 */ + log_response[3] = REMAINING_TEMP_PAGE_LENGTH; + /* Temperature Log Parameter 1 (Temperature) Start */ + /* Parameter Code = 0x0000 */ + log_response[6] = 0x01; /* Format and Linking = 01b */ + log_response[7] = 0x02; /* Parameter Length */ + /* Use Temperature from NVMe Get Log Page, convert to C from K */ + log_response[9] = temp_c_cur; + /* Temperature Log Parameter 2 (Reference Temperature) Start */ + log_response[11] = 0x01; /* Parameter Code = 0x0001 */ + log_response[12] = 0x01; /* Format and Linking = 01b */ + 
log_response[13] = 0x02; /* Parameter Length */ + /* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */ + log_response[15] = temp_c_thresh; + + xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH); + res = nvme_trans_copy_to_user(hdr, log_response, xfer_len); + + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log), + mem, dma_addr); + out_dma: + kfree(log_response); + out_mem: + return res; +} + +/* MODE SENSE Helper Functions */ + +static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa, + u16 mode_data_length, u16 blk_desc_len) +{ + /* Quick check to make sure I don't stomp on my own memory... */ + if ((cdb10 && len < 8) || (!cdb10 && len < 4)) + return SNTI_INTERNAL_ERROR; + + if (cdb10) { + resp[0] = (mode_data_length & 0xFF00) >> 8; + resp[1] = (mode_data_length & 0x00FF); + /* resp[2] and [3] are zero */ + resp[4] = llbaa; + resp[5] = RESERVED_FIELD; + resp[6] = (blk_desc_len & 0xFF00) >> 8; + resp[7] = (blk_desc_len & 0x00FF); + } else { + resp[0] = (mode_data_length & 0x00FF); + /* resp[1] and [2] are zero */ + resp[3] = (blk_desc_len & 0x00FF); + } + + return SNTI_TRANSLATION_SUCCESS; +} + +static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *resp, int len, u8 llbaa) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + u8 flbas; + u32 lba_length; + + if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN) + return SNTI_INTERNAL_ERROR; + else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN) + return SNTI_INTERNAL_ERROR; + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ns = mem; + flbas = (id_ns->flbas) & 0x0F; + lba_length = (1 << (id_ns->lbaf[flbas].ds)); + + if (llbaa == 0) { + __be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap)); + /* Byte 4 is reserved */ + __be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF); + + memcpy(resp, &tmp_cap, sizeof(u32)); + memcpy(&resp[4], &tmp_len, sizeof(u32)); + } else { + __be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap)); + __be32 tmp_len = cpu_to_be32(lba_length); + + memcpy(resp, &tmp_cap, sizeof(u64)); + /* Bytes 8, 9, 10, 11 are reserved */ + memcpy(&resp[12], &tmp_len, sizeof(u32)); + } + + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out: + return res; +} + +static int nvme_trans_fill_control_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *resp, + int len) +{ + if (len < MODE_PAGE_CONTROL_LEN) + return SNTI_INTERNAL_ERROR; + + resp[0] = MODE_PAGE_CONTROL; + resp[1] = MODE_PAGE_CONTROL_LEN_FIELD; + resp[2] = 0x0E; /* TST=000b, TMF_ONLY=0, DPICZ=1, + * D_SENSE=1, GLTSD=1, RLEC=0 */ + resp[3] = 0x12; /* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */ + /* Byte 4: VS=0, RAC=0, UA_INT=0, SWP=0 */ + resp[5] = 0x40; /* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */ + /* resp[6] and [7] are obsolete, thus zero */ + resp[8] = 0xFF; /* Busy timeout period = 0xffff */ + resp[9] = 0xFF; + /* Bytes 10,11: Extended selftest completion time = 0x0000 */ + + return SNTI_TRANSLATION_SUCCESS; +} + +static int nvme_trans_fill_caching_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, + u8 *resp, int len) +{ + 
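+	/* The Caching mode page WCE bit is derived from the NVMe Volatile
+	 * Write Cache feature returned by nvme_get_features() below. */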
int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + u32 feature_resp; + u8 vwc; + + if (len < MODE_PAGE_CACHING_LEN) + return SNTI_INTERNAL_ERROR; + + nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0, + &feature_resp); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out; + if (nvme_sc) { + res = nvme_sc; + goto out; + } + vwc = feature_resp & 0x00000001; + + resp[0] = MODE_PAGE_CACHING; + resp[1] = MODE_PAGE_CACHING_LEN_FIELD; + resp[2] = vwc << 2; + + out: + return res; +} + +static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *resp, + int len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + + if (len < MODE_PAGE_POW_CND_LEN) + return SNTI_INTERNAL_ERROR; + + resp[0] = MODE_PAGE_POWER_CONDITION; + resp[1] = MODE_PAGE_POW_CND_LEN_FIELD; + /* All other bytes are zero */ + + return res; +} + +static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *resp, + int len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + + if (len < MODE_PAGE_INF_EXC_LEN) + return SNTI_INTERNAL_ERROR; + + resp[0] = MODE_PAGE_INFO_EXCEP; + resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD; + resp[2] = 0x88; + /* All other bytes are zero */ + + return res; +} + +static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *resp, int len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u16 mode_pages_offset_1 = 0; + u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4; + + mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN; + mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN; + mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN; + + res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1], + MODE_PAGE_CACHING_LEN); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2], + MODE_PAGE_CONTROL_LEN); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3], + MODE_PAGE_POW_CND_LEN); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4], + MODE_PAGE_INF_EXC_LEN); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + + out: + return res; +} + +static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa) +{ + if (dbd == MODE_SENSE_BLK_DESC_ENABLED) { + /* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */ + return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT; + } else { + return 0; + } +} + +static int nvme_trans_mode_page_create(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *cmd, + u16 alloc_len, u8 cdb10, + int (*mode_page_fill_func) + (struct nvme_ns *, + struct sg_io_hdr *hdr, u8 *, int), + u16 mode_pages_tot_len) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int xfer_len; + u8 *response; + u8 dbd, llbaa; + u16 resp_size; + int mph_size; + u16 mode_pages_offset_1; + u16 blk_desc_len, blk_desc_offset, mode_data_length; + + dbd = GET_MODE_SENSE_DBD(cmd); + llbaa = GET_MODE_SENSE_LLBAA(cmd); + mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10); + blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa); + + resp_size = mph_size + blk_desc_len + mode_pages_tot_len; + /* Refer spc4r34 Table 440 for calculation of Mode data Length field */ + mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len; + + blk_desc_offset = mph_size; + mode_pages_offset_1 = blk_desc_offset + blk_desc_len; + + response = 
kmalloc(resp_size, GFP_KERNEL); + if (response == NULL) { + res = -ENOMEM; + goto out_mem; + } + memset(response, 0, resp_size); + + res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10, + llbaa, mode_data_length, blk_desc_len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_free; + if (blk_desc_len > 0) { + res = nvme_trans_fill_blk_desc(ns, hdr, + &response[blk_desc_offset], + blk_desc_len, llbaa); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_free; + } + res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1], + mode_pages_tot_len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_free; + + xfer_len = min(alloc_len, resp_size); + res = nvme_trans_copy_to_user(hdr, response, xfer_len); + + out_free: + kfree(response); + out_mem: + return res; +} + +/* Read Capacity Helper Functions */ + +static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns, + u8 cdb16) +{ + u8 flbas; + u32 lba_length; + u64 rlba; + u8 prot_en; + u8 p_type_lut[4] = {0, 0, 1, 2}; + __be64 tmp_rlba; + __be32 tmp_rlba_32; + __be32 tmp_len; + + flbas = (id_ns->flbas) & 0x0F; + lba_length = (1 << (id_ns->lbaf[flbas].ds)); + rlba = le64_to_cpup(&id_ns->nsze) - 1; + (id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0); + + if (!cdb16) { + if (rlba > 0xFFFFFFFF) + rlba = 0xFFFFFFFF; + tmp_rlba_32 = cpu_to_be32(rlba); + tmp_len = cpu_to_be32(lba_length); + memcpy(response, &tmp_rlba_32, sizeof(u32)); + memcpy(&response[4], &tmp_len, sizeof(u32)); + } else { + tmp_rlba = cpu_to_be64(rlba); + tmp_len = cpu_to_be32(lba_length); + memcpy(response, &tmp_rlba, sizeof(u64)); + memcpy(&response[8], &tmp_len, sizeof(u32)); + response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en; + /* P_I_Exponent = 0x0 | LBPPBE = 0x0 */ + /* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */ + /* Bytes 16-31 - Reserved */ + } +} + +/* Start Stop Unit Helper Functions */ + +static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 pc, u8 pcmod, u8 start) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ctrl *id_ctrl; + int lowest_pow_st; /* max npss = lowest power consumption */ + unsigned ps_desired = 0; + + /* NVMe Controller Identify */ + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_id_ctrl), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + nvme_sc = nvme_identify(dev, 0, 1, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ctrl = mem; + lowest_pow_st = id_ctrl->npss - 1; + + switch (pc) { + case NVME_POWER_STATE_START_VALID: + /* Action unspecified if POWER CONDITION MODIFIER != 0 */ + if (pcmod == 0 && start == 0x1) + ps_desired = POWER_STATE_0; + if (pcmod == 0 && start == 0x0) + ps_desired = lowest_pow_st; + break; + case NVME_POWER_STATE_ACTIVE: + /* Action unspecified if POWER CONDITION MODIFIER != 0 */ + if (pcmod == 0) + ps_desired = POWER_STATE_0; + break; + case NVME_POWER_STATE_IDLE: + /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */ + /* min of desired state and (lps-1) because lps is STOP */ + if (pcmod == 0x0) + ps_desired = min(POWER_STATE_1, (lowest_pow_st - 1)); + else if (pcmod == 0x1) + ps_desired = min(POWER_STATE_2, (lowest_pow_st - 1)); + else if (pcmod == 0x2) + ps_desired = min(POWER_STATE_3, (lowest_pow_st - 1)); + break; + case NVME_POWER_STATE_STANDBY: + /* Action unspecified if POWER CONDITION MODIFIER != 
[0,1] */ + if (pcmod == 0x0) + ps_desired = max(0, (lowest_pow_st - 2)); + else if (pcmod == 0x1) + ps_desired = max(0, (lowest_pow_st - 1)); + break; + case NVME_POWER_STATE_LU_CONTROL: + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0, + NULL); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) + res = nvme_sc; + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem, + dma_addr); + out: + return res; +} + +/* Write Buffer Helper Functions */ +/* Also using this for Format Unit with hdr passed as NULL, and buffer_id, 0 */ + +static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 opcode, u32 tot_len, u32 offset, + u8 buffer_id) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + struct nvme_command c; + struct nvme_iod *iod = NULL; + unsigned length; + + memset(&c, 0, sizeof(c)); + c.common.opcode = opcode; + if (opcode == nvme_admin_download_fw) { + if (hdr->iovec_count > 0) { + /* Assuming SGL is not allowed for this command */ + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + iod = nvme_map_user_pages(dev, DMA_TO_DEVICE, + (unsigned long)hdr->dxferp, tot_len); + if (IS_ERR(iod)) { + res = PTR_ERR(iod); + goto out; + } + length = nvme_setup_prps(dev, &c.common, iod, tot_len, + GFP_KERNEL); + if (length != tot_len) { + res = -ENOMEM; + goto out_unmap; + } + + c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1); + c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS); + } else if (opcode == nvme_admin_activate_fw) { + u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV; + c.common.cdw10[0] = cpu_to_le32(cdw10); + } + + nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_unmap; + if (nvme_sc) + res = nvme_sc; + + out_unmap: + if (opcode == nvme_admin_download_fw) { + nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod); + nvme_free_iod(dev, iod); + } + out: + return res; +} + +/* Mode Select Helper Functions */ + +static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10, + u16 *bd_len, u8 *llbaa) +{ + if (cdb10) { + /* 10 Byte CDB */ + *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) + + parm_list[MODE_SELECT_10_BD_OFFSET + 1]; + *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] && + MODE_SELECT_10_LLBAA_MASK; + } else { + /* 6 Byte CDB */ + *bd_len = parm_list[MODE_SELECT_6_BD_OFFSET]; + } +} + +static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list, + u16 idx, u16 bd_len, u8 llbaa) +{ + u16 bd_num; + + bd_num = bd_len / ((llbaa == 0) ? + SHORT_DESC_BLOCK : LONG_DESC_BLOCK); + /* Store block descriptor info if a FORMAT UNIT comes later */ + /* TODO Saving 1st BD info; what to do if multiple BD received? 
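+	   For now only the first descriptor (at 'idx') is parsed and saved.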
*/ + if (llbaa == 0) { + /* Standard Block Descriptor - spc4r34 7.5.5.1 */ + ns->mode_select_num_blocks = + (parm_list[idx + 1] << 16) + + (parm_list[idx + 2] << 8) + + (parm_list[idx + 3]); + + ns->mode_select_block_len = + (parm_list[idx + 5] << 16) + + (parm_list[idx + 6] << 8) + + (parm_list[idx + 7]); + } else { + /* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */ + ns->mode_select_num_blocks = + (((u64)parm_list[idx + 0]) << 56) + + (((u64)parm_list[idx + 1]) << 48) + + (((u64)parm_list[idx + 2]) << 40) + + (((u64)parm_list[idx + 3]) << 32) + + (((u64)parm_list[idx + 4]) << 24) + + (((u64)parm_list[idx + 5]) << 16) + + (((u64)parm_list[idx + 6]) << 8) + + ((u64)parm_list[idx + 7]); + + ns->mode_select_block_len = + (parm_list[idx + 12] << 24) + + (parm_list[idx + 13] << 16) + + (parm_list[idx + 14] << 8) + + (parm_list[idx + 15]); + } +} + +static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *mode_page, u8 page_code) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + unsigned dword11; + + switch (page_code) { + case MODE_PAGE_CACHING: + dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0); + nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11, + 0, NULL); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + break; + if (nvme_sc) { + res = nvme_sc; + break; + } + break; + case MODE_PAGE_CONTROL: + break; + case MODE_PAGE_POWER_CONDITION: + /* Verify the OS is not trying to set timers */ + if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_PARAMETER, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + if (!res) + res = SNTI_INTERNAL_ERROR; + break; + } + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + if (!res) + res = SNTI_INTERNAL_ERROR; + break; + } + + return res; +} + +static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd, u16 parm_list_len, u8 pf, + u8 sp, u8 cdb10) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 *parm_list; + u16 bd_len; + u8 llbaa = 0; + u16 index, saved_index; + u8 page_code; + u16 mp_size; + + /* Get parm list from data-in/out buffer */ + parm_list = kmalloc(parm_list_len, GFP_KERNEL); + if (parm_list == NULL) { + res = -ENOMEM; + goto out; + } + + res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_mem; + + nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa); + index = (cdb10) ? 
(MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE); + + if (bd_len != 0) { + /* Block Descriptors present, parse */ + nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa); + index += bd_len; + } + saved_index = index; + + /* Multiple mode pages may be present; iterate through all */ + /* In 1st Iteration, don't do NVME Command, only check for CDB errors */ + do { + page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; + mp_size = parm_list[index + 1] + 2; + if ((page_code != MODE_PAGE_CACHING) && + (page_code != MODE_PAGE_CONTROL) && + (page_code != MODE_PAGE_POWER_CONDITION)) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_mem; + } + index += mp_size; + } while (index < parm_list_len); + + /* In 2nd Iteration, do the NVME Commands */ + index = saved_index; + do { + page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK; + mp_size = parm_list[index + 1] + 2; + res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index], + page_code); + if (res != SNTI_TRANSLATION_SUCCESS) + break; + index += mp_size; + } while (index < parm_list_len); + + out_mem: + kfree(parm_list); + out: + return res; +} + +/* Format Unit Helper Functions */ + +static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns, + struct sg_io_hdr *hdr) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + u8 flbas; + + /* + * SCSI Expects a MODE SELECT would have been issued prior to + * a FORMAT UNIT, and the block size and number would be used + * from the block descriptor in it. If a MODE SELECT had not + * been issued, FORMAT shall use the current values for both. 
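+ * + * A zero value in either ns->mode_select_num_blocks or + * ns->mode_select_block_len therefore means no usable block + * descriptor was saved; the missing value is then taken from + * Identify Namespace below (ncap, and the data size of the + * active LBA format). 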
+ */ + + if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) { + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ns = mem; + + if (ns->mode_select_num_blocks == 0) + ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap); + if (ns->mode_select_block_len == 0) { + flbas = (id_ns->flbas) & 0x0F; + ns->mode_select_block_len = + (1 << (id_ns->lbaf[flbas].ds)); + } + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + mem, dma_addr); + } + out: + return res; +} + +static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len, + u8 format_prot_info, u8 *nvme_pf_code) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 *parm_list; + u8 pf_usage, pf_code; + + parm_list = kmalloc(len, GFP_KERNEL); + if (parm_list == NULL) { + res = -ENOMEM; + goto out; + } + res = nvme_trans_copy_from_user(hdr, parm_list, len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out_mem; + + if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] & + FORMAT_UNIT_IMMED_MASK) != 0) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_mem; + } + + if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN && + (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_mem; + } + pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] & + FORMAT_UNIT_PROT_FIELD_USAGE_MASK; + pf_code = (pf_usage << 2) | format_prot_info; + switch (pf_code) { + case 0: + *nvme_pf_code = 0; + break; + case 2: + *nvme_pf_code = 1; + break; + case 3: + *nvme_pf_code = 2; + break; + case 7: + *nvme_pf_code = 3; + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + + out_mem: + kfree(parm_list); + out: + return res; +} + +static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 prot_info) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + u8 i; + u8 flbas, nlbaf; + u8 selected_lbaf = 0xFF; + u32 cdw10 = 0; + struct nvme_command c; + + /* Loop thru LBAF's in id_ns to match reqd lbaf, put in cdw10 */ + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ns = mem; + flbas = (id_ns->flbas) & 0x0F; + nlbaf = id_ns->nlbaf; + + for (i = 0; i < nlbaf; i++) { + if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) { + selected_lbaf = i; + break; + } + } + if (selected_lbaf > 0x0F) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + } + if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) { + res = nvme_trans_completion(hdr, 
SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + } + + cdw10 |= prot_info << 5; + cdw10 |= selected_lbaf & 0x0F; + memset(&c, 0, sizeof(c)); + c.format.opcode = nvme_admin_format_nvm; + c.format.nsid = cpu_to_le32(ns->ns_id); + c.format.cdw10 = cpu_to_le32(cdw10); + + nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) + res = nvme_sc; + + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out: + return res; +} + +/* Read/Write Helper Functions */ + +static inline void nvme_trans_get_io_cdb6(u8 *cmd, + struct nvme_trans_io_cdb *cdb_info) +{ + cdb_info->fua = 0; + cdb_info->prot_info = 0; + cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) & + IO_6_CDB_LBA_MASK; + cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET); + + /* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */ + if (cdb_info->xfer_len == 0) + cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN; +} + +static inline void nvme_trans_get_io_cdb10(u8 *cmd, + struct nvme_trans_io_cdb *cdb_info) +{ + cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) & + IO_CDB_FUA_MASK; + cdb_info->prot_info = GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) & + IO_CDB_WP_MASK >> IO_CDB_WP_SHIFT; + cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET); + cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET); +} + +static inline void nvme_trans_get_io_cdb12(u8 *cmd, + struct nvme_trans_io_cdb *cdb_info) +{ + cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) & + IO_CDB_FUA_MASK; + cdb_info->prot_info = GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) & + IO_CDB_WP_MASK >> IO_CDB_WP_SHIFT; + cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET); + cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET); +} + +static inline void nvme_trans_get_io_cdb16(u8 *cmd, + struct nvme_trans_io_cdb *cdb_info) +{ + cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) & + IO_CDB_FUA_MASK; + cdb_info->prot_info = GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) & + IO_CDB_WP_MASK >> IO_CDB_WP_SHIFT; + cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET); + cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET); +} + +static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr, + struct nvme_trans_io_cdb *cdb_info, + u32 max_blocks) +{ + /* If using iovecs, send one nvme command per vector */ + if (hdr->iovec_count > 0) + return hdr->iovec_count; + else if (cdb_info->xfer_len > max_blocks) + return ((cdb_info->xfer_len - 1) / max_blocks) + 1; + else + return 1; +} + +static u16 nvme_trans_io_get_control(struct nvme_ns *ns, + struct nvme_trans_io_cdb *cdb_info) +{ + u16 control = 0; + + /* When Protection information support is added, implement here */ + + if (cdb_info->fua > 0) + control |= NVME_RW_FUA; + + return control; +} + +static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, + struct nvme_trans_io_cdb *cdb_info, u8 is_write) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_dev *dev = ns->dev; + struct nvme_queue *nvmeq; + u32 num_cmds; + struct nvme_iod *iod; + u64 unit_len; + u64 unit_num_blocks; /* Number of blocks to xfer in each nvme cmd */ + u32 retcode; + u32 i = 0; + u64 nvme_offset = 0; + void __user *next_mapping_addr; + struct nvme_command c; + u8 opcode = (is_write ? 
nvme_cmd_write : nvme_cmd_read); + u16 control; + u32 max_blocks = nvme_block_nr(ns, dev->max_hw_sectors); + + num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks); + + /* + * This loop handles two cases. + * First, when an SGL is used in the form of an iovec list: + * - Use iov_base as the next mapping address for the nvme command_id + * - Use iov_len as the data transfer length for the command. + * Second, when we have a single buffer + * - If larger than max_blocks, split into chunks, offset + * each nvme command accordingly. + */ + for (i = 0; i < num_cmds; i++) { + memset(&c, 0, sizeof(c)); + if (hdr->iovec_count > 0) { + struct sg_iovec sgl; + + retcode = copy_from_user(&sgl, hdr->dxferp + + i * sizeof(struct sg_iovec), + sizeof(struct sg_iovec)); + if (retcode) + return -EFAULT; + unit_len = sgl.iov_len; + unit_num_blocks = unit_len >> ns->lba_shift; + next_mapping_addr = sgl.iov_base; + } else { + unit_num_blocks = min((u64)max_blocks, + (cdb_info->xfer_len - nvme_offset)); + unit_len = unit_num_blocks << ns->lba_shift; + next_mapping_addr = hdr->dxferp + + ((1 << ns->lba_shift) * nvme_offset); + } + + c.rw.opcode = opcode; + c.rw.nsid = cpu_to_le32(ns->ns_id); + c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset); + c.rw.length = cpu_to_le16(unit_num_blocks - 1); + control = nvme_trans_io_get_control(ns, cdb_info); + c.rw.control = cpu_to_le16(control); + + iod = nvme_map_user_pages(dev, + (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, + (unsigned long)next_mapping_addr, unit_len); + if (IS_ERR(iod)) { + res = PTR_ERR(iod); + goto out; + } + retcode = nvme_setup_prps(dev, &c.common, iod, unit_len, + GFP_KERNEL); + if (retcode != unit_len) { + nvme_unmap_user_pages(dev, + (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, + iod); + nvme_free_iod(dev, iod); + res = -ENOMEM; + goto out; + } + + nvme_offset += unit_num_blocks; + + nvmeq = get_nvmeq(dev); + /* + * Since nvme_submit_sync_cmd sleeps, we can't keep + * preemption disabled. We may be preempted at any + * point, and be rescheduled to a different CPU. That + * will cause cacheline bouncing, but no additional + * races since q_lock already protects against other + * CPUs. + */ + put_nvmeq(nvmeq); + nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, + NVME_IO_TIMEOUT); + if (nvme_sc != NVME_SC_SUCCESS) { + nvme_unmap_user_pages(dev, + (is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE, + iod); + nvme_free_iod(dev, iod); + res = nvme_trans_status_code(hdr, nvme_sc); + goto out; + } + nvme_unmap_user_pages(dev, + (is_write) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE, + iod); + nvme_free_iod(dev, iod); + } + res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS); + + out: + return res; +} + + +/* SCSI Command Translation Functions */ + +static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + struct nvme_trans_io_cdb cdb_info; + u8 opcode = cmd[0]; + u64 xfer_bytes; + u64 sum_iov_len = 0; + struct sg_iovec sgl; + int i; + size_t not_copied; + + /* Extract Fields from CDB */ + switch (opcode) { + case WRITE_6: + case READ_6: + nvme_trans_get_io_cdb6(cmd, &cdb_info); + break; + case WRITE_10: + case READ_10: + nvme_trans_get_io_cdb10(cmd, &cdb_info); + break; + case WRITE_12: + case READ_12: + nvme_trans_get_io_cdb12(cmd, &cdb_info); + break; + case WRITE_16: + case READ_16: + nvme_trans_get_io_cdb16(cmd, &cdb_info); + break; + default: + /* Will never really reach here */ + res = SNTI_INTERNAL_ERROR; + goto out; + } + + /* Calculate total length of transfer (in bytes) */ + if (hdr->iovec_count > 0) { + for (i = 0; i < hdr->iovec_count; i++) { + not_copied = copy_from_user(&sgl, hdr->dxferp + + i * sizeof(struct sg_iovec), + sizeof(struct sg_iovec)); + if (not_copied) + return -EFAULT; + sum_iov_len += sgl.iov_len; + /* IO vector sizes should be multiples of block size */ + if (sgl.iov_len % (1 << ns->lba_shift) != 0) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_PARAMETER, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + } + } else { + sum_iov_len = hdr->dxfer_len; + } + + /* As Per sg ioctl howto, if the lengths differ, use the lower one */ + xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len); + + /* If block count and actual data buffer size dont match, error out */ + if (xfer_bytes != (cdb_info.xfer_len << ns->lba_shift)) { + res = -EINVAL; + goto out; + } + + /* Check for 0 length transfer - it is not illegal */ + if (cdb_info.xfer_len == 0) + goto out; + + /* Send NVMe IO Command(s) */ + res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + + out: + return res; +} + +static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 evpd; + u8 page_code; + int alloc_len; + u8 *inq_response; + + evpd = GET_INQ_EVPD_BIT(cmd); + page_code = GET_INQ_PAGE_CODE(cmd); + alloc_len = GET_INQ_ALLOC_LENGTH(cmd); + + inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL); + if (inq_response == NULL) { + res = -ENOMEM; + goto out_mem; + } + + if (evpd == 0) { + if (page_code == INQ_STANDARD_INQUIRY_PAGE) { + res = nvme_trans_standard_inquiry_page(ns, hdr, + inq_response, alloc_len); + } else { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, + SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + } + } else { + switch (page_code) { + case VPD_SUPPORTED_PAGES: + res = nvme_trans_supported_vpd_pages(ns, hdr, + inq_response, alloc_len); + break; + case VPD_SERIAL_NUMBER: + res = nvme_trans_unit_serial_page(ns, hdr, inq_response, + alloc_len); + break; + case VPD_DEVICE_IDENTIFIERS: + res = nvme_trans_device_id_page(ns, hdr, inq_response, + alloc_len); + break; + case VPD_EXTENDED_INQUIRY: + res = nvme_trans_ext_inq_page(ns, hdr, alloc_len); + break; + case VPD_BLOCK_DEV_CHARACTERISTICS: + res = nvme_trans_bdev_char_page(ns, hdr, alloc_len); + break; + default: + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + 
ILLEGAL_REQUEST, + SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + } + kfree(inq_response); + out_mem: + return res; +} + +static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u16 alloc_len; + u8 sp; + u8 pc; + u8 page_code; + + sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET); + if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET); + page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK; + pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT; + if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET); + switch (page_code) { + case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE: + res = nvme_trans_log_supp_pages(ns, hdr, alloc_len); + break; + case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE: + res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len); + break; + case LOG_PAGE_TEMPERATURE_PAGE: + res = nvme_trans_log_temperature(ns, hdr, alloc_len); + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + + out: + return res; +} + +static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 cdb10 = 0; + u16 parm_list_len; + u8 page_format; + u8 save_pages; + + page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET); + page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK; + + save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET); + save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK; + + if (GET_OPCODE(cmd) == MODE_SELECT) { + parm_list_len = GET_U8_FROM_CDB(cmd, + MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET); + } else { + parm_list_len = GET_U16_FROM_CDB(cmd, + MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET); + cdb10 = 1; + } + + if (parm_list_len != 0) { + /* + * According to SPC-4 r24, a paramter list length field of 0 + * shall not be considered an error + */ + res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len, + page_format, save_pages, cdb10); + } + + return res; +} + +static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u16 alloc_len; + u8 cdb10 = 0; + u8 page_code; + u8 pc; + + if (GET_OPCODE(cmd) == MODE_SENSE) { + alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET); + } else { + alloc_len = GET_U16_FROM_CDB(cmd, + MODE_SENSE10_ALLOC_LEN_OFFSET); + cdb10 = 1; + } + + pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) & + MODE_SENSE_PAGE_CONTROL_MASK; + if (pc != MODE_SENSE_PC_CURRENT_VALUES) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + + page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) & + MODE_SENSE_PAGE_CODE_MASK; + switch (page_code) { + case MODE_PAGE_CACHING: + res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, + cdb10, + &nvme_trans_fill_caching_page, + MODE_PAGE_CACHING_LEN); + break; + case MODE_PAGE_CONTROL: + res = nvme_trans_mode_page_create(ns, hdr, cmd, 
alloc_len, + cdb10, + &nvme_trans_fill_control_page, + MODE_PAGE_CONTROL_LEN); + break; + case MODE_PAGE_POWER_CONDITION: + res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, + cdb10, + &nvme_trans_fill_pow_cnd_page, + MODE_PAGE_POW_CND_LEN); + break; + case MODE_PAGE_INFO_EXCEP: + res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, + cdb10, + &nvme_trans_fill_inf_exc_page, + MODE_PAGE_INF_EXC_LEN); + break; + case MODE_PAGE_RETURN_ALL: + res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len, + cdb10, + &nvme_trans_fill_all_pages, + MODE_PAGE_ALL_LEN); + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + + out: + return res; +} + +static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + u32 alloc_len = READ_CAP_10_RESP_SIZE; + u32 resp_size = READ_CAP_10_RESP_SIZE; + u32 xfer_len; + u8 cdb16; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ns *id_ns; + u8 *response; + + cdb16 = IS_READ_CAP_16(cmd); + if (cdb16) { + alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd); + resp_size = READ_CAP_16_RESP_SIZE; + } + + mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + /* nvme ns identify */ + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ns = mem; + + response = kmalloc(resp_size, GFP_KERNEL); + if (response == NULL) { + res = -ENOMEM; + goto out_dma; + } + memset(response, 0, resp_size); + nvme_trans_fill_read_cap(response, id_ns, cdb16); + + xfer_len = min(alloc_len, resp_size); + res = nvme_trans_copy_to_user(hdr, response, xfer_len); + + kfree(response); + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem, + dma_addr); + out: + return res; +} + +static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + u32 alloc_len, xfer_len, resp_size; + u8 select_report; + u8 *response; + struct nvme_dev *dev = ns->dev; + dma_addr_t dma_addr; + void *mem; + struct nvme_id_ctrl *id_ctrl; + u32 ll_length, lun_id; + u8 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET; + __be32 tmp_len; + + alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd); + select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET); + + if ((select_report != ALL_LUNS_RETURNED) && + (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) && + (select_report != RESTRICTED_LUNS_RETURNED)) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } else { + /* NVMe Controller Identify */ + mem = dma_alloc_coherent(&dev->pci_dev->dev, + sizeof(struct nvme_id_ctrl), + &dma_addr, GFP_KERNEL); + if (mem == NULL) { + res = -ENOMEM; + goto out; + } + nvme_sc = nvme_identify(dev, 0, 1, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_dma; + if (nvme_sc) { + res = nvme_sc; + goto out_dma; + } + id_ctrl = mem; + ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE; + resp_size = ll_length + LUN_DATA_HEADER_SIZE; + + if (alloc_len < resp_size) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, 
SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_dma; + } + + response = kmalloc(resp_size, GFP_KERNEL); + if (response == NULL) { + res = -ENOMEM; + goto out_dma; + } + memset(response, 0, resp_size); + + /* The first LUN ID will always be 0 per the SAM spec */ + for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) { + /* + * Set the LUN Id and then increment to the next LUN + * location in the parameter data. + */ + __be64 tmp_id = cpu_to_be64(lun_id); + memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64)); + lun_id_offset += LUN_ENTRY_SIZE; + } + tmp_len = cpu_to_be32(ll_length); + memcpy(response, &tmp_len, sizeof(u32)); + } + + xfer_len = min(alloc_len, resp_size); + res = nvme_trans_copy_to_user(hdr, response, xfer_len); + + kfree(response); + out_dma: + dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem, + dma_addr); + out: + return res; +} + +static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 alloc_len, xfer_len, resp_size; + u8 desc_format; + u8 *response; + + alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd); + desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET); + desc_format &= REQUEST_SENSE_DESC_MASK; + + resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) : + (FIXED_FMT_SENSE_DATA_SIZE)); + response = kmalloc(resp_size, GFP_KERNEL); + if (response == NULL) { + res = -ENOMEM; + goto out; + } + memset(response, 0, resp_size); + + if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) { + /* Descriptor Format Sense Data */ + response[0] = DESC_FORMAT_SENSE_DATA; + response[1] = NO_SENSE; + /* TODO How is LOW POWER CONDITION ON handled? (byte 2) */ + response[2] = SCSI_ASC_NO_SENSE; + response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + /* SDAT_OVFL = 0 | Additional Sense Length = 0 */ + } else { + /* Fixed Format Sense Data */ + response[0] = FIXED_SENSE_DATA; + /* Byte 1 = Obsolete */ + response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */ + /* Bytes 3-6 - Information - set to zero */ + response[7] = FIXED_SENSE_DATA_ADD_LENGTH; + /* Bytes 8-11 - Cmd Specific Information - set to zero */ + response[12] = SCSI_ASC_NO_SENSE; + response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE; + /* Byte 14 = Field Replaceable Unit Code = 0 */ + /* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */ + } + + xfer_len = min(alloc_len, resp_size); + res = nvme_trans_copy_to_user(hdr, response, xfer_len); + + kfree(response); + out: + return res; +} + +static int nvme_trans_security_protocol(struct nvme_ns *ns, + struct sg_io_hdr *hdr, + u8 *cmd) +{ + return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); +} + +static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_queue *nvmeq; + struct nvme_command c; + u8 immed, pcmod, pc, no_flush, start; + + immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET); + pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET); + pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET); + no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET); + start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET); + + immed &= START_STOP_UNIT_CDB_IMMED_MASK; + pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK; + pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT; + no_flush &= 
START_STOP_UNIT_CDB_NO_FLUSH_MASK; + start &= START_STOP_UNIT_CDB_START_MASK; + + if (immed != 0) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + } else { + if (no_flush == 0) { + /* Issue NVME FLUSH command prior to START STOP UNIT */ + memset(&c, 0, sizeof(c)); + c.common.opcode = nvme_cmd_flush; + c.common.nsid = cpu_to_le32(ns->ns_id); + + nvmeq = get_nvmeq(ns->dev); + put_nvmeq(nvmeq); + nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out; + if (nvme_sc) { + res = nvme_sc; + goto out; + } + } + /* Setup the expected power state transition */ + res = nvme_trans_power_state(ns, hdr, pc, pcmod, start); + } + + out: + return res; +} + +static int nvme_trans_synchronize_cache(struct nvme_ns *ns, + struct sg_io_hdr *hdr, u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + int nvme_sc; + struct nvme_command c; + struct nvme_queue *nvmeq; + + memset(&c, 0, sizeof(c)); + c.common.opcode = nvme_cmd_flush; + c.common.nsid = cpu_to_le32(ns->ns_id); + + nvmeq = get_nvmeq(ns->dev); + put_nvmeq(nvmeq); + nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out; + if (nvme_sc) + res = nvme_sc; + + out: + return res; +} + +static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u8 parm_hdr_len = 0; + u8 nvme_pf_code = 0; + u8 format_prot_info, long_list, format_data; + + format_prot_info = GET_U8_FROM_CDB(cmd, + FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET); + long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET); + format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET); + + format_prot_info = (format_prot_info & + FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >> + FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT; + long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK; + format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK; + + if (format_data != 0) { + if (format_prot_info != 0) { + if (long_list == 0) + parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN; + else + parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN; + } + } else if (format_data == 0 && format_prot_info != 0) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + + /* Get parm header from data-in/out buffer */ + /* + * According to the translation spec, the only fields in the parameter + * list we are concerned with are in the header. So allocate only that. 
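+ * The header length chosen above (FORMAT_UNIT_SHORT_PARM_LIST_LEN or + * FORMAT_UNIT_LONG_PARM_LIST_LEN, depending on the LONGLIST bit) is + * passed down so the protection field usage can be combined with + * FMTPINFO into an NVMe protection information code. 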
+ */ + if (parm_hdr_len > 0) { + res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len, + format_prot_info, &nvme_pf_code); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + } + + /* Attempt to activate any previously downloaded firmware image */ + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0); + + /* Determine Block size and count and send format command */ + res = nvme_trans_fmt_set_blk_size_count(ns, hdr); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + + res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code); + + out: + return res; +} + +static int nvme_trans_test_unit_ready(struct nvme_ns *ns, + struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + struct nvme_dev *dev = ns->dev; + + if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + NOT_READY, SCSI_ASC_LUN_NOT_READY, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + else + res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0); + + return res; +} + +static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + int res = SNTI_TRANSLATION_SUCCESS; + u32 buffer_offset, parm_list_length; + u8 buffer_id, mode; + + parm_list_length = + GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET); + if (parm_list_length % BYTES_TO_DWORDS != 0) { + /* NVMe expects Firmware file to be a whole number of DWORDS */ + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET); + if (buffer_id > NVME_MAX_FIRMWARE_SLOT) { + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out; + } + mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) & + WRITE_BUFFER_CDB_MODE_MASK; + buffer_offset = + GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET); + + switch (mode) { + case DOWNLOAD_SAVE_ACTIVATE: + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw, + parm_list_length, buffer_offset, + buffer_id); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, + parm_list_length, buffer_offset, + buffer_id); + break; + case DOWNLOAD_SAVE_DEFER_ACTIVATE: + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw, + parm_list_length, buffer_offset, + buffer_id); + break; + case ACTIVATE_DEFERRED_MICROCODE: + res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, + parm_list_length, buffer_offset, + buffer_id); + break; + default: + res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + + out: + return res; +} + +struct scsi_unmap_blk_desc { + __be64 slba; + __be32 nlb; + u32 resv; +}; + +struct scsi_unmap_parm_list { + __be16 unmap_data_len; + __be16 unmap_blk_desc_data_len; + u32 resv; + struct scsi_unmap_blk_desc desc[0]; +}; + +static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr, + u8 *cmd) +{ + struct nvme_dev *dev = ns->dev; + struct scsi_unmap_parm_list *plist; + struct nvme_dsm_range *range; + struct nvme_queue *nvmeq; + struct nvme_command c; + int i, nvme_sc, res = -ENOMEM; + u16 ndesc, list_len; + dma_addr_t dma_addr; + + list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET); + if (!list_len) + return -EINVAL; + + plist = kmalloc(list_len, 
GFP_KERNEL); + if (!plist) + return -ENOMEM; + + res = nvme_trans_copy_from_user(hdr, plist, list_len); + if (res != SNTI_TRANSLATION_SUCCESS) + goto out; + + ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4; + if (!ndesc || ndesc > 256) { + res = -EINVAL; + goto out; + } + + range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range), + &dma_addr, GFP_KERNEL); + if (!range) + goto out; + + for (i = 0; i < ndesc; i++) { + range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb)); + range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba)); + range[i].cattr = 0; + } + + memset(&c, 0, sizeof(c)); + c.dsm.opcode = nvme_cmd_dsm; + c.dsm.nsid = cpu_to_le32(ns->ns_id); + c.dsm.prp1 = cpu_to_le64(dma_addr); + c.dsm.nr = cpu_to_le32(ndesc - 1); + c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD); + + nvmeq = get_nvmeq(dev); + put_nvmeq(nvmeq); + + nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT); + res = nvme_trans_status_code(hdr, nvme_sc); + + dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range), + range, dma_addr); + out: + kfree(plist); + return res; +} + +static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr) +{ + u8 cmd[BLK_MAX_CDB]; + int retcode; + unsigned int opcode; + + if (hdr->cmdp == NULL) + return -EMSGSIZE; + if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len)) + return -EFAULT; + + opcode = cmd[0]; + + switch (opcode) { + case READ_6: + case READ_10: + case READ_12: + case READ_16: + retcode = nvme_trans_io(ns, hdr, 0, cmd); + break; + case WRITE_6: + case WRITE_10: + case WRITE_12: + case WRITE_16: + retcode = nvme_trans_io(ns, hdr, 1, cmd); + break; + case INQUIRY: + retcode = nvme_trans_inquiry(ns, hdr, cmd); + break; + case LOG_SENSE: + retcode = nvme_trans_log_sense(ns, hdr, cmd); + break; + case MODE_SELECT: + case MODE_SELECT_10: + retcode = nvme_trans_mode_select(ns, hdr, cmd); + break; + case MODE_SENSE: + case MODE_SENSE_10: + retcode = nvme_trans_mode_sense(ns, hdr, cmd); + break; + case READ_CAPACITY: + retcode = nvme_trans_read_capacity(ns, hdr, cmd); + break; + case SERVICE_ACTION_IN: + if (IS_READ_CAP_16(cmd)) + retcode = nvme_trans_read_capacity(ns, hdr, cmd); + else + goto out; + break; + case REPORT_LUNS: + retcode = nvme_trans_report_luns(ns, hdr, cmd); + break; + case REQUEST_SENSE: + retcode = nvme_trans_request_sense(ns, hdr, cmd); + break; + case SECURITY_PROTOCOL_IN: + case SECURITY_PROTOCOL_OUT: + retcode = nvme_trans_security_protocol(ns, hdr, cmd); + break; + case START_STOP: + retcode = nvme_trans_start_stop(ns, hdr, cmd); + break; + case SYNCHRONIZE_CACHE: + retcode = nvme_trans_synchronize_cache(ns, hdr, cmd); + break; + case FORMAT_UNIT: + retcode = nvme_trans_format_unit(ns, hdr, cmd); + break; + case TEST_UNIT_READY: + retcode = nvme_trans_test_unit_ready(ns, hdr, cmd); + break; + case WRITE_BUFFER: + retcode = nvme_trans_write_buffer(ns, hdr, cmd); + break; + case UNMAP: + retcode = nvme_trans_unmap(ns, hdr, cmd); + break; + default: + out: + retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + break; + } + return retcode; +} + +int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr) +{ + struct sg_io_hdr hdr; + int retcode; + + if (!capable(CAP_SYS_ADMIN)) + return -EACCES; + if (copy_from_user(&hdr, u_hdr, sizeof(hdr))) + return -EFAULT; + if (hdr.interface_id != 'S') + return -EINVAL; + if (hdr.cmd_len > BLK_MAX_CDB) + return -EINVAL; + + retcode = nvme_scsi_translate(ns, &hdr); + if 
(retcode < 0) + return retcode; + if (retcode > 0) + retcode = SNTI_TRANSLATION_SUCCESS; + if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0) + return -EFAULT; + + return retcode; +} + +int nvme_sg_get_version_num(int __user *ip) +{ + return put_user(sg_version_num, ip); +} diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c index ca63104136e0..d6d314027b5d 100644 --- a/trunk/drivers/block/rbd.c +++ b/trunk/drivers/block/rbd.c @@ -55,6 +55,39 @@ #define SECTOR_SHIFT 9 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) +/* + * Increment the given counter and return its updated value. + * If the counter is already 0 it will not be incremented. + * If the counter is already at its maximum value returns + * -EINVAL without updating it. + */ +static int atomic_inc_return_safe(atomic_t *v) +{ + unsigned int counter; + + counter = (unsigned int)__atomic_add_unless(v, 1, 0); + if (counter <= (unsigned int)INT_MAX) + return (int)counter; + + atomic_dec(v); + + return -EINVAL; +} + +/* Decrement the counter. Return the resulting value, or -EINVAL */ +static int atomic_dec_return_safe(atomic_t *v) +{ + int counter; + + counter = atomic_dec_return(v); + if (counter >= 0) + return counter; + + atomic_inc(v); + + return -EINVAL; +} + #define RBD_DRV_NAME "rbd" #define RBD_DRV_NAME_LONG "rbd (rados block device)" @@ -100,21 +133,20 @@ * block device image metadata (in-memory version) */ struct rbd_image_header { - /* These four fields never change for a given rbd image */ + /* These six fields never change for a given rbd image */ char *object_prefix; - u64 features; __u8 obj_order; __u8 crypt_type; __u8 comp_type; + u64 stripe_unit; + u64 stripe_count; + u64 features; /* Might be changeable someday? */ /* The remaining fields need to be updated occasionally */ u64 image_size; struct ceph_snap_context *snapc; - char *snap_names; - u64 *snap_sizes; - - u64 stripe_unit; - u64 stripe_count; + char *snap_names; /* format 1 only */ + u64 *snap_sizes; /* format 1 only */ }; /* @@ -225,6 +257,7 @@ struct rbd_obj_request { }; }; struct page **copyup_pages; + u32 copyup_page_count; struct ceph_osd_request *osd_req; @@ -257,6 +290,7 @@ struct rbd_img_request { struct rbd_obj_request *obj_request; /* obj req initiator */ }; struct page **copyup_pages; + u32 copyup_page_count; spinlock_t completion_lock;/* protects next_completion */ u32 next_completion; rbd_img_callback_t callback; @@ -311,6 +345,7 @@ struct rbd_device { struct rbd_spec *parent_spec; u64 parent_overlap; + atomic_t parent_ref; struct rbd_device *parent; /* protects updating the header */ @@ -359,7 +394,8 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count); static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count); -static int rbd_dev_image_probe(struct rbd_device *rbd_dev); +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping); +static void rbd_spec_put(struct rbd_spec *spec); static struct bus_attribute rbd_bus_attrs[] = { __ATTR(add, S_IWUSR, NULL, rbd_add), @@ -426,7 +462,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); static int rbd_dev_refresh(struct rbd_device *rbd_dev); -static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); +static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev); +static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev); static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u64 snap_id); static int 
_rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, @@ -726,88 +763,123 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk) } /* - * Create a new header structure, translate header format from the on-disk - * header. + * Fill an rbd image header with information from the given format 1 + * on-disk header. */ -static int rbd_header_from_disk(struct rbd_image_header *header, +static int rbd_header_from_disk(struct rbd_device *rbd_dev, struct rbd_image_header_ondisk *ondisk) { + struct rbd_image_header *header = &rbd_dev->header; + bool first_time = header->object_prefix == NULL; + struct ceph_snap_context *snapc; + char *object_prefix = NULL; + char *snap_names = NULL; + u64 *snap_sizes = NULL; u32 snap_count; - size_t len; size_t size; + int ret = -ENOMEM; u32 i; - memset(header, 0, sizeof (*header)); + /* Allocate this now to avoid having to handle failure below */ - snap_count = le32_to_cpu(ondisk->snap_count); + if (first_time) { + size_t len; - len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix)); - header->object_prefix = kmalloc(len + 1, GFP_KERNEL); - if (!header->object_prefix) - return -ENOMEM; - memcpy(header->object_prefix, ondisk->object_prefix, len); - header->object_prefix[len] = '\0'; + len = strnlen(ondisk->object_prefix, + sizeof (ondisk->object_prefix)); + object_prefix = kmalloc(len + 1, GFP_KERNEL); + if (!object_prefix) + return -ENOMEM; + memcpy(object_prefix, ondisk->object_prefix, len); + object_prefix[len] = '\0'; + } + /* Allocate the snapshot context and fill it in */ + + snap_count = le32_to_cpu(ondisk->snap_count); + snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); + if (!snapc) + goto out_err; + snapc->seq = le64_to_cpu(ondisk->snap_seq); if (snap_count) { + struct rbd_image_snap_ondisk *snaps; u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); - /* Save a copy of the snapshot names */ + /* We'll keep a copy of the snapshot names... */ - if (snap_names_len > (u64) SIZE_MAX) - return -EIO; - header->snap_names = kmalloc(snap_names_len, GFP_KERNEL); - if (!header->snap_names) + if (snap_names_len > (u64)SIZE_MAX) + goto out_2big; + snap_names = kmalloc(snap_names_len, GFP_KERNEL); + if (!snap_names) goto out_err; + + /* ...as well as the array of their sizes. */ + + size = snap_count * sizeof (*header->snap_sizes); + snap_sizes = kmalloc(size, GFP_KERNEL); + if (!snap_sizes) + goto out_err; + /* - * Note that rbd_dev_v1_header_read() guarantees - * the ondisk buffer we're working with has + * Copy the names, and fill in each snapshot's id + * and size. + * + * Note that rbd_dev_v1_header_info() guarantees the + * ondisk buffer we're working with has * snap_names_len bytes beyond the end of the * snapshot id array, this memcpy() is safe. 
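+ * The snapshot ids and image sizes are converted from little + * endian as the snapshot context and size array are filled in. 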
*/ - memcpy(header->snap_names, &ondisk->snaps[snap_count], - snap_names_len); + memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len); + snaps = ondisk->snaps; + for (i = 0; i < snap_count; i++) { + snapc->snaps[i] = le64_to_cpu(snaps[i].id); + snap_sizes[i] = le64_to_cpu(snaps[i].image_size); + } + } - /* Record each snapshot's size */ + /* We won't fail any more, fill in the header */ - size = snap_count * sizeof (*header->snap_sizes); - header->snap_sizes = kmalloc(size, GFP_KERNEL); - if (!header->snap_sizes) - goto out_err; - for (i = 0; i < snap_count; i++) - header->snap_sizes[i] = - le64_to_cpu(ondisk->snaps[i].image_size); + down_write(&rbd_dev->header_rwsem); + if (first_time) { + header->object_prefix = object_prefix; + header->obj_order = ondisk->options.order; + header->crypt_type = ondisk->options.crypt_type; + header->comp_type = ondisk->options.comp_type; + /* The rest aren't used for format 1 images */ + header->stripe_unit = 0; + header->stripe_count = 0; + header->features = 0; } else { - header->snap_names = NULL; - header->snap_sizes = NULL; + ceph_put_snap_context(header->snapc); + kfree(header->snap_names); + kfree(header->snap_sizes); } - header->features = 0; /* No features support in v1 images */ - header->obj_order = ondisk->options.order; - header->crypt_type = ondisk->options.crypt_type; - header->comp_type = ondisk->options.comp_type; - - /* Allocate and fill in the snapshot context */ + /* The remaining fields always get updated (when we refresh) */ header->image_size = le64_to_cpu(ondisk->image_size); + header->snapc = snapc; + header->snap_names = snap_names; + header->snap_sizes = snap_sizes; - header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); - if (!header->snapc) - goto out_err; - header->snapc->seq = le64_to_cpu(ondisk->snap_seq); - for (i = 0; i < snap_count; i++) - header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id); + /* Make sure mapping size is consistent with header info */ - return 0; + if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time) + if (rbd_dev->mapping.size != header->image_size) + rbd_dev->mapping.size = header->image_size; + + up_write(&rbd_dev->header_rwsem); + return 0; +out_2big: + ret = -EIO; out_err: - kfree(header->snap_sizes); - header->snap_sizes = NULL; - kfree(header->snap_names); - header->snap_names = NULL; - kfree(header->object_prefix); - header->object_prefix = NULL; + kfree(snap_sizes); + kfree(snap_names); + ceph_put_snap_context(snapc); + kfree(object_prefix); - return -ENOMEM; + return ret; } static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) @@ -934,20 +1006,11 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id, static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) { - const char *snap_name = rbd_dev->spec->snap_name; - u64 snap_id; + u64 snap_id = rbd_dev->spec->snap_id; u64 size = 0; u64 features = 0; int ret; - if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) { - snap_id = rbd_snap_id_by_name(rbd_dev, snap_name); - if (snap_id == CEPH_NOSNAP) - return -ENOENT; - } else { - snap_id = CEPH_NOSNAP; - } - ret = rbd_snap_size(rbd_dev, snap_id, &size); if (ret) return ret; @@ -958,11 +1021,6 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) rbd_dev->mapping.size = size; rbd_dev->mapping.features = features; - /* If we are mapping a snapshot it must be marked read-only */ - - if (snap_id != CEPH_NOSNAP) - rbd_dev->mapping.read_only = true; - return 0; } @@ -970,14 +1028,6 @@ static void rbd_dev_mapping_clear(struct 
rbd_device *rbd_dev) { rbd_dev->mapping.size = 0; rbd_dev->mapping.features = 0; - rbd_dev->mapping.read_only = true; -} - -static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev) -{ - rbd_dev->mapping.size = 0; - rbd_dev->mapping.features = 0; - rbd_dev->mapping.read_only = true; } static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) @@ -1342,20 +1392,18 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request) kref_put(&obj_request->kref, rbd_obj_request_destroy); } -static void rbd_img_request_get(struct rbd_img_request *img_request) -{ - dout("%s: img %p (was %d)\n", __func__, img_request, - atomic_read(&img_request->kref.refcount)); - kref_get(&img_request->kref); -} - +static bool img_request_child_test(struct rbd_img_request *img_request); +static void rbd_parent_request_destroy(struct kref *kref); static void rbd_img_request_destroy(struct kref *kref); static void rbd_img_request_put(struct rbd_img_request *img_request) { rbd_assert(img_request != NULL); dout("%s: img %p (was %d)\n", __func__, img_request, atomic_read(&img_request->kref.refcount)); - kref_put(&img_request->kref, rbd_img_request_destroy); + if (img_request_child_test(img_request)) + kref_put(&img_request->kref, rbd_parent_request_destroy); + else + kref_put(&img_request->kref, rbd_img_request_destroy); } static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, @@ -1472,6 +1520,12 @@ static void img_request_child_set(struct rbd_img_request *img_request) smp_mb(); } +static void img_request_child_clear(struct rbd_img_request *img_request) +{ + clear_bit(IMG_REQ_CHILD, &img_request->flags); + smp_mb(); +} + static bool img_request_child_test(struct rbd_img_request *img_request) { smp_mb(); @@ -1484,6 +1538,12 @@ static void img_request_layered_set(struct rbd_img_request *img_request) smp_mb(); } +static void img_request_layered_clear(struct rbd_img_request *img_request) +{ + clear_bit(IMG_REQ_LAYERED, &img_request->flags); + smp_mb(); +} + static bool img_request_layered_test(struct rbd_img_request *img_request) { smp_mb(); @@ -1827,6 +1887,74 @@ static void rbd_obj_request_destroy(struct kref *kref) kmem_cache_free(rbd_obj_request_cache, obj_request); } +/* It's OK to call this for a device with no parent */ + +static void rbd_spec_put(struct rbd_spec *spec); +static void rbd_dev_unparent(struct rbd_device *rbd_dev) +{ + rbd_dev_remove_parent(rbd_dev); + rbd_spec_put(rbd_dev->parent_spec); + rbd_dev->parent_spec = NULL; + rbd_dev->parent_overlap = 0; +} + +/* + * Parent image reference counting is used to determine when an + * image's parent fields can be safely torn down--after there are no + * more in-flight requests to the parent image. When the last + * reference is dropped, cleaning them up is safe. + */ +static void rbd_dev_parent_put(struct rbd_device *rbd_dev) +{ + int counter; + + if (!rbd_dev->parent_spec) + return; + + counter = atomic_dec_return_safe(&rbd_dev->parent_ref); + if (counter > 0) + return; + + /* Last reference; clean up parent data structures */ + + if (!counter) + rbd_dev_unparent(rbd_dev); + else + rbd_warn(rbd_dev, "parent reference underflow\n"); +} + +/* + * If an image has a non-zero parent overlap, get a reference to its + * parent. + * + * We must get the reference before checking for the overlap to + * coordinate properly with zeroing the parent overlap in + * rbd_dev_v2_parent_info() when an image gets flattened. We + * drop it again if there is no overlap. 
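+ * + * atomic_inc_return_safe() never bumps a counter that is already 0, + * so a parent whose last reference has been dropped (and whose + * fields may already be torn down) cannot be revived here. 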
+ * + * Returns true if the rbd device has a parent with a non-zero + * overlap and a reference for it was successfully taken, or + * false otherwise. + */ +static bool rbd_dev_parent_get(struct rbd_device *rbd_dev) +{ + int counter; + + if (!rbd_dev->parent_spec) + return false; + + counter = atomic_inc_return_safe(&rbd_dev->parent_ref); + if (counter > 0 && rbd_dev->parent_overlap) + return true; + + /* Image was flattened, but parent is not yet torn down */ + + if (counter < 0) + rbd_warn(rbd_dev, "parent reference overflow\n"); + + return false; +} + /* * Caller is responsible for filling in the list of object requests * that comprises the image request, and the Linux request pointer @@ -1835,8 +1963,7 @@ static void rbd_obj_request_destroy(struct kref *kref) static struct rbd_img_request *rbd_img_request_create( struct rbd_device *rbd_dev, u64 offset, u64 length, - bool write_request, - bool child_request) + bool write_request) { struct rbd_img_request *img_request; @@ -1861,9 +1988,7 @@ static struct rbd_img_request *rbd_img_request_create( } else { img_request->snap_id = rbd_dev->spec->snap_id; } - if (child_request) - img_request_child_set(img_request); - if (rbd_dev->parent_spec) + if (rbd_dev_parent_get(rbd_dev)) img_request_layered_set(img_request); spin_lock_init(&img_request->completion_lock); img_request->next_completion = 0; @@ -1873,9 +1998,6 @@ static struct rbd_img_request *rbd_img_request_create( INIT_LIST_HEAD(&img_request->obj_requests); kref_init(&img_request->kref); - rbd_img_request_get(img_request); /* Avoid a warning */ - rbd_img_request_put(img_request); /* TEMPORARY */ - dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, write_request ? "write" : "read", offset, length, img_request); @@ -1897,15 +2019,54 @@ static void rbd_img_request_destroy(struct kref *kref) rbd_img_obj_request_del(img_request, obj_request); rbd_assert(img_request->obj_request_count == 0); + if (img_request_layered_test(img_request)) { + img_request_layered_clear(img_request); + rbd_dev_parent_put(img_request->rbd_dev); + } + if (img_request_write_test(img_request)) ceph_put_snap_context(img_request->snapc); - if (img_request_child_test(img_request)) - rbd_obj_request_put(img_request->obj_request); - kmem_cache_free(rbd_img_request_cache, img_request); } +static struct rbd_img_request *rbd_parent_request_create( + struct rbd_obj_request *obj_request, + u64 img_offset, u64 length) +{ + struct rbd_img_request *parent_request; + struct rbd_device *rbd_dev; + + rbd_assert(obj_request->img_request); + rbd_dev = obj_request->img_request->rbd_dev; + + parent_request = rbd_img_request_create(rbd_dev->parent, + img_offset, length, false); + if (!parent_request) + return NULL; + + img_request_child_set(parent_request); + rbd_obj_request_get(obj_request); + parent_request->obj_request = obj_request; + + return parent_request; +} + +static void rbd_parent_request_destroy(struct kref *kref) +{ + struct rbd_img_request *parent_request; + struct rbd_obj_request *orig_request; + + parent_request = container_of(kref, struct rbd_img_request, kref); + orig_request = parent_request->obj_request; + + parent_request->obj_request = NULL; + rbd_obj_request_put(orig_request); + img_request_child_clear(parent_request); + + rbd_img_request_destroy(kref); +} + static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; @@ -2114,7 +2275,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; struct 
rbd_device *rbd_dev; - u64 length; + struct page **pages; u32 page_count; rbd_assert(obj_request->type == OBJ_REQUEST_BIO); @@ -2124,12 +2285,14 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) rbd_dev = img_request->rbd_dev; rbd_assert(rbd_dev); - length = (u64)1 << rbd_dev->header.obj_order; - page_count = (u32)calc_pages_for(0, length); - rbd_assert(obj_request->copyup_pages); - ceph_release_page_vector(obj_request->copyup_pages, page_count); + pages = obj_request->copyup_pages; + rbd_assert(pages != NULL); obj_request->copyup_pages = NULL; + page_count = obj_request->copyup_page_count; + rbd_assert(page_count); + obj_request->copyup_page_count = 0; + ceph_release_page_vector(pages, page_count); /* * We want the transfer count to reflect the size of the @@ -2153,9 +2316,11 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) struct ceph_osd_client *osdc; struct rbd_device *rbd_dev; struct page **pages; - int result; - u64 obj_size; - u64 xferred; + u32 page_count; + int img_result; + u64 parent_length; + u64 offset; + u64 length; rbd_assert(img_request_child_test(img_request)); @@ -2164,46 +2329,74 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) pages = img_request->copyup_pages; rbd_assert(pages != NULL); img_request->copyup_pages = NULL; + page_count = img_request->copyup_page_count; + rbd_assert(page_count); + img_request->copyup_page_count = 0; orig_request = img_request->obj_request; rbd_assert(orig_request != NULL); - rbd_assert(orig_request->type == OBJ_REQUEST_BIO); - result = img_request->result; - obj_size = img_request->length; - xferred = img_request->xferred; + rbd_assert(obj_request_type_valid(orig_request->type)); + img_result = img_request->result; + parent_length = img_request->length; + rbd_assert(parent_length == img_request->xferred); + rbd_img_request_put(img_request); - rbd_dev = img_request->rbd_dev; + rbd_assert(orig_request->img_request); + rbd_dev = orig_request->img_request->rbd_dev; rbd_assert(rbd_dev); - rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order); - rbd_img_request_put(img_request); + /* + * If the overlap has become 0 (most likely because the + * image has been flattened) we need to free the pages + * and re-submit the original write request. + */ + if (!rbd_dev->parent_overlap) { + struct ceph_osd_client *osdc; - if (result) - goto out_err; + ceph_release_page_vector(pages, page_count); + osdc = &rbd_dev->rbd_client->client->osdc; + img_result = rbd_obj_request_submit(osdc, orig_request); + if (!img_result) + return; + } - /* Allocate the new copyup osd request for the original request */ + if (img_result) + goto out_err; - result = -ENOMEM; - rbd_assert(!orig_request->osd_req); + /* + * The original osd request is of no use to use any more. + * We need a new one that can hold the two ops in a copyup + * request. Allocate the new copyup osd request for the + * original request, and release the old one. 
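+ * + * The replacement request carries the parent data read above as a + * "copyup" class method call in op 0 and replays the original + * write as op 1, so the clone object is populated and written in + * one OSD request. 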
+ */ + img_result = -ENOMEM; osd_req = rbd_osd_req_create_copyup(orig_request); if (!osd_req) goto out_err; + rbd_osd_req_destroy(orig_request->osd_req); orig_request->osd_req = osd_req; orig_request->copyup_pages = pages; + orig_request->copyup_page_count = page_count; /* Initialize the copyup op */ osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); - osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0, + osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0, false, false); /* Then the original write request op */ + offset = orig_request->offset; + length = orig_request->length; osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, - orig_request->offset, - orig_request->length, 0, 0); - osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list, - orig_request->length); + offset, length, 0, 0); + if (orig_request->type == OBJ_REQUEST_BIO) + osd_req_op_extent_osd_data_bio(osd_req, 1, + orig_request->bio_list, length); + else + osd_req_op_extent_osd_data_pages(osd_req, 1, + orig_request->pages, length, + offset & ~PAGE_MASK, false, false); rbd_osd_req_format_write(orig_request); @@ -2211,13 +2404,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) orig_request->callback = rbd_img_obj_copyup_callback; osdc = &rbd_dev->rbd_client->client->osdc; - result = rbd_obj_request_submit(osdc, orig_request); - if (!result) + img_result = rbd_obj_request_submit(osdc, orig_request); + if (!img_result) return; out_err: /* Record the error code and complete the request */ - orig_request->result = result; + orig_request->result = img_result; orig_request->xferred = 0; obj_request_done_set(orig_request); rbd_obj_request_complete(orig_request); @@ -2249,22 +2442,13 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) int result; rbd_assert(obj_request_img_data_test(obj_request)); - rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + rbd_assert(obj_request_type_valid(obj_request->type)); img_request = obj_request->img_request; rbd_assert(img_request != NULL); rbd_dev = img_request->rbd_dev; rbd_assert(rbd_dev->parent != NULL); - /* - * First things first. The original osd request is of no - * use to use any more, we'll need a new one that can hold - * the two ops in a copyup request. We'll get that later, - * but for now we can release the old one. - */ - rbd_osd_req_destroy(obj_request->osd_req); - obj_request->osd_req = NULL; - /* * Determine the byte range covered by the object in the * child image to which the original request was to be sent. 
@@ -2295,18 +2479,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) } result = -ENOMEM; - parent_request = rbd_img_request_create(rbd_dev->parent, - img_offset, length, - false, true); + parent_request = rbd_parent_request_create(obj_request, + img_offset, length); if (!parent_request) goto out_err; - rbd_obj_request_get(obj_request); - parent_request->obj_request = obj_request; result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); if (result) goto out_err; parent_request->copyup_pages = pages; + parent_request->copyup_page_count = page_count; parent_request->callback = rbd_img_obj_parent_read_full_callback; result = rbd_img_request_submit(parent_request); @@ -2314,6 +2496,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) return 0; parent_request->copyup_pages = NULL; + parent_request->copyup_page_count = 0; parent_request->obj_request = NULL; rbd_obj_request_put(obj_request); out_err: @@ -2331,6 +2514,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) { struct rbd_obj_request *orig_request; + struct rbd_device *rbd_dev; int result; rbd_assert(!obj_request_img_data_test(obj_request)); @@ -2353,8 +2537,21 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) obj_request->xferred, obj_request->length); rbd_obj_request_put(obj_request); - rbd_assert(orig_request); - rbd_assert(orig_request->img_request); + /* + * If the overlap has become 0 (most likely because the + * image has been flattened) we need to free the pages + * and re-submit the original write request. + */ + rbd_dev = orig_request->img_request->rbd_dev; + if (!rbd_dev->parent_overlap) { + struct ceph_osd_client *osdc; + + rbd_obj_request_put(orig_request); + osdc = &rbd_dev->rbd_client->client->osdc; + result = rbd_obj_request_submit(osdc, orig_request); + if (!result) + return; + } /* * Our only purpose here is to determine whether the object @@ -2512,14 +2709,36 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) struct rbd_obj_request *obj_request; struct rbd_device *rbd_dev; u64 obj_end; + u64 img_xferred; + int img_result; rbd_assert(img_request_child_test(img_request)); + /* First get what we need from the image request and release it */ + obj_request = img_request->obj_request; + img_xferred = img_request->xferred; + img_result = img_request->result; + rbd_img_request_put(img_request); + + /* + * If the overlap has become 0 (most likely because the + * image has been flattened) we need to re-submit the + * original request. 
+ */ rbd_assert(obj_request); rbd_assert(obj_request->img_request); + rbd_dev = obj_request->img_request->rbd_dev; + if (!rbd_dev->parent_overlap) { + struct ceph_osd_client *osdc; + + osdc = &rbd_dev->rbd_client->client->osdc; + img_result = rbd_obj_request_submit(osdc, obj_request); + if (!img_result) + return; + } - obj_request->result = img_request->result; + obj_request->result = img_result; if (obj_request->result) goto out; @@ -2532,7 +2751,6 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) */ rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); obj_end = obj_request->img_offset + obj_request->length; - rbd_dev = obj_request->img_request->rbd_dev; if (obj_end > rbd_dev->parent_overlap) { u64 xferred = 0; @@ -2540,43 +2758,39 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) xferred = rbd_dev->parent_overlap - obj_request->img_offset; - obj_request->xferred = min(img_request->xferred, xferred); + obj_request->xferred = min(img_xferred, xferred); } else { - obj_request->xferred = img_request->xferred; + obj_request->xferred = img_xferred; } out: - rbd_img_request_put(img_request); rbd_img_obj_request_read_callback(obj_request); rbd_obj_request_complete(obj_request); } static void rbd_img_parent_read(struct rbd_obj_request *obj_request) { - struct rbd_device *rbd_dev; struct rbd_img_request *img_request; int result; rbd_assert(obj_request_img_data_test(obj_request)); rbd_assert(obj_request->img_request != NULL); rbd_assert(obj_request->result == (s32) -ENOENT); - rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + rbd_assert(obj_request_type_valid(obj_request->type)); - rbd_dev = obj_request->img_request->rbd_dev; - rbd_assert(rbd_dev->parent != NULL); /* rbd_read_finish(obj_request, obj_request->length); */ - img_request = rbd_img_request_create(rbd_dev->parent, + img_request = rbd_parent_request_create(obj_request, obj_request->img_offset, - obj_request->length, - false, true); + obj_request->length); result = -ENOMEM; if (!img_request) goto out_err; - rbd_obj_request_get(obj_request); - img_request->obj_request = obj_request; - - result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, - obj_request->bio_list); + if (obj_request->type == OBJ_REQUEST_BIO) + result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, + obj_request->bio_list); + else + result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES, + obj_request->pages); if (result) goto out_err; @@ -2626,6 +2840,7 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id) static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *rbd_dev = (struct rbd_device *)data; + int ret; if (!rbd_dev) return; @@ -2633,7 +2848,9 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, rbd_dev->header_name, (unsigned long long)notify_id, (unsigned int)opcode); - (void)rbd_dev_refresh(rbd_dev); + ret = rbd_dev_refresh(rbd_dev); + if (ret) + rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret); rbd_obj_notify_ack(rbd_dev, notify_id); } @@ -2642,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) * Request sync osd watch/unwatch. The value of "start" determines * whether a watch request is being initiated or torn down. 
*/ -static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) +static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; @@ -2676,7 +2893,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) rbd_dev->watch_request->osd_req); osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, - rbd_dev->watch_event->cookie, 0, start); + rbd_dev->watch_event->cookie, 0, start ? 1 : 0); rbd_osd_req_format_write(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); @@ -2869,9 +3086,16 @@ static void rbd_request_fn(struct request_queue *q) goto end_request; /* Shouldn't happen */ } + result = -EIO; + if (offset + length > rbd_dev->mapping.size) { + rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n", + offset, length, rbd_dev->mapping.size); + goto end_request; + } + result = -ENOMEM; img_request = rbd_img_request_create(rbd_dev, offset, length, - write_request, false); + write_request); if (!img_request) goto end_request; @@ -3022,17 +3246,11 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, } /* - * Read the complete header for the given rbd device. - * - * Returns a pointer to a dynamically-allocated buffer containing - * the complete and validated header. Caller can pass the address - * of a variable that will be filled in with the version of the - * header object at the time it was read. - * - * Returns a pointer-coded errno if a failure occurs. + * Read the complete header for the given rbd device. On successful + * return, the rbd_dev->header field will contain up-to-date + * information about the image. */ -static struct rbd_image_header_ondisk * -rbd_dev_v1_header_read(struct rbd_device *rbd_dev) +static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev) { struct rbd_image_header_ondisk *ondisk = NULL; u32 snap_count = 0; @@ -3057,22 +3275,22 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev) size += names_size; ondisk = kmalloc(size, GFP_KERNEL); if (!ondisk) - return ERR_PTR(-ENOMEM); + return -ENOMEM; ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, 0, size, ondisk); if (ret < 0) - goto out_err; + goto out; if ((size_t)ret < size) { ret = -ENXIO; rbd_warn(rbd_dev, "short header read (want %zd got %d)", size, ret); - goto out_err; + goto out; } if (!rbd_dev_ondisk_valid(ondisk)) { ret = -ENXIO; rbd_warn(rbd_dev, "invalid header"); - goto out_err; + goto out; } names_size = le64_to_cpu(ondisk->snap_names_len); @@ -3080,85 +3298,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev) snap_count = le32_to_cpu(ondisk->snap_count); } while (snap_count != want_count); - return ondisk; - -out_err: - kfree(ondisk); - - return ERR_PTR(ret); -} - -/* - * reload the ondisk the header - */ -static int rbd_read_header(struct rbd_device *rbd_dev, - struct rbd_image_header *header) -{ - struct rbd_image_header_ondisk *ondisk; - int ret; - - ondisk = rbd_dev_v1_header_read(rbd_dev); - if (IS_ERR(ondisk)) - return PTR_ERR(ondisk); - ret = rbd_header_from_disk(header, ondisk); + ret = rbd_header_from_disk(rbd_dev, ondisk); +out: kfree(ondisk); return ret; } -static void rbd_update_mapping_size(struct rbd_device *rbd_dev) -{ - if (rbd_dev->spec->snap_id != CEPH_NOSNAP) - return; - - if (rbd_dev->mapping.size != rbd_dev->header.image_size) { - sector_t size; - - rbd_dev->mapping.size = rbd_dev->header.image_size; - size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; - dout("setting size to %llu 
sectors", (unsigned long long)size); - set_capacity(rbd_dev->disk, size); - } -} - -/* - * only read the first part of the ondisk header, without the snaps info - */ -static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev) -{ - int ret; - struct rbd_image_header h; - - ret = rbd_read_header(rbd_dev, &h); - if (ret < 0) - return ret; - - down_write(&rbd_dev->header_rwsem); - - /* Update image size, and check for resize of mapped image */ - rbd_dev->header.image_size = h.image_size; - rbd_update_mapping_size(rbd_dev); - - /* rbd_dev->header.object_prefix shouldn't change */ - kfree(rbd_dev->header.snap_sizes); - kfree(rbd_dev->header.snap_names); - /* osd requests may still refer to snapc */ - ceph_put_snap_context(rbd_dev->header.snapc); - - rbd_dev->header.image_size = h.image_size; - rbd_dev->header.snapc = h.snapc; - rbd_dev->header.snap_names = h.snap_names; - rbd_dev->header.snap_sizes = h.snap_sizes; - /* Free the extra copy of the object prefix */ - if (strcmp(rbd_dev->header.object_prefix, h.object_prefix)) - rbd_warn(rbd_dev, "object prefix changed (ignoring)"); - kfree(h.object_prefix); - - up_write(&rbd_dev->header_rwsem); - - return ret; -} - /* * Clear the rbd device's EXISTS flag if the snapshot it's mapped to * has disappeared from the (just updated) snapshot context. @@ -3180,26 +3326,29 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev) static int rbd_dev_refresh(struct rbd_device *rbd_dev) { - u64 image_size; + u64 mapping_size; int ret; rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); - image_size = rbd_dev->header.image_size; + mapping_size = rbd_dev->mapping.size; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_refresh(rbd_dev); + ret = rbd_dev_v1_header_info(rbd_dev); else - ret = rbd_dev_v2_refresh(rbd_dev); + ret = rbd_dev_v2_header_info(rbd_dev); /* If it's a mapped snapshot, validate its EXISTS flag */ rbd_exists_validate(rbd_dev); mutex_unlock(&ctl_mutex); - if (ret) - rbd_warn(rbd_dev, "got notification but failed to " - " update snaps: %d\n", ret); - if (image_size != rbd_dev->header.image_size) + if (mapping_size != rbd_dev->mapping.size) { + sector_t size; + + size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; + dout("setting size to %llu sectors", (unsigned long long)size); + set_capacity(rbd_dev->disk, size); revalidate_disk(rbd_dev->disk); + } return ret; } @@ -3403,6 +3552,8 @@ static ssize_t rbd_image_refresh(struct device *dev, int ret; ret = rbd_dev_refresh(rbd_dev); + if (ret) + rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret); return ret < 0 ? ret : size; } @@ -3501,6 +3652,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, spin_lock_init(&rbd_dev->lock); rbd_dev->flags = 0; + atomic_set(&rbd_dev->parent_ref, 0); INIT_LIST_HEAD(&rbd_dev->node); init_rwsem(&rbd_dev->header_rwsem); @@ -3650,6 +3802,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) __le64 snapid; void *p; void *end; + u64 pool_id; char *image_id; u64 overlap; int ret; @@ -3680,18 +3833,37 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) p = reply_buf; end = reply_buf + ret; ret = -ERANGE; - ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); - if (parent_spec->pool_id == CEPH_NOPOOL) + ceph_decode_64_safe(&p, end, pool_id, out_err); + if (pool_id == CEPH_NOPOOL) { + /* + * Either the parent never existed, or we have + * record of it but the image got flattened so it no + * longer has a parent. 
When the parent of a + * layered image disappears we immediately set the + * overlap to 0. The effect of this is that all new + * requests will be treated as if the image had no + * parent. + */ + if (rbd_dev->parent_overlap) { + rbd_dev->parent_overlap = 0; + smp_mb(); + rbd_dev_parent_put(rbd_dev); + pr_info("%s: clone image has been flattened\n", + rbd_dev->disk->disk_name); + } + goto out; /* No parent? No problem. */ + } /* The ceph file layout needs to fit pool id in 32 bits */ ret = -EIO; - if (parent_spec->pool_id > (u64)U32_MAX) { + if (pool_id > (u64)U32_MAX) { rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", - (unsigned long long)parent_spec->pool_id, U32_MAX); + (unsigned long long)pool_id, U32_MAX); goto out_err; } + parent_spec->pool_id = pool_id; image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); if (IS_ERR(image_id)) { @@ -3702,9 +3874,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); ceph_decode_64_safe(&p, end, overlap, out_err); - rbd_dev->parent_overlap = overlap; - rbd_dev->parent_spec = parent_spec; - parent_spec = NULL; /* rbd_dev now owns this */ + if (overlap) { + rbd_spec_put(rbd_dev->parent_spec); + rbd_dev->parent_spec = parent_spec; + parent_spec = NULL; /* rbd_dev now owns this */ + rbd_dev->parent_overlap = overlap; + } else { + rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n"); + } out: ret = 0; out_err: @@ -4002,6 +4179,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) for (i = 0; i < snap_count; i++) snapc->snaps[i] = ceph_decode_64(&p); + ceph_put_snap_context(rbd_dev->header.snapc); rbd_dev->header.snapc = snapc; dout(" snap context seq = %llu, snap_count = %u\n", @@ -4053,21 +4231,56 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, return snap_name; } -static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) +static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev) { + bool first_time = rbd_dev->header.object_prefix == NULL; int ret; down_write(&rbd_dev->header_rwsem); + if (first_time) { + ret = rbd_dev_v2_header_onetime(rbd_dev); + if (ret) + goto out; + } + + /* + * If the image supports layering, get the parent info. We + * need to probe the first time regardless. Thereafter we + * only need to if there's a parent, to see if it has + * disappeared due to the mapped image getting flattened. + */ + if (rbd_dev->header.features & RBD_FEATURE_LAYERING && + (first_time || rbd_dev->parent_spec)) { + bool warn; + + ret = rbd_dev_v2_parent_info(rbd_dev); + if (ret) + goto out; + + /* + * Print a warning if this is the initial probe and + * the image has a parent. Don't print it if the + * image now being probed is itself a parent. We + * can tell at this point because we won't know its + * pool name yet (just its pool id). 
+ */ + warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name; + if (first_time && warn) + rbd_warn(rbd_dev, "WARNING: kernel layering " + "is EXPERIMENTAL!"); + } + ret = rbd_dev_v2_image_size(rbd_dev); if (ret) goto out; - rbd_update_mapping_size(rbd_dev); + + if (rbd_dev->spec->snap_id == CEPH_NOSNAP) + if (rbd_dev->mapping.size != rbd_dev->header.image_size) + rbd_dev->mapping.size = rbd_dev->header.image_size; ret = rbd_dev_v2_snap_context(rbd_dev); dout("rbd_dev_v2_snap_context returned %d\n", ret); - if (ret) - goto out; out: up_write(&rbd_dev->header_rwsem); @@ -4490,10 +4703,10 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) { struct rbd_image_header *header; - rbd_dev_remove_parent(rbd_dev); - rbd_spec_put(rbd_dev->parent_spec); - rbd_dev->parent_spec = NULL; - rbd_dev->parent_overlap = 0; + /* Drop parent reference unless it's already been done (or none) */ + + if (rbd_dev->parent_overlap) + rbd_dev_parent_put(rbd_dev); /* Free dynamic fields from the header, then zero it out */ @@ -4505,72 +4718,22 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) memset(header, 0, sizeof (*header)); } -static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) +static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev) { int ret; - /* Populate rbd image metadata */ - - ret = rbd_read_header(rbd_dev, &rbd_dev->header); - if (ret < 0) - goto out_err; - - /* Version 1 images have no parent (no layering) */ - - rbd_dev->parent_spec = NULL; - rbd_dev->parent_overlap = 0; - - dout("discovered version 1 image, header name is %s\n", - rbd_dev->header_name); - - return 0; - -out_err: - kfree(rbd_dev->header_name); - rbd_dev->header_name = NULL; - kfree(rbd_dev->spec->image_id); - rbd_dev->spec->image_id = NULL; - - return ret; -} - -static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) -{ - int ret; - - ret = rbd_dev_v2_image_size(rbd_dev); - if (ret) - goto out_err; - - /* Get the object prefix (a.k.a. block_name) for the image */ - ret = rbd_dev_v2_object_prefix(rbd_dev); if (ret) goto out_err; - /* Get the and check features for the image */ - + /* + * Get and check the features for the image. Currently the + * features are assumed to never change. + */ ret = rbd_dev_v2_features(rbd_dev); if (ret) goto out_err; - /* If the image supports layering, get the parent info */ - - if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { - ret = rbd_dev_v2_parent_info(rbd_dev); - if (ret) - goto out_err; - - /* - * Don't print a warning for parent images. We can - * tell this point because we won't know its pool - * name yet (just its pool id).
- */ - if (rbd_dev->spec->pool_name) - rbd_warn(rbd_dev, "WARNING: kernel layering " - "is EXPERIMENTAL!"); - } - /* If the image supports fancy striping, get its parameters */ if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { @@ -4578,28 +4741,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) if (ret < 0) goto out_err; } - - /* crypto and compression type aren't (yet) supported for v2 images */ - - rbd_dev->header.crypt_type = 0; - rbd_dev->header.comp_type = 0; - - /* Get the snapshot context, plus the header version */ - - ret = rbd_dev_v2_snap_context(rbd_dev); - if (ret) - goto out_err; - - dout("discovered version 2 image, header name is %s\n", - rbd_dev->header_name); + /* No support for crypto and compression type format 2 images */ return 0; out_err: - rbd_dev->parent_overlap = 0; - rbd_spec_put(rbd_dev->parent_spec); - rbd_dev->parent_spec = NULL; - kfree(rbd_dev->header_name); - rbd_dev->header_name = NULL; + rbd_dev->header.features = 0; kfree(rbd_dev->header.object_prefix); rbd_dev->header.object_prefix = NULL; @@ -4628,15 +4774,16 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) if (!parent) goto out_err; - ret = rbd_dev_image_probe(parent); + ret = rbd_dev_image_probe(parent, false); if (ret < 0) goto out_err; rbd_dev->parent = parent; + atomic_set(&rbd_dev->parent_ref, 1); return 0; out_err: if (parent) { - rbd_spec_put(rbd_dev->parent_spec); + rbd_dev_unparent(rbd_dev); kfree(rbd_dev->header_name); rbd_dev_destroy(parent); } else { @@ -4651,10 +4798,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) { int ret; - ret = rbd_dev_mapping_set(rbd_dev); - if (ret) - return ret; - /* generate unique id: find highest unique id, add one */ rbd_dev_id_get(rbd_dev); @@ -4676,13 +4819,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) if (ret) goto err_out_blkdev; - ret = rbd_bus_add_dev(rbd_dev); + ret = rbd_dev_mapping_set(rbd_dev); if (ret) goto err_out_disk; + set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); + + ret = rbd_bus_add_dev(rbd_dev); + if (ret) + goto err_out_mapping; /* Everything's ready. Announce the disk to the world. */ - set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); add_disk(rbd_dev->disk); @@ -4691,6 +4838,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev) return ret; +err_out_mapping: + rbd_dev_mapping_clear(rbd_dev); err_out_disk: rbd_free_disk(rbd_dev); err_out_blkdev: @@ -4731,12 +4880,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) static void rbd_dev_image_release(struct rbd_device *rbd_dev) { - int ret; - rbd_dev_unprobe(rbd_dev); - ret = rbd_dev_header_watch_sync(rbd_dev, 0); - if (ret) - rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); kfree(rbd_dev->header_name); rbd_dev->header_name = NULL; rbd_dev->image_format = 0; @@ -4748,10 +4892,11 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) /* * Probe for the existence of the header object for the given rbd - * device. For format 2 images this includes determining the image - * id. + * device. If this image is the one being mapped (i.e., not a + * parent), initiate a watch on its header object before using that + * object to get detailed information about the rbd image. 
*/ -static int rbd_dev_image_probe(struct rbd_device *rbd_dev) +static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping) { int ret; int tmp; @@ -4771,14 +4916,16 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) if (ret) goto err_out_format; - ret = rbd_dev_header_watch_sync(rbd_dev, 1); - if (ret) - goto out_header_name; + if (mapping) { + ret = rbd_dev_header_watch_sync(rbd_dev, true); + if (ret) + goto out_header_name; + } if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_probe(rbd_dev); + ret = rbd_dev_v1_header_info(rbd_dev); else - ret = rbd_dev_v2_probe(rbd_dev); + ret = rbd_dev_v2_header_info(rbd_dev); if (ret) goto err_out_watch; @@ -4787,15 +4934,22 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) goto err_out_probe; ret = rbd_dev_probe_parent(rbd_dev); - if (!ret) - return 0; + if (ret) + goto err_out_probe; + + dout("discovered format %u image, header name is %s\n", + rbd_dev->image_format, rbd_dev->header_name); + return 0; err_out_probe: rbd_dev_unprobe(rbd_dev); err_out_watch: - tmp = rbd_dev_header_watch_sync(rbd_dev, 0); - if (tmp) - rbd_warn(rbd_dev, "unable to tear down watch request\n"); + if (mapping) { + tmp = rbd_dev_header_watch_sync(rbd_dev, false); + if (tmp) + rbd_warn(rbd_dev, "unable to tear down " + "watch request (%d)\n", tmp); + } out_header_name: kfree(rbd_dev->header_name); rbd_dev->header_name = NULL; @@ -4819,6 +4973,7 @@ static ssize_t rbd_add(struct bus_type *bus, struct rbd_spec *spec = NULL; struct rbd_client *rbdc; struct ceph_osd_client *osdc; + bool read_only; int rc = -ENOMEM; if (!try_module_get(THIS_MODULE)) @@ -4828,6 +4983,9 @@ static ssize_t rbd_add(struct bus_type *bus, rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); if (rc < 0) goto err_out_module; + read_only = rbd_opts->read_only; + kfree(rbd_opts); + rbd_opts = NULL; /* done with this */ rbdc = rbd_get_client(ceph_opts); if (IS_ERR(rbdc)) { @@ -4858,14 +5016,16 @@ static ssize_t rbd_add(struct bus_type *bus, rbdc = NULL; /* rbd_dev now owns this */ spec = NULL; /* rbd_dev now owns this */ - rbd_dev->mapping.read_only = rbd_opts->read_only; - kfree(rbd_opts); - rbd_opts = NULL; /* done with this */ - - rc = rbd_dev_image_probe(rbd_dev); + rc = rbd_dev_image_probe(rbd_dev, true); if (rc < 0) goto err_out_rbd_dev; + /* If we are mapping a snapshot it must be marked read-only */ + + if (rbd_dev->spec->snap_id != CEPH_NOSNAP) + read_only = true; + rbd_dev->mapping.read_only = read_only; + rc = rbd_dev_device_setup(rbd_dev); if (!rc) return count; @@ -4911,7 +5071,7 @@ static void rbd_dev_device_release(struct device *dev) rbd_free_disk(rbd_dev); clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); - rbd_dev_clear_mapping(rbd_dev); + rbd_dev_mapping_clear(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); rbd_dev->major = 0; rbd_dev_id_put(rbd_dev); @@ -4978,10 +5138,13 @@ static ssize_t rbd_remove(struct bus_type *bus, spin_unlock_irq(&rbd_dev->lock); if (ret < 0) goto done; - ret = count; rbd_bus_del_dev(rbd_dev); + ret = rbd_dev_header_watch_sync(rbd_dev, false); + if (ret) + rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); rbd_dev_image_release(rbd_dev); module_put(THIS_MODULE); + ret = count; done: mutex_unlock(&ctl_mutex); diff --git a/trunk/drivers/char/hw_random/mxc-rnga.c b/trunk/drivers/char/hw_random/mxc-rnga.c index 4ca35e8a5d8c..19a12ac64a9e 100644 --- a/trunk/drivers/char/hw_random/mxc-rnga.c +++ b/trunk/drivers/char/hw_random/mxc-rnga.c @@ -167,11 +167,6 @@ static int __init mxc_rnga_probe(struct 
platform_device *pdev) clk_prepare_enable(mxc_rng->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - err = -ENOENT; - goto err_region; - } - mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mxc_rng->mem)) { err = PTR_ERR(mxc_rng->mem); @@ -189,7 +184,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev) return 0; err_ioremap: -err_region: clk_disable_unprepare(mxc_rng->clk); out: diff --git a/trunk/drivers/char/hw_random/omap-rng.c b/trunk/drivers/char/hw_random/omap-rng.c index 749dc16ca2cc..d2903e772270 100644 --- a/trunk/drivers/char/hw_random/omap-rng.c +++ b/trunk/drivers/char/hw_random/omap-rng.c @@ -119,11 +119,6 @@ static int omap_rng_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, priv); priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!priv->mem_res) { - ret = -ENOENT; - goto err_ioremap; - } - priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); diff --git a/trunk/drivers/char/ipmi/ipmi_bt_sm.c b/trunk/drivers/char/ipmi/ipmi_bt_sm.c index cdd4c09fda96..a22a7a502740 100644 --- a/trunk/drivers/char/ipmi/ipmi_bt_sm.c +++ b/trunk/drivers/char/ipmi/ipmi_bt_sm.c @@ -95,9 +95,9 @@ struct si_sm_data { enum bt_states state; unsigned char seq; /* BT sequence number */ struct si_sm_io *io; - unsigned char write_data[IPMI_MAX_MSG_LENGTH]; + unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int write_count; - unsigned char read_data[IPMI_MAX_MSG_LENGTH]; + unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int read_count; int truncated; long timeout; /* microseconds countdown */ diff --git a/trunk/drivers/char/ipmi/ipmi_devintf.c b/trunk/drivers/char/ipmi/ipmi_devintf.c index 9eb360ff8cab..d5a5f020810a 100644 --- a/trunk/drivers/char/ipmi/ipmi_devintf.c +++ b/trunk/drivers/char/ipmi/ipmi_devintf.c @@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, return ipmi_ioctl(filep, cmd, arg); } } + +static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int ret; + + mutex_lock(&ipmi_mutex); + ret = compat_ipmi_ioctl(filep, cmd, arg); + mutex_unlock(&ipmi_mutex); + + return ret; +} #endif static const struct file_operations ipmi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = ipmi_unlocked_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = compat_ipmi_ioctl, + .compat_ioctl = unlocked_compat_ipmi_ioctl, #endif .open = ipmi_open, .release = ipmi_release, diff --git a/trunk/drivers/char/ipmi/ipmi_msghandler.c b/trunk/drivers/char/ipmi/ipmi_msghandler.c index 4d439d2fcfd6..4445fa164a2d 100644 --- a/trunk/drivers/char/ipmi/ipmi_msghandler.c +++ b/trunk/drivers/char/ipmi/ipmi_msghandler.c @@ -2037,12 +2037,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return -ENOMEM; - entry->name = kmalloc(strlen(name)+1, GFP_KERNEL); + entry->name = kstrdup(name, GFP_KERNEL); if (!entry->name) { kfree(entry); return -ENOMEM; } - strcpy(entry->name, name); file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); if (!file) { diff --git a/trunk/drivers/char/ipmi/ipmi_si_intf.c b/trunk/drivers/char/ipmi/ipmi_si_intf.c index 313538abe63c..af4b23ffc5a6 100644 --- a/trunk/drivers/char/ipmi/ipmi_si_intf.c +++ b/trunk/drivers/char/ipmi/ipmi_si_intf.c @@ -663,8 +663,10 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now 
handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); if (msg[2] != 0) { - dev_warn(smi_info->dev, "Could not enable interrupts" - ", failed get, using polled mode.\n"); + dev_warn(smi_info->dev, + "Couldn't get irq info: %x.\n", msg[2]); + dev_warn(smi_info->dev, + "Maybe ok, but ipmi might run very slowly.\n"); smi_info->si_state = SI_NORMAL; } else { msg[0] = (IPMI_NETFN_APP_REQUEST << 2); @@ -685,10 +687,12 @@ static void handle_transaction_done(struct smi_info *smi_info) /* We got the flags from the SMI, now handle them. */ smi_info->handlers->get_result(smi_info->si_sm, msg, 4); - if (msg[2] != 0) - dev_warn(smi_info->dev, "Could not enable interrupts" - ", failed set, using polled mode.\n"); - else + if (msg[2] != 0) { + dev_warn(smi_info->dev, + "Couldn't set irq info: %x.\n", msg[2]); + dev_warn(smi_info->dev, + "Maybe ok, but ipmi might run very slowly.\n"); + } else smi_info->interrupt_disabled = 0; smi_info->si_state = SI_NORMAL; break; diff --git a/trunk/drivers/char/lp.c b/trunk/drivers/char/lp.c index dafd9ac6428f..0913d79424d3 100644 --- a/trunk/drivers/char/lp.c +++ b/trunk/drivers/char/lp.c @@ -622,9 +622,12 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd, return -EFAULT; break; case LPGETSTATUS: + if (mutex_lock_interruptible(&lp_table[minor].port_mutex)) + return -EINTR; lp_claim_parport_or_block (&lp_table[minor]); status = r_str(minor); lp_release_parport (&lp_table[minor]); + mutex_unlock(&lp_table[minor].port_mutex); if (copy_to_user(argp, &status, sizeof(int))) return -EFAULT; diff --git a/trunk/drivers/char/ttyprintk.c b/trunk/drivers/char/ttyprintk.c index 4945bd3d18d0..d5d2e4a985aa 100644 --- a/trunk/drivers/char/ttyprintk.c +++ b/trunk/drivers/char/ttyprintk.c @@ -179,7 +179,6 @@ static int __init ttyprintk_init(void) { int ret = -ENOMEM; - tpk_port.port.ops = &null_ops; mutex_init(&tpk_port.port_write_mutex); ttyprintk_driver = tty_alloc_driver(1, @@ -190,6 +189,7 @@ static int __init ttyprintk_init(void) return PTR_ERR(ttyprintk_driver); tty_port_init(&tpk_port.port); + tpk_port.port.ops = &null_ops; ttyprintk_driver->driver_name = "ttyprintk"; ttyprintk_driver->name = "ttyprintk"; diff --git a/trunk/drivers/cpufreq/Kconfig b/trunk/drivers/cpufreq/Kconfig index a1488f58f6ca..534fcb825153 100644 --- a/trunk/drivers/cpufreq/Kconfig +++ b/trunk/drivers/cpufreq/Kconfig @@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS choice prompt "Default CPUFreq governor" - default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110 + default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ default CPU_FREQ_DEFAULT_GOV_PERFORMANCE help This option sets which CPUFreq governor shall be loaded at diff --git a/trunk/drivers/cpufreq/Kconfig.arm b/trunk/drivers/cpufreq/Kconfig.arm index f3af18b9acc5..6e57543fe0b9 100644 --- a/trunk/drivers/cpufreq/Kconfig.arm +++ b/trunk/drivers/cpufreq/Kconfig.arm @@ -3,16 +3,17 @@ # config ARM_BIG_LITTLE_CPUFREQ - tristate - depends on ARM_CPU_TOPOLOGY + tristate "Generic ARM big LITTLE CPUfreq driver" + depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK + help + This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. config ARM_DT_BL_CPUFREQ - tristate "Generic ARM big LITTLE CPUfreq driver probed via DT" - select ARM_BIG_LITTLE_CPUFREQ - depends on OF && HAVE_CLK + tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver" + depends on ARM_BIG_LITTLE_CPUFREQ && OF help - This enables the Generic CPUfreq driver for ARM big.LITTLE platform. 
- This gets frequency tables from DT. + This enables probing via DT for Generic CPUfreq driver for ARM + big.LITTLE platform. This gets frequency tables from DT. config ARM_EXYNOS_CPUFREQ bool "SAMSUNG EXYNOS SoCs" diff --git a/trunk/drivers/cpufreq/arm_big_little.c b/trunk/drivers/cpufreq/arm_big_little.c index dbdf677d2f36..5d7f53fcd6f5 100644 --- a/trunk/drivers/cpufreq/arm_big_little.c +++ b/trunk/drivers/cpufreq/arm_big_little.c @@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS]; static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS]; static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)}; -static int cpu_to_cluster(int cpu) -{ - return topology_physical_package_id(cpu); -} - static unsigned int bL_cpufreq_get(unsigned int cpu) { u32 cur_cluster = cpu_to_cluster(cpu); @@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy) cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); - dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu); + dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); return 0; } diff --git a/trunk/drivers/cpufreq/arm_big_little.h b/trunk/drivers/cpufreq/arm_big_little.h index 70f18fc12d4a..79b2ce17884d 100644 --- a/trunk/drivers/cpufreq/arm_big_little.h +++ b/trunk/drivers/cpufreq/arm_big_little.h @@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops { int (*init_opp_table)(struct device *cpu_dev); }; +static inline int cpu_to_cluster(int cpu) +{ + return topology_physical_package_id(cpu); +} + int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops); void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops); diff --git a/trunk/drivers/cpufreq/arm_big_little_dt.c b/trunk/drivers/cpufreq/arm_big_little_dt.c index 44be3115375c..173ed059d95f 100644 --- a/trunk/drivers/cpufreq/arm_big_little_dt.c +++ b/trunk/drivers/cpufreq/arm_big_little_dt.c @@ -66,8 +66,8 @@ static int dt_get_transition_latency(struct device *cpu_dev) parent = of_find_node_by_path("/cpus"); if (!parent) { - pr_err("failed to find OF /cpus\n"); - return -ENOENT; + pr_info("Failed to find OF /cpus. Use CPUFREQ_ETERNAL transition latency\n"); + return CPUFREQ_ETERNAL; } for_each_child_of_node(parent, np) { @@ -78,10 +78,11 @@ static int dt_get_transition_latency(struct device *cpu_dev) of_node_put(np); of_node_put(parent); - return 0; + return transition_latency; } - return -ENODEV; + pr_info("clock-latency isn't found, use CPUFREQ_ETERNAL transition latency\n"); + return CPUFREQ_ETERNAL; } static struct cpufreq_arm_bL_ops dt_bL_ops = { diff --git a/trunk/drivers/cpufreq/cpufreq-cpu0.c b/trunk/drivers/cpufreq/cpufreq-cpu0.c index 3ab8294eab04..a64eb8b70444 100644 --- a/trunk/drivers/cpufreq/cpufreq-cpu0.c +++ b/trunk/drivers/cpufreq/cpufreq-cpu0.c @@ -189,12 +189,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) if (!np) { pr_err("failed to find cpu0 node\n"); - return -ENOENT; + ret = -ENOENT; + goto out_put_parent; } cpu_dev = &pdev->dev; cpu_dev->of_node = np; + cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); + if (IS_ERR(cpu_reg)) { + /* + * If cpu0 regulator supply node is present, but regulator is + * not yet registered, we should try deferring probe.
+ */ + if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) { + dev_err(cpu_dev, "cpu0 regulator not ready, retry\n"); + ret = -EPROBE_DEFER; + goto out_put_node; + } + pr_warn("failed to get cpu0 regulator: %ld\n", + PTR_ERR(cpu_reg)); + cpu_reg = NULL; + } + cpu_clk = devm_clk_get(cpu_dev, NULL); if (IS_ERR(cpu_clk)) { ret = PTR_ERR(cpu_clk); @@ -202,12 +219,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) goto out_put_node; } - cpu_reg = devm_regulator_get(cpu_dev, "cpu0"); - if (IS_ERR(cpu_reg)) { - pr_warn("failed to get cpu0 regulator\n"); - cpu_reg = NULL; - } - ret = of_init_opp_table(cpu_dev); if (ret) { pr_err("failed to init OPP table: %d\n", ret); @@ -264,6 +275,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev) opp_free_cpufreq_table(cpu_dev, &freq_table); out_put_node: of_node_put(np); +out_put_parent: + of_node_put(parent); return ret; } diff --git a/trunk/drivers/cpufreq/cpufreq.c b/trunk/drivers/cpufreq/cpufreq.c index 1b8a48eaf90f..4b8c7f297d74 100644 --- a/trunk/drivers/cpufreq/cpufreq.c +++ b/trunk/drivers/cpufreq/cpufreq.c @@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif __func__, cpu_dev->id, cpu); } + if ((cpus == 1) && (cpufreq_driver->target)) + __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); + pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); cpufreq_cpu_put(data); /* If cpu is last user of policy, free policy */ if (cpus == 1) { - if (cpufreq_driver->target) - __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT); - lock_policy_rwsem_read(cpu); kobj = &data->kobj; cmp = &data->kobj_unregister; @@ -1832,15 +1832,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb, if (dev) { switch (action) { case CPU_ONLINE: - case CPU_ONLINE_FROZEN: cpufreq_add_dev(dev, NULL); break; case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: + case CPU_UP_CANCELED_FROZEN: __cpufreq_remove_dev(dev, NULL); break; case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: cpufreq_add_dev(dev, NULL); break; } diff --git a/trunk/drivers/cpufreq/cpufreq_governor.c b/trunk/drivers/cpufreq/cpufreq_governor.c index 443442df113b..5af40ad82d23 100644 --- a/trunk/drivers/cpufreq/cpufreq_governor.c +++ b/trunk/drivers/cpufreq/cpufreq_governor.c @@ -255,6 +255,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, if (have_governor_per_policy()) { WARN_ON(dbs_data); } else if (dbs_data) { + dbs_data->usage_count++; policy->governor_data = dbs_data; return 0; } @@ -266,6 +267,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, } dbs_data->cdata = cdata; + dbs_data->usage_count = 1; rc = cdata->init(dbs_data); if (rc) { pr_err("%s: POLICY_INIT: init() failed\n", __func__); @@ -294,7 +296,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate, latency * LATENCY_MULTIPLIER)); - if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + if ((cdata->governor == GOV_CONSERVATIVE) && + (!policy->governor->initialized)) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; cpufreq_register_notifier(cs_ops->notifier_block, @@ -306,12 +309,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, return 0; case CPUFREQ_GOV_POLICY_EXIT: - if ((policy->governor->initialized == 1) || - have_governor_per_policy()) { + if (!--dbs_data->usage_count) { sysfs_remove_group(get_governor_parent_kobj(policy), get_sysfs_attr(dbs_data)); - if (dbs_data->cdata->governor == GOV_CONSERVATIVE) { + if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) && + 
(policy->governor->initialized == 1)) { struct cs_ops *cs_ops = dbs_data->cdata->gov_ops; cpufreq_unregister_notifier(cs_ops->notifier_block, diff --git a/trunk/drivers/cpufreq/cpufreq_governor.h b/trunk/drivers/cpufreq/cpufreq_governor.h index 8ac33538d0bd..e16a96130cb3 100644 --- a/trunk/drivers/cpufreq/cpufreq_governor.h +++ b/trunk/drivers/cpufreq/cpufreq_governor.h @@ -211,6 +211,7 @@ struct common_dbs_data { struct dbs_data { struct common_dbs_data *cdata; unsigned int min_sampling_rate; + int usage_count; void *tuners; /* dbs_mutex protects dbs_enable in governor start/stop */ diff --git a/trunk/drivers/cpufreq/cpufreq_ondemand.c b/trunk/drivers/cpufreq/cpufreq_ondemand.c index b0ffef96bf77..4b9bb5def6f1 100644 --- a/trunk/drivers/cpufreq/cpufreq_ondemand.c +++ b/trunk/drivers/cpufreq/cpufreq_ondemand.c @@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data) tuners->io_is_busy = should_io_be_busy(); dbs_data->tuners = tuners; - pr_info("%s: tuners %p\n", __func__, tuners); mutex_init(&dbs_data->mutex); return 0; } diff --git a/trunk/drivers/cpufreq/cpufreq_stats.c b/trunk/drivers/cpufreq/cpufreq_stats.c index bfd6273fd873..fb65decffa28 100644 --- a/trunk/drivers/cpufreq/cpufreq_stats.c +++ b/trunk/drivers/cpufreq/cpufreq_stats.c @@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb, switch (action) { case CPU_ONLINE: - case CPU_ONLINE_FROZEN: cpufreq_update_policy(cpu); break; case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: cpufreq_stats_free_sysfs(cpu); break; case CPU_DEAD: - case CPU_DEAD_FROZEN: + cpufreq_stats_free_table(cpu); + break; + case CPU_UP_CANCELED_FROZEN: + cpufreq_stats_free_sysfs(cpu); cpufreq_stats_free_table(cpu); break; } diff --git a/trunk/drivers/cpufreq/intel_pstate.c b/trunk/drivers/cpufreq/intel_pstate.c index cc3a8e6c92be..9c36ace92a39 100644 --- a/trunk/drivers/cpufreq/intel_pstate.c +++ b/trunk/drivers/cpufreq/intel_pstate.c @@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y) } struct sample { - ktime_t start_time; - ktime_t end_time; int core_pct_busy; - int pstate_pct_busy; - u64 duration_us; - u64 idletime_us; u64 aperf; u64 mperf; int freq; @@ -86,13 +81,9 @@ struct cpudata { struct pstate_adjust_policy *pstate_policy; struct pstate_data pstate; struct _pid pid; - struct _pid idle_pid; int min_pstate_count; - int idle_mode; - ktime_t prev_sample; - u64 prev_idle_time_us; u64 prev_aperf; u64 prev_mperf; int sample_ptr; @@ -124,6 +115,8 @@ struct perf_limits { int min_perf_pct; int32_t max_perf; int32_t min_perf; + int max_policy_pct; + int max_sysfs_pct; }; static struct perf_limits limits = { @@ -132,6 +125,8 @@ static struct perf_limits limits = { .max_perf = int_tofp(1), .min_perf_pct = 0, .min_perf = 0, + .max_policy_pct = 100, + .max_sysfs_pct = 100, }; static inline void pid_reset(struct _pid *pid, int setpoint, int busy, @@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) 0); } -static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu) -{ - pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct); - pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct); - pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct); - - pid_reset(&cpu->idle_pid, - 75, - 50, - cpu->pstate_policy->deadband, - 0); -} - static inline void intel_pstate_reset_all_pid(void) { unsigned int cpu; @@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, if (ret != 1) return -EINVAL; - 
limits.max_perf_pct = clamp_t(int, input, 0 , 100); + limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); + limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return count; } @@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) if (pstate == cpu->pstate.current_pstate) return; -#ifndef MODULE trace_cpu_frequency(pstate * 100000, cpu->cpu); -#endif + cpu->pstate.current_pstate = pstate; wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); @@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu, struct sample *sample) { u64 core_pct; - sample->pstate_pct_busy = 100 - div64_u64( - sample->idletime_us * 100, - sample->duration_us); core_pct = div64_u64(sample->aperf * 100, sample->mperf); sample->freq = cpu->pstate.max_pstate * core_pct * 1000; - sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), - 100); + sample->core_pct_busy = core_pct; } static inline void intel_pstate_sample(struct cpudata *cpu) { - ktime_t now; - u64 idle_time_us; u64 aperf, mperf; - now = ktime_get(); - idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL); - rdmsrl(MSR_IA32_APERF, aperf); rdmsrl(MSR_IA32_MPERF, mperf); - /* for the first sample, don't actually record a sample, just - * set the baseline */ - if (cpu->prev_idle_time_us > 0) { - cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; - cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample; - cpu->samples[cpu->sample_ptr].end_time = now; - cpu->samples[cpu->sample_ptr].duration_us = - ktime_us_delta(now, cpu->prev_sample); - cpu->samples[cpu->sample_ptr].idletime_us = - idle_time_us - cpu->prev_idle_time_us; - - cpu->samples[cpu->sample_ptr].aperf = aperf; - cpu->samples[cpu->sample_ptr].mperf = mperf; - cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; - cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; - - intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); - } + cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; + cpu->samples[cpu->sample_ptr].aperf = aperf; + cpu->samples[cpu->sample_ptr].mperf = mperf; + cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; + cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; + + intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); - cpu->prev_sample = now; - cpu->prev_idle_time_us = idle_time_us; cpu->prev_aperf = aperf; cpu->prev_mperf = mperf; } @@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu) mod_timer_pinned(&cpu->timer, jiffies + delay); } -static inline void intel_pstate_idle_mode(struct cpudata *cpu) -{ - cpu->idle_mode = 1; -} - -static inline void intel_pstate_normal_mode(struct cpudata *cpu) -{ - cpu->idle_mode = 0; -} - static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) { int32_t busy_scaled; @@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) intel_pstate_pstate_decrease(cpu, steps); } -static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu) -{ - int busy_scaled; - struct _pid *pid; - int ctl = 0; - int steps; - - pid = &cpu->idle_pid; - - busy_scaled = intel_pstate_get_scaled_busy(cpu); - - ctl = pid_calc(pid, 100 - busy_scaled); - - steps = abs(ctl); - if (ctl < 0) - intel_pstate_pstate_decrease(cpu, steps); - else - intel_pstate_pstate_increase(cpu, steps); - - if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) - intel_pstate_normal_mode(cpu); -} - static void 
intel_pstate_timer_func(unsigned long __data) { struct cpudata *cpu = (struct cpudata *) __data; intel_pstate_sample(cpu); + intel_pstate_adjust_busy_pstate(cpu); - if (!cpu->idle_mode) - intel_pstate_adjust_busy_pstate(cpu); - else - intel_pstate_adjust_idle_pstate(cpu); - -#if defined(XPERF_FIX) if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { cpu->min_pstate_count++; if (!(cpu->min_pstate_count % 5)) { intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); - intel_pstate_idle_mode(cpu); } } else cpu->min_pstate_count = 0; -#endif + intel_pstate_set_sample_time(cpu); } @@ -631,7 +552,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum) (unsigned long)cpu; cpu->timer.expires = jiffies + HZ/100; intel_pstate_busy_pid_reset(cpu); - intel_pstate_idle_pid_reset(cpu); intel_pstate_sample(cpu); intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); @@ -675,8 +595,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy) limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); - limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; - limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100); + limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq; + limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); + limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); return 0; @@ -788,10 +709,9 @@ static int __init intel_pstate_init(void) pr_info("Intel P-state driver initializing.\n"); - all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); + all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus()); if (!all_cpu_data) return -ENOMEM; - memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus()); rc = cpufreq_register_driver(&intel_pstate_driver); if (rc) diff --git a/trunk/drivers/cpufreq/kirkwood-cpufreq.c b/trunk/drivers/cpufreq/kirkwood-cpufreq.c index d36ea8dc96eb..b2644af985ec 100644 --- a/trunk/drivers/cpufreq/kirkwood-cpufreq.c +++ b/trunk/drivers/cpufreq/kirkwood-cpufreq.c @@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev) priv.dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Cannot get memory resource\n"); - return -ENODEV; - } priv.base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv.base)) return PTR_ERR(priv.base); diff --git a/trunk/drivers/cpufreq/loongson2_cpufreq.c b/trunk/drivers/cpufreq/loongson2_cpufreq.c index 84889573b566..d53912768946 100644 --- a/trunk/drivers/cpufreq/loongson2_cpufreq.c +++ b/trunk/drivers/cpufreq/loongson2_cpufreq.c @@ -18,6 +18,7 @@ #include #include +#include #include @@ -200,6 +201,7 @@ static void loongson2_cpu_wait(void) LOONGSON_CHIPCFG0 &= ~0x7; /* Put CPU into wait mode */ LOONGSON_CHIPCFG0 = cpu_freq; /* Restore CPU state */ spin_unlock_irqrestore(&loongson2_wait_lock, flags); + local_irq_enable(); } static int __init cpufreq_init(void) diff --git a/trunk/drivers/crypto/nx/nx-aes-cbc.c b/trunk/drivers/crypto/nx/nx-aes-cbc.c index a76d4c4f29f5..35d483f8db66 100644 --- a/trunk/drivers/crypto/nx/nx-aes-cbc.c +++ b/trunk/drivers/crypto/nx/nx-aes-cbc.c @@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = { .cra_blocksize = AES_BLOCK_SIZE, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_type = &crypto_blkcipher_type, + .cra_alignmask = 0xf, .cra_module = THIS_MODULE, .cra_init = 
nx_crypto_ctx_aes_cbc_init, .cra_exit = nx_crypto_ctx_exit, diff --git a/trunk/drivers/crypto/nx/nx-aes-ecb.c b/trunk/drivers/crypto/nx/nx-aes-ecb.c index ba5f1611336f..7bbc9a81da21 100644 --- a/trunk/drivers/crypto/nx/nx-aes-ecb.c +++ b/trunk/drivers/crypto/nx/nx-aes-ecb.c @@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = { .cra_priority = 300, .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, .cra_blocksize = AES_BLOCK_SIZE, + .cra_alignmask = 0xf, .cra_ctxsize = sizeof(struct nx_crypto_ctx), .cra_type = &crypto_blkcipher_type, .cra_module = THIS_MODULE, diff --git a/trunk/drivers/crypto/nx/nx-aes-gcm.c b/trunk/drivers/crypto/nx/nx-aes-gcm.c index c8109edc5cfb..6cca6c392b00 100644 --- a/trunk/drivers/crypto/nx/nx-aes-gcm.c +++ b/trunk/drivers/crypto/nx/nx-aes-gcm.c @@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc) if (enc) NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT; else - nbytes -= AES_BLOCK_SIZE; + nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8; diff --git a/trunk/drivers/crypto/nx/nx-sha256.c b/trunk/drivers/crypto/nx/nx-sha256.c index 9767315f8c0b..67024f2f0b78 100644 --- a/trunk/drivers/crypto/nx/nx-sha256.c +++ b/trunk/drivers/crypto/nx/nx-sha256.c @@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover */ - if (len + sctx->count <= SHA256_BLOCK_SIZE) { + if (len + sctx->count < SHA256_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count, data, len); sctx->count += len; goto out; @@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha256_ops)); /* copy the leftover back into the state struct */ - memcpy(sctx->buf, data + len - leftover, leftover); + if (leftover) + memcpy(sctx->buf, data + len - leftover, leftover); sctx->count = leftover; csbcpb->cpb.sha256.message_bit_length += (u64) @@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) struct nx_sg *in_sg, *out_sg; int rc; + if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) { /* we've hit the nx chip previously, now we're finalizing, * so copy over the partial digest */ @@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out) atomic_inc(&(nx_ctx->stats->sha256_ops)); - atomic64_add(csbcpb->cpb.sha256.message_bit_length, + atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8, &(nx_ctx->stats->sha256_bytes)); memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE); out: diff --git a/trunk/drivers/crypto/nx/nx-sha512.c b/trunk/drivers/crypto/nx/nx-sha512.c index 3177b8c3d5f1..08eee1122349 100644 --- a/trunk/drivers/crypto/nx/nx-sha512.c +++ b/trunk/drivers/crypto/nx/nx-sha512.c @@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover */ - if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) { + if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) { memcpy(sctx->buf + sctx->count[0], data, len); sctx->count[0] += len; goto out; @@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data, atomic_inc(&(nx_ctx->stats->sha512_ops)); /* copy the leftover back into the state struct */ - memcpy(sctx->buf, data + len - leftover, leftover); + if (leftover) + memcpy(sctx->buf, data + len - leftover, leftover); sctx->count[0] = 
leftover; spbc_bits = csbcpb->cpb.sha512.spbc * 8; @@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out) goto out; atomic_inc(&(nx_ctx->stats->sha512_ops)); - atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo, + atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8, &(nx_ctx->stats->sha512_bytes)); memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE); diff --git a/trunk/drivers/crypto/nx/nx.c b/trunk/drivers/crypto/nx/nx.c index c767f232e693..bbdab6e5ccf0 100644 --- a/trunk/drivers/crypto/nx/nx.c +++ b/trunk/drivers/crypto/nx/nx.c @@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx, { struct nx_sg *nx_insg = nx_ctx->in_sg; struct nx_sg *nx_outsg = nx_ctx->out_sg; - struct blkcipher_walk walk; - int rc; - - blkcipher_walk_init(&walk, dst, src, nbytes); - rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); - if (rc) - goto out; if (iv) - memcpy(iv, walk.iv, AES_BLOCK_SIZE); + memcpy(iv, desc->info, AES_BLOCK_SIZE); - while (walk.nbytes) { - nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, - walk.nbytes, nx_ctx->ap->sglen); - nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, - walk.nbytes, nx_ctx->ap->sglen); - - rc = blkcipher_walk_done(desc, &walk, 0); - if (rc) - break; - } - - if (walk.nbytes) { - nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr, - walk.nbytes, nx_ctx->ap->sglen); - nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr, - walk.nbytes, nx_ctx->ap->sglen); - - rc = 0; - } + nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes); + nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes); /* these lengths should be negative, which will indicate to phyp that * the input and output parameters are scatterlists, not linear * buffers */ nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg); nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg); -out: - return rc; + + return 0; } /** @@ -454,6 +430,8 @@ static int nx_register_algs(void) if (rc) goto out; + nx_driver.of.status = NX_OKAY; + rc = crypto_register_alg(&nx_ecb_aes_alg); if (rc) goto out; @@ -498,8 +476,6 @@ static int nx_register_algs(void) if (rc) goto out_unreg_s512; - nx_driver.of.status = NX_OKAY; - goto out; out_unreg_s512: diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig index aeaea32bcfda..e9924898043a 100644 --- a/trunk/drivers/dma/Kconfig +++ b/trunk/drivers/dma/Kconfig @@ -63,8 +63,6 @@ config INTEL_IOATDMA depends on PCI && X86 select DMA_ENGINE select DCA - select ASYNC_TX_DISABLE_PQ_VAL_DMA - select ASYNC_TX_DISABLE_XOR_VAL_DMA help Enable support for the Intel(R) I/OAT DMA engine present in recent Intel Xeon chipsets. @@ -174,15 +172,7 @@ config TEGRA20_APB_DMA This DMA controller transfers data from memory to peripheral fifo or vice versa. It does not support memory to memory data transfer. - - -config SH_DMAE - tristate "Renesas SuperH DMAC support" - depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) - depends on !SH_DMA_API - select DMA_ENGINE - help - Enable support for the Renesas SuperH DMA controllers. 
+source "drivers/dma/sh/Kconfig" config COH901318 bool "ST-Ericsson COH901318 DMA support" @@ -328,6 +318,10 @@ config DMA_ENGINE config DMA_VIRTUAL_CHANNELS tristate +config DMA_ACPI + def_bool y + depends on ACPI + config DMA_OF def_bool y depends on OF diff --git a/trunk/drivers/dma/Makefile b/trunk/drivers/dma/Makefile index 488e3ff85b52..a2b0df591f95 100644 --- a/trunk/drivers/dma/Makefile +++ b/trunk/drivers/dma/Makefile @@ -3,6 +3,7 @@ ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG obj-$(CONFIG_DMA_ENGINE) += dmaengine.o obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o +obj-$(CONFIG_DMA_ACPI) += acpi-dma.o obj-$(CONFIG_DMA_OF) += of-dma.o obj-$(CONFIG_NET_DMA) += iovlock.o @@ -18,7 +19,7 @@ obj-$(CONFIG_DW_DMAC) += dw_dmac.o obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o -obj-$(CONFIG_SH_DMAE) += sh/ +obj-$(CONFIG_SH_DMAE_BASE) += sh/ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o diff --git a/trunk/drivers/dma/acpi-dma.c b/trunk/drivers/dma/acpi-dma.c new file mode 100644 index 000000000000..ba6fc62e9651 --- /dev/null +++ b/trunk/drivers/dma/acpi-dma.c @@ -0,0 +1,279 @@ +/* + * ACPI helpers for DMA request / controller + * + * Based on of-dma.c + * + * Copyright (C) 2013, Intel Corporation + * Author: Andy Shevchenko + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +static LIST_HEAD(acpi_dma_list); +static DEFINE_MUTEX(acpi_dma_lock); + +/** + * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers + * @dev: struct device of DMA controller + * @acpi_dma_xlate: translation function which converts a dma specifier + * into a dma_chan structure + * @data pointer to controller specific data to be used by + * translation function + * + * Returns 0 on success or appropriate errno value on error. + * + * Allocated memory should be freed with appropriate acpi_dma_controller_free() + * call. + */ +int acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data) +{ + struct acpi_device *adev; + struct acpi_dma *adma; + + if (!dev || !acpi_dma_xlate) + return -EINVAL; + + /* Check if the device was enumerated by ACPI */ + if (!ACPI_HANDLE(dev)) + return -EINVAL; + + if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) + return -EINVAL; + + adma = kzalloc(sizeof(*adma), GFP_KERNEL); + if (!adma) + return -ENOMEM; + + adma->dev = dev; + adma->acpi_dma_xlate = acpi_dma_xlate; + adma->data = data; + + /* Now queue acpi_dma controller structure in list */ + mutex_lock(&acpi_dma_lock); + list_add_tail(&adma->dma_controllers, &acpi_dma_list); + mutex_unlock(&acpi_dma_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(acpi_dma_controller_register); + +/** + * acpi_dma_controller_free - Remove a DMA controller from ACPI DMA helpers list + * @dev: struct device of DMA controller + * + * Memory allocated by acpi_dma_controller_register() is freed here. 
+ */ +int acpi_dma_controller_free(struct device *dev) +{ + struct acpi_dma *adma; + + if (!dev) + return -EINVAL; + + mutex_lock(&acpi_dma_lock); + + list_for_each_entry(adma, &acpi_dma_list, dma_controllers) + if (adma->dev == dev) { + list_del(&adma->dma_controllers); + mutex_unlock(&acpi_dma_lock); + kfree(adma); + return 0; + } + + mutex_unlock(&acpi_dma_lock); + return -ENODEV; +} +EXPORT_SYMBOL_GPL(acpi_dma_controller_free); + +static void devm_acpi_dma_release(struct device *dev, void *res) +{ + acpi_dma_controller_free(dev); +} + +/** + * devm_acpi_dma_controller_register - resource managed acpi_dma_controller_register() + * @dev: device that is registering this DMA controller + * @acpi_dma_xlate: translation function + * @data pointer to controller specific data + * + * Managed acpi_dma_controller_register(). DMA controller registered by this + * function are automatically freed on driver detach. See + * acpi_dma_controller_register() for more information. + */ +int devm_acpi_dma_controller_register(struct device *dev, + struct dma_chan *(*acpi_dma_xlate) + (struct acpi_dma_spec *, struct acpi_dma *), + void *data) +{ + void *res; + int ret; + + res = devres_alloc(devm_acpi_dma_release, 0, GFP_KERNEL); + if (!res) + return -ENOMEM; + + ret = acpi_dma_controller_register(dev, acpi_dma_xlate, data); + if (ret) { + devres_free(res); + return ret; + } + devres_add(dev, res); + return 0; +} +EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_register); + +/** + * devm_acpi_dma_controller_free - resource managed acpi_dma_controller_free() + * + * Unregister a DMA controller registered with + * devm_acpi_dma_controller_register(). Normally this function will not need to + * be called and the resource management code will ensure that the resource is + * freed. + */ +void devm_acpi_dma_controller_free(struct device *dev) +{ + WARN_ON(devres_destroy(dev, devm_acpi_dma_release, NULL, NULL)); +} +EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); + +struct acpi_dma_parser_data { + struct acpi_dma_spec dma_spec; + size_t index; + size_t n; +}; + +/** + * acpi_dma_parse_fixed_dma - Parse FixedDMA ACPI resources to a DMA specifier + * @res: struct acpi_resource to get FixedDMA resources from + * @data: pointer to a helper struct acpi_dma_parser_data + */ +static int acpi_dma_parse_fixed_dma(struct acpi_resource *res, void *data) +{ + struct acpi_dma_parser_data *pdata = data; + + if (res->type == ACPI_RESOURCE_TYPE_FIXED_DMA) { + struct acpi_resource_fixed_dma *dma = &res->data.fixed_dma; + + if (pdata->n++ == pdata->index) { + pdata->dma_spec.chan_id = dma->channels; + pdata->dma_spec.slave_id = dma->request_lines; + } + } + + /* Tell the ACPI core to skip this resource */ + return 1; +} + +/** + * acpi_dma_request_slave_chan_by_index - Get the DMA slave channel + * @dev: struct device to get DMA request from + * @index: index of FixedDMA descriptor for @dev + * + * Returns pointer to appropriate dma channel on success or NULL on error. 
+ */ +struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, + size_t index) +{ + struct acpi_dma_parser_data pdata; + struct acpi_dma_spec *dma_spec = &pdata.dma_spec; + struct list_head resource_list; + struct acpi_device *adev; + struct acpi_dma *adma; + struct dma_chan *chan = NULL; + + /* Check if the device was enumerated by ACPI */ + if (!dev || !ACPI_HANDLE(dev)) + return NULL; + + if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) + return NULL; + + memset(&pdata, 0, sizeof(pdata)); + pdata.index = index; + + /* Initial values for the request line and channel */ + dma_spec->chan_id = -1; + dma_spec->slave_id = -1; + + INIT_LIST_HEAD(&resource_list); + acpi_dev_get_resources(adev, &resource_list, + acpi_dma_parse_fixed_dma, &pdata); + acpi_dev_free_resource_list(&resource_list); + + if (dma_spec->slave_id < 0 || dma_spec->chan_id < 0) + return NULL; + + mutex_lock(&acpi_dma_lock); + + list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { + dma_spec->dev = adma->dev; + chan = adma->acpi_dma_xlate(dma_spec, adma); + if (chan) + break; + } + + mutex_unlock(&acpi_dma_lock); + return chan; +} +EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); + +/** + * acpi_dma_request_slave_chan_by_name - Get the DMA slave channel + * @dev: struct device to get DMA request from + * @name: represents corresponding FixedDMA descriptor for @dev + * + * In order to support both Device Tree and ACPI in a single driver we + * translate the names "tx" and "rx" here based on the most common case where + * the first FixedDMA descriptor is TX and second is RX. + * + * Returns pointer to appropriate dma channel on success or NULL on error. + */ +struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, + const char *name) +{ + size_t index; + + if (!strcmp(name, "tx")) + index = 0; + else if (!strcmp(name, "rx")) + index = 1; + else + return NULL; + + return acpi_dma_request_slave_chan_by_index(dev, index); +} +EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name); + +/** + * acpi_dma_simple_xlate - Simple ACPI DMA engine translation helper + * @dma_spec: pointer to ACPI DMA specifier + * @adma: pointer to ACPI DMA controller data + * + * A simple translation function for ACPI based devices. Passes &struct + * dma_spec to the DMA controller driver provided filter function. Returns + * pointer to the channel if found or %NULL otherwise. 
+ */ +struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec, + struct acpi_dma *adma) +{ + struct acpi_dma_filter_info *info = adma->data; + + if (!info || !info->filter_fn) + return NULL; + + return dma_request_channel(info->dma_cap, info->filter_fn, dma_spec); +} +EXPORT_SYMBOL_GPL(acpi_dma_simple_xlate); diff --git a/trunk/drivers/dma/at_hdmac.c b/trunk/drivers/dma/at_hdmac.c index 88cfc61329d2..e923cda930f9 100644 --- a/trunk/drivers/dma/at_hdmac.c +++ b/trunk/drivers/dma/at_hdmac.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "at_hdmac_regs.h" #include "dmaengine.h" @@ -677,7 +678,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrlb |= ATC_DST_ADDR_MODE_FIXED | ATC_SRC_ADDR_MODE_INCR | ATC_FC_MEM2PER - | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF); + | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if); reg = sconfig->dst_addr; for_each_sg(sgl, sg, sg_len, i) { struct at_desc *desc; @@ -716,7 +717,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctrlb |= ATC_DST_ADDR_MODE_INCR | ATC_SRC_ADDR_MODE_FIXED | ATC_FC_PER2MEM - | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF); + | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if); reg = sconfig->src_addr; for_each_sg(sgl, sg, sg_len, i) { @@ -822,8 +823,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED | ATC_SRC_ADDR_MODE_INCR | ATC_FC_MEM2PER - | ATC_SIF(AT_DMA_MEM_IF) - | ATC_DIF(AT_DMA_PER_IF); + | ATC_SIF(atchan->mem_if) + | ATC_DIF(atchan->per_if); break; case DMA_DEV_TO_MEM: @@ -833,8 +834,8 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR | ATC_SRC_ADDR_MODE_FIXED | ATC_FC_PER2MEM - | ATC_SIF(AT_DMA_PER_IF) - | ATC_DIF(AT_DMA_MEM_IF); + | ATC_SIF(atchan->per_if) + | ATC_DIF(atchan->mem_if); break; default: @@ -1188,6 +1189,67 @@ static void atc_free_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "free_chan_resources: done\n"); } +#ifdef CONFIG_OF +static bool at_dma_filter(struct dma_chan *chan, void *slave) +{ + struct at_dma_slave *atslave = slave; + + if (atslave->dma_dev == chan->device->dev) { + chan->private = atslave; + return true; + } else { + return false; + } +} + +static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *of_dma) +{ + struct dma_chan *chan; + struct at_dma_chan *atchan; + struct at_dma_slave *atslave; + dma_cap_mask_t mask; + unsigned int per_id; + struct platform_device *dmac_pdev; + + if (dma_spec->args_count != 2) + return NULL; + + dmac_pdev = of_find_device_by_node(dma_spec->np); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + atslave = devm_kzalloc(&dmac_pdev->dev, sizeof(*atslave), GFP_KERNEL); + if (!atslave) + return NULL; + /* + * We can fill both SRC_PER and DST_PER, one of these fields will be + * ignored depending on DMA transfer direction. 
+ */ + per_id = dma_spec->args[1]; + atslave->cfg = ATC_FIFOCFG_HALFFIFO | ATC_DST_H2SEL_HW + | ATC_SRC_H2SEL_HW | ATC_DST_PER(per_id) + | ATC_SRC_PER(per_id); + atslave->dma_dev = &dmac_pdev->dev; + + chan = dma_request_channel(mask, at_dma_filter, atslave); + if (!chan) + return NULL; + + atchan = to_at_dma_chan(chan); + atchan->per_if = dma_spec->args[0] & 0xff; + atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff; + + return chan; +} +#else +static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *of_dma) +{ + return NULL; +} +#endif /*-- Module Management -----------------------------------------------*/ @@ -1342,6 +1404,8 @@ static int __init at_dma_probe(struct platform_device *pdev) for (i = 0; i < plat_dat->nr_channels; i++) { struct at_dma_chan *atchan = &atdma->chan[i]; + atchan->mem_if = AT_DMA_MEM_IF; + atchan->per_if = AT_DMA_PER_IF; atchan->chan_common.device = &atdma->dma_common; dma_cookie_init(&atchan->chan_common); list_add_tail(&atchan->chan_common.device_node, @@ -1388,8 +1452,25 @@ static int __init at_dma_probe(struct platform_device *pdev) dma_async_device_register(&atdma->dma_common); + /* + * Do not return an error if the dmac node is not present in order to + * not break the existing way of requesting channel with + * dma_request_channel(). + */ + if (pdev->dev.of_node) { + err = of_dma_controller_register(pdev->dev.of_node, + at_dma_xlate, atdma); + if (err) { + dev_err(&pdev->dev, "could not register of_dma_controller\n"); + goto err_of_dma_controller_register; + } + } + return 0; +err_of_dma_controller_register: + dma_async_device_unregister(&atdma->dma_common); + dma_pool_destroy(atdma->dma_desc_pool); err_pool_create: platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq(pdev, 0), atdma); @@ -1406,7 +1487,7 @@ static int __init at_dma_probe(struct platform_device *pdev) return err; } -static int __exit at_dma_remove(struct platform_device *pdev) +static int at_dma_remove(struct platform_device *pdev) { struct at_dma *atdma = platform_get_drvdata(pdev); struct dma_chan *chan, *_chan; @@ -1564,7 +1645,7 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = { }; static struct platform_driver at_dma_driver = { - .remove = __exit_p(at_dma_remove), + .remove = at_dma_remove, .shutdown = at_dma_shutdown, .id_table = atdma_devtypes, .driver = { diff --git a/trunk/drivers/dma/at_hdmac_regs.h b/trunk/drivers/dma/at_hdmac_regs.h index 0eb3c1388667..c604d26fd4d3 100644 --- a/trunk/drivers/dma/at_hdmac_regs.h +++ b/trunk/drivers/dma/at_hdmac_regs.h @@ -220,6 +220,8 @@ enum atc_status { * @device: parent device * @ch_regs: memory mapped register base * @mask: channel index in a mask + * @per_if: peripheral interface + * @mem_if: memory interface * @status: transmit status information from irq/prep* functions * to tasklet (use atomic operations) * @tasklet: bottom half to finish transaction work @@ -238,6 +240,8 @@ struct at_dma_chan { struct at_dma *device; void __iomem *ch_regs; u8 mask; + u8 per_if; + u8 mem_if; unsigned long status; struct tasklet_struct tasklet; u32 save_cfg; diff --git a/trunk/drivers/dma/coh901318.c b/trunk/drivers/dma/coh901318.c index 797940e532ff..3b23061cdb41 100644 --- a/trunk/drivers/dma/coh901318.c +++ b/trunk/drivers/dma/coh901318.c @@ -2748,7 +2748,7 @@ static int __init coh901318_probe(struct platform_device *pdev) return err; } -static int __exit coh901318_remove(struct platform_device *pdev) +static int coh901318_remove(struct platform_device *pdev) { struct coh901318_base *base = 
platform_get_drvdata(pdev); @@ -2760,7 +2760,7 @@ static int __exit coh901318_remove(struct platform_device *pdev) static struct platform_driver coh901318_driver = { - .remove = __exit_p(coh901318_remove), + .remove = coh901318_remove, .driver = { .name = "coh901318", }, diff --git a/trunk/drivers/dma/dmaengine.c b/trunk/drivers/dma/dmaengine.c index b2728d6ba2fd..93f7992bee5c 100644 --- a/trunk/drivers/dma/dmaengine.c +++ b/trunk/drivers/dma/dmaengine.c @@ -62,6 +62,8 @@ #include #include #include +#include +#include #include static DEFINE_MUTEX(dma_list_mutex); @@ -174,7 +176,8 @@ static struct class dma_devclass = { #define dma_device_satisfies_mask(device, mask) \ __dma_device_satisfies_mask((device), &(mask)) static int -__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want) +__dma_device_satisfies_mask(struct dma_device *device, + const dma_cap_mask_t *want) { dma_cap_mask_t has; @@ -463,7 +466,8 @@ static void dma_channel_rebalance(void) } } -static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev, +static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, + struct dma_device *dev, dma_filter_fn fn, void *fn_param) { struct dma_chan *chan; @@ -505,7 +509,8 @@ static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_devic * @fn: optional callback to disposition available channels * @fn_param: opaque parameter to pass to dma_filter_fn */ -struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param) +struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask, + dma_filter_fn fn, void *fn_param) { struct dma_device *device, *_d; struct dma_chan *chan = NULL; @@ -555,12 +560,16 @@ EXPORT_SYMBOL_GPL(__dma_request_channel); * @dev: pointer to client device structure * @name: slave channel name */ -struct dma_chan *dma_request_slave_channel(struct device *dev, char *name) +struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name) { /* If device-tree is present get slave info from here */ if (dev->of_node) return of_dma_request_slave_channel(dev->of_node, name); + /* If device was enumerated by ACPI get slave info from here */ + if (ACPI_HANDLE(dev)) + return acpi_dma_request_slave_chan_by_name(dev, name); + return NULL; } EXPORT_SYMBOL_GPL(dma_request_slave_channel); diff --git a/trunk/drivers/dma/dmatest.c b/trunk/drivers/dma/dmatest.c index a2c8904b63ea..d8ce4ecfef18 100644 --- a/trunk/drivers/dma/dmatest.c +++ b/trunk/drivers/dma/dmatest.c @@ -2,6 +2,7 @@ * DMA Engine test module * * Copyright (C) 2007 Atmel Corporation + * Copyright (C) 2013 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,6 +19,10 @@ #include #include #include +#include +#include +#include +#include static unsigned int test_buf_size = 16384; module_param(test_buf_size, uint, S_IRUGO); @@ -61,6 +66,9 @@ module_param(timeout, uint, S_IRUGO); MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " "Pass -1 for infinite timeout"); +/* Maximum amount of mismatched bytes in buffer to print */ +#define MAX_ERROR_COUNT 32 + /* * Initialization patterns. All bytes in the source buffer has bit 7 * set, all bytes in the destination buffer has bit 7 cleared. 
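With the dmaengine.c hunk above, dma_request_slave_channel() falls back to the new ACPI helpers whenever a device has no OF node but does carry an ACPI handle, and acpi_dma_request_slave_chan_by_name() maps the names "tx" and "rx" to the device's first and second FixedDMA descriptors. Below is a minimal sketch of how a consumer driver might request its channels by name under that scheme; the helper name, the error policy and the PIO fallback are illustrative, not part of this patch.

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int request_dma_channels_sketch(struct device *dev,
                                       struct dma_chan **tx,
                                       struct dma_chan **rx)
{
        /* Resolved via DT if dev->of_node is set, otherwise via ACPI FixedDMA. */
        *tx = dma_request_slave_channel(dev, "tx");
        *rx = dma_request_slave_channel(dev, "rx");

        if (!*tx || !*rx) {
                if (*tx)
                        dma_release_channel(*tx);
                if (*rx)
                        dma_release_channel(*rx);
                *tx = *rx = NULL;
                return -ENODEV;         /* caller may fall back to PIO */
        }

        return 0;
}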
@@ -78,13 +86,65 @@ MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), " #define PATTERN_OVERWRITE 0x20 #define PATTERN_COUNT_MASK 0x1f +enum dmatest_error_type { + DMATEST_ET_OK, + DMATEST_ET_MAP_SRC, + DMATEST_ET_MAP_DST, + DMATEST_ET_PREP, + DMATEST_ET_SUBMIT, + DMATEST_ET_TIMEOUT, + DMATEST_ET_DMA_ERROR, + DMATEST_ET_DMA_IN_PROGRESS, + DMATEST_ET_VERIFY, + DMATEST_ET_VERIFY_BUF, +}; + +struct dmatest_verify_buffer { + unsigned int index; + u8 expected; + u8 actual; +}; + +struct dmatest_verify_result { + unsigned int error_count; + struct dmatest_verify_buffer data[MAX_ERROR_COUNT]; + u8 pattern; + bool is_srcbuf; +}; + +struct dmatest_thread_result { + struct list_head node; + unsigned int n; + unsigned int src_off; + unsigned int dst_off; + unsigned int len; + enum dmatest_error_type type; + union { + unsigned long data; + dma_cookie_t cookie; + enum dma_status status; + int error; + struct dmatest_verify_result *vr; + }; +}; + +struct dmatest_result { + struct list_head node; + char *name; + struct list_head results; +}; + +struct dmatest_info; + struct dmatest_thread { struct list_head node; + struct dmatest_info *info; struct task_struct *task; struct dma_chan *chan; u8 **srcs; u8 **dsts; enum dma_transaction_type type; + bool done; }; struct dmatest_chan { @@ -93,25 +153,69 @@ struct dmatest_chan { struct list_head threads; }; -/* - * These are protected by dma_list_mutex since they're only used by - * the DMA filter function callback +/** + * struct dmatest_params - test parameters. + * @buf_size: size of the memcpy test buffer + * @channel: bus ID of the channel to test + * @device: bus ID of the DMA Engine to test + * @threads_per_chan: number of threads to start per channel + * @max_channels: maximum number of channels to use + * @iterations: iterations before stopping test + * @xor_sources: number of xor source buffers + * @pq_sources: number of p+q source buffers + * @timeout: transfer timeout in msec, -1 for infinite timeout */ -static LIST_HEAD(dmatest_channels); -static unsigned int nr_channels; +struct dmatest_params { + unsigned int buf_size; + char channel[20]; + char device[20]; + unsigned int threads_per_chan; + unsigned int max_channels; + unsigned int iterations; + unsigned int xor_sources; + unsigned int pq_sources; + int timeout; +}; -static bool dmatest_match_channel(struct dma_chan *chan) +/** + * struct dmatest_info - test information. 
+ * @params: test parameters + * @lock: access protection to the fields of this structure + */ +struct dmatest_info { + /* Test parameters */ + struct dmatest_params params; + + /* Internal state */ + struct list_head channels; + unsigned int nr_channels; + struct mutex lock; + + /* debugfs related stuff */ + struct dentry *root; + struct dmatest_params dbgfs_params; + + /* Test results */ + struct list_head results; + struct mutex results_lock; +}; + +static struct dmatest_info test_info; + +static bool dmatest_match_channel(struct dmatest_params *params, + struct dma_chan *chan) { - if (test_channel[0] == '\0') + if (params->channel[0] == '\0') return true; - return strcmp(dma_chan_name(chan), test_channel) == 0; + return strcmp(dma_chan_name(chan), params->channel) == 0; } -static bool dmatest_match_device(struct dma_device *device) +static bool dmatest_match_device(struct dmatest_params *params, + struct dma_device *device) { - if (test_device[0] == '\0') + if (params->device[0] == '\0') return true; - return strcmp(dev_name(device->dev), test_device) == 0; + return strcmp(dev_name(device->dev), params->device) == 0; } static unsigned long dmatest_random(void) @@ -122,7 +226,8 @@ static unsigned long dmatest_random(void) return buf; } -static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) +static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len, + unsigned int buf_size) { unsigned int i; u8 *buf; @@ -133,13 +238,14 @@ static void dmatest_init_srcs(u8 **bufs, unsigned int start, unsigned int len) for ( ; i < start + len; i++) buf[i] = PATTERN_SRC | PATTERN_COPY | (~i & PATTERN_COUNT_MASK); - for ( ; i < test_buf_size; i++) + for ( ; i < buf_size; i++) buf[i] = PATTERN_SRC | (~i & PATTERN_COUNT_MASK); buf++; } } -static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) +static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len, + unsigned int buf_size) { unsigned int i; u8 *buf; @@ -150,40 +256,14 @@ static void dmatest_init_dsts(u8 **bufs, unsigned int start, unsigned int len) for ( ; i < start + len; i++) buf[i] = PATTERN_DST | PATTERN_OVERWRITE | (~i & PATTERN_COUNT_MASK); - for ( ; i < test_buf_size; i++) + for ( ; i < buf_size; i++) buf[i] = PATTERN_DST | (~i & PATTERN_COUNT_MASK); } } -static void dmatest_mismatch(u8 actual, u8 pattern, unsigned int index, - unsigned int counter, bool is_srcbuf) -{ - u8 diff = actual ^ pattern; - u8 expected = pattern | (~counter & PATTERN_COUNT_MASK); - const char *thread_name = current->comm; - - if (is_srcbuf) - pr_warning("%s: srcbuf[0x%x] overwritten!" - " Expected %02x, got %02x\n", - thread_name, index, expected, actual); - else if ((pattern & PATTERN_COPY) - && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) - pr_warning("%s: dstbuf[0x%x] not copied!" - " Expected %02x, got %02x\n", - thread_name, index, expected, actual); - else if (diff & PATTERN_SRC) - pr_warning("%s: dstbuf[0x%x] was copied!" - " Expected %02x, got %02x\n", - thread_name, index, expected, actual); - else - pr_warning("%s: dstbuf[0x%x] mismatch!" 
- " Expected %02x, got %02x\n", - thread_name, index, expected, actual); -} - -static unsigned int dmatest_verify(u8 **bufs, unsigned int start, - unsigned int end, unsigned int counter, u8 pattern, - bool is_srcbuf) +static unsigned int dmatest_verify(struct dmatest_verify_result *vr, u8 **bufs, + unsigned int start, unsigned int end, unsigned int counter, + u8 pattern, bool is_srcbuf) { unsigned int i; unsigned int error_count = 0; @@ -191,6 +271,7 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, u8 expected; u8 *buf; unsigned int counter_orig = counter; + struct dmatest_verify_buffer *vb; for (; (buf = *bufs); bufs++) { counter = counter_orig; @@ -198,18 +279,21 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, actual = buf[i]; expected = pattern | (~counter & PATTERN_COUNT_MASK); if (actual != expected) { - if (error_count < 32) - dmatest_mismatch(actual, pattern, i, - counter, is_srcbuf); + if (error_count < MAX_ERROR_COUNT && vr) { + vb = &vr->data[error_count]; + vb->index = i; + vb->expected = expected; + vb->actual = actual; + } error_count++; } counter++; } } - if (error_count > 32) + if (error_count > MAX_ERROR_COUNT) pr_warning("%s: %u errors suppressed\n", - current->comm, error_count - 32); + current->comm, error_count - MAX_ERROR_COUNT); return error_count; } @@ -249,6 +333,170 @@ static unsigned int min_odd(unsigned int x, unsigned int y) return val % 2 ? val : val - 1; } +static char *verify_result_get_one(struct dmatest_verify_result *vr, + unsigned int i) +{ + struct dmatest_verify_buffer *vb = &vr->data[i]; + u8 diff = vb->actual ^ vr->pattern; + static char buf[512]; + char *msg; + + if (vr->is_srcbuf) + msg = "srcbuf overwritten!"; + else if ((vr->pattern & PATTERN_COPY) + && (diff & (PATTERN_COPY | PATTERN_OVERWRITE))) + msg = "dstbuf not copied!"; + else if (diff & PATTERN_SRC) + msg = "dstbuf was copied!"; + else + msg = "dstbuf mismatch!"; + + snprintf(buf, sizeof(buf) - 1, "%s [0x%x] Expected %02x, got %02x", msg, + vb->index, vb->expected, vb->actual); + + return buf; +} + +static char *thread_result_get(const char *name, + struct dmatest_thread_result *tr) +{ + static const char * const messages[] = { + [DMATEST_ET_OK] = "No errors", + [DMATEST_ET_MAP_SRC] = "src mapping error", + [DMATEST_ET_MAP_DST] = "dst mapping error", + [DMATEST_ET_PREP] = "prep error", + [DMATEST_ET_SUBMIT] = "submit error", + [DMATEST_ET_TIMEOUT] = "test timed out", + [DMATEST_ET_DMA_ERROR] = + "got completion callback (DMA_ERROR)", + [DMATEST_ET_DMA_IN_PROGRESS] = + "got completion callback (DMA_IN_PROGRESS)", + [DMATEST_ET_VERIFY] = "errors", + [DMATEST_ET_VERIFY_BUF] = "verify errors", + }; + static char buf[512]; + + snprintf(buf, sizeof(buf) - 1, + "%s: #%u: %s with src_off=0x%x ""dst_off=0x%x len=0x%x (%lu)", + name, tr->n, messages[tr->type], tr->src_off, tr->dst_off, + tr->len, tr->data); + + return buf; +} + +static int thread_result_add(struct dmatest_info *info, + struct dmatest_result *r, enum dmatest_error_type type, + unsigned int n, unsigned int src_off, unsigned int dst_off, + unsigned int len, unsigned long data) +{ + struct dmatest_thread_result *tr; + + tr = kzalloc(sizeof(*tr), GFP_KERNEL); + if (!tr) + return -ENOMEM; + + tr->type = type; + tr->n = n; + tr->src_off = src_off; + tr->dst_off = dst_off; + tr->len = len; + tr->data = data; + + mutex_lock(&info->results_lock); + list_add_tail(&tr->node, &r->results); + mutex_unlock(&info->results_lock); + + pr_warn("%s\n", thread_result_get(r->name, tr)); + return 0; +} + +static 
unsigned int verify_result_add(struct dmatest_info *info, + struct dmatest_result *r, unsigned int n, + unsigned int src_off, unsigned int dst_off, unsigned int len, + u8 **bufs, int whence, unsigned int counter, u8 pattern, + bool is_srcbuf) +{ + struct dmatest_verify_result *vr; + unsigned int error_count; + unsigned int buf_off = is_srcbuf ? src_off : dst_off; + unsigned int start, end; + + if (whence < 0) { + start = 0; + end = buf_off; + } else if (whence > 0) { + start = buf_off + len; + end = info->params.buf_size; + } else { + start = buf_off; + end = buf_off + len; + } + + vr = kmalloc(sizeof(*vr), GFP_KERNEL); + if (!vr) { + pr_warn("dmatest: No memory to store verify result\n"); + return dmatest_verify(NULL, bufs, start, end, counter, pattern, + is_srcbuf); + } + + vr->pattern = pattern; + vr->is_srcbuf = is_srcbuf; + + error_count = dmatest_verify(vr, bufs, start, end, counter, pattern, + is_srcbuf); + if (error_count) { + vr->error_count = error_count; + thread_result_add(info, r, DMATEST_ET_VERIFY_BUF, n, src_off, + dst_off, len, (unsigned long)vr); + return error_count; + } + + kfree(vr); + return 0; +} + +static void result_free(struct dmatest_info *info, const char *name) +{ + struct dmatest_result *r, *_r; + + mutex_lock(&info->results_lock); + list_for_each_entry_safe(r, _r, &info->results, node) { + struct dmatest_thread_result *tr, *_tr; + + if (name && strcmp(r->name, name)) + continue; + + list_for_each_entry_safe(tr, _tr, &r->results, node) { + if (tr->type == DMATEST_ET_VERIFY_BUF) + kfree(tr->vr); + list_del(&tr->node); + kfree(tr); + } + + kfree(r->name); + list_del(&r->node); + kfree(r); + } + + mutex_unlock(&info->results_lock); +} + +static struct dmatest_result *result_init(struct dmatest_info *info, + const char *name) +{ + struct dmatest_result *r; + + r = kzalloc(sizeof(*r), GFP_KERNEL); + if (r) { + r->name = kstrdup(name, GFP_KERNEL); + INIT_LIST_HEAD(&r->results); + mutex_lock(&info->results_lock); + list_add_tail(&r->node, &info->results); + mutex_unlock(&info->results_lock); + } + return r; +} + /* * This function repeatedly tests DMA transfers of various lengths and * offsets for a given operation type until it is told to exit by @@ -268,6 +516,8 @@ static int dmatest_func(void *data) DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); struct dmatest_thread *thread = data; struct dmatest_done done = { .wait = &done_wait }; + struct dmatest_info *info; + struct dmatest_params *params; struct dma_chan *chan; struct dma_device *dev; const char *thread_name; @@ -278,11 +528,12 @@ static int dmatest_func(void *data) dma_cookie_t cookie; enum dma_status status; enum dma_ctrl_flags flags; - u8 pq_coefs[pq_sources + 1]; + u8 *pq_coefs = NULL; int ret; int src_cnt; int dst_cnt; int i; + struct dmatest_result *result; thread_name = current->comm; set_freezable(); @@ -290,28 +541,39 @@ static int dmatest_func(void *data) ret = -ENOMEM; smp_rmb(); + info = thread->info; + params = &info->params; chan = thread->chan; dev = chan->device; if (thread->type == DMA_MEMCPY) src_cnt = dst_cnt = 1; else if (thread->type == DMA_XOR) { /* force odd to ensure dst = src */ - src_cnt = min_odd(xor_sources | 1, dev->max_xor); + src_cnt = min_odd(params->xor_sources | 1, dev->max_xor); dst_cnt = 1; } else if (thread->type == DMA_PQ) { /* force odd to ensure dst = src */ - src_cnt = min_odd(pq_sources | 1, dma_maxpq(dev, 0)); + src_cnt = min_odd(params->pq_sources | 1, dma_maxpq(dev, 0)); dst_cnt = 2; + + pq_coefs = kmalloc(params->pq_sources+1, GFP_KERNEL); + if (!pq_coefs) + goto 
err_thread_type; + for (i = 0; i < src_cnt; i++) pq_coefs[i] = 1; } else + goto err_thread_type; + + result = result_init(info, thread_name); + if (!result) goto err_srcs; thread->srcs = kcalloc(src_cnt+1, sizeof(u8 *), GFP_KERNEL); if (!thread->srcs) goto err_srcs; for (i = 0; i < src_cnt; i++) { - thread->srcs[i] = kmalloc(test_buf_size, GFP_KERNEL); + thread->srcs[i] = kmalloc(params->buf_size, GFP_KERNEL); if (!thread->srcs[i]) goto err_srcbuf; } @@ -321,7 +583,7 @@ static int dmatest_func(void *data) if (!thread->dsts) goto err_dsts; for (i = 0; i < dst_cnt; i++) { - thread->dsts[i] = kmalloc(test_buf_size, GFP_KERNEL); + thread->dsts[i] = kmalloc(params->buf_size, GFP_KERNEL); if (!thread->dsts[i]) goto err_dstbuf; } @@ -337,7 +599,7 @@ static int dmatest_func(void *data) | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE; while (!kthread_should_stop() - && !(iterations && total_tests >= iterations)) { + && !(params->iterations && total_tests >= params->iterations)) { struct dma_async_tx_descriptor *tx = NULL; dma_addr_t dma_srcs[src_cnt]; dma_addr_t dma_dsts[dst_cnt]; @@ -353,24 +615,24 @@ static int dmatest_func(void *data) else if (thread->type == DMA_PQ) align = dev->pq_align; - if (1 << align > test_buf_size) { + if (1 << align > params->buf_size) { pr_err("%u-byte buffer too small for %d-byte alignment\n", - test_buf_size, 1 << align); + params->buf_size, 1 << align); break; } - len = dmatest_random() % test_buf_size + 1; + len = dmatest_random() % params->buf_size + 1; len = (len >> align) << align; if (!len) len = 1 << align; - src_off = dmatest_random() % (test_buf_size - len + 1); - dst_off = dmatest_random() % (test_buf_size - len + 1); + src_off = dmatest_random() % (params->buf_size - len + 1); + dst_off = dmatest_random() % (params->buf_size - len + 1); src_off = (src_off >> align) << align; dst_off = (dst_off >> align) << align; - dmatest_init_srcs(thread->srcs, src_off, len); - dmatest_init_dsts(thread->dsts, dst_off, len); + dmatest_init_srcs(thread->srcs, src_off, len, params->buf_size); + dmatest_init_dsts(thread->dsts, dst_off, len, params->buf_size); for (i = 0; i < src_cnt; i++) { u8 *buf = thread->srcs[i] + src_off; @@ -380,10 +642,10 @@ static int dmatest_func(void *data) ret = dma_mapping_error(dev->dev, dma_srcs[i]); if (ret) { unmap_src(dev->dev, dma_srcs, len, i); - pr_warn("%s: #%u: mapping error %d with " - "src_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, ret, - src_off, len); + thread_result_add(info, result, + DMATEST_ET_MAP_SRC, + total_tests, src_off, dst_off, + len, ret); failed_tests++; continue; } @@ -391,16 +653,17 @@ static int dmatest_func(void *data) /* map with DMA_BIDIRECTIONAL to force writeback/invalidate */ for (i = 0; i < dst_cnt; i++) { dma_dsts[i] = dma_map_single(dev->dev, thread->dsts[i], - test_buf_size, + params->buf_size, DMA_BIDIRECTIONAL); ret = dma_mapping_error(dev->dev, dma_dsts[i]); if (ret) { unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, test_buf_size, i); - pr_warn("%s: #%u: mapping error %d with " - "dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, ret, - dst_off, test_buf_size); + unmap_dst(dev->dev, dma_dsts, params->buf_size, + i); + thread_result_add(info, result, + DMATEST_ET_MAP_DST, + total_tests, src_off, dst_off, + len, ret); failed_tests++; continue; } @@ -428,11 +691,11 @@ static int dmatest_func(void *data) if (!tx) { unmap_src(dev->dev, dma_srcs, len, src_cnt); - unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); - pr_warning("%s: #%u: prep error 
with src_off=0x%x " - "dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, - src_off, dst_off, len); + unmap_dst(dev->dev, dma_dsts, params->buf_size, + dst_cnt); + thread_result_add(info, result, DMATEST_ET_PREP, + total_tests, src_off, dst_off, + len, 0); msleep(100); failed_tests++; continue; @@ -444,18 +707,18 @@ static int dmatest_func(void *data) cookie = tx->tx_submit(tx); if (dma_submit_error(cookie)) { - pr_warning("%s: #%u: submit error %d with src_off=0x%x " - "dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, cookie, - src_off, dst_off, len); + thread_result_add(info, result, DMATEST_ET_SUBMIT, + total_tests, src_off, dst_off, + len, cookie); msleep(100); failed_tests++; continue; } dma_async_issue_pending(chan); - wait_event_freezable_timeout(done_wait, done.done, - msecs_to_jiffies(timeout)); + wait_event_freezable_timeout(done_wait, + done.done || kthread_should_stop(), + msecs_to_jiffies(params->timeout)); status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); @@ -468,56 +731,57 @@ static int dmatest_func(void *data) * free it this time?" dancing. For now, just * leave it dangling. */ - pr_warning("%s: #%u: test timed out\n", - thread_name, total_tests - 1); + thread_result_add(info, result, DMATEST_ET_TIMEOUT, + total_tests, src_off, dst_off, + len, 0); failed_tests++; continue; } else if (status != DMA_SUCCESS) { - pr_warning("%s: #%u: got completion callback," - " but status is \'%s\'\n", - thread_name, total_tests - 1, - status == DMA_ERROR ? "error" : "in progress"); + enum dmatest_error_type type = (status == DMA_ERROR) ? + DMATEST_ET_DMA_ERROR : DMATEST_ET_DMA_IN_PROGRESS; + thread_result_add(info, result, type, + total_tests, src_off, dst_off, + len, status); failed_tests++; continue; } /* Unmap by myself (see DMA_COMPL_SKIP_DEST_UNMAP above) */ - unmap_dst(dev->dev, dma_dsts, test_buf_size, dst_cnt); + unmap_dst(dev->dev, dma_dsts, params->buf_size, dst_cnt); error_count = 0; pr_debug("%s: verifying source buffer...\n", thread_name); - error_count += dmatest_verify(thread->srcs, 0, src_off, + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->srcs, -1, 0, PATTERN_SRC, true); - error_count += dmatest_verify(thread->srcs, src_off, - src_off + len, src_off, - PATTERN_SRC | PATTERN_COPY, true); - error_count += dmatest_verify(thread->srcs, src_off + len, - test_buf_size, src_off + len, - PATTERN_SRC, true); - - pr_debug("%s: verifying dest buffer...\n", - thread->task->comm); - error_count += dmatest_verify(thread->dsts, 0, dst_off, + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->srcs, 0, + src_off, PATTERN_SRC | PATTERN_COPY, true); + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->srcs, 1, + src_off + len, PATTERN_SRC, true); + + pr_debug("%s: verifying dest buffer...\n", thread_name); + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->dsts, -1, 0, PATTERN_DST, false); - error_count += dmatest_verify(thread->dsts, dst_off, - dst_off + len, src_off, - PATTERN_SRC | PATTERN_COPY, false); - error_count += dmatest_verify(thread->dsts, dst_off + len, - test_buf_size, dst_off + len, - PATTERN_DST, false); + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->dsts, 0, + src_off, PATTERN_SRC | PATTERN_COPY, false); + error_count += verify_result_add(info, result, total_tests, + src_off, dst_off, len, thread->dsts, 1, + dst_off + len, 
PATTERN_DST, false); if (error_count) { - pr_warning("%s: #%u: %u errors with " - "src_off=0x%x dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, error_count, - src_off, dst_off, len); + thread_result_add(info, result, DMATEST_ET_VERIFY, + total_tests, src_off, dst_off, + len, error_count); failed_tests++; } else { - pr_debug("%s: #%u: No errors with " - "src_off=0x%x dst_off=0x%x len=0x%x\n", - thread_name, total_tests - 1, - src_off, dst_off, len); + thread_result_add(info, result, DMATEST_ET_OK, + total_tests, src_off, dst_off, + len, 0); } } @@ -532,6 +796,8 @@ static int dmatest_func(void *data) err_srcbuf: kfree(thread->srcs); err_srcs: + kfree(pq_coefs); +err_thread_type: pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", thread_name, total_tests, failed_tests, ret); @@ -539,7 +805,9 @@ static int dmatest_func(void *data) if (ret) dmaengine_terminate_all(chan); - if (iterations > 0) + thread->done = true; + + if (params->iterations > 0) while (!kthread_should_stop()) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); interruptible_sleep_on(&wait_dmatest_exit); @@ -568,8 +836,10 @@ static void dmatest_cleanup_channel(struct dmatest_chan *dtc) kfree(dtc); } -static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_type type) +static int dmatest_add_threads(struct dmatest_info *info, + struct dmatest_chan *dtc, enum dma_transaction_type type) { + struct dmatest_params *params = &info->params; struct dmatest_thread *thread; struct dma_chan *chan = dtc->chan; char *op; @@ -584,7 +854,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty else return -EINVAL; - for (i = 0; i < threads_per_chan; i++) { + for (i = 0; i < params->threads_per_chan; i++) { thread = kzalloc(sizeof(struct dmatest_thread), GFP_KERNEL); if (!thread) { pr_warning("dmatest: No memory for %s-%s%u\n", @@ -592,6 +862,7 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty break; } + thread->info = info; thread->chan = dtc->chan; thread->type = type; smp_wmb(); @@ -612,7 +883,8 @@ static int dmatest_add_threads(struct dmatest_chan *dtc, enum dma_transaction_ty return i; } -static int dmatest_add_channel(struct dma_chan *chan) +static int dmatest_add_channel(struct dmatest_info *info, + struct dma_chan *chan) { struct dmatest_chan *dtc; struct dma_device *dma_dev = chan->device; @@ -629,75 +901,418 @@ static int dmatest_add_channel(struct dma_chan *chan) INIT_LIST_HEAD(&dtc->threads); if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(dtc, DMA_MEMCPY); + cnt = dmatest_add_threads(info, dtc, DMA_MEMCPY); thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(dtc, DMA_XOR); + cnt = dmatest_add_threads(info, dtc, DMA_XOR); thread_count += cnt > 0 ? cnt : 0; } if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) { - cnt = dmatest_add_threads(dtc, DMA_PQ); + cnt = dmatest_add_threads(info, dtc, DMA_PQ); thread_count += cnt > 0 ? 
cnt : 0; } pr_info("dmatest: Started %u threads using %s\n", thread_count, dma_chan_name(chan)); - list_add_tail(&dtc->node, &dmatest_channels); - nr_channels++; + list_add_tail(&dtc->node, &info->channels); + info->nr_channels++; return 0; } static bool filter(struct dma_chan *chan, void *param) { - if (!dmatest_match_channel(chan) || !dmatest_match_device(chan->device)) + struct dmatest_params *params = param; + + if (!dmatest_match_channel(params, chan) || + !dmatest_match_device(params, chan->device)) return false; else return true; } -static int __init dmatest_init(void) +static int __run_threaded_test(struct dmatest_info *info) { dma_cap_mask_t mask; struct dma_chan *chan; + struct dmatest_params *params = &info->params; int err = 0; dma_cap_zero(mask); dma_cap_set(DMA_MEMCPY, mask); for (;;) { - chan = dma_request_channel(mask, filter, NULL); + chan = dma_request_channel(mask, filter, params); if (chan) { - err = dmatest_add_channel(chan); + err = dmatest_add_channel(info, chan); if (err) { dma_release_channel(chan); break; /* add_channel failed, punt */ } } else break; /* no more channels available */ - if (max_channels && nr_channels >= max_channels) + if (params->max_channels && + info->nr_channels >= params->max_channels) break; /* we have all we need */ } - return err; } -/* when compiled-in wait for drivers to load first */ -late_initcall(dmatest_init); -static void __exit dmatest_exit(void) +#ifndef MODULE +static int run_threaded_test(struct dmatest_info *info) +{ + int ret; + + mutex_lock(&info->lock); + ret = __run_threaded_test(info); + mutex_unlock(&info->lock); + return ret; +} +#endif + +static void __stop_threaded_test(struct dmatest_info *info) { struct dmatest_chan *dtc, *_dtc; struct dma_chan *chan; - list_for_each_entry_safe(dtc, _dtc, &dmatest_channels, node) { + list_for_each_entry_safe(dtc, _dtc, &info->channels, node) { list_del(&dtc->node); chan = dtc->chan; dmatest_cleanup_channel(dtc); - pr_debug("dmatest: dropped channel %s\n", - dma_chan_name(chan)); + pr_debug("dmatest: dropped channel %s\n", dma_chan_name(chan)); dma_release_channel(chan); } + + info->nr_channels = 0; +} + +static void stop_threaded_test(struct dmatest_info *info) +{ + mutex_lock(&info->lock); + __stop_threaded_test(info); + mutex_unlock(&info->lock); +} + +static int __restart_threaded_test(struct dmatest_info *info, bool run) +{ + struct dmatest_params *params = &info->params; + int ret; + + /* Stop any running test first */ + __stop_threaded_test(info); + + if (run == false) + return 0; + + /* Clear results from previous run */ + result_free(info, NULL); + + /* Copy test parameters */ + memcpy(params, &info->dbgfs_params, sizeof(*params)); + + /* Run test with new parameters */ + ret = __run_threaded_test(info); + if (ret) { + __stop_threaded_test(info); + pr_err("dmatest: Can't run test\n"); + } + + return ret; +} + +static ssize_t dtf_write_string(void *to, size_t available, loff_t *ppos, + const void __user *from, size_t count) +{ + char tmp[20]; + ssize_t len; + + len = simple_write_to_buffer(tmp, sizeof(tmp) - 1, ppos, from, count); + if (len >= 0) { + tmp[len] = '\0'; + strlcpy(to, strim(tmp), available); + } + + return len; +} + +static ssize_t dtf_read_channel(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + return simple_read_from_buffer(buf, count, ppos, + info->dbgfs_params.channel, + strlen(info->dbgfs_params.channel)); +} + +static ssize_t dtf_write_channel(struct file *file, const char __user *buf, 
+ size_t size, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + return dtf_write_string(info->dbgfs_params.channel, + sizeof(info->dbgfs_params.channel), + ppos, buf, size); +} + +static const struct file_operations dtf_channel_fops = { + .read = dtf_read_channel, + .write = dtf_write_channel, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t dtf_read_device(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + return simple_read_from_buffer(buf, count, ppos, + info->dbgfs_params.device, + strlen(info->dbgfs_params.device)); +} + +static ssize_t dtf_write_device(struct file *file, const char __user *buf, + size_t size, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + return dtf_write_string(info->dbgfs_params.device, + sizeof(info->dbgfs_params.device), + ppos, buf, size); +} + +static const struct file_operations dtf_device_fops = { + .read = dtf_read_device, + .write = dtf_write_device, + .open = simple_open, + .llseek = default_llseek, +}; + +static ssize_t dtf_read_run(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + char buf[3]; + struct dmatest_chan *dtc; + bool alive = false; + + mutex_lock(&info->lock); + list_for_each_entry(dtc, &info->channels, node) { + struct dmatest_thread *thread; + + list_for_each_entry(thread, &dtc->threads, node) { + if (!thread->done) { + alive = true; + break; + } + } + } + + if (alive) { + buf[0] = 'Y'; + } else { + __stop_threaded_test(info); + buf[0] = 'N'; + } + + mutex_unlock(&info->lock); + buf[1] = '\n'; + buf[2] = 0x00; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t dtf_write_run(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct dmatest_info *info = file->private_data; + char buf[16]; + bool bv; + int ret = 0; + + if (copy_from_user(buf, user_buf, min(count, (sizeof(buf) - 1)))) + return -EFAULT; + + if (strtobool(buf, &bv) == 0) { + mutex_lock(&info->lock); + ret = __restart_threaded_test(info, bv); + mutex_unlock(&info->lock); + } + + return ret ? 
ret : count; +} + +static const struct file_operations dtf_run_fops = { + .read = dtf_read_run, + .write = dtf_write_run, + .open = simple_open, + .llseek = default_llseek, +}; + +static int dtf_results_show(struct seq_file *sf, void *data) +{ + struct dmatest_info *info = sf->private; + struct dmatest_result *result; + struct dmatest_thread_result *tr; + unsigned int i; + + mutex_lock(&info->results_lock); + list_for_each_entry(result, &info->results, node) { + list_for_each_entry(tr, &result->results, node) { + seq_printf(sf, "%s\n", + thread_result_get(result->name, tr)); + if (tr->type == DMATEST_ET_VERIFY_BUF) { + for (i = 0; i < tr->vr->error_count; i++) { + seq_printf(sf, "\t%s\n", + verify_result_get_one(tr->vr, i)); + } + } + } + } + + mutex_unlock(&info->results_lock); + return 0; +} + +static int dtf_results_open(struct inode *inode, struct file *file) +{ + return single_open(file, dtf_results_show, inode->i_private); +} + +static const struct file_operations dtf_results_fops = { + .open = dtf_results_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int dmatest_register_dbgfs(struct dmatest_info *info) +{ + struct dentry *d; + struct dmatest_params *params = &info->dbgfs_params; + int ret = -ENOMEM; + + d = debugfs_create_dir("dmatest", NULL); + if (IS_ERR(d)) + return PTR_ERR(d); + if (!d) + goto err_root; + + info->root = d; + + /* Copy initial values */ + memcpy(params, &info->params, sizeof(*params)); + + /* Test parameters */ + + d = debugfs_create_u32("test_buf_size", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->buf_size); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_file("channel", S_IRUGO | S_IWUSR, info->root, + info, &dtf_channel_fops); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_file("device", S_IRUGO | S_IWUSR, info->root, + info, &dtf_device_fops); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("threads_per_chan", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->threads_per_chan); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("max_channels", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->max_channels); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("iterations", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->iterations); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("xor_sources", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->xor_sources); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("pq_sources", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->pq_sources); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + d = debugfs_create_u32("timeout", S_IWUSR | S_IRUGO, info->root, + (u32 *)¶ms->timeout); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + /* Run or stop threaded test */ + d = debugfs_create_file("run", S_IWUSR | S_IRUGO, info->root, + info, &dtf_run_fops); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + /* Results of test in progress */ + d = debugfs_create_file("results", S_IRUGO, info->root, info, + &dtf_results_fops); + if (IS_ERR_OR_NULL(d)) + goto err_node; + + return 0; + +err_node: + debugfs_remove_recursive(info->root); +err_root: + pr_err("dmatest: Failed to initialize debugfs\n"); + return ret; +} + +static int __init dmatest_init(void) +{ + struct dmatest_info *info = &test_info; + struct dmatest_params *params = &info->params; + int ret; + + memset(info, 0, sizeof(*info)); + + mutex_init(&info->lock); + INIT_LIST_HEAD(&info->channels); + + mutex_init(&info->results_lock); + 
INIT_LIST_HEAD(&info->results); + + /* Set default parameters */ + params->buf_size = test_buf_size; + strlcpy(params->channel, test_channel, sizeof(params->channel)); + strlcpy(params->device, test_device, sizeof(params->device)); + params->threads_per_chan = threads_per_chan; + params->max_channels = max_channels; + params->iterations = iterations; + params->xor_sources = xor_sources; + params->pq_sources = pq_sources; + params->timeout = timeout; + + ret = dmatest_register_dbgfs(info); + if (ret) + return ret; + +#ifdef MODULE + return 0; +#else + return run_threaded_test(info); +#endif +} +/* when compiled-in wait for drivers to load first */ +late_initcall(dmatest_init); + +static void __exit dmatest_exit(void) +{ + struct dmatest_info *info = &test_info; + + debugfs_remove_recursive(info->root); + stop_threaded_test(info); + result_free(info, NULL); } module_exit(dmatest_exit); diff --git a/trunk/drivers/dma/dw_dmac.c b/trunk/drivers/dma/dw_dmac.c index 43a5329d4483..2e5deaa82b60 100644 --- a/trunk/drivers/dma/dw_dmac.c +++ b/trunk/drivers/dma/dw_dmac.c @@ -25,6 +25,8 @@ #include #include #include +#include +#include #include "dw_dmac_regs.h" #include "dmaengine.h" @@ -49,29 +51,22 @@ static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) return slave ? slave->src_master : 1; } -#define SRC_MASTER 0 -#define DST_MASTER 1 - -static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) +static inline void dwc_set_masters(struct dw_dma_chan *dwc) { - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_dma_slave *dws = chan->private; - unsigned int m; - - if (master == SRC_MASTER) - m = dwc_get_sms(dws); - else - m = dwc_get_dms(dws); + struct dw_dma *dw = to_dw_dma(dwc->chan.device); + struct dw_dma_slave *dws = dwc->chan.private; + unsigned char mmax = dw->nr_masters - 1; - return min_t(unsigned int, dw->nr_masters - 1, m); + if (dwc->request_line == ~0) { + dwc->src_master = min_t(unsigned char, mmax, dwc_get_sms(dws)); + dwc->dst_master = min_t(unsigned char, mmax, dwc_get_dms(dws)); + } } #define DWC_DEFAULT_CTLLO(_chan) ({ \ struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ bool _is_slave = is_slave_direction(_dwc->direction); \ - int _dms = dwc_get_master(_chan, DST_MASTER); \ - int _sms = dwc_get_master(_chan, SRC_MASTER); \ u8 _smsize = _is_slave ? _sconfig->src_maxburst : \ DW_DMA_MSIZE_16; \ u8 _dmsize = _is_slave ? 
_sconfig->dst_maxburst : \ @@ -81,8 +76,8 @@ static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) | DWC_CTLL_SRC_MSIZE(_smsize) \ | DWC_CTLL_LLP_D_EN \ | DWC_CTLL_LLP_S_EN \ - | DWC_CTLL_DMS(_dms) \ - | DWC_CTLL_SMS(_sms)); \ + | DWC_CTLL_DMS(_dwc->dst_master) \ + | DWC_CTLL_SMS(_dwc->src_master)); \ }) /* @@ -92,13 +87,6 @@ static inline unsigned int dwc_get_master(struct dma_chan *chan, int master) */ #define NR_DESCS_PER_CHANNEL 64 -static inline unsigned int dwc_get_data_width(struct dma_chan *chan, int master) -{ - struct dw_dma *dw = to_dw_dma(chan->device); - - return dw->data_width[dwc_get_master(chan, master)]; -} - /*----------------------------------------------------------------------*/ static struct device *chan2dev(struct dma_chan *chan) @@ -172,13 +160,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) if (dwc->initialized == true) return; - if (dws && dws->cfg_hi == ~0 && dws->cfg_lo == ~0) { - /* autoconfigure based on request line from DT */ - if (dwc->direction == DMA_MEM_TO_DEV) - cfghi = DWC_CFGH_DST_PER(dwc->request_line); - else if (dwc->direction == DMA_DEV_TO_MEM) - cfghi = DWC_CFGH_SRC_PER(dwc->request_line); - } else if (dws) { + if (dws) { /* * We need controller-specific data to set up slave * transfers. @@ -189,9 +171,9 @@ static void dwc_initialize(struct dw_dma_chan *dwc) cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; } else { if (dwc->direction == DMA_MEM_TO_DEV) - cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); + cfghi = DWC_CFGH_DST_PER(dwc->request_line); else if (dwc->direction == DMA_DEV_TO_MEM) - cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); + cfghi = DWC_CFGH_SRC_PER(dwc->request_line); } channel_writel(dwc, CFG_LO, cfglo); @@ -473,16 +455,16 @@ static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc) (unsigned long long)llp); list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { - /* initial residue value */ + /* Initial residue value */ dwc->residue = desc->total_len; - /* check first descriptors addr */ + /* Check first descriptors addr */ if (desc->txd.phys == llp) { spin_unlock_irqrestore(&dwc->lock, flags); return; } - /* check first descriptors llp */ + /* Check first descriptors llp */ if (desc->lli.llp == llp) { /* This one is currently in progress */ dwc->residue -= dwc_get_sent(dwc); @@ -588,7 +570,7 @@ inline dma_addr_t dw_dma_get_dst_addr(struct dma_chan *chan) } EXPORT_SYMBOL(dw_dma_get_dst_addr); -/* called with dwc->lock held and all DMAC interrupts disabled */ +/* Called with dwc->lock held and all DMAC interrupts disabled */ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, u32 status_err, u32 status_xfer) { @@ -626,7 +608,7 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, dwc_chan_disable(dw, dwc); - /* make sure DMA does not restart by loading a new list */ + /* Make sure DMA does not restart by loading a new list */ channel_writel(dwc, LLP, 0); channel_writel(dwc, CTL_LO, 0); channel_writel(dwc, CTL_HI, 0); @@ -745,6 +727,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); struct dw_desc *desc; struct dw_desc *first; struct dw_desc *prev; @@ -767,8 +750,8 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, dwc->direction = DMA_MEM_TO_MEM; - data_width = min_t(unsigned int, dwc_get_data_width(chan, SRC_MASTER), - 
dwc_get_data_width(chan, DST_MASTER)); + data_width = min_t(unsigned int, dw->data_width[dwc->src_master], + dw->data_width[dwc->dst_master]); src_width = dst_width = min_t(unsigned int, data_width, dwc_fast_fls(src | dest | len)); @@ -826,6 +809,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned long flags, void *context) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); struct dma_slave_config *sconfig = &dwc->dma_sconfig; struct dw_desc *prev; struct dw_desc *first; @@ -859,7 +843,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : DWC_CTLL_FC(DW_DMA_FC_D_M2P); - data_width = dwc_get_data_width(chan, SRC_MASTER); + data_width = dw->data_width[dwc->src_master]; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; @@ -919,7 +903,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) : DWC_CTLL_FC(DW_DMA_FC_D_P2M); - data_width = dwc_get_data_width(chan, DST_MASTER); + data_width = dw->data_width[dwc->dst_master]; for_each_sg(sgl, sg, sg_len, i) { struct dw_desc *desc; @@ -1001,13 +985,6 @@ static inline void convert_burst(u32 *maxburst) *maxburst = 0; } -static inline void convert_slave_id(struct dw_dma_chan *dwc) -{ - struct dw_dma *dw = to_dw_dma(dwc->chan.device); - - dwc->dma_sconfig.slave_id -= dw->request_line_base; -} - static int set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { @@ -1020,9 +997,12 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig)); dwc->direction = sconfig->direction; + /* Take the request line from slave_id member */ + if (dwc->request_line == ~0) + dwc->request_line = sconfig->slave_id; + convert_burst(&dwc->dma_sconfig.src_maxburst); convert_burst(&dwc->dma_sconfig.dst_maxburst); - convert_slave_id(dwc); return 0; } @@ -1030,10 +1010,11 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) static inline void dwc_chan_pause(struct dw_dma_chan *dwc) { u32 cfglo = channel_readl(dwc, CFG_LO); + unsigned int count = 20; /* timeout iterations */ channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); - while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY)) - cpu_relax(); + while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) + udelay(2); dwc->paused = true; } @@ -1169,6 +1150,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) * doesn't mean what you think it means), and status writeback. 
*/ + dwc_set_masters(dwc); + spin_lock_irqsave(&dwc->lock, flags); i = dwc->descs_allocated; while (dwc->descs_allocated < NR_DESCS_PER_CHANNEL) { @@ -1226,6 +1209,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) list_splice_init(&dwc->free_list, &list); dwc->descs_allocated = 0; dwc->initialized = false; + dwc->request_line = ~0; /* Disable interrupts */ channel_clear_bit(dw, MASK.XFER, dwc->mask); @@ -1241,42 +1225,36 @@ static void dwc_free_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "%s: done\n", __func__); } -struct dw_dma_filter_args { +/*----------------------------------------------------------------------*/ + +struct dw_dma_of_filter_args { struct dw_dma *dw; unsigned int req; unsigned int src; unsigned int dst; }; -static bool dw_dma_generic_filter(struct dma_chan *chan, void *param) +static bool dw_dma_of_filter(struct dma_chan *chan, void *param) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_dma_filter_args *fargs = param; - struct dw_dma_slave *dws = &dwc->slave; + struct dw_dma_of_filter_args *fargs = param; - /* ensure the device matches our channel */ + /* Ensure the device matches our channel */ if (chan->device != &fargs->dw->dma) return false; - dws->dma_dev = dw->dma.dev; - dws->cfg_hi = ~0; - dws->cfg_lo = ~0; - dws->src_master = fargs->src; - dws->dst_master = fargs->dst; - dwc->request_line = fargs->req; - - chan->private = dws; + dwc->src_master = fargs->src; + dwc->dst_master = fargs->dst; return true; } -static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) +static struct dma_chan *dw_dma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) { struct dw_dma *dw = ofdma->of_dma_data; - struct dw_dma_filter_args fargs = { + struct dw_dma_of_filter_args fargs = { .dw = dw, }; dma_cap_mask_t cap; @@ -1297,8 +1275,48 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec, dma_cap_set(DMA_SLAVE, cap); /* TODO: there should be a simpler way to do this */ - return dma_request_channel(cap, dw_dma_generic_filter, &fargs); + return dma_request_channel(cap, dw_dma_of_filter, &fargs); +} + +#ifdef CONFIG_ACPI +static bool dw_dma_acpi_filter(struct dma_chan *chan, void *param) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct acpi_dma_spec *dma_spec = param; + + if (chan->device->dev != dma_spec->dev || + chan->chan_id != dma_spec->chan_id) + return false; + + dwc->request_line = dma_spec->slave_id; + dwc->src_master = dwc_get_sms(NULL); + dwc->dst_master = dwc_get_dms(NULL); + + return true; +} + +static void dw_dma_acpi_controller_register(struct dw_dma *dw) +{ + struct device *dev = dw->dma.dev; + struct acpi_dma_filter_info *info; + int ret; + + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); + if (!info) + return; + + dma_cap_zero(info->dma_cap); + dma_cap_set(DMA_SLAVE, info->dma_cap); + info->filter_fn = dw_dma_acpi_filter; + + ret = devm_acpi_dma_controller_register(dev, acpi_dma_simple_xlate, + info); + if (ret) + dev_err(dev, "could not register acpi_dma_controller\n"); } +#else /* !CONFIG_ACPI */ +static inline void dw_dma_acpi_controller_register(struct dw_dma *dw) {} +#endif /* !CONFIG_ACPI */ /* --------------------- Cyclic DMA API extensions -------------------- */ @@ -1322,7 +1340,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) spin_lock_irqsave(&dwc->lock, flags); - /* assert channel is idle */ + /* Assert channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { 
dev_err(chan2dev(&dwc->chan), "BUG: Attempted to start non-idle channel\n"); @@ -1334,7 +1352,7 @@ int dw_dma_cyclic_start(struct dma_chan *chan) dma_writel(dw, CLEAR.ERROR, dwc->mask); dma_writel(dw, CLEAR.XFER, dwc->mask); - /* setup DMAC channel registers */ + /* Setup DMAC channel registers */ channel_writel(dwc, LLP, dwc->cdesc->desc[0]->txd.phys); channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN); channel_writel(dwc, CTL_HI, 0); @@ -1501,7 +1519,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan, last = desc; } - /* lets make a cyclic list */ + /* Let's make a cyclic list */ last->lli.llp = cdesc->desc[0]->txd.phys; dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " @@ -1636,7 +1654,6 @@ dw_dma_parse_dt(struct platform_device *pdev) static int dw_probe(struct platform_device *pdev) { - const struct platform_device_id *match; struct dw_dma_platform_data *pdata; struct resource *io; struct dw_dma *dw; @@ -1706,7 +1723,7 @@ static int dw_probe(struct platform_device *pdev) dw->regs = regs; - /* get hardware configuration parameters */ + /* Get hardware configuration parameters */ if (autocfg) { max_blk_size = dma_readl(dw, MAX_BLK_SIZE); @@ -1720,18 +1737,13 @@ static int dw_probe(struct platform_device *pdev) memcpy(dw->data_width, pdata->data_width, 4); } - /* Get the base request line if set */ - match = platform_get_device_id(pdev); - if (match) - dw->request_line_base = (unsigned int)match->driver_data; - /* Calculate all channel mask before DMA setup */ dw->all_chan_mask = (1 << nr_channels) - 1; - /* force dma off, just in case */ + /* Force dma off, just in case */ dw_dma_off(dw); - /* disable BLOCK interrupts as well */ + /* Disable BLOCK interrupts as well */ channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, @@ -1741,7 +1753,7 @@ static int dw_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dw); - /* create a pool of consistent memory blocks for hardware descriptors */ + /* Create a pool of consistent memory blocks for hardware descriptors */ dw->desc_pool = dmam_pool_create("dw_dmac_desc_pool", &pdev->dev, sizeof(struct dw_desc), 4, 0); if (!dw->desc_pool) { @@ -1781,8 +1793,9 @@ static int dw_probe(struct platform_device *pdev) channel_clear_bit(dw, CH_EN, dwc->mask); dwc->direction = DMA_TRANS_NONE; + dwc->request_line = ~0; - /* hardware configuration */ + /* Hardware configuration */ if (autocfg) { unsigned int dwc_params; @@ -1842,12 +1855,15 @@ static int dw_probe(struct platform_device *pdev) if (pdev->dev.of_node) { err = of_dma_controller_register(pdev->dev.of_node, - dw_dma_xlate, dw); - if (err && err != -ENODEV) + dw_dma_of_xlate, dw); + if (err) dev_err(&pdev->dev, "could not register of_dma_controller\n"); } + if (ACPI_HANDLE(&pdev->dev)) + dw_dma_acpi_controller_register(dw); + return 0; } @@ -1912,18 +1928,19 @@ static const struct dev_pm_ops dw_dev_pm_ops = { }; #ifdef CONFIG_OF -static const struct of_device_id dw_dma_id_table[] = { +static const struct of_device_id dw_dma_of_id_table[] = { { .compatible = "snps,dma-spear1340" }, {} }; -MODULE_DEVICE_TABLE(of, dw_dma_id_table); +MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); #endif -static const struct platform_device_id dw_dma_ids[] = { - /* Name, Request Line Base */ - { "INTL9C60", (kernel_ulong_t)16 }, +#ifdef CONFIG_ACPI +static const struct acpi_device_id dw_dma_acpi_id_table[] = { + { "INTL9C60", 0 }, { } }; +#endif static struct platform_driver dw_driver = { .probe = 
dw_probe, @@ -1932,9 +1949,9 @@ static struct platform_driver dw_driver = { .driver = { .name = "dw_dmac", .pm = &dw_dev_pm_ops, - .of_match_table = of_match_ptr(dw_dma_id_table), + .of_match_table = of_match_ptr(dw_dma_of_id_table), + .acpi_match_table = ACPI_PTR(dw_dma_acpi_id_table), }, - .id_table = dw_dma_ids, }; static int __init dw_init(void) diff --git a/trunk/drivers/dma/dw_dmac_regs.h b/trunk/drivers/dma/dw_dmac_regs.h index 4d02c3669b75..9d417200bd57 100644 --- a/trunk/drivers/dma/dw_dmac_regs.h +++ b/trunk/drivers/dma/dw_dmac_regs.h @@ -212,8 +212,11 @@ struct dw_dma_chan { /* hardware configuration */ unsigned int block_size; bool nollp; + + /* custom slave configuration */ unsigned int request_line; - struct dw_dma_slave slave; + unsigned char src_master; + unsigned char dst_master; /* configuration passed via DMA_SLAVE_CONFIG */ struct dma_slave_config dma_sconfig; @@ -247,7 +250,6 @@ struct dw_dma { /* hardware configuration */ unsigned char nr_masters; unsigned char data_width[4]; - unsigned int request_line_base; struct dw_dma_chan chan[0]; }; diff --git a/trunk/drivers/dma/imx-dma.c b/trunk/drivers/dma/imx-dma.c index 70b8975d107e..f28583370d00 100644 --- a/trunk/drivers/dma/imx-dma.c +++ b/trunk/drivers/dma/imx-dma.c @@ -859,8 +859,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); - if (imxdmac->sg_list) - kfree(imxdmac->sg_list); + kfree(imxdmac->sg_list); imxdmac->sg_list = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); @@ -1145,7 +1144,7 @@ static int __init imxdma_probe(struct platform_device *pdev) return ret; } -static int __exit imxdma_remove(struct platform_device *pdev) +static int imxdma_remove(struct platform_device *pdev) { struct imxdma_engine *imxdma = platform_get_drvdata(pdev); @@ -1162,7 +1161,7 @@ static struct platform_driver imxdma_driver = { .name = "imx-dma", }, .id_table = imx_dma_devtype, - .remove = __exit_p(imxdma_remove), + .remove = imxdma_remove, }; static int __init imxdma_module_init(void) diff --git a/trunk/drivers/dma/imx-sdma.c b/trunk/drivers/dma/imx-sdma.c index f082aa3a918c..092867bf795c 100644 --- a/trunk/drivers/dma/imx-sdma.c +++ b/trunk/drivers/dma/imx-sdma.c @@ -1462,7 +1462,7 @@ static int __init sdma_probe(struct platform_device *pdev) return ret; } -static int __exit sdma_remove(struct platform_device *pdev) +static int sdma_remove(struct platform_device *pdev) { return -EBUSY; } @@ -1473,7 +1473,7 @@ static struct platform_driver sdma_driver = { .of_match_table = sdma_dt_ids, }, .id_table = sdma_devtypes, - .remove = __exit_p(sdma_remove), + .remove = sdma_remove, }; static int __init sdma_module_init(void) diff --git a/trunk/drivers/dma/ioat/dma.c b/trunk/drivers/dma/ioat/dma.c index 1879a5942bfc..17a2393b3e25 100644 --- a/trunk/drivers/dma/ioat/dma.c +++ b/trunk/drivers/dma/ioat/dma.c @@ -892,7 +892,7 @@ MODULE_PARM_DESC(ioat_interrupt_style, * ioat_dma_setup_interrupts - setup interrupt handler * @device: ioat device */ -static int ioat_dma_setup_interrupts(struct ioatdma_device *device) +int ioat_dma_setup_interrupts(struct ioatdma_device *device) { struct ioat_chan_common *chan; struct pci_dev *pdev = device->pdev; @@ -941,6 +941,7 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) } } intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL; + device->irq_mode = IOAT_MSIX; goto done; msix_single_vector: @@ -956,6 +957,7 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) 
pci_disable_msix(pdev); goto msi; } + device->irq_mode = IOAT_MSIX_SINGLE; goto done; msi: @@ -969,6 +971,7 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) pci_disable_msi(pdev); goto intx; } + device->irq_mode = IOAT_MSIX; goto done; intx: @@ -977,6 +980,7 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) if (err) goto err_no_irq; + device->irq_mode = IOAT_INTX; done: if (device->intr_quirk) device->intr_quirk(device); @@ -987,9 +991,11 @@ static int ioat_dma_setup_interrupts(struct ioatdma_device *device) err_no_irq: /* Disable all interrupt generation */ writeb(0, device->reg_base + IOAT_INTRCTRL_OFFSET); + device->irq_mode = IOAT_NOIRQ; dev_err(dev, "no usable interrupts\n"); return err; } +EXPORT_SYMBOL(ioat_dma_setup_interrupts); static void ioat_disable_interrupts(struct ioatdma_device *device) { diff --git a/trunk/drivers/dma/ioat/dma.h b/trunk/drivers/dma/ioat/dma.h index 53a4cbb78f47..54fb7b9ff9aa 100644 --- a/trunk/drivers/dma/ioat/dma.h +++ b/trunk/drivers/dma/ioat/dma.h @@ -39,6 +39,7 @@ #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, txd) #define to_dev(ioat_chan) (&(ioat_chan)->device->pdev->dev) +#define to_pdev(ioat_chan) ((ioat_chan)->device->pdev) #define chan_num(ch) ((int)((ch)->reg_base - (ch)->device->reg_base) / 0x80) @@ -48,6 +49,14 @@ */ #define NULL_DESC_BUFFER_SIZE 1 +enum ioat_irq_mode { + IOAT_NOIRQ = 0, + IOAT_MSIX, + IOAT_MSIX_SINGLE, + IOAT_MSI, + IOAT_INTX +}; + /** * struct ioatdma_device - internal representation of a IOAT device * @pdev: PCI-Express device @@ -72,11 +81,16 @@ struct ioatdma_device { void __iomem *reg_base; struct pci_pool *dma_pool; struct pci_pool *completion_pool; +#define MAX_SED_POOLS 5 + struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; + struct kmem_cache *sed_pool; struct dma_device common; u8 version; struct msix_entry msix_entries[4]; struct ioat_chan_common *idx[4]; struct dca_provider *dca; + enum ioat_irq_mode irq_mode; + u32 cap; void (*intr_quirk)(struct ioatdma_device *device); int (*enumerate_channels)(struct ioatdma_device *device); int (*reset_hw)(struct ioat_chan_common *chan); @@ -131,6 +145,20 @@ struct ioat_dma_chan { u16 active; }; +/** + * struct ioat_sed_ent - wrapper around super extended hardware descriptor + * @hw: hardware SED + * @sed_dma: dma address for the SED + * @list: list member + * @parent: point to the dma descriptor that's the parent + */ +struct ioat_sed_ent { + struct ioat_sed_raw_descriptor *hw; + dma_addr_t dma; + struct ioat_ring_ent *parent; + unsigned int hw_pool; +}; + static inline struct ioat_chan_common *to_chan_common(struct dma_chan *c) { return container_of(c, struct ioat_chan_common, common); @@ -179,7 +207,7 @@ __dump_desc_dbg(struct ioat_chan_common *chan, struct ioat_dma_descriptor *hw, struct device *dev = to_dev(chan); dev_dbg(dev, "desc[%d]: (%#llx->%#llx) cookie: %d flags: %#x" - " ctl: %#x (op: %d int_en: %d compl: %d)\n", id, + " ctl: %#10.8x (op: %#x int_en: %d compl: %d)\n", id, (unsigned long long) tx->phys, (unsigned long long) hw->next, tx->cookie, tx->flags, hw->ctl, hw->ctl_f.op, hw->ctl_f.int_en, hw->ctl_f.compl_write); @@ -201,7 +229,7 @@ ioat_chan_by_index(struct ioatdma_device *device, int index) return device->idx[index]; } -static inline u64 ioat_chansts(struct ioat_chan_common *chan) +static inline u64 ioat_chansts_32(struct ioat_chan_common *chan) { u8 ver = chan->device->version; u64 status; @@ -218,6 +246,26 @@ static inline u64 
ioat_chansts(struct ioat_chan_common *chan) return status; } +#if BITS_PER_LONG == 64 + +static inline u64 ioat_chansts(struct ioat_chan_common *chan) +{ + u8 ver = chan->device->version; + u64 status; + + /* With IOAT v3.3 the status register is 64bit. */ + if (ver >= IOAT_VER_3_3) + status = readq(chan->reg_base + IOAT_CHANSTS_OFFSET(ver)); + else + status = ioat_chansts_32(chan); + + return status; +} + +#else +#define ioat_chansts ioat_chansts_32 +#endif + static inline void ioat_start(struct ioat_chan_common *chan) { u8 ver = chan->device->version; @@ -321,6 +369,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan, dma_addr_t *phys_complete); void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); void ioat_kobject_del(struct ioatdma_device *device); +int ioat_dma_setup_interrupts(struct ioatdma_device *device); extern const struct sysfs_ops ioat_sysfs_ops; extern struct ioat_sysfs_entry ioat_version_attr; extern struct ioat_sysfs_entry ioat_cap_attr; diff --git a/trunk/drivers/dma/ioat/dma_v2.h b/trunk/drivers/dma/ioat/dma_v2.h index e100f644e344..29bf9448035d 100644 --- a/trunk/drivers/dma/ioat/dma_v2.h +++ b/trunk/drivers/dma/ioat/dma_v2.h @@ -137,6 +137,7 @@ struct ioat_ring_ent { #ifdef DEBUG int id; #endif + struct ioat_sed_ent *sed; }; static inline struct ioat_ring_ent * @@ -157,6 +158,7 @@ static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) int ioat2_dma_probe(struct ioatdma_device *dev, int dca); int ioat3_dma_probe(struct ioatdma_device *dev, int dca); +void ioat3_dma_remove(struct ioatdma_device *dev); struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider *ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); diff --git a/trunk/drivers/dma/ioat/dma_v3.c b/trunk/drivers/dma/ioat/dma_v3.c index e8336cce360b..ca6ea9b3551b 100644 --- a/trunk/drivers/dma/ioat/dma_v3.c +++ b/trunk/drivers/dma/ioat/dma_v3.c @@ -55,7 +55,7 @@ /* * Support routines for v3+ hardware */ - +#include #include #include #include @@ -70,6 +70,10 @@ /* ioat hardware assumes at least two sources for raid operations */ #define src_cnt_to_sw(x) ((x) + 2) #define src_cnt_to_hw(x) ((x) - 2) +#define ndest_to_sw(x) ((x) + 1) +#define ndest_to_hw(x) ((x) - 1) +#define src16_cnt_to_sw(x) ((x) + 9) +#define src16_cnt_to_hw(x) ((x) - 9) /* provide a lookup table for setting the source address in the base or * extended descriptor of an xor or pq descriptor @@ -77,7 +81,20 @@ static const u8 xor_idx_to_desc = 0xe0; static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 }; static const u8 pq_idx_to_desc = 0xf8; +static const u8 pq16_idx_to_desc[] = { 0, 0, 1, 1, 1, 1, 1, 1, 1, + 2, 2, 2, 2, 2, 2, 2 }; static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 }; +static const u8 pq16_idx_to_field[] = { 1, 4, 1, 2, 3, 4, 5, 6, 7, + 0, 1, 2, 3, 4, 5, 6 }; + +/* + * technically sources 1 and 2 do not require SED, but the op will have + * at least 9 descriptors so that's irrelevant. 
+ */ +static const u8 pq16_idx_to_sed[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1 }; + +static void ioat3_eh(struct ioat2_dma_chan *ioat); static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx) { @@ -101,6 +118,13 @@ static dma_addr_t pq_get_src(struct ioat_raw_descriptor *descs[2], int idx) return raw->field[pq_idx_to_field[idx]]; } +static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) +{ + struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; + + return raw->field[pq16_idx_to_field[idx]]; +} + static void pq_set_src(struct ioat_raw_descriptor *descs[2], dma_addr_t addr, u32 offset, u8 coef, int idx) { @@ -111,6 +135,167 @@ static void pq_set_src(struct ioat_raw_descriptor *descs[2], pq->coef[idx] = coef; } +static int sed_get_pq16_pool_idx(int src_cnt) +{ + + return pq16_idx_to_sed[src_cnt]; +} + +static bool is_jf_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_JSF0: + case PCI_DEVICE_ID_INTEL_IOAT_JSF1: + case PCI_DEVICE_ID_INTEL_IOAT_JSF2: + case PCI_DEVICE_ID_INTEL_IOAT_JSF3: + case PCI_DEVICE_ID_INTEL_IOAT_JSF4: + case PCI_DEVICE_ID_INTEL_IOAT_JSF5: + case PCI_DEVICE_ID_INTEL_IOAT_JSF6: + case PCI_DEVICE_ID_INTEL_IOAT_JSF7: + case PCI_DEVICE_ID_INTEL_IOAT_JSF8: + case PCI_DEVICE_ID_INTEL_IOAT_JSF9: + return true; + default: + return false; + } +} + +static bool is_snb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_SNB0: + case PCI_DEVICE_ID_INTEL_IOAT_SNB1: + case PCI_DEVICE_ID_INTEL_IOAT_SNB2: + case PCI_DEVICE_ID_INTEL_IOAT_SNB3: + case PCI_DEVICE_ID_INTEL_IOAT_SNB4: + case PCI_DEVICE_ID_INTEL_IOAT_SNB5: + case PCI_DEVICE_ID_INTEL_IOAT_SNB6: + case PCI_DEVICE_ID_INTEL_IOAT_SNB7: + case PCI_DEVICE_ID_INTEL_IOAT_SNB8: + case PCI_DEVICE_ID_INTEL_IOAT_SNB9: + return true; + default: + return false; + } +} + +static bool is_ivb_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_IVB0: + case PCI_DEVICE_ID_INTEL_IOAT_IVB1: + case PCI_DEVICE_ID_INTEL_IOAT_IVB2: + case PCI_DEVICE_ID_INTEL_IOAT_IVB3: + case PCI_DEVICE_ID_INTEL_IOAT_IVB4: + case PCI_DEVICE_ID_INTEL_IOAT_IVB5: + case PCI_DEVICE_ID_INTEL_IOAT_IVB6: + case PCI_DEVICE_ID_INTEL_IOAT_IVB7: + case PCI_DEVICE_ID_INTEL_IOAT_IVB8: + case PCI_DEVICE_ID_INTEL_IOAT_IVB9: + return true; + default: + return false; + } + +} + +static bool is_hsw_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_HSW0: + case PCI_DEVICE_ID_INTEL_IOAT_HSW1: + case PCI_DEVICE_ID_INTEL_IOAT_HSW2: + case PCI_DEVICE_ID_INTEL_IOAT_HSW3: + case PCI_DEVICE_ID_INTEL_IOAT_HSW4: + case PCI_DEVICE_ID_INTEL_IOAT_HSW5: + case PCI_DEVICE_ID_INTEL_IOAT_HSW6: + case PCI_DEVICE_ID_INTEL_IOAT_HSW7: + case PCI_DEVICE_ID_INTEL_IOAT_HSW8: + case PCI_DEVICE_ID_INTEL_IOAT_HSW9: + return true; + default: + return false; + } + +} + +static bool is_xeon_cb32(struct pci_dev *pdev) +{ + return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) || + is_hsw_ioat(pdev); +} + +static bool is_bwd_ioat(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BWD0: + case PCI_DEVICE_ID_INTEL_IOAT_BWD1: + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + return true; + default: + return false; + } +} + +static bool is_bwd_noraid(struct pci_dev *pdev) +{ + switch (pdev->device) { + case PCI_DEVICE_ID_INTEL_IOAT_BWD2: + case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + return true; + default: + return false; + } + +} + +static void 
pq16_set_src(struct ioat_raw_descriptor *desc[3], + dma_addr_t addr, u32 offset, u8 coef, int idx) +{ + struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; + struct ioat_pq16a_descriptor *pq16 = + (struct ioat_pq16a_descriptor *)desc[1]; + struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; + + raw->field[pq16_idx_to_field[idx]] = addr + offset; + + if (idx < 8) + pq->coef[idx] = coef; + else + pq16->coef[idx - 8] = coef; +} + +static struct ioat_sed_ent * +ioat3_alloc_sed(struct ioatdma_device *device, unsigned int hw_pool) +{ + struct ioat_sed_ent *sed; + gfp_t flags = __GFP_ZERO | GFP_ATOMIC; + + sed = kmem_cache_alloc(device->sed_pool, flags); + if (!sed) + return NULL; + + sed->hw_pool = hw_pool; + sed->hw = dma_pool_alloc(device->sed_hw_pool[hw_pool], + flags, &sed->dma); + if (!sed->hw) { + kmem_cache_free(device->sed_pool, sed); + return NULL; + } + + return sed; +} + +static void ioat3_free_sed(struct ioatdma_device *device, struct ioat_sed_ent *sed) +{ + if (!sed) + return; + + dma_pool_free(device->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma); + kmem_cache_free(device->sed_pool, sed); +} + static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, int idx) { @@ -223,6 +408,54 @@ static void ioat3_dma_unmap(struct ioat2_dma_chan *ioat, } break; } + case IOAT_OP_PQ_16S: + case IOAT_OP_PQ_VAL_16S: { + struct ioat_pq_descriptor *pq = desc->pq; + int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); + struct ioat_raw_descriptor *descs[4]; + int i; + + /* in the 'continue' case don't unmap the dests as sources */ + if (dmaf_p_disabled_continue(flags)) + src_cnt--; + else if (dmaf_continue(flags)) + src_cnt -= 3; + + if (!(flags & DMA_COMPL_SKIP_SRC_UNMAP)) { + descs[0] = (struct ioat_raw_descriptor *)pq; + descs[1] = (struct ioat_raw_descriptor *)(desc->sed->hw); + descs[2] = (struct ioat_raw_descriptor *)(&desc->sed->hw->b[0]); + for (i = 0; i < src_cnt; i++) { + dma_addr_t src = pq16_get_src(descs, i); + + ioat_unmap(pdev, src - offset, len, + PCI_DMA_TODEVICE, flags, 0); + } + + /* the dests are sources in pq validate operations */ + if (pq->ctl_f.op == IOAT_OP_XOR_VAL) { + if (!(flags & DMA_PREP_PQ_DISABLE_P)) + ioat_unmap(pdev, pq->p_addr - offset, + len, PCI_DMA_TODEVICE, + flags, 0); + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) + ioat_unmap(pdev, pq->q_addr - offset, + len, PCI_DMA_TODEVICE, + flags, 0); + break; + } + } + + if (!(flags & DMA_COMPL_SKIP_DEST_UNMAP)) { + if (!(flags & DMA_PREP_PQ_DISABLE_P)) + ioat_unmap(pdev, pq->p_addr - offset, len, + PCI_DMA_BIDIRECTIONAL, flags, 1); + if (!(flags & DMA_PREP_PQ_DISABLE_Q)) + ioat_unmap(pdev, pq->q_addr - offset, len, + PCI_DMA_BIDIRECTIONAL, flags, 1); + } + break; + } default: dev_err(&pdev->dev, "%s: unknown op type: %#x\n", __func__, desc->hw->ctl_f.op); @@ -250,6 +483,63 @@ static bool desc_has_ext(struct ioat_ring_ent *desc) return false; } +static u64 ioat3_get_current_completion(struct ioat_chan_common *chan) +{ + u64 phys_complete; + u64 completion; + + completion = *chan->completion; + phys_complete = ioat_chansts_to_addr(completion); + + dev_dbg(to_dev(chan), "%s: phys_complete: %#llx\n", __func__, + (unsigned long long) phys_complete); + + return phys_complete; +} + +static bool ioat3_cleanup_preamble(struct ioat_chan_common *chan, + u64 *phys_complete) +{ + *phys_complete = ioat3_get_current_completion(chan); + if (*phys_complete == chan->last_completion) + return false; + + clear_bit(IOAT_COMPLETION_ACK, &chan->state); + mod_timer(&chan->timer, jiffies + 
COMPLETION_TIMEOUT); + + return true; +} + +static void +desc_get_errstat(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc) +{ + struct ioat_dma_descriptor *hw = desc->hw; + + switch (hw->ctl_f.op) { + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + { + struct ioat_pq_descriptor *pq = desc->pq; + + /* check if there's error written */ + if (!pq->dwbes_f.wbes) + return; + + /* need to set a chanerr var for checking to clear later */ + + if (pq->dwbes_f.p_val_err) + *desc->result |= SUM_CHECK_P_RESULT; + + if (pq->dwbes_f.q_val_err) + *desc->result |= SUM_CHECK_Q_RESULT; + + return; + } + default: + return; + } +} + /** * __cleanup - reclaim used descriptors * @ioat: channel (ring) to clean @@ -260,6 +550,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc) static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) { struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *device = chan->device; struct ioat_ring_ent *desc; bool seen_current = false; int idx = ioat->tail, i; @@ -268,6 +559,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued); + /* + * At restart of the channel, the completion address and the + * channel status will be 0 due to starting a new chain. Since + * it's new chain and the first descriptor "fails", there is + * nothing to clean up. We do not want to reap the entire submitted + * chain due to this 0 address value and then BUG. + */ + if (!phys_complete) + return; + active = ioat2_ring_active(ioat); for (i = 0; i < active && !seen_current; i++) { struct dma_async_tx_descriptor *tx; @@ -276,6 +577,11 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); desc = ioat2_get_ring_ent(ioat, idx + i); dump_desc_dbg(ioat, desc); + + /* set err stat if we are using dwbes */ + if (device->cap & IOAT_CAP_DWBES) + desc_get_errstat(ioat, desc); + tx = &desc->txd; if (tx->cookie) { dma_cookie_complete(tx); @@ -294,6 +600,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) BUG_ON(i + 1 >= active); i++; } + + /* cleanup super extended descriptors */ + if (desc->sed) { + ioat3_free_sed(device, desc->sed); + desc->sed = NULL; + } } smp_mb(); /* finish all descriptor reads before incrementing tail */ ioat->tail = idx + i; @@ -314,11 +626,22 @@ static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete) static void ioat3_cleanup(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; + u64 phys_complete; spin_lock_bh(&chan->cleanup_lock); - if (ioat_cleanup_preamble(chan, &phys_complete)) + + if (ioat3_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); + + if (is_ioat_halted(*chan->completion)) { + u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + + if (chanerr & IOAT_CHANERR_HANDLE_MASK) { + mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); + ioat3_eh(ioat); + } + } + spin_unlock_bh(&chan->cleanup_lock); } @@ -333,15 +656,78 @@ static void ioat3_cleanup_event(unsigned long data) static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; - dma_addr_t phys_complete; + u64 phys_complete; ioat2_quiesce(chan, 0); - if (ioat_cleanup_preamble(chan, &phys_complete)) + if (ioat3_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); 
__ioat2_restart_chan(ioat); } +static void ioat3_eh(struct ioat2_dma_chan *ioat) +{ + struct ioat_chan_common *chan = &ioat->base; + struct pci_dev *pdev = to_pdev(chan); + struct ioat_dma_descriptor *hw; + u64 phys_complete; + struct ioat_ring_ent *desc; + u32 err_handled = 0; + u32 chanerr_int; + u32 chanerr; + + /* cleanup so tail points to descriptor that caused the error */ + if (ioat3_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); + + chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); + pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int); + + dev_dbg(to_dev(chan), "%s: error = %x:%x\n", + __func__, chanerr, chanerr_int); + + desc = ioat2_get_ring_ent(ioat, ioat->tail); + hw = desc->hw; + dump_desc_dbg(ioat, desc); + + switch (hw->ctl_f.op) { + case IOAT_OP_XOR_VAL: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + break; + case IOAT_OP_PQ_VAL: + case IOAT_OP_PQ_VAL_16S: + if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) { + *desc->result |= SUM_CHECK_P_RESULT; + err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR; + } + if (chanerr & IOAT_CHANERR_XOR_Q_ERR) { + *desc->result |= SUM_CHECK_Q_RESULT; + err_handled |= IOAT_CHANERR_XOR_Q_ERR; + } + break; + } + + /* fault on unhandled error or spurious halt */ + if (chanerr ^ err_handled || chanerr == 0) { + dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", + __func__, chanerr, err_handled); + BUG(); + } + + writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); + pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int); + + /* mark faulting descriptor as complete */ + *chan->completion = desc->txd.phys; + + spin_lock_bh(&ioat->prep_lock); + ioat3_restart_channel(ioat); + spin_unlock_bh(&ioat->prep_lock); +} + static void check_active(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; @@ -605,7 +991,8 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct int i; dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" - " sz: %#x ctl: %#x (op: %d int: %d compl: %d pq: '%s%s' src_cnt: %d)\n", + " sz: %#10.8x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" + " src_cnt: %d)\n", desc_id(desc), (unsigned long long) desc->txd.phys, (unsigned long long) (pq_ex ? pq_ex->next : pq->next), desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, pq->ctl_f.int_en, @@ -617,6 +1004,42 @@ dump_pq_desc_dbg(struct ioat2_dma_chan *ioat, struct ioat_ring_ent *desc, struct (unsigned long long) pq_get_src(descs, i), pq->coef[i]); dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); + dev_dbg(dev, "\tNEXT: %#llx\n", pq->next); +} + +static void dump_pq16_desc_dbg(struct ioat2_dma_chan *ioat, + struct ioat_ring_ent *desc) +{ + struct device *dev = to_dev(&ioat->base); + struct ioat_pq_descriptor *pq = desc->pq; + struct ioat_raw_descriptor *descs[] = { (void *)pq, + (void *)pq, + (void *)pq }; + int src_cnt = src16_cnt_to_sw(pq->ctl_f.src_cnt); + int i; + + if (desc->sed) { + descs[1] = (void *)desc->sed->hw; + descs[2] = (void *)desc->sed->hw + 64; + } + + dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" + " sz: %#x ctl: %#x (op: %#x int: %d compl: %d pq: '%s%s'" + " src_cnt: %d)\n", + desc_id(desc), (unsigned long long) desc->txd.phys, + (unsigned long long) pq->next, + desc->txd.flags, pq->size, pq->ctl, + pq->ctl_f.op, pq->ctl_f.int_en, + pq->ctl_f.compl_write, + pq->ctl_f.p_disable ? "" : "p", pq->ctl_f.q_disable ? 
"" : "q", + pq->ctl_f.src_cnt); + for (i = 0; i < src_cnt; i++) { + dev_dbg(dev, "\tsrc[%d]: %#llx coef: %#x\n", i, + (unsigned long long) pq16_get_src(descs, i), + pq->coef[i]); + } + dev_dbg(dev, "\tP: %#llx\n", pq->p_addr); + dev_dbg(dev, "\tQ: %#llx\n", pq->q_addr); } static struct dma_async_tx_descriptor * @@ -627,6 +1050,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *device = chan->device; struct ioat_ring_ent *compl_desc; struct ioat_ring_ent *desc; struct ioat_ring_ent *ext; @@ -637,6 +1061,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, u32 offset = 0; u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; int i, s, idx, with_ext, num_descs; + int cb32 = (device->version < IOAT_VER_3_3) ? 1 : 0; dev_dbg(to_dev(chan), "%s\n", __func__); /* the engine requires at least two sources (we provide @@ -662,7 +1087,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, * order. */ if (likely(num_descs) && - ioat2_check_space_lock(ioat, num_descs+1) == 0) + ioat2_check_space_lock(ioat, num_descs + cb32) == 0) idx = ioat->head; else return NULL; @@ -700,6 +1125,9 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, pq->q_addr = dst[1] + offset; pq->ctl = 0; pq->ctl_f.op = op; + /* we turn on descriptor write back error status */ + if (device->cap & IOAT_CAP_DWBES) + pq->ctl_f.wb_en = result ? 1 : 0; pq->ctl_f.src_cnt = src_cnt_to_hw(s); pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); @@ -716,26 +1144,140 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); dump_pq_desc_dbg(ioat, desc, ext); - /* completion descriptor carries interrupt bit */ - compl_desc = ioat2_get_ring_ent(ioat, idx + i); - compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; - hw = compl_desc->hw; - hw->ctl = 0; - hw->ctl_f.null = 1; - hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); - hw->ctl_f.compl_write = 1; - hw->size = NULL_DESC_BUFFER_SIZE; - dump_desc_dbg(ioat, compl_desc); + if (!cb32) { + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + pq->ctl_f.compl_write = 1; + compl_desc = desc; + } else { + /* completion descriptor carries interrupt bit */ + compl_desc = ioat2_get_ring_ent(ioat, idx + i); + compl_desc->txd.flags = flags & DMA_PREP_INTERRUPT; + hw = compl_desc->hw; + hw->ctl = 0; + hw->ctl_f.null = 1; + hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + hw->ctl_f.compl_write = 1; + hw->size = NULL_DESC_BUFFER_SIZE; + dump_desc_dbg(ioat, compl_desc); + } + /* we leave the channel locked to ensure in order submission */ return &compl_desc->txd; } +static struct dma_async_tx_descriptor * +__ioat3_prep_pq16_lock(struct dma_chan *c, enum sum_check_flags *result, + const dma_addr_t *dst, const dma_addr_t *src, + unsigned int src_cnt, const unsigned char *scf, + size_t len, unsigned long flags) +{ + struct ioat2_dma_chan *ioat = to_ioat2_chan(c); + struct ioat_chan_common *chan = &ioat->base; + struct ioatdma_device *device = chan->device; + struct ioat_ring_ent *desc; + size_t total_len = len; + struct ioat_pq_descriptor *pq; + u32 offset = 0; + u8 op; + int i, s, idx, num_descs; + + /* this function only handles src_cnt 9 - 16 */ + BUG_ON(src_cnt < 9); + + /* this function is only called with 9-16 sources */ + op = result ? 
IOAT_OP_PQ_VAL_16S : IOAT_OP_PQ_16S; + + dev_dbg(to_dev(chan), "%s\n", __func__); + + num_descs = ioat2_xferlen_to_descs(ioat, len); + + /* + * 16 source pq is only available on cb3.3 and has no completion + * write hw bug. + */ + if (num_descs && ioat2_check_space_lock(ioat, num_descs) == 0) + idx = ioat->head; + else + return NULL; + + i = 0; + + do { + struct ioat_raw_descriptor *descs[4]; + size_t xfer_size = min_t(size_t, len, 1 << ioat->xfercap_log); + + desc = ioat2_get_ring_ent(ioat, idx + i); + pq = desc->pq; + + descs[0] = (struct ioat_raw_descriptor *) pq; + + desc->sed = ioat3_alloc_sed(device, + sed_get_pq16_pool_idx(src_cnt)); + if (!desc->sed) { + dev_err(to_dev(chan), + "%s: no free sed entries\n", __func__); + return NULL; + } + + pq->sed_addr = desc->sed->dma; + desc->sed->parent = desc; + + descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; + descs[2] = (void *)descs[1] + 64; + + for (s = 0; s < src_cnt; s++) + pq16_set_src(descs, src[s], offset, scf[s], s); + + /* see the comment for dma_maxpq in include/linux/dmaengine.h */ + if (dmaf_p_disabled_continue(flags)) + pq16_set_src(descs, dst[1], offset, 1, s++); + else if (dmaf_continue(flags)) { + pq16_set_src(descs, dst[0], offset, 0, s++); + pq16_set_src(descs, dst[1], offset, 1, s++); + pq16_set_src(descs, dst[1], offset, 0, s++); + } + + pq->size = xfer_size; + pq->p_addr = dst[0] + offset; + pq->q_addr = dst[1] + offset; + pq->ctl = 0; + pq->ctl_f.op = op; + pq->ctl_f.src_cnt = src16_cnt_to_hw(s); + /* we turn on descriptor write back error status */ + if (device->cap & IOAT_CAP_DWBES) + pq->ctl_f.wb_en = result ? 1 : 0; + pq->ctl_f.p_disable = !!(flags & DMA_PREP_PQ_DISABLE_P); + pq->ctl_f.q_disable = !!(flags & DMA_PREP_PQ_DISABLE_Q); + + len -= xfer_size; + offset += xfer_size; + } while (++i < num_descs); + + /* last pq descriptor carries the unmap parameters and fence bit */ + desc->txd.flags = flags; + desc->len = total_len; + if (result) + desc->result = result; + pq->ctl_f.fence = !!(flags & DMA_PREP_FENCE); + + /* with cb3.3 we should be able to do completion w/o a null desc */ + pq->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT); + pq->ctl_f.compl_write = 1; + + dump_pq16_desc_dbg(ioat, desc); + + /* we leave the channel locked to ensure in order submission */ + return &desc->txd; +} + static struct dma_async_tx_descriptor * ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { + struct dma_device *dma = chan->device; + /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) dst[0] = dst[1]; @@ -755,11 +1297,20 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, single_source_coef[0] = scf[0]; single_source_coef[1] = 0; - return __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, - single_source_coef, len, flags); - } else - return __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, scf, - len, flags); + return (src_cnt > 8) && (dma->max_pq > 8) ? + __ioat3_prep_pq16_lock(chan, NULL, dst, single_source, + 2, single_source_coef, len, + flags) : + __ioat3_prep_pq_lock(chan, NULL, dst, single_source, 2, + single_source_coef, len, flags); + + } else { + return (src_cnt > 8) && (dma->max_pq > 8) ? 
+ __ioat3_prep_pq16_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags) : + __ioat3_prep_pq_lock(chan, NULL, dst, src, src_cnt, + scf, len, flags); + } } struct dma_async_tx_descriptor * @@ -767,6 +1318,8 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) { + struct dma_device *dma = chan->device; + /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) pq[0] = pq[1]; @@ -778,14 +1331,18 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, */ *pqres = 0; - return __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, - flags); + return (src_cnt > 8) && (dma->max_pq > 8) ? + __ioat3_prep_pq16_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags) : + __ioat3_prep_pq_lock(chan, pqres, pq, src, src_cnt, scf, len, + flags); } static struct dma_async_tx_descriptor * ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { + struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -794,8 +1351,11 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = dst; /* specify valid address for disabled result */ - return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, - flags); + return (src_cnt > 8) && (dma->max_pq > 8) ? + __ioat3_prep_pq16_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags) : + __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len, + flags); } struct dma_async_tx_descriptor * @@ -803,6 +1363,7 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) { + struct dma_device *dma = chan->device; unsigned char scf[src_cnt]; dma_addr_t pq[2]; @@ -816,8 +1377,12 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, flags |= DMA_PREP_PQ_DISABLE_Q; pq[1] = pq[0]; /* specify valid address for disabled result */ - return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf, - len, flags); + + return (src_cnt > 8) && (dma->max_pq > 8) ? 
+ __ioat3_prep_pq16_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags) : + __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, + scf, len, flags); } static struct dma_async_tx_descriptor * @@ -1167,6 +1732,56 @@ static int ioat3_dma_self_test(struct ioatdma_device *device) return 0; } +static int ioat3_irq_reinit(struct ioatdma_device *device) +{ + int msixcnt = device->common.chancnt; + struct pci_dev *pdev = device->pdev; + int i; + struct msix_entry *msix; + struct ioat_chan_common *chan; + int err = 0; + + switch (device->irq_mode) { + case IOAT_MSIX: + + for (i = 0; i < msixcnt; i++) { + msix = &device->msix_entries[i]; + chan = ioat_chan_by_index(device, i); + devm_free_irq(&pdev->dev, msix->vector, chan); + } + + pci_disable_msix(pdev); + break; + + case IOAT_MSIX_SINGLE: + msix = &device->msix_entries[0]; + chan = ioat_chan_by_index(device, 0); + devm_free_irq(&pdev->dev, msix->vector, chan); + pci_disable_msix(pdev); + break; + + case IOAT_MSI: + chan = ioat_chan_by_index(device, 0); + devm_free_irq(&pdev->dev, pdev->irq, chan); + pci_disable_msi(pdev); + break; + + case IOAT_INTX: + chan = ioat_chan_by_index(device, 0); + devm_free_irq(&pdev->dev, pdev->irq, chan); + break; + + default: + return 0; + } + + device->irq_mode = IOAT_NOIRQ; + + err = ioat_dma_setup_interrupts(device); + + return err; +} + static int ioat3_reset_hw(struct ioat_chan_common *chan) { /* throw away whatever the channel was doing and get it @@ -1183,80 +1798,65 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan) chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET); writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); - /* clear any pending errors */ - err = pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); + if (device->version < IOAT_VER_3_3) { + /* clear any pending errors */ + err = pci_read_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, &chanerr); + if (err) { + dev_err(&pdev->dev, + "channel error register unreachable\n"); + return err; + } + pci_write_config_dword(pdev, + IOAT_PCI_CHANERR_INT_OFFSET, chanerr); + + /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit + * (workaround for spurious config parity error after restart) + */ + pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); + if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) { + pci_write_config_dword(pdev, + IOAT_PCI_DMAUNCERRSTS_OFFSET, + 0x10); + } + } + + err = ioat2_reset_sync(chan, msecs_to_jiffies(200)); if (err) { - dev_err(&pdev->dev, "channel error register unreachable\n"); + dev_err(&pdev->dev, "Failed to reset!\n"); return err; } - pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr); - /* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit - * (workaround for spurious config parity error after restart) - */ - pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id); - if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) - pci_write_config_dword(pdev, IOAT_PCI_DMAUNCERRSTS_OFFSET, 0x10); + if (device->irq_mode != IOAT_NOIRQ && is_bwd_ioat(pdev)) + err = ioat3_irq_reinit(device); - return ioat2_reset_sync(chan, msecs_to_jiffies(200)); + return err; } -static bool is_jf_ioat(struct pci_dev *pdev) +static void ioat3_intr_quirk(struct ioatdma_device *device) { - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_JSF0: - case PCI_DEVICE_ID_INTEL_IOAT_JSF1: - case PCI_DEVICE_ID_INTEL_IOAT_JSF2: - case PCI_DEVICE_ID_INTEL_IOAT_JSF3: - case PCI_DEVICE_ID_INTEL_IOAT_JSF4: - case PCI_DEVICE_ID_INTEL_IOAT_JSF5: - case PCI_DEVICE_ID_INTEL_IOAT_JSF6: - case 
PCI_DEVICE_ID_INTEL_IOAT_JSF7: - case PCI_DEVICE_ID_INTEL_IOAT_JSF8: - case PCI_DEVICE_ID_INTEL_IOAT_JSF9: - return true; - default: - return false; - } -} + struct dma_device *dma; + struct dma_chan *c; + struct ioat_chan_common *chan; + u32 errmask; -static bool is_snb_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_SNB0: - case PCI_DEVICE_ID_INTEL_IOAT_SNB1: - case PCI_DEVICE_ID_INTEL_IOAT_SNB2: - case PCI_DEVICE_ID_INTEL_IOAT_SNB3: - case PCI_DEVICE_ID_INTEL_IOAT_SNB4: - case PCI_DEVICE_ID_INTEL_IOAT_SNB5: - case PCI_DEVICE_ID_INTEL_IOAT_SNB6: - case PCI_DEVICE_ID_INTEL_IOAT_SNB7: - case PCI_DEVICE_ID_INTEL_IOAT_SNB8: - case PCI_DEVICE_ID_INTEL_IOAT_SNB9: - return true; - default: - return false; - } -} + dma = &device->common; -static bool is_ivb_ioat(struct pci_dev *pdev) -{ - switch (pdev->device) { - case PCI_DEVICE_ID_INTEL_IOAT_IVB0: - case PCI_DEVICE_ID_INTEL_IOAT_IVB1: - case PCI_DEVICE_ID_INTEL_IOAT_IVB2: - case PCI_DEVICE_ID_INTEL_IOAT_IVB3: - case PCI_DEVICE_ID_INTEL_IOAT_IVB4: - case PCI_DEVICE_ID_INTEL_IOAT_IVB5: - case PCI_DEVICE_ID_INTEL_IOAT_IVB6: - case PCI_DEVICE_ID_INTEL_IOAT_IVB7: - case PCI_DEVICE_ID_INTEL_IOAT_IVB8: - case PCI_DEVICE_ID_INTEL_IOAT_IVB9: - return true; - default: - return false; + /* + * if we have descriptor write back error status, we mask the + * error interrupts + */ + if (device->cap & IOAT_CAP_DWBES) { + list_for_each_entry(c, &dma->channels, device_node) { + chan = to_chan_common(c); + errmask = readl(chan->reg_base + + IOAT_CHANERR_MASK_OFFSET); + errmask |= IOAT_CHANERR_XOR_P_OR_CRC_ERR | + IOAT_CHANERR_XOR_Q_ERR; + writel(errmask, chan->reg_base + + IOAT_CHANERR_MASK_OFFSET); + } } - } int ioat3_dma_probe(struct ioatdma_device *device, int dca) @@ -1268,30 +1868,33 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) struct ioat_chan_common *chan; bool is_raid_device = false; int err; - u32 cap; device->enumerate_channels = ioat2_enumerate_channels; device->reset_hw = ioat3_reset_hw; device->self_test = ioat3_dma_self_test; + device->intr_quirk = ioat3_intr_quirk; dma = &device->common; dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock; dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - if (is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev)) + if (is_xeon_cb32(pdev)) dma->copy_align = 6; dma_cap_set(DMA_INTERRUPT, dma->cap_mask); dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; - cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); + device->cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET); + + if (is_bwd_noraid(pdev)) + device->cap &= ~(IOAT_CAP_XOR | IOAT_CAP_PQ | IOAT_CAP_RAID16SS); /* dca is incompatible with raid operations */ - if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) - cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); + if (dca_en && (device->cap & (IOAT_CAP_XOR|IOAT_CAP_PQ))) + device->cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ); - if (cap & IOAT_CAP_XOR) { + if (device->cap & IOAT_CAP_XOR) { is_raid_device = true; dma->max_xor = 8; dma->xor_align = 6; @@ -1302,53 +1905,86 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) dma_cap_set(DMA_XOR_VAL, dma->cap_mask); dma->device_prep_dma_xor_val = ioat3_prep_xor_val; } - if (cap & IOAT_CAP_PQ) { + + if (device->cap & IOAT_CAP_PQ) { is_raid_device = true; - dma_set_maxpq(dma, 8, 0); - dma->pq_align = 6; - dma_cap_set(DMA_PQ, dma->cap_mask); dma->device_prep_dma_pq = ioat3_prep_pq; - - 
dma_cap_set(DMA_PQ_VAL, dma->cap_mask); dma->device_prep_dma_pq_val = ioat3_prep_pq_val; + dma_cap_set(DMA_PQ, dma->cap_mask); + dma_cap_set(DMA_PQ_VAL, dma->cap_mask); - if (!(cap & IOAT_CAP_XOR)) { - dma->max_xor = 8; - dma->xor_align = 6; + if (device->cap & IOAT_CAP_RAID16SS) { + dma_set_maxpq(dma, 16, 0); + dma->pq_align = 0; + } else { + dma_set_maxpq(dma, 8, 0); + if (is_xeon_cb32(pdev)) + dma->pq_align = 6; + else + dma->pq_align = 0; + } - dma_cap_set(DMA_XOR, dma->cap_mask); + if (!(device->cap & IOAT_CAP_XOR)) { dma->device_prep_dma_xor = ioat3_prep_pqxor; - - dma_cap_set(DMA_XOR_VAL, dma->cap_mask); dma->device_prep_dma_xor_val = ioat3_prep_pqxor_val; + dma_cap_set(DMA_XOR, dma->cap_mask); + dma_cap_set(DMA_XOR_VAL, dma->cap_mask); + + if (device->cap & IOAT_CAP_RAID16SS) { + dma->max_xor = 16; + dma->xor_align = 0; + } else { + dma->max_xor = 8; + if (is_xeon_cb32(pdev)) + dma->xor_align = 6; + else + dma->xor_align = 0; + } } } - if (is_raid_device && (cap & IOAT_CAP_FILL_BLOCK)) { + + if (is_raid_device && (device->cap & IOAT_CAP_FILL_BLOCK)) { dma_cap_set(DMA_MEMSET, dma->cap_mask); dma->device_prep_dma_memset = ioat3_prep_memset_lock; } - if (is_raid_device) { - dma->device_tx_status = ioat3_tx_status; - device->cleanup_fn = ioat3_cleanup_event; - device->timer_fn = ioat3_timer_event; - } else { - dma->device_tx_status = ioat_dma_tx_status; - device->cleanup_fn = ioat2_cleanup_event; - device->timer_fn = ioat2_timer_event; + dma->device_tx_status = ioat3_tx_status; + device->cleanup_fn = ioat3_cleanup_event; + device->timer_fn = ioat3_timer_event; + + if (is_xeon_cb32(pdev)) { + dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); + dma->device_prep_dma_xor_val = NULL; + + dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); + dma->device_prep_dma_pq_val = NULL; } - #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA - dma_cap_clear(DMA_PQ_VAL, dma->cap_mask); - dma->device_prep_dma_pq_val = NULL; - #endif + /* starting with CB3.3 super extended descriptors are supported */ + if (device->cap & IOAT_CAP_RAID16SS) { + char pool_name[14]; + int i; + + /* allocate sw descriptor pool for SED */ + device->sed_pool = kmem_cache_create("ioat_sed", + sizeof(struct ioat_sed_ent), 0, 0, NULL); + if (!device->sed_pool) + return -ENOMEM; + + for (i = 0; i < MAX_SED_POOLS; i++) { + snprintf(pool_name, 14, "ioat_hw%d_sed", i); - #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA - dma_cap_clear(DMA_XOR_VAL, dma->cap_mask); - dma->device_prep_dma_xor_val = NULL; - #endif + /* allocate SED DMA pool */ + device->sed_hw_pool[i] = dma_pool_create(pool_name, + &pdev->dev, + SED_SIZE * (i + 1), 64, 0); + if (!device->sed_hw_pool[i]) + goto sed_pool_cleanup; + + } + } err = ioat_probe(device); if (err) @@ -1371,4 +2007,28 @@ int ioat3_dma_probe(struct ioatdma_device *device, int dca) device->dca = ioat3_dca_init(pdev, device->reg_base); return 0; + +sed_pool_cleanup: + if (device->sed_pool) { + int i; + kmem_cache_destroy(device->sed_pool); + + for (i = 0; i < MAX_SED_POOLS; i++) + if (device->sed_hw_pool[i]) + dma_pool_destroy(device->sed_hw_pool[i]); + } + + return -ENOMEM; +} + +void ioat3_dma_remove(struct ioatdma_device *device) +{ + if (device->sed_pool) { + int i; + kmem_cache_destroy(device->sed_pool); + + for (i = 0; i < MAX_SED_POOLS; i++) + if (device->sed_hw_pool[i]) + dma_pool_destroy(device->sed_hw_pool[i]); + } } diff --git a/trunk/drivers/dma/ioat/hw.h b/trunk/drivers/dma/ioat/hw.h index 7cb74c62c719..5ee57d402a6e 100644 --- a/trunk/drivers/dma/ioat/hw.h +++ b/trunk/drivers/dma/ioat/hw.h @@ -30,11 +30,6 @@ #define 
IOAT_PCI_DID_SCNB 0x65FF #define IOAT_PCI_DID_SNB 0x402F -#define IOAT_VER_1_2 0x12 /* Version 1.2 */ -#define IOAT_VER_2_0 0x20 /* Version 2.0 */ -#define IOAT_VER_3_0 0x30 /* Version 3.0 */ -#define IOAT_VER_3_2 0x32 /* Version 3.2 */ - #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 @@ -46,6 +41,29 @@ #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f +#define PCI_DEVICE_ID_INTEL_IOAT_HSW0 0x2f20 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW1 0x2f21 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW2 0x2f22 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW3 0x2f23 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW4 0x2f24 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW5 0x2f25 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW6 0x2f26 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW7 0x2f27 +#define PCI_DEVICE_ID_INTEL_IOAT_HSW8 0x2f2e +#define PCI_DEVICE_ID_INTEL_IOAT_HSW9 0x2f2f + +#define PCI_DEVICE_ID_INTEL_IOAT_BWD0 0x0C50 +#define PCI_DEVICE_ID_INTEL_IOAT_BWD1 0x0C51 +#define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52 +#define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53 + +#define IOAT_VER_1_2 0x12 /* Version 1.2 */ +#define IOAT_VER_2_0 0x20 /* Version 2.0 */ +#define IOAT_VER_3_0 0x30 /* Version 3.0 */ +#define IOAT_VER_3_2 0x32 /* Version 3.2 */ +#define IOAT_VER_3_3 0x33 /* Version 3.3 */ + + int system_has_dca_enabled(struct pci_dev *pdev); struct ioat_dma_descriptor { @@ -147,7 +165,17 @@ struct ioat_xor_ext_descriptor { }; struct ioat_pq_descriptor { - uint32_t size; + union { + uint32_t size; + uint32_t dwbes; + struct { + unsigned int rsvd:25; + unsigned int p_val_err:1; + unsigned int q_val_err:1; + unsigned int rsvd1:4; + unsigned int wbes:1; + } dwbes_f; + }; union { uint32_t ctl; struct { @@ -162,9 +190,14 @@ struct ioat_pq_descriptor { unsigned int hint:1; unsigned int p_disable:1; unsigned int q_disable:1; - unsigned int rsvd:11; + unsigned int rsvd2:2; + unsigned int wb_en:1; + unsigned int prl_en:1; + unsigned int rsvd3:7; #define IOAT_OP_PQ 0x89 #define IOAT_OP_PQ_VAL 0x8a + #define IOAT_OP_PQ_16S 0xa0 + #define IOAT_OP_PQ_VAL_16S 0xa1 unsigned int op:8; } ctl_f; }; @@ -172,7 +205,10 @@ struct ioat_pq_descriptor { uint64_t p_addr; uint64_t next; uint64_t src_addr2; - uint64_t src_addr3; + union { + uint64_t src_addr3; + uint64_t sed_addr; + }; uint8_t coef[8]; uint64_t q_addr; }; @@ -221,4 +257,40 @@ struct ioat_pq_update_descriptor { struct ioat_raw_descriptor { uint64_t field[8]; }; + +struct ioat_pq16a_descriptor { + uint8_t coef[8]; + uint64_t src_addr3; + uint64_t src_addr4; + uint64_t src_addr5; + uint64_t src_addr6; + uint64_t src_addr7; + uint64_t src_addr8; + uint64_t src_addr9; +}; + +struct ioat_pq16b_descriptor { + uint64_t src_addr10; + uint64_t src_addr11; + uint64_t src_addr12; + uint64_t src_addr13; + uint64_t src_addr14; + uint64_t src_addr15; + uint64_t src_addr16; + uint64_t rsvd; +}; + +union ioat_sed_pq_descriptor { + struct ioat_pq16a_descriptor a; + struct ioat_pq16b_descriptor b; +}; + +#define SED_SIZE 64 + +struct ioat_sed_raw_descriptor { + uint64_t a[8]; + uint64_t b[8]; + uint64_t c[8]; +}; + #endif diff --git a/trunk/drivers/dma/ioat/pci.c b/trunk/drivers/dma/ioat/pci.c index 71c7ecd80fac..2c8d560e6334 100644 --- a/trunk/drivers/dma/ioat/pci.c +++ b/trunk/drivers/dma/ioat/pci.c @@ -94,6 +94,23 @@ static struct pci_device_id ioat_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, + { PCI_VDEVICE(INTEL, 
PCI_DEVICE_ID_INTEL_IOAT_HSW0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW4) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW5) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW6) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW7) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW8) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_HSW9) }, + + /* I/OAT v3.3 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, + { 0, } }; MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); @@ -190,6 +207,9 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; + if (device->version >= IOAT_VER_3_0) + ioat3_dma_remove(device); + dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); diff --git a/trunk/drivers/dma/ioat/registers.h b/trunk/drivers/dma/ioat/registers.h index 1391798542b6..2f1cfa0f1f47 100644 --- a/trunk/drivers/dma/ioat/registers.h +++ b/trunk/drivers/dma/ioat/registers.h @@ -79,6 +79,8 @@ #define IOAT_CAP_APIC 0x00000080 #define IOAT_CAP_XOR 0x00000100 #define IOAT_CAP_PQ 0x00000200 +#define IOAT_CAP_DWBES 0x00002000 +#define IOAT_CAP_RAID16SS 0x00020000 #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ @@ -93,6 +95,8 @@ #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 #define IOAT_CHANCTRL_INT_REARM 0x0001 #define IOAT_CHANCTRL_RUN (IOAT_CHANCTRL_INT_REARM |\ + IOAT_CHANCTRL_ERR_INT_EN |\ + IOAT_CHANCTRL_ERR_COMPLETION_EN |\ IOAT_CHANCTRL_ANY_ERR_ABORT_EN) #define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatibility */ diff --git a/trunk/drivers/dma/ipu/ipu_idmac.c b/trunk/drivers/dma/ipu/ipu_idmac.c index 8c61d17a86bf..d39c2cd0795d 100644 --- a/trunk/drivers/dma/ipu/ipu_idmac.c +++ b/trunk/drivers/dma/ipu/ipu_idmac.c @@ -1642,7 +1642,7 @@ static int __init ipu_idmac_init(struct ipu *ipu) return dma_async_device_register(&idmac->dma); } -static void __exit ipu_idmac_exit(struct ipu *ipu) +static void ipu_idmac_exit(struct ipu *ipu) { int i; struct idmac *idmac = &ipu->idmac; @@ -1756,7 +1756,7 @@ static int __init ipu_probe(struct platform_device *pdev) return ret; } -static int __exit ipu_remove(struct platform_device *pdev) +static int ipu_remove(struct platform_device *pdev) { struct ipu *ipu = platform_get_drvdata(pdev); @@ -1781,7 +1781,7 @@ static struct platform_driver ipu_platform_driver = { .name = "ipu-core", .owner = THIS_MODULE, }, - .remove = __exit_p(ipu_remove), + .remove = ipu_remove, }; static int __init ipu_init(void) diff --git a/trunk/drivers/dma/of-dma.c b/trunk/drivers/dma/of-dma.c index 69d04d28b1ef..7aa0864cd487 100644 --- a/trunk/drivers/dma/of-dma.c +++ b/trunk/drivers/dma/of-dma.c @@ -13,43 +13,31 @@ #include #include #include -#include +#include #include #include #include static LIST_HEAD(of_dma_list); -static DEFINE_SPINLOCK(of_dma_lock); +static DEFINE_MUTEX(of_dma_lock); /** - * of_dma_get_controller - Get a DMA controller in DT DMA helpers list + * of_dma_find_controller - Get a DMA controller in DT DMA helpers list * @dma_spec: pointer to DMA specifier as found in the device tree * * Finds a DMA controller with matching device node and number for dma cells - * in a 
list of registered DMA controllers. If a match is found the use_count - * variable is increased and a valid pointer to the DMA data stored is retuned. - * A NULL pointer is returned if no match is found. + * in a list of registered DMA controllers. If a match is found a valid pointer + * to the DMA data stored is retuned. A NULL pointer is returned if no match is + * found. */ -static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec) +static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec) { struct of_dma *ofdma; - spin_lock(&of_dma_lock); - - if (list_empty(&of_dma_list)) { - spin_unlock(&of_dma_lock); - return NULL; - } - list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) if ((ofdma->of_node == dma_spec->np) && - (ofdma->of_dma_nbcells == dma_spec->args_count)) { - ofdma->use_count++; - spin_unlock(&of_dma_lock); + (ofdma->of_dma_nbcells == dma_spec->args_count)) return ofdma; - } - - spin_unlock(&of_dma_lock); pr_debug("%s: can't find DMA controller %s\n", __func__, dma_spec->np->full_name); @@ -57,22 +45,6 @@ static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec) return NULL; } -/** - * of_dma_put_controller - Decrement use count for a registered DMA controller - * @of_dma: pointer to DMA controller data - * - * Decrements the use_count variable in the DMA data structure. This function - * should be called only when a valid pointer is returned from - * of_dma_get_controller() and no further accesses to data referenced by that - * pointer are needed. - */ -static void of_dma_put_controller(struct of_dma *ofdma) -{ - spin_lock(&of_dma_lock); - ofdma->use_count--; - spin_unlock(&of_dma_lock); -} - /** * of_dma_controller_register - Register a DMA controller to DT DMA helpers * @np: device node of DMA controller @@ -93,6 +65,7 @@ int of_dma_controller_register(struct device_node *np, { struct of_dma *ofdma; int nbcells; + const __be32 *prop; if (!np || !of_dma_xlate) { pr_err("%s: not enough information provided\n", __func__); @@ -103,8 +76,11 @@ int of_dma_controller_register(struct device_node *np, if (!ofdma) return -ENOMEM; - nbcells = be32_to_cpup(of_get_property(np, "#dma-cells", NULL)); - if (!nbcells) { + prop = of_get_property(np, "#dma-cells", NULL); + if (prop) + nbcells = be32_to_cpup(prop); + + if (!prop || !nbcells) { pr_err("%s: #dma-cells property is missing or invalid\n", __func__); kfree(ofdma); @@ -115,12 +91,11 @@ int of_dma_controller_register(struct device_node *np, ofdma->of_dma_nbcells = nbcells; ofdma->of_dma_xlate = of_dma_xlate; ofdma->of_dma_data = data; - ofdma->use_count = 0; /* Now queue of_dma controller structure in list */ - spin_lock(&of_dma_lock); + mutex_lock(&of_dma_lock); list_add_tail(&ofdma->of_dma_controllers, &of_dma_list); - spin_unlock(&of_dma_lock); + mutex_unlock(&of_dma_lock); return 0; } @@ -132,32 +107,20 @@ EXPORT_SYMBOL_GPL(of_dma_controller_register); * * Memory allocated by of_dma_controller_register() is freed here. 
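
/*
 * Illustrative sketch, not part of the patch: how a DMA controller
 * driver is expected to use the reworked DT helpers above.  It mirrors
 * the omap-dma.c hunk further down; the "foo" driver, its filter
 * function and the elided dma_device setup are placeholders, not real
 * kernel code.
 */
#include <linux/dmaengine.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>

static bool foo_dma_filter_fn(struct dma_chan *chan, void *param)
{
	return true;			/* placeholder: accept any channel */
}

static struct of_dma_filter_info foo_dma_info = {
	.filter_fn = foo_dma_filter_fn,
};

static int foo_dma_probe(struct platform_device *pdev)
{
	int rc;

	/* ... allocate and register the dma_device, fill foo_dma_info.dma_cap ... */

	if (pdev->dev.of_node) {
		/* fails unless the node carries a sane #dma-cells property */
		rc = of_dma_controller_register(pdev->dev.of_node,
						of_dma_simple_xlate,
						&foo_dma_info);
		if (rc)
			return rc;
	}
	return 0;
}

static int foo_dma_remove(struct platform_device *pdev)
{
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	/* of_dma_controller_free() returns void after this patch; nothing to check */
	return 0;
}
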
*/ -int of_dma_controller_free(struct device_node *np) +void of_dma_controller_free(struct device_node *np) { struct of_dma *ofdma; - spin_lock(&of_dma_lock); - - if (list_empty(&of_dma_list)) { - spin_unlock(&of_dma_lock); - return -ENODEV; - } + mutex_lock(&of_dma_lock); list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers) if (ofdma->of_node == np) { - if (ofdma->use_count) { - spin_unlock(&of_dma_lock); - return -EBUSY; - } - list_del(&ofdma->of_dma_controllers); - spin_unlock(&of_dma_lock); kfree(ofdma); - return 0; + break; } - spin_unlock(&of_dma_lock); - return -ENODEV; + mutex_unlock(&of_dma_lock); } EXPORT_SYMBOL_GPL(of_dma_controller_free); @@ -172,8 +135,8 @@ EXPORT_SYMBOL_GPL(of_dma_controller_free); * specifiers, matches the name provided. Returns 0 if the name matches and * a valid pointer to the DMA specifier is found. Otherwise returns -ENODEV. */ -static int of_dma_match_channel(struct device_node *np, char *name, int index, - struct of_phandle_args *dma_spec) +static int of_dma_match_channel(struct device_node *np, const char *name, + int index, struct of_phandle_args *dma_spec) { const char *s; @@ -198,7 +161,7 @@ static int of_dma_match_channel(struct device_node *np, char *name, int index, * Returns pointer to appropriate dma channel on success or NULL on error. */ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, - char *name) + const char *name) { struct of_phandle_args dma_spec; struct of_dma *ofdma; @@ -220,14 +183,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, if (of_dma_match_channel(np, name, i, &dma_spec)) continue; - ofdma = of_dma_get_controller(&dma_spec); - - if (!ofdma) - continue; + mutex_lock(&of_dma_lock); + ofdma = of_dma_find_controller(&dma_spec); - chan = ofdma->of_dma_xlate(&dma_spec, ofdma); + if (ofdma) + chan = ofdma->of_dma_xlate(&dma_spec, ofdma); + else + chan = NULL; - of_dma_put_controller(ofdma); + mutex_unlock(&of_dma_lock); of_node_put(dma_spec.np); diff --git a/trunk/drivers/dma/omap-dma.c b/trunk/drivers/dma/omap-dma.c index 08b43bf37158..ec3fc4fd9160 100644 --- a/trunk/drivers/dma/omap-dma.c +++ b/trunk/drivers/dma/omap-dma.c @@ -16,6 +16,8 @@ #include #include #include +#include +#include #include "virt-dma.h" @@ -67,6 +69,10 @@ static const unsigned es_bytes[] = { [OMAP_DMA_DATA_TYPE_S32] = 4, }; +static struct of_dma_filter_info omap_dma_info = { + .filter_fn = omap_dma_filter_fn, +}; + static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) { return container_of(d, struct omap_dmadev, ddev); @@ -629,8 +635,22 @@ static int omap_dma_probe(struct platform_device *pdev) pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", rc); omap_dma_free(od); - } else { - platform_set_drvdata(pdev, od); + return rc; + } + + platform_set_drvdata(pdev, od); + + if (pdev->dev.of_node) { + omap_dma_info.dma_cap = od->ddev.cap_mask; + + /* Device-tree DMA controller registration */ + rc = of_dma_controller_register(pdev->dev.of_node, + of_dma_simple_xlate, &omap_dma_info); + if (rc) { + pr_warn("OMAP-DMA: failed to register DMA controller\n"); + dma_async_device_unregister(&od->ddev); + omap_dma_free(od); + } } dev_info(&pdev->dev, "OMAP DMA engine driver\n"); @@ -642,18 +662,32 @@ static int omap_dma_remove(struct platform_device *pdev) { struct omap_dmadev *od = platform_get_drvdata(pdev); + if (pdev->dev.of_node) + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&od->ddev); omap_dma_free(od); return 0; } +static const struct 
of_device_id omap_dma_match[] = { + { .compatible = "ti,omap2420-sdma", }, + { .compatible = "ti,omap2430-sdma", }, + { .compatible = "ti,omap3430-sdma", }, + { .compatible = "ti,omap3630-sdma", }, + { .compatible = "ti,omap4430-sdma", }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_dma_match); + static struct platform_driver omap_dma_driver = { .probe = omap_dma_probe, .remove = omap_dma_remove, .driver = { .name = "omap-dma-engine", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(omap_dma_match), }, }; diff --git a/trunk/drivers/dma/pch_dma.c b/trunk/drivers/dma/pch_dma.c index d01faeb0f27c..ce3dc3e9688c 100644 --- a/trunk/drivers/dma/pch_dma.c +++ b/trunk/drivers/dma/pch_dma.c @@ -476,7 +476,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); if (!ret) { - ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); + ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC); if (ret) { spin_lock(&pd_chan->lock); pd_chan->descs_allocated++; diff --git a/trunk/drivers/dma/pl330.c b/trunk/drivers/dma/pl330.c index 5dbc5946c4c3..a17553f7c028 100644 --- a/trunk/drivers/dma/pl330.c +++ b/trunk/drivers/dma/pl330.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "dmaengine.h" #define PL330_MAX_CHAN 8 @@ -2288,13 +2289,12 @@ static inline void fill_queue(struct dma_pl330_chan *pch) /* If already submitted */ if (desc->status == BUSY) - break; + continue; ret = pl330_submit_req(pch->pl330_chid, &desc->req); if (!ret) { desc->status = BUSY; - break; } else if (ret == -EAGAIN) { /* QFull or DMAC Dying */ break; @@ -2904,9 +2904,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pi->mcbufsz = pdat ? pdat->mcbuf_sz : 0; res = &adev->res; - pi->base = devm_request_and_ioremap(&adev->dev, res); - if (!pi->base) - return -ENXIO; + pi->base = devm_ioremap_resource(&adev->dev, res); + if (IS_ERR(pi->base)) + return PTR_ERR(pi->base); amba_set_drvdata(adev, pdmac); diff --git a/trunk/drivers/dma/sh/Kconfig b/trunk/drivers/dma/sh/Kconfig new file mode 100644 index 000000000000..5c1dee20c13e --- /dev/null +++ b/trunk/drivers/dma/sh/Kconfig @@ -0,0 +1,24 @@ +# +# DMA engine configuration for sh +# + +config SH_DMAE_BASE + bool "Renesas SuperH DMA Engine support" + depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) + depends on !SH_DMA_API + default y + select DMA_ENGINE + help + Enable support for the Renesas SuperH DMA controllers. + +config SH_DMAE + tristate "Renesas SuperH DMAC support" + depends on SH_DMAE_BASE + help + Enable support for the Renesas SuperH DMA controllers. + +config SUDMAC + tristate "Renesas SUDMAC support" + depends on SH_DMAE_BASE + help + Enable support for the Renesas SUDMAC controllers. diff --git a/trunk/drivers/dma/sh/Makefile b/trunk/drivers/dma/sh/Makefile index 54ae9572b0ac..c07ca4612e46 100644 --- a/trunk/drivers/dma/sh/Makefile +++ b/trunk/drivers/dma/sh/Makefile @@ -1,2 +1,3 @@ -obj-$(CONFIG_SH_DMAE) += shdma-base.o +obj-$(CONFIG_SH_DMAE_BASE) += shdma-base.o obj-$(CONFIG_SH_DMAE) += shdma.o +obj-$(CONFIG_SUDMAC) += sudmac.o diff --git a/trunk/drivers/dma/sh/sudmac.c b/trunk/drivers/dma/sh/sudmac.c new file mode 100644 index 000000000000..e7c94bbddb53 --- /dev/null +++ b/trunk/drivers/dma/sh/sudmac.c @@ -0,0 +1,428 @@ +/* + * Renesas SUDMAC support + * + * Copyright (C) 2013 Renesas Solutions Corp. 
+ * + * based on drivers/dma/sh/shdma.c: + * Copyright (C) 2011-2012 Guennadi Liakhovetski + * Copyright (C) 2009 Nobuhiro Iwamatsu + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include + +struct sudmac_chan { + struct shdma_chan shdma_chan; + void __iomem *base; + char dev_id[16]; /* unique name per DMAC of channel */ + + u32 offset; /* for CFG, BA, BBC, CA, CBC, DEN */ + u32 cfg; + u32 dint_end_bit; +}; + +struct sudmac_device { + struct shdma_dev shdma_dev; + struct sudmac_pdata *pdata; + void __iomem *chan_reg; +}; + +struct sudmac_regs { + u32 base_addr; + u32 base_byte_count; +}; + +struct sudmac_desc { + struct sudmac_regs hw; + struct shdma_desc shdma_desc; +}; + +#define to_chan(schan) container_of(schan, struct sudmac_chan, shdma_chan) +#define to_desc(sdesc) container_of(sdesc, struct sudmac_desc, shdma_desc) +#define to_sdev(sc) container_of(sc->shdma_chan.dma_chan.device, \ + struct sudmac_device, shdma_dev.dma_dev) + +/* SUDMAC register */ +#define SUDMAC_CH0CFG 0x00 +#define SUDMAC_CH0BA 0x10 +#define SUDMAC_CH0BBC 0x18 +#define SUDMAC_CH0CA 0x20 +#define SUDMAC_CH0CBC 0x28 +#define SUDMAC_CH0DEN 0x30 +#define SUDMAC_DSTSCLR 0x38 +#define SUDMAC_DBUFCTRL 0x3C +#define SUDMAC_DINTCTRL 0x40 +#define SUDMAC_DINTSTS 0x44 +#define SUDMAC_DINTSTSCLR 0x48 +#define SUDMAC_CH0SHCTRL 0x50 + +/* Definitions for the sudmac_channel.config */ +#define SUDMAC_SENDBUFM 0x1000 /* b12: Transmit Buffer Mode */ +#define SUDMAC_RCVENDM 0x0100 /* b8: Receive Data Transfer End Mode */ +#define SUDMAC_LBA_WAIT 0x0030 /* b5-4: Local Bus Access Wait */ + +/* Definitions for the sudmac_channel.dint_end_bit */ +#define SUDMAC_CH1ENDE 0x0002 /* b1: Ch1 DMA Transfer End Int Enable */ +#define SUDMAC_CH0ENDE 0x0001 /* b0: Ch0 DMA Transfer End Int Enable */ + +#define SUDMAC_DRV_NAME "sudmac" + +static void sudmac_writel(struct sudmac_chan *sc, u32 data, u32 reg) +{ + iowrite32(data, sc->base + reg); +} + +static u32 sudmac_readl(struct sudmac_chan *sc, u32 reg) +{ + return ioread32(sc->base + reg); +} + +static bool sudmac_is_busy(struct sudmac_chan *sc) +{ + u32 den = sudmac_readl(sc, SUDMAC_CH0DEN + sc->offset); + + if (den) + return true; /* working */ + + return false; /* waiting */ +} + +static void sudmac_set_reg(struct sudmac_chan *sc, struct sudmac_regs *hw, + struct shdma_desc *sdesc) +{ + sudmac_writel(sc, sc->cfg, SUDMAC_CH0CFG + sc->offset); + sudmac_writel(sc, hw->base_addr, SUDMAC_CH0BA + sc->offset); + sudmac_writel(sc, hw->base_byte_count, SUDMAC_CH0BBC + sc->offset); +} + +static void sudmac_start(struct sudmac_chan *sc) +{ + u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); + + sudmac_writel(sc, dintctrl | sc->dint_end_bit, SUDMAC_DINTCTRL); + sudmac_writel(sc, 1, SUDMAC_CH0DEN + sc->offset); +} + +static void sudmac_start_xfer(struct shdma_chan *schan, + struct shdma_desc *sdesc) +{ + struct sudmac_chan *sc = to_chan(schan); + struct sudmac_desc *sd = to_desc(sdesc); + + sudmac_set_reg(sc, &sd->hw, sdesc); + sudmac_start(sc); +} + +static bool sudmac_channel_busy(struct shdma_chan *schan) +{ + struct sudmac_chan *sc = to_chan(schan); + + return sudmac_is_busy(sc); +} + +static void sudmac_setup_xfer(struct shdma_chan *schan, int slave_id) 
+{ +} + +static const struct sudmac_slave_config *sudmac_find_slave( + struct sudmac_chan *sc, int slave_id) +{ + struct sudmac_device *sdev = to_sdev(sc); + struct sudmac_pdata *pdata = sdev->pdata; + const struct sudmac_slave_config *cfg; + int i; + + for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) + if (cfg->slave_id == slave_id) + return cfg; + + return NULL; +} + +static int sudmac_set_slave(struct shdma_chan *schan, int slave_id, bool try) +{ + struct sudmac_chan *sc = to_chan(schan); + const struct sudmac_slave_config *cfg = sudmac_find_slave(sc, slave_id); + + if (!cfg) + return -ENODEV; + + return 0; +} + +static inline void sudmac_dma_halt(struct sudmac_chan *sc) +{ + u32 dintctrl = sudmac_readl(sc, SUDMAC_DINTCTRL); + + sudmac_writel(sc, 0, SUDMAC_CH0DEN + sc->offset); + sudmac_writel(sc, dintctrl & ~sc->dint_end_bit, SUDMAC_DINTCTRL); + sudmac_writel(sc, sc->dint_end_bit, SUDMAC_DINTSTSCLR); +} + +static int sudmac_desc_setup(struct shdma_chan *schan, + struct shdma_desc *sdesc, + dma_addr_t src, dma_addr_t dst, size_t *len) +{ + struct sudmac_chan *sc = to_chan(schan); + struct sudmac_desc *sd = to_desc(sdesc); + + dev_dbg(sc->shdma_chan.dev, "%s: src=%x, dst=%x, len=%d\n", + __func__, src, dst, *len); + + if (*len > schan->max_xfer_len) + *len = schan->max_xfer_len; + + if (dst) + sd->hw.base_addr = dst; + else if (src) + sd->hw.base_addr = src; + sd->hw.base_byte_count = *len; + + return 0; +} + +static void sudmac_halt(struct shdma_chan *schan) +{ + struct sudmac_chan *sc = to_chan(schan); + + sudmac_dma_halt(sc); +} + +static bool sudmac_chan_irq(struct shdma_chan *schan, int irq) +{ + struct sudmac_chan *sc = to_chan(schan); + u32 dintsts = sudmac_readl(sc, SUDMAC_DINTSTS); + + if (!(dintsts & sc->dint_end_bit)) + return false; + + /* DMA stop */ + sudmac_dma_halt(sc); + + return true; +} + +static size_t sudmac_get_partial(struct shdma_chan *schan, + struct shdma_desc *sdesc) +{ + struct sudmac_chan *sc = to_chan(schan); + struct sudmac_desc *sd = to_desc(sdesc); + u32 current_byte_count = sudmac_readl(sc, SUDMAC_CH0CBC + sc->offset); + + return sd->hw.base_byte_count - current_byte_count; +} + +static bool sudmac_desc_completed(struct shdma_chan *schan, + struct shdma_desc *sdesc) +{ + struct sudmac_chan *sc = to_chan(schan); + struct sudmac_desc *sd = to_desc(sdesc); + u32 current_addr = sudmac_readl(sc, SUDMAC_CH0CA + sc->offset); + + return sd->hw.base_addr + sd->hw.base_byte_count == current_addr; +} + +static int sudmac_chan_probe(struct sudmac_device *su_dev, int id, int irq, + unsigned long flags) +{ + struct shdma_dev *sdev = &su_dev->shdma_dev; + struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); + struct sudmac_chan *sc; + struct shdma_chan *schan; + int err; + + sc = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_chan), GFP_KERNEL); + if (!sc) { + dev_err(sdev->dma_dev.dev, + "No free memory for allocating dma channels!\n"); + return -ENOMEM; + } + + schan = &sc->shdma_chan; + schan->max_xfer_len = 64 * 1024 * 1024 - 1; + + shdma_chan_probe(sdev, schan, id); + + sc->base = su_dev->chan_reg; + + /* get platform_data */ + sc->offset = su_dev->pdata->channel->offset; + if (su_dev->pdata->channel->config & SUDMAC_TX_BUFFER_MODE) + sc->cfg |= SUDMAC_SENDBUFM; + if (su_dev->pdata->channel->config & SUDMAC_RX_END_MODE) + sc->cfg |= SUDMAC_RCVENDM; + sc->cfg |= (su_dev->pdata->channel->wait << 4) & SUDMAC_LBA_WAIT; + + if (su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH0) + sc->dint_end_bit |= SUDMAC_CH0ENDE; + if 
(su_dev->pdata->channel->dint_end_bit & SUDMAC_DMA_BIT_CH1) + sc->dint_end_bit |= SUDMAC_CH1ENDE; + + /* set up channel irq */ + if (pdev->id >= 0) + snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d.%d", + pdev->id, id); + else + snprintf(sc->dev_id, sizeof(sc->dev_id), "sudmac%d", id); + + err = shdma_request_irq(schan, irq, flags, sc->dev_id); + if (err) { + dev_err(sdev->dma_dev.dev, + "DMA channel %d request_irq failed %d\n", id, err); + goto err_no_irq; + } + + return 0; + +err_no_irq: + /* remove from dmaengine device node */ + shdma_chan_remove(schan); + return err; +} + +static void sudmac_chan_remove(struct sudmac_device *su_dev) +{ + struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; + struct shdma_chan *schan; + int i; + + shdma_for_each_chan(schan, &su_dev->shdma_dev, i) { + struct sudmac_chan *sc = to_chan(schan); + + BUG_ON(!schan); + + shdma_free_irq(&sc->shdma_chan); + shdma_chan_remove(schan); + } + dma_dev->chancnt = 0; +} + +static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) +{ + /* SUDMAC doesn't need the address */ + return 0; +} + +static struct shdma_desc *sudmac_embedded_desc(void *buf, int i) +{ + return &((struct sudmac_desc *)buf)[i].shdma_desc; +} + +static const struct shdma_ops sudmac_shdma_ops = { + .desc_completed = sudmac_desc_completed, + .halt_channel = sudmac_halt, + .channel_busy = sudmac_channel_busy, + .slave_addr = sudmac_slave_addr, + .desc_setup = sudmac_desc_setup, + .set_slave = sudmac_set_slave, + .setup_xfer = sudmac_setup_xfer, + .start_xfer = sudmac_start_xfer, + .embedded_desc = sudmac_embedded_desc, + .chan_irq = sudmac_chan_irq, + .get_partial = sudmac_get_partial, +}; + +static int sudmac_probe(struct platform_device *pdev) +{ + struct sudmac_pdata *pdata = pdev->dev.platform_data; + int err, i; + struct sudmac_device *su_dev; + struct dma_device *dma_dev; + struct resource *chan, *irq_res; + + /* get platform data */ + if (!pdata) + return -ENODEV; + + chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!chan || !irq_res) + return -ENODEV; + + err = -ENOMEM; + su_dev = devm_kzalloc(&pdev->dev, sizeof(struct sudmac_device), + GFP_KERNEL); + if (!su_dev) { + dev_err(&pdev->dev, "Not enough memory\n"); + return err; + } + + dma_dev = &su_dev->shdma_dev.dma_dev; + + su_dev->chan_reg = devm_request_and_ioremap(&pdev->dev, chan); + if (!su_dev->chan_reg) + return err; + + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + + su_dev->shdma_dev.ops = &sudmac_shdma_ops; + su_dev->shdma_dev.desc_size = sizeof(struct sudmac_desc); + err = shdma_init(&pdev->dev, &su_dev->shdma_dev, pdata->channel_num); + if (err < 0) + return err; + + /* platform data */ + su_dev->pdata = pdev->dev.platform_data; + + platform_set_drvdata(pdev, su_dev); + + /* Create DMA Channel */ + for (i = 0; i < pdata->channel_num; i++) { + err = sudmac_chan_probe(su_dev, i, irq_res->start, IRQF_SHARED); + if (err) + goto chan_probe_err; + } + + err = dma_async_device_register(&su_dev->shdma_dev.dma_dev); + if (err < 0) + goto chan_probe_err; + + return err; + +chan_probe_err: + sudmac_chan_remove(su_dev); + + platform_set_drvdata(pdev, NULL); + shdma_cleanup(&su_dev->shdma_dev); + + return err; +} + +static int sudmac_remove(struct platform_device *pdev) +{ + struct sudmac_device *su_dev = platform_get_drvdata(pdev); + struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev; + + dma_async_device_unregister(dma_dev); + sudmac_chan_remove(su_dev); + shdma_cleanup(&su_dev->shdma_dev); + 
platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver sudmac_driver = { + .driver = { + .owner = THIS_MODULE, + .name = SUDMAC_DRV_NAME, + }, + .probe = sudmac_probe, + .remove = sudmac_remove, +}; +module_platform_driver(sudmac_driver); + +MODULE_AUTHOR("Yoshihiro Shimoda"); +MODULE_DESCRIPTION("Renesas SUDMAC driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:" SUDMAC_DRV_NAME); diff --git a/trunk/drivers/dma/sirf-dma.c b/trunk/drivers/dma/sirf-dma.c index 1d627e2391f4..1765a0a2736d 100644 --- a/trunk/drivers/dma/sirf-dma.c +++ b/trunk/drivers/dma/sirf-dma.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include "dmaengine.h" @@ -78,6 +79,7 @@ struct sirfsoc_dma { struct sirfsoc_dma_chan channels[SIRFSOC_DMA_CHANNELS]; void __iomem *base; int irq; + struct clk *clk; bool is_marco; }; @@ -639,6 +641,12 @@ static int sirfsoc_dma_probe(struct platform_device *op) return -EINVAL; } + sdma->clk = devm_clk_get(dev, NULL); + if (IS_ERR(sdma->clk)) { + dev_err(dev, "failed to get a clock.\n"); + return PTR_ERR(sdma->clk); + } + ret = of_address_to_resource(dn, 0, &res); if (ret) { dev_err(dev, "Error parsing memory region!\n"); @@ -698,6 +706,8 @@ static int sirfsoc_dma_probe(struct platform_device *op) tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma); + clk_prepare_enable(sdma->clk); + /* Register DMA engine */ dev_set_drvdata(dev, sdma); ret = dma_async_device_register(dma); @@ -720,6 +730,7 @@ static int sirfsoc_dma_remove(struct platform_device *op) struct device *dev = &op->dev; struct sirfsoc_dma *sdma = dev_get_drvdata(dev); + clk_disable_unprepare(sdma->clk); dma_async_device_unregister(&sdma->dma); free_irq(sdma->irq, sdma); irq_dispose_mapping(sdma->irq); @@ -742,7 +753,18 @@ static struct platform_driver sirfsoc_dma_driver = { }, }; -module_platform_driver(sirfsoc_dma_driver); +static __init int sirfsoc_dma_init(void) +{ + return platform_driver_register(&sirfsoc_dma_driver); +} + +static void __exit sirfsoc_dma_exit(void) +{ + platform_driver_unregister(&sirfsoc_dma_driver); +} + +subsys_initcall(sirfsoc_dma_init); +module_exit(sirfsoc_dma_exit); MODULE_AUTHOR("Rongjun Ying , " "Barry Song "); diff --git a/trunk/drivers/dma/tegra20-apb-dma.c b/trunk/drivers/dma/tegra20-apb-dma.c index fcee27eae1f6..33f59ecd256e 100644 --- a/trunk/drivers/dma/tegra20-apb-dma.c +++ b/trunk/drivers/dma/tegra20-apb-dma.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include @@ -199,6 +200,7 @@ struct tegra_dma_channel { /* Channel-slave specific configuration */ struct dma_slave_config dma_sconfig; + struct tegra_dma_channel_regs channel_reg; }; /* tegra_dma: Tegra DMA specific information */ @@ -1213,7 +1215,6 @@ static const struct tegra_dma_chip_data tegra20_dma_chip_data = { .support_channel_pause = false, }; -#if defined(CONFIG_OF) /* Tegra30 specific DMA controller information */ static const struct tegra_dma_chip_data tegra30_dma_chip_data = { .nr_channels = 32, @@ -1243,7 +1244,6 @@ static const struct of_device_id tegra_dma_of_match[] = { }, }; MODULE_DEVICE_TABLE(of, tegra_dma_of_match); -#endif static int tegra_dma_probe(struct platform_device *pdev) { @@ -1252,20 +1252,14 @@ static int tegra_dma_probe(struct platform_device *pdev) int ret; int i; const struct tegra_dma_chip_data *cdata = NULL; + const struct of_device_id *match; - if (pdev->dev.of_node) { - const struct of_device_id *match; - match = of_match_device(of_match_ptr(tegra_dma_of_match), - &pdev->dev); - if (!match) { - 
dev_err(&pdev->dev, "Error: No device match found\n"); - return -ENODEV; - } - cdata = match->data; - } else { - /* If no device tree then fallback to tegra20 */ - cdata = &tegra20_dma_chip_data; + match = of_match_device(tegra_dma_of_match, &pdev->dev); + if (!match) { + dev_err(&pdev->dev, "Error: No device match found\n"); + return -ENODEV; } + cdata = match->data; tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * sizeof(struct tegra_dma_channel), GFP_KERNEL); @@ -1279,11 +1273,6 @@ static int tegra_dma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, tdma); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "No mem resource for DMA\n"); - return -EINVAL; - } - tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(tdma->base_addr)) return PTR_ERR(tdma->base_addr); @@ -1448,11 +1437,74 @@ static int tegra_dma_runtime_resume(struct device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int tegra_dma_pm_suspend(struct device *dev) +{ + struct tegra_dma *tdma = dev_get_drvdata(dev); + int i; + int ret; + + /* Enable clock before accessing register */ + ret = tegra_dma_runtime_resume(dev); + if (ret < 0) + return ret; + + tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL); + for (i = 0; i < tdma->chip_data->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; + + ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); + ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR); + ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR); + ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ); + ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ); + } + + /* Disable clock */ + tegra_dma_runtime_suspend(dev); + return 0; +} + +static int tegra_dma_pm_resume(struct device *dev) +{ + struct tegra_dma *tdma = dev_get_drvdata(dev); + int i; + int ret; + + /* Enable clock before accessing register */ + ret = tegra_dma_runtime_resume(dev); + if (ret < 0) + return ret; + + tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen); + tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); + tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); + + for (i = 0; i < tdma->chip_data->nr_channels; i++) { + struct tegra_dma_channel *tdc = &tdma->channels[i]; + struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg; + + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq); + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr); + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq); + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr); + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, + (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB)); + } + + /* Disable clock */ + tegra_dma_runtime_suspend(dev); + return 0; +} +#endif + static const struct dev_pm_ops tegra_dma_dev_pm_ops = { #ifdef CONFIG_PM_RUNTIME .runtime_suspend = tegra_dma_runtime_suspend, .runtime_resume = tegra_dma_runtime_resume, #endif + SET_SYSTEM_SLEEP_PM_OPS(tegra_dma_pm_suspend, tegra_dma_pm_resume) }; static struct platform_driver tegra_dmac_driver = { @@ -1460,7 +1512,7 @@ static struct platform_driver tegra_dmac_driver = { .name = "tegra-apbdma", .owner = THIS_MODULE, .pm = &tegra_dma_dev_pm_ops, - .of_match_table = of_match_ptr(tegra_dma_of_match), + .of_match_table = tegra_dma_of_match, }, .probe = tegra_dma_probe, .remove = tegra_dma_remove, diff --git a/trunk/drivers/dma/timb_dma.c b/trunk/drivers/dma/timb_dma.c index 952f823901a6..26107ba6edb3 100644 --- 
a/trunk/drivers/dma/timb_dma.c +++ b/trunk/drivers/dma/timb_dma.c @@ -823,7 +823,7 @@ static struct platform_driver td_driver = { .owner = THIS_MODULE, }, .probe = td_probe, - .remove = __exit_p(td_remove), + .remove = td_remove, }; module_platform_driver(td_driver); diff --git a/trunk/drivers/dma/txx9dmac.c b/trunk/drivers/dma/txx9dmac.c index 913f55c76c99..a59fb4841d4c 100644 --- a/trunk/drivers/dma/txx9dmac.c +++ b/trunk/drivers/dma/txx9dmac.c @@ -1190,7 +1190,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev) return 0; } -static int __exit txx9dmac_chan_remove(struct platform_device *pdev) +static int txx9dmac_chan_remove(struct platform_device *pdev) { struct txx9dmac_chan *dc = platform_get_drvdata(pdev); @@ -1252,7 +1252,7 @@ static int __init txx9dmac_probe(struct platform_device *pdev) return 0; } -static int __exit txx9dmac_remove(struct platform_device *pdev) +static int txx9dmac_remove(struct platform_device *pdev) { struct txx9dmac_dev *ddev = platform_get_drvdata(pdev); @@ -1299,14 +1299,14 @@ static const struct dev_pm_ops txx9dmac_dev_pm_ops = { }; static struct platform_driver txx9dmac_chan_driver = { - .remove = __exit_p(txx9dmac_chan_remove), + .remove = txx9dmac_chan_remove, .driver = { .name = "txx9dmac-chan", }, }; static struct platform_driver txx9dmac_driver = { - .remove = __exit_p(txx9dmac_remove), + .remove = txx9dmac_remove, .shutdown = txx9dmac_shutdown, .driver = { .name = "txx9dmac", diff --git a/trunk/drivers/edac/edac_mc_sysfs.c b/trunk/drivers/edac/edac_mc_sysfs.c index 5899a76eec3b..67610a6ebf87 100644 --- a/trunk/drivers/edac/edac_mc_sysfs.c +++ b/trunk/drivers/edac/edac_mc_sysfs.c @@ -87,7 +87,7 @@ static struct device *mci_pdev; /* * various constants for Memory Controllers */ -static const char *mem_types[] = { +static const char * const mem_types[] = { [MEM_EMPTY] = "Empty", [MEM_RESERVED] = "Reserved", [MEM_UNKNOWN] = "Unknown", @@ -107,7 +107,7 @@ static const char *mem_types[] = { [MEM_RDDR3] = "Registered-DDR3" }; -static const char *dev_types[] = { +static const char * const dev_types[] = { [DEV_UNKNOWN] = "Unknown", [DEV_X1] = "x1", [DEV_X2] = "x2", @@ -118,7 +118,7 @@ static const char *dev_types[] = { [DEV_X64] = "x64" }; -static const char *edac_caps[] = { +static const char * const edac_caps[] = { [EDAC_UNKNOWN] = "Unknown", [EDAC_NONE] = "None", [EDAC_RESERVED] = "Reserved", @@ -327,17 +327,17 @@ static struct device_attribute *dynamic_csrow_dimm_attr[] = { }; /* possible dynamic channel ce_count attribute files */ -DEVICE_CHANNEL(ch0_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch0_ce_count, S_IRUGO, channel_ce_count_show, NULL, 0); -DEVICE_CHANNEL(ch1_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch1_ce_count, S_IRUGO, channel_ce_count_show, NULL, 1); -DEVICE_CHANNEL(ch2_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch2_ce_count, S_IRUGO, channel_ce_count_show, NULL, 2); -DEVICE_CHANNEL(ch3_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch3_ce_count, S_IRUGO, channel_ce_count_show, NULL, 3); -DEVICE_CHANNEL(ch4_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch4_ce_count, S_IRUGO, channel_ce_count_show, NULL, 4); -DEVICE_CHANNEL(ch5_ce_count, S_IRUGO | S_IWUSR, +DEVICE_CHANNEL(ch5_ce_count, S_IRUGO, channel_ce_count_show, NULL, 5); /* Total possible dynamic ce_count attribute file table */ diff --git a/trunk/drivers/extcon/Kconfig b/trunk/drivers/extcon/Kconfig index 5168a1324a65..3297301a42d4 100644 --- a/trunk/drivers/extcon/Kconfig +++ b/trunk/drivers/extcon/Kconfig @@ -16,7 +16,7 @@ comment "Extcon Device Drivers" 
config EXTCON_GPIO tristate "GPIO extcon support" - depends on GENERIC_GPIO + depends on GPIOLIB help Say Y here to enable GPIO based extcon support. Note that GPIO extcon supports single state per extcon instance. diff --git a/trunk/drivers/firewire/core-cdev.c b/trunk/drivers/firewire/core-cdev.c index 27ac423ab25e..7ef316fdc4d9 100644 --- a/trunk/drivers/firewire/core-cdev.c +++ b/trunk/drivers/firewire/core-cdev.c @@ -389,10 +389,8 @@ static void queue_bus_reset_event(struct client *client) struct bus_reset_event *e; e = kzalloc(sizeof(*e), GFP_KERNEL); - if (e == NULL) { - fw_notice(client->device->card, "out of memory when allocating event\n"); + if (e == NULL) return; - } fill_bus_reset_event(&e->reset, client); @@ -693,10 +691,9 @@ static void handle_request(struct fw_card *card, struct fw_request *request, r = kmalloc(sizeof(*r), GFP_ATOMIC); e = kmalloc(sizeof(*e), GFP_ATOMIC); - if (r == NULL || e == NULL) { - fw_notice(card, "out of memory when allocating event\n"); + if (r == NULL || e == NULL) goto failed; - } + r->card = card; r->request = request; r->data = payload; @@ -930,10 +927,9 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle, struct iso_interrupt_event *e; e = kmalloc(sizeof(*e) + header_length, GFP_ATOMIC); - if (e == NULL) { - fw_notice(context->card, "out of memory when allocating event\n"); + if (e == NULL) return; - } + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT; e->interrupt.closure = client->iso_closure; e->interrupt.cycle = cycle; @@ -950,10 +946,9 @@ static void iso_mc_callback(struct fw_iso_context *context, struct iso_interrupt_mc_event *e; e = kmalloc(sizeof(*e), GFP_ATOMIC); - if (e == NULL) { - fw_notice(context->card, "out of memory when allocating event\n"); + if (e == NULL) return; - } + e->interrupt.type = FW_CDEV_EVENT_ISO_INTERRUPT_MULTICHANNEL; e->interrupt.closure = client->iso_closure; e->interrupt.completed = fw_iso_buffer_lookup(&client->buffer, @@ -1366,8 +1361,7 @@ static int init_iso_resource(struct client *client, int ret; if ((request->channels == 0 && request->bandwidth == 0) || - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || - request->bandwidth < 0) + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) return -EINVAL; r = kmalloc(sizeof(*r), GFP_KERNEL); @@ -1582,10 +1576,9 @@ void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p) list_for_each_entry(client, &card->phy_receiver_list, phy_receiver_link) { e = kmalloc(sizeof(*e) + 8, GFP_ATOMIC); - if (e == NULL) { - fw_notice(card, "out of memory when allocating event\n"); + if (e == NULL) break; - } + e->phy_packet.closure = client->phy_receiver_closure; e->phy_packet.type = FW_CDEV_EVENT_PHY_PACKET_RECEIVED; e->phy_packet.rcode = RCODE_COMPLETE; diff --git a/trunk/drivers/firewire/core-device.c b/trunk/drivers/firewire/core-device.c index 03ce7d980c6a..664a6ff0a823 100644 --- a/trunk/drivers/firewire/core-device.c +++ b/trunk/drivers/firewire/core-device.c @@ -692,10 +692,8 @@ static void create_units(struct fw_device *device) * match the drivers id_tables against it. 
*/ unit = kzalloc(sizeof(*unit), GFP_KERNEL); - if (unit == NULL) { - fw_err(device->card, "out of memory for unit\n"); + if (unit == NULL) continue; - } unit->directory = ci.p + value - 1; unit->device.bus = &fw_bus_type; diff --git a/trunk/drivers/firewire/net.c b/trunk/drivers/firewire/net.c index 4d565365e476..815b0fcbe918 100644 --- a/trunk/drivers/firewire/net.c +++ b/trunk/drivers/firewire/net.c @@ -356,10 +356,8 @@ static struct fwnet_fragment_info *fwnet_frag_new( } new = kmalloc(sizeof(*new), GFP_ATOMIC); - if (!new) { - dev_err(&pd->skb->dev->dev, "out of memory\n"); + if (!new) return NULL; - } new->offset = offset; new->len = len; @@ -402,8 +400,6 @@ static struct fwnet_partial_datagram *fwnet_pd_new(struct net_device *net, fail_w_new: kfree(new); fail: - dev_err(&net->dev, "out of memory\n"); - return NULL; } @@ -609,7 +605,6 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len, skb = dev_alloc_skb(len + LL_RESERVED_SPACE(net)); if (unlikely(!skb)) { - dev_err(&net->dev, "out of memory\n"); net->stats.rx_dropped++; return -ENOMEM; diff --git a/trunk/drivers/firewire/ohci.c b/trunk/drivers/firewire/ohci.c index 45912e6e0ac2..9e1db6490b9a 100644 --- a/trunk/drivers/firewire/ohci.c +++ b/trunk/drivers/firewire/ohci.c @@ -54,6 +54,10 @@ #include "core.h" #include "ohci.h" +#define ohci_info(ohci, f, args...) dev_info(ohci->card.device, f, ##args) +#define ohci_notice(ohci, f, args...) dev_notice(ohci->card.device, f, ##args) +#define ohci_err(ohci, f, args...) dev_err(ohci->card.device, f, ##args) + #define DESCRIPTOR_OUTPUT_MORE 0 #define DESCRIPTOR_OUTPUT_LAST (1 << 12) #define DESCRIPTOR_INPUT_MORE (2 << 12) @@ -68,6 +72,8 @@ #define DESCRIPTOR_BRANCH_ALWAYS (3 << 2) #define DESCRIPTOR_WAIT (3 << 0) +#define DESCRIPTOR_CMD (0xf << 12) + struct descriptor { __le16 req_count; __le16 control; @@ -149,10 +155,11 @@ struct context { struct descriptor *last; /* - * The last descriptor in the DMA program. It contains the branch + * The last descriptor block in the DMA program. It contains the branch * address that must be updated upon appending a new descriptor. */ struct descriptor *prev; + int prev_z; descriptor_callback_t callback; @@ -270,7 +277,9 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define PCI_DEVICE_ID_TI_TSB12LV22 0x8009 #define PCI_DEVICE_ID_TI_TSB12LV26 0x8020 #define PCI_DEVICE_ID_TI_TSB82AA2 0x8025 +#define PCI_DEVICE_ID_VIA_VT630X 0x3044 #define PCI_VENDOR_ID_PINNACLE_SYSTEMS 0x11bd +#define PCI_REV_ID_VIA_VT6306 0x46 #define QUIRK_CYCLE_TIMER 1 #define QUIRK_RESET_PACKET 2 @@ -278,6 +287,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define QUIRK_NO_1394A 8 #define QUIRK_NO_MSI 16 #define QUIRK_TI_SLLZ059 32 +#define QUIRK_IR_WAKE 64 +#define QUIRK_PHY_LCTRL_TIMEOUT 128 /* In case of multiple matches in ohci_quirks[], only the first one is used. 
*/ static const struct { @@ -290,7 +301,10 @@ static const struct { QUIRK_BE_HEADERS}, {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, - QUIRK_NO_MSI}, + QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI}, + + {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID, + QUIRK_PHY_LCTRL_TIMEOUT}, {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, QUIRK_RESET_PACKET}, @@ -319,6 +333,9 @@ static const struct { {PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID, QUIRK_RESET_PACKET}, + {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306, + QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE}, + {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID, QUIRK_CYCLE_TIMER | QUIRK_NO_MSI}, }; @@ -333,6 +350,8 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" ", no 1394a enhancements = " __stringify(QUIRK_NO_1394A) ", disable MSI = " __stringify(QUIRK_NO_MSI) ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) + ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) + ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT) ")"); #define OHCI_PARAM_DEBUG_AT_AR 1 @@ -359,8 +378,7 @@ static void log_irqs(struct fw_ohci *ohci, u32 evt) !(evt & OHCI1394_busReset)) return; - dev_notice(ohci->card.device, - "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, + ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt, evt & OHCI1394_selfIDComplete ? " selfID" : "", evt & OHCI1394_RQPkt ? " AR_req" : "", evt & OHCI1394_RSPkt ? " AR_resp" : "", @@ -406,21 +424,19 @@ static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count) if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS))) return; - dev_notice(ohci->card.device, - "%d selfIDs, generation %d, local node ID %04x\n", - self_id_count, generation, ohci->node_id); + ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n", + self_id_count, generation, ohci->node_id); for (s = ohci->self_id_buffer; self_id_count--; ++s) if ((*s & 1 << 23) == 0) - dev_notice(ohci->card.device, - "selfID 0: %08x, phy %d [%c%c%c] " - "%s gc=%d %s %s%s%s\n", + ohci_notice(ohci, + "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n", *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2), speed[*s >> 14 & 3], *s >> 16 & 63, power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "", *s >> 11 & 1 ? "c" : "", *s & 2 ? 
"i" : ""); else - dev_notice(ohci->card.device, + ohci_notice(ohci, "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n", *s, *s >> 24 & 63, _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10), @@ -470,9 +486,8 @@ static void log_ar_at_event(struct fw_ohci *ohci, evt = 0x1f; if (evt == OHCI1394_evt_bus_reset) { - dev_notice(ohci->card.device, - "A%c evt_bus_reset, generation %d\n", - dir, (header[2] >> 16) & 0xff); + ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n", + dir, (header[2] >> 16) & 0xff); return; } @@ -491,32 +506,26 @@ static void log_ar_at_event(struct fw_ohci *ohci, switch (tcode) { case 0xa: - dev_notice(ohci->card.device, - "A%c %s, %s\n", - dir, evts[evt], tcodes[tcode]); + ohci_notice(ohci, "A%c %s, %s\n", + dir, evts[evt], tcodes[tcode]); break; case 0xe: - dev_notice(ohci->card.device, - "A%c %s, PHY %08x %08x\n", - dir, evts[evt], header[1], header[2]); + ohci_notice(ohci, "A%c %s, PHY %08x %08x\n", + dir, evts[evt], header[1], header[2]); break; case 0x0: case 0x1: case 0x4: case 0x5: case 0x9: - dev_notice(ohci->card.device, - "A%c spd %x tl %02x, " - "%04x -> %04x, %s, " - "%s, %04x%08x%s\n", - dir, speed, header[0] >> 10 & 0x3f, - header[1] >> 16, header[0] >> 16, evts[evt], - tcodes[tcode], header[1] & 0xffff, header[2], specific); + ohci_notice(ohci, + "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n", + dir, speed, header[0] >> 10 & 0x3f, + header[1] >> 16, header[0] >> 16, evts[evt], + tcodes[tcode], header[1] & 0xffff, header[2], specific); break; default: - dev_notice(ohci->card.device, - "A%c spd %x tl %02x, " - "%04x -> %04x, %s, " - "%s%s\n", - dir, speed, header[0] >> 10 & 0x3f, - header[1] >> 16, header[0] >> 16, evts[evt], - tcodes[tcode], specific); + ohci_notice(ohci, + "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n", + dir, speed, header[0] >> 10 & 0x3f, + header[1] >> 16, header[0] >> 16, evts[evt], + tcodes[tcode], specific); } } @@ -563,7 +572,8 @@ static int read_phy_reg(struct fw_ohci *ohci, int addr) if (i >= 3) msleep(1); } - dev_err(ohci->card.device, "failed to read phy reg\n"); + ohci_err(ohci, "failed to read phy reg %d\n", addr); + dump_stack(); return -EBUSY; } @@ -585,7 +595,8 @@ static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val) if (i >= 3) msleep(1); } - dev_err(ohci->card.device, "failed to write phy reg\n"); + ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val); + dump_stack(); return -EBUSY; } @@ -690,8 +701,7 @@ static void ar_context_abort(struct ar_context *ctx, const char *error_msg) reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); flush_writes(ohci); - dev_err(ohci->card.device, "AR error: %s; DMA stopped\n", - error_msg); + ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg); } /* FIXME: restart? 
*/ } @@ -1157,6 +1167,7 @@ static int context_init(struct context *ctx, struct fw_ohci *ohci, ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); ctx->last = ctx->buffer_tail->buffer; ctx->prev = ctx->buffer_tail->buffer; + ctx->prev_z = 1; return 0; } @@ -1221,14 +1232,35 @@ static void context_append(struct context *ctx, { dma_addr_t d_bus; struct descriptor_buffer *desc = ctx->buffer_tail; + struct descriptor *d_branch; d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d); desc->used += (z + extra) * sizeof(*d); wmb(); /* finish init of new descriptors before branch_address update */ - ctx->prev->branch_address = cpu_to_le32(d_bus | z); - ctx->prev = find_branch_descriptor(d, z); + + d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); + d_branch->branch_address = cpu_to_le32(d_bus | z); + + /* + * VT6306 incorrectly checks only the single descriptor at the + * CommandPtr when the wake bit is written, so if it's a + * multi-descriptor block starting with an INPUT_MORE, put a copy of + * the branch address in the first descriptor. + * + * Not doing this for transmit contexts since not sure how it interacts + * with skip addresses. + */ + if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && + d_branch != ctx->prev && + (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == + cpu_to_le16(DESCRIPTOR_INPUT_MORE)) { + ctx->prev->branch_address = cpu_to_le32(d_bus | z); + } + + ctx->prev = d; + ctx->prev_z = z; } static void context_stop(struct context *ctx) @@ -1248,7 +1280,7 @@ static void context_stop(struct context *ctx) if (i) udelay(10); } - dev_err(ohci->card.device, "DMA context still active (0x%08x)\n", reg); + ohci_err(ohci, "DMA context still active (0x%08x)\n", reg); } struct driver_data { @@ -1557,7 +1589,7 @@ static void handle_local_lock(struct fw_ohci *ohci, goto out; } - dev_err(ohci->card.device, "swap not done (CSR lock timeout)\n"); + ohci_err(ohci, "swap not done (CSR lock timeout)\n"); fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0); out: @@ -1632,8 +1664,7 @@ static void detect_dead_context(struct fw_ohci *ohci, ctl = reg_read(ohci, CONTROL_SET(regs)); if (ctl & CONTEXT_DEAD) - dev_err(ohci->card.device, - "DMA context %s has stopped, error code: %s\n", + ohci_err(ohci, "DMA context %s has stopped, error code: %s\n", name, evts[ctl & 0x1f]); } @@ -1815,8 +1846,8 @@ static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count) reg = reg_read(ohci, OHCI1394_NodeID); if (!(reg & OHCI1394_NodeID_idValid)) { - dev_notice(ohci->card.device, - "node ID not valid, new bus reset in progress\n"); + ohci_notice(ohci, + "node ID not valid, new bus reset in progress\n"); return -EBUSY; } self_id |= ((reg & 0x3f) << 24); /* phy ID */ @@ -1863,12 +1894,12 @@ static void bus_reset_work(struct work_struct *work) reg = reg_read(ohci, OHCI1394_NodeID); if (!(reg & OHCI1394_NodeID_idValid)) { - dev_notice(ohci->card.device, - "node ID not valid, new bus reset in progress\n"); + ohci_notice(ohci, + "node ID not valid, new bus reset in progress\n"); return; } if ((reg & OHCI1394_NodeID_nodeNumber) == 63) { - dev_notice(ohci->card.device, "malconfigured bus\n"); + ohci_notice(ohci, "malconfigured bus\n"); return; } ohci->node_id = reg & (OHCI1394_NodeID_busNumber | @@ -1882,7 +1913,7 @@ static void bus_reset_work(struct work_struct *work) reg = reg_read(ohci, OHCI1394_SelfIDCount); if (reg & OHCI1394_SelfIDCount_selfIDError) { - dev_notice(ohci->card.device, "inconsistent self IDs\n"); + ohci_notice(ohci, "self ID receive error\n"); return; } /* 
@@ -1894,7 +1925,7 @@ static void bus_reset_work(struct work_struct *work) self_id_count = (reg >> 3) & 0xff; if (self_id_count > 252) { - dev_notice(ohci->card.device, "inconsistent self IDs\n"); + ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg); return; } @@ -1902,7 +1933,10 @@ static void bus_reset_work(struct work_struct *work) rmb(); for (i = 1, j = 0; j < self_id_count; i += 2, j++) { - if (ohci->self_id_cpu[i] != ~ohci->self_id_cpu[i + 1]) { + u32 id = cond_le32_to_cpu(ohci->self_id_cpu[i]); + u32 id2 = cond_le32_to_cpu(ohci->self_id_cpu[i + 1]); + + if (id != ~id2) { /* * If the invalid data looks like a cycle start packet, * it's likely to be the result of the cycle master @@ -1910,33 +1944,30 @@ static void bus_reset_work(struct work_struct *work) * so far are valid and should be processed so that the * bus manager can then correct the gap count. */ - if (cond_le32_to_cpu(ohci->self_id_cpu[i]) - == 0xffff008f) { - dev_notice(ohci->card.device, - "ignoring spurious self IDs\n"); + if (id == 0xffff008f) { + ohci_notice(ohci, "ignoring spurious self IDs\n"); self_id_count = j; break; - } else { - dev_notice(ohci->card.device, - "inconsistent self IDs\n"); - return; } + + ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n", + j, self_id_count, id, id2); + return; } - ohci->self_id_buffer[j] = - cond_le32_to_cpu(ohci->self_id_cpu[i]); + ohci->self_id_buffer[j] = id; } if (ohci->quirks & QUIRK_TI_SLLZ059) { self_id_count = find_and_insert_self_id(ohci, self_id_count); if (self_id_count < 0) { - dev_notice(ohci->card.device, - "could not construct local self ID\n"); + ohci_notice(ohci, + "could not construct local self ID\n"); return; } } if (self_id_count == 0) { - dev_notice(ohci->card.device, "inconsistent self IDs\n"); + ohci_notice(ohci, "no self IDs\n"); return; } rmb(); @@ -1957,8 +1988,7 @@ static void bus_reset_work(struct work_struct *work) new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff; if (new_generation != generation) { - dev_notice(ohci->card.device, - "new bus reset, discarding self ids\n"); + ohci_notice(ohci, "new bus reset, discarding self ids\n"); return; } @@ -2096,7 +2126,7 @@ static irqreturn_t irq_handler(int irq, void *data) } if (unlikely(event & OHCI1394_regAccessFail)) - dev_err(ohci->card.device, "register access failure\n"); + ohci_err(ohci, "register access failure\n"); if (unlikely(event & OHCI1394_postedWriteErr)) { reg_read(ohci, OHCI1394_PostedWriteAddressHi); @@ -2104,13 +2134,12 @@ static irqreturn_t irq_handler(int irq, void *data) reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_postedWriteErr); if (printk_ratelimit()) - dev_err(ohci->card.device, "PCI posted write error\n"); + ohci_err(ohci, "PCI posted write error\n"); } if (unlikely(event & OHCI1394_cycleTooLong)) { if (printk_ratelimit()) - dev_notice(ohci->card.device, - "isochronous cycle too long\n"); + ohci_notice(ohci, "isochronous cycle too long\n"); reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_cycleMaster); } @@ -2123,8 +2152,7 @@ static irqreturn_t irq_handler(int irq, void *data) * them at least two cycles later. (FIXME?) 
*/ if (printk_ratelimit()) - dev_notice(ohci->card.device, - "isochronous cycle inconsistent\n"); + ohci_notice(ohci, "isochronous cycle inconsistent\n"); } if (unlikely(event & OHCI1394_unrecoverableError)) @@ -2246,12 +2274,11 @@ static int ohci_enable(struct fw_card *card, const __be32 *config_rom, size_t length) { struct fw_ohci *ohci = fw_ohci(card); - struct pci_dev *dev = to_pci_dev(card->device); u32 lps, version, irqs; int i, ret; if (software_reset(ohci)) { - dev_err(card->device, "failed to reset ohci card\n"); + ohci_err(ohci, "failed to reset ohci card\n"); return -EBUSY; } @@ -2262,20 +2289,31 @@ static int ohci_enable(struct fw_card *card, * will lock up the machine. Wait 50msec to make sure we have * full link enabled. However, with some cards (well, at least * a JMicron PCIe card), we have to try again sometimes. + * + * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but + * cannot actually use the phy at that time. These need tens of + * millisecods pause between LPS write and first phy access too. + * + * But do not wait for 50msec on Agere/LSI cards. Their phy + * arbitration state machine may time out during such a long wait. */ + reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS | OHCI1394_HCControl_postedWriteEnable); flush_writes(ohci); - for (lps = 0, i = 0; !lps && i < 3; i++) { + if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT)) msleep(50); + + for (lps = 0, i = 0; !lps && i < 150; i++) { + msleep(1); lps = reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS; } if (!lps) { - dev_err(card->device, "failed to set Link Power Status\n"); + ohci_err(ohci, "failed to set Link Power Status\n"); return -EIO; } @@ -2284,7 +2322,7 @@ static int ohci_enable(struct fw_card *card, if (ret < 0) return ret; if (ret) - dev_notice(card->device, "local TSB41BA3D phy\n"); + ohci_notice(ohci, "local TSB41BA3D phy\n"); else ohci->quirks &= ~QUIRK_TI_SLLZ059; } @@ -2382,24 +2420,6 @@ static int ohci_enable(struct fw_card *card, reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); - if (!(ohci->quirks & QUIRK_NO_MSI)) - pci_enable_msi(dev); - if (request_irq(dev->irq, irq_handler, - pci_dev_msi_enabled(dev) ? 
0 : IRQF_SHARED, - ohci_driver_name, ohci)) { - dev_err(card->device, "failed to allocate interrupt %d\n", - dev->irq); - pci_disable_msi(dev); - - if (config_rom) { - dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE, - ohci->next_config_rom, - ohci->next_config_rom_bus); - ohci->next_config_rom = NULL; - } - return -EIO; - } - irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete | OHCI1394_RQPkt | OHCI1394_RSPkt | OHCI1394_isochTx | OHCI1394_isochRx | @@ -3578,20 +3598,20 @@ static int pci_probe(struct pci_dev *dev, if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) || pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) { - dev_err(&dev->dev, "invalid MMIO resource\n"); + ohci_err(ohci, "invalid MMIO resource\n"); err = -ENXIO; goto fail_disable; } err = pci_request_region(dev, 0, ohci_driver_name); if (err) { - dev_err(&dev->dev, "MMIO resource unavailable\n"); + ohci_err(ohci, "MMIO resource unavailable\n"); goto fail_disable; } ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE); if (ohci->registers == NULL) { - dev_err(&dev->dev, "failed to remap registers\n"); + ohci_err(ohci, "failed to remap registers\n"); err = -ENXIO; goto fail_iomem; } @@ -3675,19 +3695,33 @@ static int pci_probe(struct pci_dev *dev, guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) | reg_read(ohci, OHCI1394_GUIDLo); + if (!(ohci->quirks & QUIRK_NO_MSI)) + pci_enable_msi(dev); + if (request_irq(dev->irq, irq_handler, + pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED, + ohci_driver_name, ohci)) { + ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq); + err = -EIO; + goto fail_msi; + } + err = fw_card_add(&ohci->card, max_receive, link_speed, guid); if (err) - goto fail_contexts; + goto fail_irq; version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff; - dev_notice(&dev->dev, - "added OHCI v%x.%x device as card %d, " - "%d IR + %d IT contexts, quirks 0x%x\n", - version >> 16, version & 0xff, ohci->card.index, - ohci->n_ir, ohci->n_it, ohci->quirks); + ohci_notice(ohci, + "added OHCI v%x.%x device as card %d, " + "%d IR + %d IT contexts, quirks 0x%x\n", + version >> 16, version & 0xff, ohci->card.index, + ohci->n_ir, ohci->n_it, ohci->quirks); return 0; + fail_irq: + free_irq(dev->irq, ohci); + fail_msi: + pci_disable_msi(dev); fail_contexts: kfree(ohci->ir_context_list); kfree(ohci->it_context_list); @@ -3711,19 +3745,21 @@ static int pci_probe(struct pci_dev *dev, kfree(ohci); pmac_ohci_off(dev); fail: - if (err == -ENOMEM) - dev_err(&dev->dev, "out of memory\n"); - return err; } static void pci_remove(struct pci_dev *dev) { - struct fw_ohci *ohci; + struct fw_ohci *ohci = pci_get_drvdata(dev); - ohci = pci_get_drvdata(dev); - reg_write(ohci, OHCI1394_IntMaskClear, ~0); - flush_writes(ohci); + /* + * If the removal is happening from the suspend state, LPS won't be + * enabled and host registers (eg., IntMaskClear) won't be accessible. 
+ */ + if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) { + reg_write(ohci, OHCI1394_IntMaskClear, ~0); + flush_writes(ohci); + } cancel_work_sync(&ohci->bus_reset_work); fw_core_remove_card(&ohci->card); @@ -3766,16 +3802,14 @@ static int pci_suspend(struct pci_dev *dev, pm_message_t state) int err; software_reset(ohci); - free_irq(dev->irq, ohci); - pci_disable_msi(dev); err = pci_save_state(dev); if (err) { - dev_err(&dev->dev, "pci_save_state failed\n"); + ohci_err(ohci, "pci_save_state failed\n"); return err; } err = pci_set_power_state(dev, pci_choose_state(dev, state)); if (err) - dev_err(&dev->dev, "pci_set_power_state failed with %d\n", err); + ohci_err(ohci, "pci_set_power_state failed with %d\n", err); pmac_ohci_off(dev); return 0; @@ -3791,7 +3825,7 @@ static int pci_resume(struct pci_dev *dev) pci_restore_state(dev); err = pci_enable_device(dev); if (err) { - dev_err(&dev->dev, "pci_enable_device failed\n"); + ohci_err(ohci, "pci_enable_device failed\n"); return err; } @@ -3837,6 +3871,4 @@ MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers"); MODULE_LICENSE("GPL"); /* Provide a module alias so root-on-sbp2 initrds don't break. */ -#ifndef CONFIG_IEEE1394_OHCI1394_MODULE MODULE_ALIAS("ohci1394"); -#endif diff --git a/trunk/drivers/firewire/sbp2.c b/trunk/drivers/firewire/sbp2.c index 1162d6b3bf85..47674b913843 100644 --- a/trunk/drivers/firewire/sbp2.c +++ b/trunk/drivers/firewire/sbp2.c @@ -1144,8 +1144,8 @@ static int sbp2_probe(struct device *dev) return -ENODEV; if (dma_get_max_seg_size(device->card->device) > SBP2_MAX_SEG_SIZE) - BUG_ON(dma_set_max_seg_size(device->card->device, - SBP2_MAX_SEG_SIZE)); + WARN_ON(dma_set_max_seg_size(device->card->device, + SBP2_MAX_SEG_SIZE)); shost = scsi_host_alloc(&scsi_driver_template, sizeof(*tgt)); if (shost == NULL) @@ -1475,10 +1475,8 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, } orb = kzalloc(sizeof(*orb), GFP_ATOMIC); - if (orb == NULL) { - dev_notice(lu_dev(lu), "failed to alloc ORB\n"); + if (orb == NULL) return SCSI_MLQUEUE_HOST_BUSY; - } /* Initialize rcode to something not RCODE_COMPLETE. */ orb->base.rcode = -1; @@ -1636,9 +1634,7 @@ MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); /* Provide a module alias so root-on-sbp2 initrds don't break. */ -#ifndef CONFIG_IEEE1394_SBP2_MODULE MODULE_ALIAS("sbp2"); -#endif static int __init sbp2_init(void) { diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig index c22eed9481e3..573c449c49b9 100644 --- a/trunk/drivers/gpio/Kconfig +++ b/trunk/drivers/gpio/Kconfig @@ -38,7 +38,6 @@ config GPIO_DEVRES menuconfig GPIOLIB bool "GPIO Support" depends on ARCH_WANT_OPTIONAL_GPIOLIB || ARCH_REQUIRE_GPIOLIB - select GENERIC_GPIO help This enables GPIO support through the generic GPIO library. You only need to enable this, if you also want to enable @@ -637,7 +636,7 @@ config GPIO_MAX7301 config GPIO_MCP23S08 tristate "Microchip MCP23xxx I/O expander" - depends on SPI_MASTER || I2C + depends on (SPI_MASTER && !I2C) || I2C help SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017 I/O expanders. 
diff --git a/trunk/drivers/gpio/gpio-langwell.c b/trunk/drivers/gpio/gpio-langwell.c index 634c3d37f7b5..62ef10a641c4 100644 --- a/trunk/drivers/gpio/gpio-langwell.c +++ b/trunk/drivers/gpio/gpio-langwell.c @@ -324,6 +324,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev, resource_size_t start, len; struct lnw_gpio *lnw; u32 gpio_base; + u32 irq_base; int retval; int ngpio = id->driver_data; @@ -345,6 +346,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev, retval = -EFAULT; goto err_ioremap; } + irq_base = *(u32 *)base; gpio_base = *((u32 *)base + 1); /* release the IO mapping, since we already get the info from bar1 */ iounmap(base); @@ -365,13 +367,6 @@ static int lnw_gpio_probe(struct pci_dev *pdev, goto err_ioremap; } - lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, - &lnw_gpio_irq_ops, lnw); - if (!lnw->domain) { - retval = -ENOMEM; - goto err_ioremap; - } - lnw->reg_base = base; lnw->chip.label = dev_name(&pdev->dev); lnw->chip.request = lnw_gpio_request; @@ -384,6 +379,14 @@ static int lnw_gpio_probe(struct pci_dev *pdev, lnw->chip.ngpio = ngpio; lnw->chip.can_sleep = 0; lnw->pdev = pdev; + + lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base, + &lnw_gpio_irq_ops, lnw); + if (!lnw->domain) { + retval = -ENOMEM; + goto err_ioremap; + } + pci_set_drvdata(pdev, lnw); retval = gpiochip_add(&lnw->chip); if (retval) { diff --git a/trunk/drivers/gpio/gpio-lpc32xx.c b/trunk/drivers/gpio/gpio-lpc32xx.c index dda6a756a3d9..90a80eb688a9 100644 --- a/trunk/drivers/gpio/gpio-lpc32xx.c +++ b/trunk/drivers/gpio/gpio-lpc32xx.c @@ -255,7 +255,7 @@ static int __get_gpo_state_p3(struct lpc32xx_gpio_chip *group, } /* - * GENERIC_GPIO primitives. + * GPIO primitives. */ static int lpc32xx_gpio_dir_input_p012(struct gpio_chip *chip, unsigned pin) diff --git a/trunk/drivers/gpio/gpio-ml-ioh.c b/trunk/drivers/gpio/gpio-ml-ioh.c index b73366523fae..0966f2637ad2 100644 --- a/trunk/drivers/gpio/gpio-ml-ioh.c +++ b/trunk/drivers/gpio/gpio-ml-ioh.c @@ -496,8 +496,7 @@ static int ioh_gpio_probe(struct pci_dev *pdev, err_gpiochip_add: while (--i >= 0) { chip--; - ret = gpiochip_remove(&chip->gpio); - if (ret) + if (gpiochip_remove(&chip->gpio)) dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i); } kfree(chip_save); diff --git a/trunk/drivers/gpio/gpio-mvebu.c b/trunk/drivers/gpio/gpio-mvebu.c index bf69a7eff370..3a4816adc137 100644 --- a/trunk/drivers/gpio/gpio-mvebu.c +++ b/trunk/drivers/gpio/gpio-mvebu.c @@ -619,11 +619,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) * per-CPU registers */ if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) { res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "Cannot get memory resource\n"); - return -ENODEV; - } - mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(mvchip->percpu_membase)) diff --git a/trunk/drivers/gpio/gpio-mxs.c b/trunk/drivers/gpio/gpio-mxs.c index 25000b0f8453..f8e6af20dfbf 100644 --- a/trunk/drivers/gpio/gpio-mxs.c +++ b/trunk/drivers/gpio/gpio-mxs.c @@ -326,7 +326,8 @@ static int mxs_gpio_probe(struct platform_device *pdev) err = bgpio_init(&port->bgc, &pdev->dev, 4, port->base + PINCTRL_DIN(port), - port->base + PINCTRL_DOUT(port), NULL, + port->base + PINCTRL_DOUT(port) + MXS_SET, + port->base + PINCTRL_DOUT(port) + MXS_CLR, port->base + PINCTRL_DOE(port), NULL, 0); if (err) goto out_irqdesc_free; diff --git a/trunk/drivers/gpio/gpio-omap.c b/trunk/drivers/gpio/gpio-omap.c index 2050891d9c65..d3f7d2db870f 100644 --- 
a/trunk/drivers/gpio/gpio-omap.c +++ b/trunk/drivers/gpio/gpio-omap.c @@ -69,6 +69,7 @@ struct gpio_bank { bool is_mpuio; bool dbck_flag; bool loses_context; + bool context_valid; int stride; u32 width; int context_loss_count; @@ -1128,6 +1129,10 @@ static int omap_gpio_probe(struct platform_device *pdev) bank->loses_context = true; } else { bank->loses_context = pdata->loses_context; + + if (bank->loses_context) + bank->get_context_loss_count = + pdata->get_context_loss_count; } @@ -1178,9 +1183,6 @@ static int omap_gpio_probe(struct platform_device *pdev) omap_gpio_chip_init(bank); omap_gpio_show_rev(bank); - if (bank->loses_context) - bank->get_context_loss_count = pdata->get_context_loss_count; - pm_runtime_put(bank->dev); list_add_tail(&bank->node, &omap_gpio_list); @@ -1259,6 +1261,8 @@ static int omap_gpio_runtime_suspend(struct device *dev) return 0; } +static void omap_gpio_init_context(struct gpio_bank *p); + static int omap_gpio_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -1268,6 +1272,20 @@ static int omap_gpio_runtime_resume(struct device *dev) int c; spin_lock_irqsave(&bank->lock, flags); + + /* + * On the first resume during the probe, the context has not + * been initialised and so initialise it now. Also initialise + * the context loss count. + */ + if (bank->loses_context && !bank->context_valid) { + omap_gpio_init_context(bank); + + if (bank->get_context_loss_count) + bank->context_loss_count = + bank->get_context_loss_count(bank->dev); + } + _gpio_dbck_enable(bank); /* @@ -1384,6 +1402,29 @@ void omap2_gpio_resume_after_idle(void) } #if defined(CONFIG_PM_RUNTIME) +static void omap_gpio_init_context(struct gpio_bank *p) +{ + struct omap_gpio_reg_offs *regs = p->regs; + void __iomem *base = p->base; + + p->context.ctrl = __raw_readl(base + regs->ctrl); + p->context.oe = __raw_readl(base + regs->direction); + p->context.wake_en = __raw_readl(base + regs->wkup_en); + p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0); + p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1); + p->context.risingdetect = __raw_readl(base + regs->risingdetect); + p->context.fallingdetect = __raw_readl(base + regs->fallingdetect); + p->context.irqenable1 = __raw_readl(base + regs->irqenable); + p->context.irqenable2 = __raw_readl(base + regs->irqenable2); + + if (regs->set_dataout && p->regs->clr_dataout) + p->context.dataout = __raw_readl(base + regs->set_dataout); + else + p->context.dataout = __raw_readl(base + regs->dataout); + + p->context_valid = true; +} + static void omap_gpio_restore_context(struct gpio_bank *bank) { __raw_writel(bank->context.wake_en, @@ -1421,6 +1462,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank) #else #define omap_gpio_runtime_suspend NULL #define omap_gpio_runtime_resume NULL +static void omap_gpio_init_context(struct gpio_bank *p) {} #endif static const struct dev_pm_ops gpio_pm_ops = { diff --git a/trunk/drivers/gpio/gpio-pch.c b/trunk/drivers/gpio/gpio-pch.c index cdf599687cf7..0fec097e838d 100644 --- a/trunk/drivers/gpio/gpio-pch.c +++ b/trunk/drivers/gpio/gpio-pch.c @@ -424,8 +424,7 @@ static int pch_gpio_probe(struct pci_dev *pdev, err_request_irq: irq_free_descs(irq_base, gpio_pins[chip->ioh]); - ret = gpiochip_remove(&chip->gpio); - if (ret) + if (gpiochip_remove(&chip->gpio)) dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); err_gpiochip_add: diff --git a/trunk/drivers/gpio/gpio-sch.c b/trunk/drivers/gpio/gpio-sch.c index 1e4de16ceb41..5af65719b95d 
100644 --- a/trunk/drivers/gpio/gpio-sch.c +++ b/trunk/drivers/gpio/gpio-sch.c @@ -272,10 +272,8 @@ static int sch_gpio_probe(struct platform_device *pdev) return 0; err_sch_gpio_resume: - err = gpiochip_remove(&sch_gpio_core); - if (err) - dev_err(&pdev->dev, "%s failed, %d\n", - "gpiochip_remove()", err); + if (gpiochip_remove(&sch_gpio_core)) + dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); err_sch_gpio_core: release_region(res->start, resource_size(res)); diff --git a/trunk/drivers/gpio/gpio-tegra.c b/trunk/drivers/gpio/gpio-tegra.c index da4cb5b0cb87..9a62672f1bed 100644 --- a/trunk/drivers/gpio/gpio-tegra.c +++ b/trunk/drivers/gpio/gpio-tegra.c @@ -463,11 +463,6 @@ static int tegra_gpio_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Missing MEM resource\n"); - return -ENODEV; - } - regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(regs)) return PTR_ERR(regs); diff --git a/trunk/drivers/gpio/gpio-viperboard.c b/trunk/drivers/gpio/gpio-viperboard.c index 095ab14cea4d..5ac2919197fe 100644 --- a/trunk/drivers/gpio/gpio-viperboard.c +++ b/trunk/drivers/gpio/gpio-viperboard.c @@ -446,7 +446,8 @@ static int vprbrd_gpio_probe(struct platform_device *pdev) return ret; err_gpiob: - ret = gpiochip_remove(&vb_gpio->gpioa); + if (gpiochip_remove(&vb_gpio->gpioa)) + dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); err_gpioa: return ret; diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index 3a8f7e6db295..e7e92429d10f 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev) { struct drm_crtc *crtc; + /* Locking is currently fubar in the panic handler. 
*/ + if (oops_in_progress) + return; + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) WARN_ON(!mutex_is_locked(&crtc->mutex)); @@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status) else return "unknown"; } +EXPORT_SYMBOL(drm_get_connector_status_name); /** * drm_mode_object_get - allocate a new modeset identifier diff --git a/trunk/drivers/gpu/drm/drm_crtc_helper.c b/trunk/drivers/gpu/drm/drm_crtc_helper.c index e974f9309b72..ed1334e27c33 100644 --- a/trunk/drivers/gpu/drm/drm_crtc_helper.c +++ b/trunk/drivers/gpu/drm/drm_crtc_helper.c @@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, connector->helper_private; int count = 0; int mode_flags = 0; + bool verbose_prune = true; DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, drm_get_connector_name(connector)); @@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", connector->base.id, drm_get_connector_name(connector)); drm_mode_connector_update_edid_property(connector, NULL); + verbose_prune = false; goto prune; } @@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector, } prune: - drm_mode_prune_invalid(dev, &connector->modes, true); + drm_mode_prune_invalid(dev, &connector->modes, verbose_prune); if (list_empty(&connector->modes)) return 0; @@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work) continue; connector->status = connector->funcs->detect(connector, false); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", - connector->base.id, - drm_get_connector_name(connector), - old_status, connector->status); - if (old_status != connector->status) + if (old_status != connector->status) { + const char *old, *new; + + old = drm_get_connector_status_name(old_status); + new = drm_get_connector_status_name(connector->status); + + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] " + "status updated from %s to %s\n", + connector->base.id, + drm_get_connector_name(connector), + old, new); + changed = true; + } } mutex_unlock(&dev->mode_config.mutex); @@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev) old_status = connector->status; connector->status = connector->funcs->detect(connector, false); - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n", connector->base.id, drm_get_connector_name(connector), - old_status, connector->status); + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->status)); if (old_status != connector->status) changed = true; } diff --git a/trunk/drivers/gpu/drm/drm_drv.c b/trunk/drivers/gpu/drm/drm_drv.c index 8d4f29075af5..9cc247f55502 100644 --- a/trunk/drivers/gpu/drm/drm_drv.c +++ b/trunk/drivers/gpu/drm/drm_drv.c @@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data, struct drm_file *file_priv); #define DRM_IOCTL_DEF(ioctl, _func, _flags) \ - [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} + [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} /** Ioctl table */ static const struct drm_ioctl_desc drm_ioctls[] = { @@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp, { struct drm_file *file_priv = filp->private_data; struct drm_device *dev; - const struct drm_ioctl_desc *ioctl; + const struct drm_ioctl_desc *ioctl = 
NULL; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; @@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp, atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++file_priv->ioctl_count; - DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", - task_pid_nr(current), cmd, nr, - (long)old_encode_dev(file_priv->minor->device), - file_priv->authenticated); - if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) goto err_i1; @@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp, } else goto err_i1; + DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->device), + file_priv->authenticated, ioctl->name); + /* Do not trust userspace, use our own definition */ func = ioctl->func; /* is there a local override? */ @@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp, } err_i1: + if (!ioctl) + DRM_DEBUG("invalid iotcl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->device), + file_priv->authenticated, cmd, nr); + if (kdata != stack_kdata) kfree(kdata); atomic_dec(&dev->ioctl_count); diff --git a/trunk/drivers/gpu/drm/drm_encoder_slave.c b/trunk/drivers/gpu/drm/drm_encoder_slave.c index 48c52f7df4e6..0cfb60f54766 100644 --- a/trunk/drivers/gpu/drm/drm_encoder_slave.c +++ b/trunk/drivers/gpu/drm/drm_encoder_slave.c @@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev, struct i2c_adapter *adap, const struct i2c_board_info *info) { - char modalias[sizeof(I2C_MODULE_PREFIX) - + I2C_NAME_SIZE]; struct module *module = NULL; struct i2c_client *client; struct drm_i2c_encoder_driver *encoder_drv; int err = 0; - snprintf(modalias, sizeof(modalias), - "%s%s", I2C_MODULE_PREFIX, info->type); - request_module(modalias); + request_module("%s%s", I2C_MODULE_PREFIX, info->type); client = i2c_new_device(adap, info); if (!client) { diff --git a/trunk/drivers/gpu/drm/drm_mm.c b/trunk/drivers/gpu/drm/drm_mm.c index db1e2d6f90d7..07cf99cc8862 100644 --- a/trunk/drivers/gpu/drm/drm_mm.c +++ b/trunk/drivers/gpu/drm/drm_mm.c @@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) EXPORT_SYMBOL(drm_mm_debug_table); #if defined(CONFIG_DEBUG_FS) -int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) { - struct drm_mm_node *entry; - unsigned long total_used = 0, total_free = 0, total = 0; unsigned long hole_start, hole_end, hole_size; - hole_start = drm_mm_hole_node_start(&mm->head_node); - hole_end = drm_mm_hole_node_end(&mm->head_node); - hole_size = hole_end - hole_start; - if (hole_size) + if (entry->hole_follows) { + hole_start = drm_mm_hole_node_start(entry); + hole_end = drm_mm_hole_node_end(entry); + hole_size = hole_end - hole_start; seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", hole_start, hole_end, hole_size); - total_free += hole_size; + return hole_size; + } + + return 0; +} + +int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +{ + struct drm_mm_node *entry; + unsigned long total_used = 0, total_free = 0, total = 0; + + total_free += drm_mm_dump_hole(m, &mm->head_node); drm_mm_for_each_node(entry, mm) { seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", entry->start, entry->start + entry->size, entry->size); total_used += entry->size; - if (entry->hole_follows) { - hole_start = drm_mm_hole_node_start(entry); - hole_end = drm_mm_hole_node_end(entry); - hole_size = 
hole_end - hole_start; - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", - hole_start, hole_end, hole_size); - total_free += hole_size; - } + total_free += drm_mm_dump_hole(m, entry); } total = total_free + total_used; diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c index faa79df02648..a371ff865a88 100644 --- a/trunk/drivers/gpu/drm/drm_modes.c +++ b/trunk/drivers/gpu/drm/drm_modes.c @@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option, was_digit = false; } else goto done; + break; case '0' ... '9': was_digit = true; break; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c index bbfc3840080c..6652597586a1 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -2005,11 +2005,6 @@ static int hdmi_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - DRM_ERROR("failed to find registers\n"); - return -ENOENT; - } - hdata->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hdata->regs)) return PTR_ERR(hdata->regs); diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index 6be940effefd..6165535d15f0 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, if (timeout) { struct timespec sleep_time = timespec_sub(now, before); *timeout = timespec_sub(*timeout, sleep_time); + if (!timespec_valid(timeout)) /* i.e. negative time remains */ + set_normalized_timespec(timeout, 0, 0); } switch (end) { @@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno, case -ERESTARTSYS: /* Signal */ return (int)end; case 0: /* Timeout */ - if (timeout) - set_normalized_timespec(timeout, 0, 0); return -ETIME; default: /* Completed */ WARN_ON(end < 0); /* We're not aware of other errors */ @@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) mutex_unlock(&dev->struct_mutex); ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); - if (timeout) { - WARN_ON(!timespec_valid(timeout)); + if (timeout) args->timeout_ns = timespec_to_ns(timeout); - } return ret; out: diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index dca614de71b6..bdb0d7717bc7 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl) return snb_gmch_ctl << 25; /* 32 MB units */ } -static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl) -{ - static const int stolen_decoder[] = { - 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352}; - snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT; - snb_gmch_ctl &= IVB_GMCH_GMS_MASK; - return stolen_decoder[snb_gmch_ctl] << 20; -} - static int gen6_gmch_probe(struct drm_device *dev, size_t *gtt_total, size_t *stolen, @@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev, pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); - if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) - *stolen = gen7_get_stolen_size(snb_gmch_ctl); - else - *stolen = gen6_get_stolen_size(snb_gmch_ctl); - + *stolen = gen6_get_stolen_size(snb_gmch_ctl); *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; /* For 
Modern GENs the PTEs and register space are split in the BAR */ diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index 83f9c26e1adb..2d6b62e42daf 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -46,8 +46,6 @@ #define SNB_GMCH_GGMS_MASK 0x3 #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ #define SNB_GMCH_GMS_MASK 0x1f -#define IVB_GMCH_GMS_SHIFT 4 -#define IVB_GMCH_GMS_MASK 0xf /* PCI config space */ diff --git a/trunk/drivers/gpu/drm/i915/intel_ddi.c b/trunk/drivers/gpu/drm/i915/intel_ddi.c index 26a0a570f92e..fb961bb81903 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ddi.c +++ b/trunk/drivers/gpu/drm/i915/intel_ddi.c @@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder) intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); + if (port != PORT_A) + intel_dp_stop_link_train(intel_dp); } } @@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder) } else if (type == INTEL_OUTPUT_EDP) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + if (port == PORT_A) + intel_dp_stop_link_train(intel_dp); + ironlake_edp_backlight_on(intel_dp); } diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index fb2fbc1e08b9..3d704b706a8d 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* Walk through all bpp values. Luckily they're all nicely spaced with 2 * bpc in between. */ bpp = min_t(int, 8*3, pipe_config->pipe_bpp); + if (is_edp(intel_dp) && dev_priv->edp.bpp) + bpp = min_t(int, bpp, dev_priv->edp.bpp); + for (; bpp >= 6*3; bpp -= 2*3) { mode_rate = intel_dp_link_required(target_clock, bpp); @@ -739,6 +742,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + pipe_config->pipe_bpp = bpp; pipe_config->pixel_target_clock = target_clock; DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", @@ -751,20 +755,6 @@ intel_dp_compute_config(struct intel_encoder *encoder, target_clock, adjusted_mode->clock, &pipe_config->dp_m_n); - /* - * XXX: We have a strange regression where using the vbt edp bpp value - * for the link bw computation results in black screens, the panel only - * works when we do the computation at the usual 24bpp (but still - * requires us to use 18bpp). Until that's fully debugged, stay - * bug-for-bug compatible with the old code. 
- */ - if (is_edp(intel_dp) && dev_priv->edp.bpp) { - DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", - bpp, dev_priv->edp.bpp); - bpp = min_t(int, bpp, dev_priv->edp.bpp); - } - pipe_config->pipe_bpp = bpp; - return true; } @@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder) ironlake_edp_panel_on(intel_dp); ironlake_edp_panel_vdd_off(intel_dp, true); intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); ironlake_edp_backlight_on(intel_dp); } @@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, struct drm_i915_private *dev_priv = dev->dev_private; enum port port = intel_dig_port->port; int ret; - uint32_t temp; if (HAS_DDI(dev)) { - temp = I915_READ(DP_TP_CTL(port)); + uint32_t temp = I915_READ(DP_TP_CTL(port)); if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) temp |= DP_TP_CTL_SCRAMBLE_DISABLE; @@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { case DP_TRAINING_PATTERN_DISABLE: - - if (port != PORT_A) { - temp |= DP_TP_CTL_LINK_TRAIN_IDLE; - I915_WRITE(DP_TP_CTL(port), temp); - - if (wait_for((I915_READ(DP_TP_STATUS(port)) & - DP_TP_STATUS_IDLE_DONE), 1)) - DRM_ERROR("Timed out waiting for DP idle patterns\n"); - - temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; - } - temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; break; @@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, return true; } +static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) +{ + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct drm_device *dev = intel_dig_port->base.base.dev; + struct drm_i915_private *dev_priv = dev->dev_private; + enum port port = intel_dig_port->port; + uint32_t val; + + if (!HAS_DDI(dev)) + return; + + val = I915_READ(DP_TP_CTL(port)); + val &= ~DP_TP_CTL_LINK_TRAIN_MASK; + val |= DP_TP_CTL_LINK_TRAIN_IDLE; + I915_WRITE(DP_TP_CTL(port), val); + + /* + * On PORT_A we can have only eDP in SST mode. There the only reason + * we need to set idle transmission mode is to work around a HW issue + * where we enable the pipe while not in idle link-training mode. + * In this case there is requirement to wait for a minimum number of + * idle patterns to be sent. + */ + if (port == PORT_A) + return; + + if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE), + 1)) + DRM_ERROR("Timed out waiting for DP idle patterns\n"); +} + /* Enable corresponding port and start training pattern 1 */ void intel_dp_start_link_train(struct intel_dp *intel_dp) @@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) ++tries; } + intel_dp_set_idle_link_train(intel_dp); + + intel_dp->DP = DP; + if (channel_eq) DRM_DEBUG_KMS("Channel EQ done. 
DP Training successful\n"); - intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); +} + +void intel_dp_stop_link_train(struct intel_dp *intel_dp) +{ + intel_dp_set_link_train(intel_dp, intel_dp->DP, + DP_TRAINING_PATTERN_DISABLE); } static void @@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) drm_get_encoder_name(&intel_encoder->base)); intel_dp_start_link_train(intel_dp); intel_dp_complete_link_train(intel_dp); + intel_dp_stop_link_train(intel_dp); } } diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index b5b6d19e6dd3..624a9e6b8d71 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port, extern void intel_dp_init_link_config(struct intel_dp *intel_dp); extern void intel_dp_start_link_train(struct intel_dp *intel_dp); extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); +extern void intel_dp_stop_link_train(struct intel_dp *intel_dp); extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); extern void intel_dp_check_link_status(struct intel_dp *intel_dp); diff --git a/trunk/drivers/gpu/drm/i915/intel_fb.c b/trunk/drivers/gpu/drm/i915/intel_fb.c index 0e19e575a1b4..6b7c3ca2c035 100644 --- a/trunk/drivers/gpu/drm/i915/intel_fb.c +++ b/trunk/drivers/gpu/drm/i915/intel_fb.c @@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev) void intel_fbdev_set_suspend(struct drm_device *dev, int state) { drm_i915_private_t *dev_priv = dev->dev_private; - if (!dev_priv->fbdev) + struct intel_fbdev *ifbdev = dev_priv->fbdev; + struct fb_info *info; + + if (!ifbdev) return; - fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); + info = ifbdev->helper.fbdev; + + /* On resume from hibernation: If the object is shmemfs backed, it has + * been restored from swap. If the object is stolen however, it will be + * full of whatever garbage was left in there. 
+ */ + if (!state && ifbdev->ifb.obj->stolen) + memset_io(info->screen_base, 0, info->screen_size); + + fb_set_suspend(info, state); } MODULE_LICENSE("GPL and additional rights"); diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c index de3b0dc5658b..aa01128ff192 100644 --- a/trunk/drivers/gpu/drm/i915/intel_pm.c +++ b/trunk/drivers/gpu/drm/i915/intel_pm.c @@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev) vlv_update_drain_latency(dev); - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &valleyview_wm_info, latency_ns, &valleyview_cursor_wm_info, latency_ns, &planea_wm, &cursora_wm)) - enabled |= 1; + enabled |= 1 << PIPE_A; - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &valleyview_wm_info, latency_ns, &valleyview_cursor_wm_info, latency_ns, &planeb_wm, &cursorb_wm)) - enabled |= 2; + enabled |= 1 << PIPE_B; if (single_plane_enabled(enabled) && g4x_compute_srwm(dev, ffs(enabled) - 1, @@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev) int plane_sr, cursor_sr; unsigned int enabled = 0; - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &g4x_wm_info, latency_ns, &g4x_cursor_wm_info, latency_ns, &planea_wm, &cursora_wm)) - enabled |= 1; + enabled |= 1 << PIPE_A; - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &g4x_wm_info, latency_ns, &g4x_cursor_wm_info, latency_ns, &planeb_wm, &cursorb_wm)) - enabled |= 2; + enabled |= 1 << PIPE_B; if (single_plane_enabled(enabled) && g4x_compute_srwm(dev, ffs(enabled) - 1, @@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &ironlake_display_wm_info, ILK_LP0_PLANE_LATENCY, &ironlake_cursor_wm_info, @@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1; + enabled |= 1 << PIPE_A; } - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &ironlake_display_wm_info, ILK_LP0_PLANE_LATENCY, &ironlake_cursor_wm_info, @@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 2; + enabled |= 1 << PIPE_B; } /* @@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1; + enabled |= 1 << PIPE_A; } - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 2; + enabled |= 1 << PIPE_B; } /* @@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev) unsigned int enabled; enabled = 0; - if (g4x_compute_wm0(dev, 0, + if (g4x_compute_wm0(dev, PIPE_A, &sandybridge_display_wm_info, latency, 
&sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe A -" " plane %d, " "cursor: %d\n", plane_wm, cursor_wm); - enabled |= 1; + enabled |= 1 << PIPE_A; } - if (g4x_compute_wm0(dev, 1, + if (g4x_compute_wm0(dev, PIPE_B, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe B -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 2; + enabled |= 1 << PIPE_B; } - if (g4x_compute_wm0(dev, 2, + if (g4x_compute_wm0(dev, PIPE_C, &sandybridge_display_wm_info, latency, &sandybridge_cursor_wm_info, latency, &plane_wm, &cursor_wm)) { @@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev) DRM_DEBUG_KMS("FIFO watermarks For pipe C -" " plane %d, cursor: %d\n", plane_wm, cursor_wm); - enabled |= 3; + enabled |= 1 << PIPE_C; } /* diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c index f9889658329b..77b8a45fb10a 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc) static inline void mga_wait_vsync(struct mga_device *mdev) { - unsigned int count = 0; + unsigned long timeout = jiffies + HZ/10; unsigned int status = 0; do { status = RREG32(MGAREG_Status); - count++; - } while ((status & 0x08) && (count < 250000)); - count = 0; + } while ((status & 0x08) && time_before(jiffies, timeout)); + timeout = jiffies + HZ/10; status = 0; do { status = RREG32(MGAREG_Status); - count++; - } while (!(status & 0x08) && (count < 250000)); + } while (!(status & 0x08) && time_before(jiffies, timeout)); } static inline void mga_wait_busy(struct mga_device *mdev) { - unsigned int count = 0; + unsigned long timeout = jiffies + HZ; unsigned int status = 0; do { status = RREG8(MGAREG_Status + 2); - count++; - } while ((status & 0x01) && (count < 500000)); + } while ((status & 0x01) && time_before(jiffies, timeout)); } /* @@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_REMHEADCTL_CLKDIS; - WREG_DAC(MGA1064_REMHEADCTL, tmp); + WREG8(DAC_DATA, tmp); /* select PLL Set C */ tmp = RREG8(MGAREG_MEM_MISC_READ); @@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(500); @@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_VREF_CTL); tmp = RREG8(DAC_DATA); tmp &= ~0x04; - WREG_DAC(MGA1064_VREF_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(50); @@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; tmp 
|= MGA1064_REMHEADCTL_CLKSL_PLL; - WREG_DAC(MGA1064_REMHEADCTL, tmp); + WREG8(DAC_DATA, tmp); /* reset dotclock rate bit */ WREG8(MGAREG_SEQ_INDEX, 1); @@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); vcount = RREG8(MGAREG_VCOUNT); @@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); + WREG8(DAC_DATA, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; @@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); tmp = RREG8(DAC_DATA); - WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40); + WREG8(DAC_DATA, tmp & ~0x40); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); @@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(500); @@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); tmp = RREG8(DAC_DATA); - WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40); + WREG8(DAC_DATA, tmp | 0x40); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= (0x3 << 2); @@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); return 0; } @@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); + WREG8(DAC_DATA, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= 0x3 << 2; @@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(500); @@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); vcount = RREG8(MGAREG_VCOUNT); @@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); tmp = RREG8(DAC_DATA); tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; - WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); + WREG8(DAC_DATA, tmp); WREG8(DAC_INDEX, MGA1064_REMHEADCTL); tmp = RREG8(DAC_DATA); 
tmp |= MGA1064_REMHEADCTL_CLKDIS; - WREG_DAC(MGA1064_REMHEADCTL, tmp); + WREG8(DAC_DATA, tmp); tmp = RREG8(MGAREG_MEM_MISC_READ); tmp |= (0x3<<2) | 0xc0; @@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) tmp = RREG8(DAC_DATA); tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; - WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); + WREG8(DAC_DATA, tmp); udelay(500); @@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc) WREG_DAC(MGA1064_GEN_IO_DATA, tmp); } - +/* + This is how the framebuffer base address is stored in g200 cards: + * Assume @offset is the gpu_addr variable of the framebuffer object + * Then addr is the number of _pixels_ (not bytes) from the start of + VRAM to the first pixel we want to display. (divided by 2 for 32bit + framebuffers) + * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers + addr<20> -> CRTCEXT0<6> + addr<19-16> -> CRTCEXT0<3-0> + addr<15-8> -> CRTCC<7-0> + addr<7-0> -> CRTCD<7-0> + CRTCEXT0 has to be programmed last to trigger an update and make the + new addr variable take effect. + */ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) { struct mga_device *mdev = crtc->dev->dev_private; u32 addr; int count; + u8 crtcext0; while (RREG8(0x1fda) & 0x08); while (!(RREG8(0x1fda) & 0x08)); @@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) count = RREG8(MGAREG_VCOUNT) + 2; while (RREG8(MGAREG_VCOUNT) < count); - addr = offset >> 2; + WREG8(MGAREG_CRTCEXT_INDEX, 0); + crtcext0 = RREG8(MGAREG_CRTCEXT_DATA); + crtcext0 &= 0xB0; + addr = offset / 8; + /* Can't store addresses any higher than that... + but we also don't have more than 16MB of memory, so it should be fine. */ + WARN_ON(addr > 0x1fffff); + crtcext0 |= (!!(addr & (1<<20)))<<6; WREG_CRT(0x0d, (u8)(addr & 0xff)); WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff); - WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf); + WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0); } @@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc, for (i = 0; i < sizeof(dacvalue); i++) { - if ((i <= 0x03) || - (i == 0x07) || - (i == 0x0b) || - (i == 0x0f) || - ((i >= 0x13) && (i <= 0x17)) || + if ((i <= 0x17) || (i == 0x1b) || (i == 0x1c) || ((i >= 0x1f) && (i <= 0x29)) || diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c index 955af122c3a6..a36e64e98ef3 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c @@ -138,7 +138,6 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; - device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; break; case 0xce: @@ -225,7 +224,6 @@ nvc0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; - device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass; device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; break; case 0xc8: diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c index ddaeb5572903..89bf459d584b 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c +++ 
b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c @@ -47,6 +47,7 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) struct nouveau_gpuobj *cur; int i, p; + mutex_lock(&nv_subdev(priv)->mutex); cur = priv->playlist[priv->cur_playlist]; priv->cur_playlist = !priv->cur_playlist; @@ -60,6 +61,7 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv) nv_wr32(priv, 0x0032f4, cur->addr >> 12); nv_wr32(priv, 0x0032ec, p); nv_wr32(priv, 0x002500, 0x00000101); + mutex_unlock(&nv_subdev(priv)->mutex); } static int diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c index 4d4a6b905370..46dfa68c47bb 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c @@ -71,6 +71,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv) struct nouveau_gpuobj *cur; int i, p; + mutex_lock(&nv_subdev(priv)->mutex); cur = priv->playlist[priv->cur_playlist]; priv->cur_playlist = !priv->cur_playlist; @@ -87,6 +88,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv) nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3)); if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000)) nv_error(priv, "playlist update failed\n"); + mutex_unlock(&nv_subdev(priv)->mutex); } static int @@ -248,9 +250,17 @@ nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend) struct nvc0_fifo_priv *priv = (void *)object->engine; struct nvc0_fifo_chan *chan = (void *)object; u32 chid = chan->base.chid; + u32 mask, engine; nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000); nvc0_fifo_playlist_update(priv); + mask = nv_rd32(priv, 0x0025a4); + for (engine = 0; mask && engine < 16; engine++) { + if (!(mask & (1 << engine))) + continue; + nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000); + mask &= ~(1 << engine); + } nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000); return nouveau_fifo_channel_fini(&chan->base, suspend); diff --git a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c index 9151919fb831..56192a7242ae 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c @@ -94,11 +94,13 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) u32 match = (engine << 16) | 0x00000001; int i, p; + mutex_lock(&nv_subdev(priv)->mutex); cur = engn->playlist[engn->cur_playlist]; if (unlikely(cur == NULL)) { int ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x8000, 0x1000, 0, &cur); if (ret) { + mutex_unlock(&nv_subdev(priv)->mutex); nv_error(priv, "playlist alloc failed\n"); return; } @@ -122,6 +124,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine) nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) nv_error(priv, "playlist %d update timeout\n", engine); + mutex_unlock(&nv_subdev(priv)->mutex); } static int diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c index c300b5e7b670..c434d398d16f 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/init.c @@ -1940,8 +1940,8 @@ init_zm_mask_add(struct nvbios_init *init) trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add); init->offset += 13; - data = init_rd32(init, addr) & mask; - data |= ((data + add) & ~mask); + data = init_rd32(init, addr); + 
data = (data & mask) | ((data + add) & ~mask); init_wr32(init, addr, data); } diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c index e4940fb166e8..fb794e997fbc 100644 --- a/trunk/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c +++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c @@ -29,7 +29,6 @@ struct nvc0_ltcg_priv { struct nouveau_ltcg base; u32 part_nr; - u32 part_mask; u32 subp_nr; struct nouveau_mm tags; u32 num_tags; @@ -105,8 +104,6 @@ nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count) /* wait until it's finished with clearing */ for (p = 0; p < priv->part_nr; ++p) { - if (!(priv->part_mask & (1 << p))) - continue; for (i = 0; i < priv->subp_nr; ++i) nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0); } @@ -121,6 +118,8 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) int ret; nv_wr32(priv, 0x17e8d8, priv->part_nr); + if (nv_device(pfb)->card_type >= NV_E0) + nv_wr32(priv, 0x17e000, priv->part_nr); /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ priv->num_tags = (pfb->ram.size >> 17) / 4; @@ -167,16 +166,20 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine, { struct nvc0_ltcg_priv *priv; struct nouveau_fb *pfb = nouveau_fb(parent); - int ret; + u32 parts, mask; + int ret, i; ret = nouveau_ltcg_create(parent, engine, oclass, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->part_nr = nv_rd32(priv, 0x022438); - priv->part_mask = nv_rd32(priv, 0x022554); - + parts = nv_rd32(priv, 0x022438); + mask = nv_rd32(priv, 0x022554); + for (i = 0; i < parts; i++) { + if (!(mask & (1 << i))) + priv->part_nr++; + } priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c index 46c152ff0a80..383f4e6ea9d1 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -453,18 +453,32 @@ nouveau_do_suspend(struct drm_device *dev) NV_INFO(drm, "evicting buffers...\n"); ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); + NV_INFO(drm, "waiting for kernel channels to go idle...\n"); + if (drm->cechan) { + ret = nouveau_channel_idle(drm->cechan); + if (ret) + return ret; + } + + if (drm->channel) { + ret = nouveau_channel_idle(drm->channel); + if (ret) + return ret; + } + + NV_INFO(drm, "suspending client object trees...\n"); if (drm->fence && nouveau_fence(drm)->suspend) { if (!nouveau_fence(drm)->suspend(drm)) return -ENOMEM; } - NV_INFO(drm, "suspending client object trees...\n"); list_for_each_entry(cli, &drm->clients, head) { ret = nouveau_client_fini(&cli->base, true); if (ret) goto fail_client; } + NV_INFO(drm, "suspending kernel object tree...\n"); ret = nouveau_client_fini(&drm->client.base, true); if (ret) goto fail_client; @@ -514,17 +528,18 @@ nouveau_do_resume(struct drm_device *dev) nouveau_agp_reset(drm); - NV_INFO(drm, "resuming client object trees...\n"); + NV_INFO(drm, "resuming kernel object tree...\n"); nouveau_client_init(&drm->client.base); nouveau_agp_init(drm); + NV_INFO(drm, "resuming client object trees...\n"); + if (drm->fence && nouveau_fence(drm)->resume) + nouveau_fence(drm)->resume(drm); + list_for_each_entry(cli, &drm->clients, head) { nouveau_client_init(&cli->base); } - if (drm->fence && nouveau_fence(drm)->resume) - nouveau_fence(drm)->resume(drm); - 
nouveau_run_vbios_init(dev); nouveau_pm_resume(dev); diff --git a/trunk/drivers/gpu/drm/qxl/qxl_cmd.c b/trunk/drivers/gpu/drm/qxl/qxl_cmd.c index 08b0823c93d5..f86771481317 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/trunk/drivers/gpu/drm/qxl/qxl_cmd.c @@ -277,7 +277,7 @@ int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, return 0; } -static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port) +static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) { int irq_num; long addr = qdev->io_base + port; @@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port) mutex_lock(&qdev->async_io_mutex); irq_num = atomic_read(&qdev->irq_received_io_cmd); - - if (qdev->last_sent_io_cmd > irq_num) { - ret = wait_event_interruptible(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num); - if (ret) + if (intr) + ret = wait_event_interruptible_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + else + ret = wait_event_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + /* 0 is timeout, just bail the "hw" has gone away */ + if (ret <= 0) goto out; irq_num = atomic_read(&qdev->irq_received_io_cmd); } outb(val, addr); qdev->last_sent_io_cmd = irq_num + 1; - ret = wait_event_interruptible(qdev->io_cmd_event, - atomic_read(&qdev->irq_received_io_cmd) > irq_num); + if (intr) + ret = wait_event_interruptible_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + else + ret = wait_event_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); out: + if (ret > 0) + ret = 0; mutex_unlock(&qdev->async_io_mutex); return ret; } @@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port) int ret; restart: - ret = wait_for_io_cmd_user(qdev, val, port); + ret = wait_for_io_cmd_user(qdev, val, port, false); if (ret == -ERESTARTSYS) goto restart; } @@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf, mutex_lock(&qdev->update_area_mutex); qdev->ram_header->update_area = *area; qdev->ram_header->update_surface = surface_id; - ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC); + ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true); mutex_unlock(&qdev->update_area_mutex); return ret; } diff --git a/trunk/drivers/gpu/drm/qxl/qxl_display.c b/trunk/drivers/gpu/drm/qxl/qxl_display.c index fcfd4436ceed..823d29e926ec 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_display.c +++ b/trunk/drivers/gpu/drm/qxl/qxl_display.c @@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, int inc = 1; qobj = gem_to_qxl_bo(qxl_fb->obj); - if (qxl_fb != qdev->active_user_framebuffer) { - DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n", - __func__, qxl_fb, qdev->active_user_framebuffer); - } + /* if we aren't primary surface ignore this */ + if (!qobj->is_primary) + return 0; + if (!num_clips) { num_clips = 1; clips = &norect; @@ -604,7 +604,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc, mode->hdisplay, mode->vdisplay); } - qdev->mode_set = true; return 0; } @@ -893,7 +892,6 @@ qxl_user_framebuffer_create(struct drm_device *dev, { struct drm_gem_object *obj; struct qxl_framebuffer *qxl_fb; - struct qxl_device *qdev = dev->dev_private; int ret; obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); 
@@ -909,13 +907,6 @@ qxl_user_framebuffer_create(struct drm_device *dev, return NULL; } - if (qdev->active_user_framebuffer) { - DRM_INFO("%s: active_user_framebuffer %p -> %p\n", - __func__, - qdev->active_user_framebuffer, qxl_fb); - } - qdev->active_user_framebuffer = qxl_fb; - return &qxl_fb->base; } diff --git a/trunk/drivers/gpu/drm/qxl/qxl_drv.h b/trunk/drivers/gpu/drm/qxl/qxl_drv.h index 52b582c211da..43d06ab28a21 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_drv.h +++ b/trunk/drivers/gpu/drm/qxl/qxl_drv.h @@ -255,12 +255,6 @@ struct qxl_device { struct qxl_gem gem; struct qxl_mode_info mode_info; - /* - * last created framebuffer with fb_create - * only used by debugfs dumbppm - */ - struct qxl_framebuffer *active_user_framebuffer; - struct fb_info *fbdev_info; struct qxl_framebuffer *fbdev_qfb; void *ram_physical; @@ -270,7 +264,6 @@ struct qxl_device { struct qxl_ring *cursor_ring; struct qxl_ram_header *ram_header; - bool mode_set; bool primary_created; diff --git a/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c b/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c index 04b64f9cbfdb..6db7370373ea 100644 --- a/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/trunk/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -294,6 +294,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, goto out; if (!qobj->pin_count) { + qxl_ttm_placement_from_domain(qobj, qobj->type); ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, true, false); if (unlikely(ret)) diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index 6d6fdb3ba0d0..d5df8fd10217 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1811,12 +1811,9 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; - radeon_crtc->in_mode_set = true; - /* disable crtc pair power gating before programming */ if (ASIC_IS_DCE6(rdev)) atombios_powergate_crtc(crtc, ATOM_DISABLE); @@ -1827,11 +1824,8 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc) static void atombios_crtc_commit(struct drm_crtc *crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); - atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); atombios_lock_crtc(crtc, ATOM_DISABLE); - radeon_crtc->in_mode_set = false; } static void atombios_crtc_disable(struct drm_crtc *crtc) diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index 105bafb6c29d..8f9e2d31b255 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -2343,11 +2343,13 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav u32 crtc_enabled, tmp, frame_count, blackout; int i, j; - save->vga_render_control = RREG32(VGA_RENDER_CONTROL); - save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); + if (!ASIC_IS_NODCE(rdev)) { + save->vga_render_control = RREG32(VGA_RENDER_CONTROL); + save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - /* disable VGA render */ - WREG32(VGA_RENDER_CONTROL, 0); + /* disable VGA render */ + WREG32(VGA_RENDER_CONTROL, 0); + } /* blank the display controllers */ for (i = 0; i < rdev->num_crtc; i++) { crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; @@ -2438,8 +2440,11 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save 
*s WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)rdev->mc.vram_start); } - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); - WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); + + if (!ASIC_IS_NODCE(rdev)) { + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); + WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); + } /* unlock regs and wait for update */ for (i = 0; i < rdev->num_crtc; i++) { @@ -2499,10 +2504,12 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s } } } - /* Unlock vga access */ - WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); - mdelay(1); - WREG32(VGA_RENDER_CONTROL, save->vga_render_control); + if (!ASIC_IS_NODCE(rdev)) { + /* Unlock vga access */ + WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); + mdelay(1); + WREG32(VGA_RENDER_CONTROL, save->vga_render_control); + } } void evergreen_mc_program(struct radeon_device *rdev) @@ -3405,8 +3412,8 @@ int evergreen_mc_init(struct radeon_device *rdev) rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); } else { /* size in MB on evergreen/cayman/tn */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; } rdev->mc.visible_vram_size = rdev->mc.aper_size; r700_vram_gtt_location(rdev, &rdev->mc); diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c b/trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c index b4ab8ceb1654..ed7c8a768092 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -154,19 +154,18 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); - u32 base_rate = 48000; + u32 base_rate = 24000; if (!dig || !dig->afmt) return; - /* XXX: properly calculate this */ /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. 
DCCG_AUDIO_DTOx_PHASE * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator */ - WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff); - WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff); + WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); + WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); } diff --git a/trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c b/trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c index 865e2c9980db..60170ea5e3a2 100644 --- a/trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/trunk/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); for (i = 0; i < nr; ++i) { - if (DRM_COPY_FROM_USER_UNCHECKED + if (DRM_COPY_FROM_USER (&box, &cmdbuf->boxes[n + i], sizeof(box))) { DRM_ERROR("copy cliprect faulted\n"); return -EFAULT; diff --git a/trunk/drivers/gpu/drm/radeon/r600_hdmi.c b/trunk/drivers/gpu/drm/radeon/r600_hdmi.c index 47f180a79352..456750a0daa5 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/trunk/drivers/gpu/drm/radeon/r600_hdmi.c @@ -232,7 +232,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; - u32 base_rate = 48000; + u32 base_rate = 24000; if (!dig || !dig->afmt) return; @@ -240,7 +240,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. * doesn't matter which one you use. Just use the first one. */ - /* XXX: properly calculate this */ /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE @@ -250,13 +249,13 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) /* according to the reg specs, this should DCE3.2 only, but in * practice it seems to cover DCE3.0 as well. */ - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50); + WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ } else { /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ - WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) | - AUDIO_DTO_MODULE(clock * 100)); + WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | + AUDIO_DTO_MODULE(clock / 10)); } } diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index 1442ce765d48..142ce6cc69f5 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -1694,6 +1694,7 @@ struct radeon_device { int num_crtc; /* number of crtcs */ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ bool audio_enabled; + bool has_uvd; struct r600_audio audio_status; /* audio stuff */ struct notifier_block acpi_nb; /* only one userspace can use Hyperz features or CMASK at a time */ @@ -1838,6 +1839,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev); #define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \ (rdev->flags & RADEON_IS_IGP)) #define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) +#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN)) /* * BIOS helpers. 
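The CONFIG_MEMSIZE hunks in evergreen_mc_init() above (and in si_mc_init() further down) switch the MB-to-byte conversion from plain int constants to 1024ULL so the multiplication is carried out in 64-bit arithmetic before it reaches the 64-bit mc_vram_size/real_vram_size fields; with int constants the product is evaluated in 32 bits and wraps for boards reporting 4 GB or more of VRAM. A minimal standalone sketch of the behaviour the ULL suffixes avoid, assuming a hypothetical register value of 4096 MB and plain stdint types in place of the driver's u32/u64:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t config_memsize = 4096;  /* hypothetical: register reports 4096 MB */

        /* old form: the product is computed in 32-bit unsigned arithmetic and
         * wraps to 0 before being widened for the 64-bit assignment */
        uint64_t before = config_memsize * 1024 * 1024;

        /* patched form: the ULL constants promote the whole expression to
         * 64 bits, so the full byte count survives */
        uint64_t after = config_memsize * 1024ULL * 1024ULL;

        printf("before: %llu bytes\n", (unsigned long long)before); /* prints 0 */
        printf("after:  %llu bytes\n", (unsigned long long)after);  /* prints 4294967296 */
        return 0;
    }

The same widening concern is why the radeon_ttm.c hunk below moves the cast outside the division: real_vram_size is divided down to megabytes first and only then narrowed for the DRM_INFO format argument.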
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.c b/trunk/drivers/gpu/drm/radeon/radeon_asic.c index 6417132c50cf..06b8c19ab19e 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.c @@ -1935,6 +1935,8 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 2; + rdev->has_uvd = false; + switch (rdev->family) { case CHIP_R100: case CHIP_RV100: @@ -1999,16 +2001,22 @@ int radeon_asic_init(struct radeon_device *rdev) case CHIP_RV635: case CHIP_RV670: rdev->asic = &r600_asic; + if (rdev->family == CHIP_R600) + rdev->has_uvd = false; + else + rdev->has_uvd = true; break; case CHIP_RS780: case CHIP_RS880: rdev->asic = &rs780_asic; + rdev->has_uvd = true; break; case CHIP_RV770: case CHIP_RV730: case CHIP_RV710: case CHIP_RV740: rdev->asic = &rv770_asic; + rdev->has_uvd = true; break; case CHIP_CEDAR: case CHIP_REDWOOD: @@ -2021,11 +2029,13 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 6; rdev->asic = &evergreen_asic; + rdev->has_uvd = true; break; case CHIP_PALM: case CHIP_SUMO: case CHIP_SUMO2: rdev->asic = &sumo_asic; + rdev->has_uvd = true; break; case CHIP_BARTS: case CHIP_TURKS: @@ -2036,27 +2046,37 @@ int radeon_asic_init(struct radeon_device *rdev) else rdev->num_crtc = 6; rdev->asic = &btc_asic; + rdev->has_uvd = true; break; case CHIP_CAYMAN: rdev->asic = &cayman_asic; /* set num crtcs */ rdev->num_crtc = 6; + rdev->has_uvd = true; break; case CHIP_ARUBA: rdev->asic = &trinity_asic; /* set num crtcs */ rdev->num_crtc = 4; + rdev->has_uvd = true; break; case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: case CHIP_OLAND: + case CHIP_HAINAN: rdev->asic = &si_asic; /* set num crtcs */ - if (rdev->family == CHIP_OLAND) + if (rdev->family == CHIP_HAINAN) + rdev->num_crtc = 0; + else if (rdev->family == CHIP_OLAND) rdev->num_crtc = 2; else rdev->num_crtc = 6; + if (rdev->family == CHIP_HAINAN) + rdev->has_uvd = false; + else + rdev->has_uvd = true; break; default: /* FIXME: not supported yet */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_bios.c b/trunk/drivers/gpu/drm/radeon/radeon_bios.c index fa3c56fba294..061b227dae0c 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_bios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_bios.c @@ -244,24 +244,28 @@ static bool ni_read_disabled_bios(struct radeon_device *rdev) /* enable the rom */ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); - /* Disable VGA mode */ - WREG32(AVIVO_D1VGA_CONTROL, - (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | - AVIVO_DVGA_CONTROL_TIMING_SELECT))); - WREG32(AVIVO_D2VGA_CONTROL, - (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | - AVIVO_DVGA_CONTROL_TIMING_SELECT))); - WREG32(AVIVO_VGA_RENDER_CONTROL, - (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); + if (!ASIC_IS_NODCE(rdev)) { + /* Disable VGA mode */ + WREG32(AVIVO_D1VGA_CONTROL, + (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_D2VGA_CONTROL, + (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | + AVIVO_DVGA_CONTROL_TIMING_SELECT))); + WREG32(AVIVO_VGA_RENDER_CONTROL, + (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); + } WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); r = radeon_read_bios(rdev); /* restore regs */ WREG32(R600_BUS_CNTL, bus_cntl); - WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); - WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); - WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); + if (!ASIC_IS_NODCE(rdev)) { + WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); + 
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); + WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); + } WREG32(R600_ROM_CNTL, rom_cntl); return r; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index a8f608903989..c2c59fb1ea01 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -94,6 +94,7 @@ static const char radeon_family_name[][16] = { "PITCAIRN", "VERDE", "OLAND", + "HAINAN", "LAST", }; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index d33f484ace48..094e7e5ea39e 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {} #endif int radeon_no_wb; -int radeon_modeset = 1; +int radeon_modeset = -1; int radeon_dynclks = -1; int radeon_r4xx_atom = 0; int radeon_agpmode = 0; @@ -456,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = { static int __init radeon_init(void) { +#ifdef CONFIG_VGA_CONSOLE + if (vgacon_text_force() && radeon_modeset == -1) { + DRM_INFO("VGACON disable radeon kernel modesetting.\n"); + radeon_modeset = 0; + } +#endif + /* set to modesetting by default if not nomodeset */ + if (radeon_modeset == -1) + radeon_modeset = 1; + if (radeon_modeset == 1) { DRM_INFO("radeon kernel modesetting enabled.\n"); driver = &kms_driver; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_family.h b/trunk/drivers/gpu/drm/radeon/radeon_family.h index 2d91123f2759..36e9803b077d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_family.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_family.h @@ -92,6 +92,7 @@ enum radeon_family { CHIP_PITCAIRN, CHIP_VERDE, CHIP_OLAND, + CHIP_HAINAN, CHIP_LAST, }; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index 6857cb4efb76..7cb178a34a0f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1031,11 +1031,9 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, static void radeon_crtc_prepare(struct drm_crtc *crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; - radeon_crtc->in_mode_set = true; /* * The hardware wedges sometimes if you reconfigure one CRTC * whilst another is running (see fdo bug #24611). 
@@ -1046,7 +1044,6 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc) static void radeon_crtc_commit(struct drm_crtc *crtc) { - struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; @@ -1057,7 +1054,6 @@ static void radeon_crtc_commit(struct drm_crtc *crtc) if (crtci->enabled) radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); } - radeon_crtc->in_mode_set = false; } static const struct drm_crtc_helper_funcs legacy_helper_funcs = { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index 44e579e75fd0..69ad4fe224c1 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -302,7 +302,6 @@ struct radeon_crtc { u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; - bool in_mode_set; uint32_t crtc_offset; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c index 93f760e27a92..6c0ce8915fac 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ttm.c @@ -726,7 +726,7 @@ int radeon_ttm_init(struct radeon_device *rdev) return r; } DRM_INFO("radeon: %uM of VRAM memory ready\n", - (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); + (unsigned) (rdev->mc.real_vram_size / (1024 * 1024))); r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, rdev->mc.gtt_size >> PAGE_SHIFT); if (r) { diff --git a/trunk/drivers/gpu/drm/radeon/si.c b/trunk/drivers/gpu/drm/radeon/si.c index f0b6c2f87c4d..5ffade69af25 100644 --- a/trunk/drivers/gpu/drm/radeon/si.c +++ b/trunk/drivers/gpu/drm/radeon/si.c @@ -60,6 +60,11 @@ MODULE_FIRMWARE("radeon/OLAND_me.bin"); MODULE_FIRMWARE("radeon/OLAND_ce.bin"); MODULE_FIRMWARE("radeon/OLAND_mc.bin"); MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); +MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); +MODULE_FIRMWARE("radeon/HAINAN_me.bin"); +MODULE_FIRMWARE("radeon/HAINAN_ce.bin"); +MODULE_FIRMWARE("radeon/HAINAN_mc.bin"); +MODULE_FIRMWARE("radeon/HAINAN_rlc.bin"); extern int r600_ih_ring_alloc(struct radeon_device *rdev); extern void r600_ih_ring_fini(struct radeon_device *rdev); @@ -265,6 +270,40 @@ static const u32 oland_golden_registers[] = 0x15c0, 0x000c0fc0, 0x000c0400 }; +static const u32 hainan_golden_registers[] = +{ + 0x9a10, 0x00010000, 0x00018208, + 0x9830, 0xffffffff, 0x00000000, + 0x9834, 0xf00fffff, 0x00000400, + 0x9838, 0x0002021c, 0x00020200, + 0xd0c0, 0xff000fff, 0x00000100, + 0xd030, 0x000300c0, 0x00800040, + 0xd8c0, 0xff000fff, 0x00000100, + 0xd830, 0x000300c0, 0x00800040, + 0x2ae4, 0x00073ffe, 0x000022a2, + 0x240c, 0x000007ff, 0x00000000, + 0x8a14, 0xf000001f, 0x00000007, + 0x8b24, 0xffffffff, 0x00ffffff, + 0x8b10, 0x0000ff0f, 0x00000000, + 0x28a4c, 0x07ffffff, 0x4e000000, + 0x28350, 0x3f3f3fff, 0x00000000, + 0x30, 0x000000ff, 0x0040, + 0x34, 0x00000040, 0x00004040, + 0x9100, 0x03e00000, 0x03600000, + 0x9060, 0x0000007f, 0x00000020, + 0x9508, 0x00010000, 0x00010000, + 0xac14, 0x000003ff, 0x000000f1, + 0xac10, 0xffffffff, 0x00000000, + 0xac0c, 0xffffffff, 0x00003210, + 0x88d4, 0x0000001f, 0x00000010, + 0x15c0, 0x000c0fc0, 0x000c0400 +}; + +static const u32 hainan_golden_registers2[] = +{ + 0x98f8, 0xffffffff, 0x02010001 +}; + static const u32 tahiti_mgcg_cgcg_init[] = { 0xc400, 0xffffffff, 0xfffffffc, @@ -673,6 +712,83 @@ static const u32 oland_mgcg_cgcg_init[] = 0xd8c0, 0xfffffff0, 0x00000100 }; +static const u32 hainan_mgcg_cgcg_init[] = +{ + 0xc400, 0xffffffff, 
0xfffffffc, + 0x802c, 0xffffffff, 0xe0000000, + 0x9a60, 0xffffffff, 0x00000100, + 0x92a4, 0xffffffff, 0x00000100, + 0xc164, 0xffffffff, 0x00000100, + 0x9774, 0xffffffff, 0x00000100, + 0x8984, 0xffffffff, 0x06000100, + 0x8a18, 0xffffffff, 0x00000100, + 0x92a0, 0xffffffff, 0x00000100, + 0xc380, 0xffffffff, 0x00000100, + 0x8b28, 0xffffffff, 0x00000100, + 0x9144, 0xffffffff, 0x00000100, + 0x8d88, 0xffffffff, 0x00000100, + 0x8d8c, 0xffffffff, 0x00000100, + 0x9030, 0xffffffff, 0x00000100, + 0x9034, 0xffffffff, 0x00000100, + 0x9038, 0xffffffff, 0x00000100, + 0x903c, 0xffffffff, 0x00000100, + 0xad80, 0xffffffff, 0x00000100, + 0xac54, 0xffffffff, 0x00000100, + 0x897c, 0xffffffff, 0x06000100, + 0x9868, 0xffffffff, 0x00000100, + 0x9510, 0xffffffff, 0x00000100, + 0xaf04, 0xffffffff, 0x00000100, + 0xae04, 0xffffffff, 0x00000100, + 0x949c, 0xffffffff, 0x00000100, + 0x802c, 0xffffffff, 0xe0000000, + 0x9160, 0xffffffff, 0x00010000, + 0x9164, 0xffffffff, 0x00030002, + 0x9168, 0xffffffff, 0x00040007, + 0x916c, 0xffffffff, 0x00060005, + 0x9170, 0xffffffff, 0x00090008, + 0x9174, 0xffffffff, 0x00020001, + 0x9178, 0xffffffff, 0x00040003, + 0x917c, 0xffffffff, 0x00000007, + 0x9180, 0xffffffff, 0x00060005, + 0x9184, 0xffffffff, 0x00090008, + 0x9188, 0xffffffff, 0x00030002, + 0x918c, 0xffffffff, 0x00050004, + 0x9190, 0xffffffff, 0x00000008, + 0x9194, 0xffffffff, 0x00070006, + 0x9198, 0xffffffff, 0x000a0009, + 0x919c, 0xffffffff, 0x00040003, + 0x91a0, 0xffffffff, 0x00060005, + 0x91a4, 0xffffffff, 0x00000009, + 0x91a8, 0xffffffff, 0x00080007, + 0x91ac, 0xffffffff, 0x000b000a, + 0x91b0, 0xffffffff, 0x00050004, + 0x91b4, 0xffffffff, 0x00070006, + 0x91b8, 0xffffffff, 0x0008000b, + 0x91bc, 0xffffffff, 0x000a0009, + 0x91c0, 0xffffffff, 0x000d000c, + 0x91c4, 0xffffffff, 0x00060005, + 0x91c8, 0xffffffff, 0x00080007, + 0x91cc, 0xffffffff, 0x0000000b, + 0x91d0, 0xffffffff, 0x000a0009, + 0x91d4, 0xffffffff, 0x000d000c, + 0x9150, 0xffffffff, 0x96940200, + 0x8708, 0xffffffff, 0x00900100, + 0xc478, 0xffffffff, 0x00000080, + 0xc404, 0xffffffff, 0x0020003f, + 0x30, 0xffffffff, 0x0000001c, + 0x34, 0x000f0000, 0x000f0000, + 0x160c, 0xffffffff, 0x00000100, + 0x1024, 0xffffffff, 0x00000100, + 0x20a8, 0xffffffff, 0x00000104, + 0x264c, 0x000c0000, 0x000c0000, + 0x2648, 0x000c0000, 0x000c0000, + 0x2f50, 0x00000001, 0x00000001, + 0x30cc, 0xc0000fff, 0x00000104, + 0xc1e4, 0x00000001, 0x00000001, + 0xd0c0, 0xfffffff0, 0x00000100, + 0xd8c0, 0xfffffff0, 0x00000100 +}; + static u32 verde_pg_init[] = { 0x353c, 0xffffffff, 0x40000, @@ -853,6 +969,17 @@ static void si_init_golden_registers(struct radeon_device *rdev) oland_mgcg_cgcg_init, (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); break; + case CHIP_HAINAN: + radeon_program_register_sequence(rdev, + hainan_golden_registers, + (const u32)ARRAY_SIZE(hainan_golden_registers)); + radeon_program_register_sequence(rdev, + hainan_golden_registers2, + (const u32)ARRAY_SIZE(hainan_golden_registers2)); + radeon_program_register_sequence(rdev, + hainan_mgcg_cgcg_init, + (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init)); + break; default: break; } @@ -1062,6 +1189,45 @@ static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { {0x0000009f, 0x00a17730} }; +static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = { + {0x0000006f, 0x03044000}, + {0x00000070, 0x0480c018}, + {0x00000071, 0x00000040}, + {0x00000072, 0x01000000}, + {0x00000074, 0x000000ff}, + {0x00000075, 0x00143400}, + {0x00000076, 0x08ec0800}, + {0x00000077, 0x040000cc}, + {0x00000079, 0x00000000}, + {0x0000007a, 0x21000409}, + 
{0x0000007c, 0x00000000}, + {0x0000007d, 0xe8000000}, + {0x0000007e, 0x044408a8}, + {0x0000007f, 0x00000003}, + {0x00000080, 0x00000000}, + {0x00000081, 0x01000000}, + {0x00000082, 0x02000000}, + {0x00000083, 0x00000000}, + {0x00000084, 0xe3f3e4f4}, + {0x00000085, 0x00052024}, + {0x00000087, 0x00000000}, + {0x00000088, 0x66036603}, + {0x00000089, 0x01000000}, + {0x0000008b, 0x1c0a0000}, + {0x0000008c, 0xff010000}, + {0x0000008e, 0xffffefff}, + {0x0000008f, 0xfff3efff}, + {0x00000090, 0xfff3efbf}, + {0x00000094, 0x00101101}, + {0x00000095, 0x00000fff}, + {0x00000096, 0x00116fff}, + {0x00000097, 0x60010000}, + {0x00000098, 0x10010000}, + {0x00000099, 0x00006000}, + {0x0000009a, 0x00001000}, + {0x0000009f, 0x00a07730} +}; + /* ucode loading */ static int si_mc_load_microcode(struct radeon_device *rdev) { @@ -1095,6 +1261,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev) ucode_size = OLAND_MC_UCODE_SIZE; regs_size = TAHITI_IO_MC_REGS_SIZE; break; + case CHIP_HAINAN: + io_mc_regs = (u32 *)&hainan_io_mc_regs; + ucode_size = OLAND_MC_UCODE_SIZE; + regs_size = TAHITI_IO_MC_REGS_SIZE; + break; } running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; @@ -1198,6 +1369,15 @@ static int si_init_microcode(struct radeon_device *rdev) rlc_req_size = SI_RLC_UCODE_SIZE * 4; mc_req_size = OLAND_MC_UCODE_SIZE * 4; break; + case CHIP_HAINAN: + chip_name = "HAINAN"; + rlc_chip_name = "HAINAN"; + pfp_req_size = SI_PFP_UCODE_SIZE * 4; + me_req_size = SI_PM4_UCODE_SIZE * 4; + ce_req_size = SI_CE_UCODE_SIZE * 4; + rlc_req_size = SI_RLC_UCODE_SIZE * 4; + mc_req_size = OLAND_MC_UCODE_SIZE * 4; + break; default: BUG(); } @@ -2003,7 +2183,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev) WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); } } else if ((rdev->family == CHIP_VERDE) || - (rdev->family == CHIP_OLAND)) { + (rdev->family == CHIP_OLAND) || + (rdev->family == CHIP_HAINAN)) { for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { switch (reg_offset) { case 0: /* non-AA compressed depth or any compressed stencil */ @@ -2466,6 +2647,23 @@ static void si_gpu_init(struct radeon_device *rdev) rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; break; + case CHIP_HAINAN: + rdev->config.si.max_shader_engines = 1; + rdev->config.si.max_tile_pipes = 4; + rdev->config.si.max_cu_per_sh = 5; + rdev->config.si.max_sh_per_se = 1; + rdev->config.si.max_backends_per_se = 1; + rdev->config.si.max_texture_channel_caches = 2; + rdev->config.si.max_gprs = 256; + rdev->config.si.max_gs_threads = 16; + rdev->config.si.max_hw_contexts = 8; + + rdev->config.si.sc_prim_fifo_size_frontend = 0x20; + rdev->config.si.sc_prim_fifo_size_backend = 0x40; + rdev->config.si.sc_hiz_tile_fifo_size = 0x30; + rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; + gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN; + break; } /* Initialize HDP */ @@ -2559,9 +2757,11 @@ static void si_gpu_init(struct radeon_device *rdev) WREG32(HDP_ADDR_CONFIG, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); - WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config); - WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); - WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + if (rdev->has_uvd) { + WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config); + WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); + WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); + } si_tiling_mode_table_init(rdev); @@ -3304,8 +3504,9 @@ 
static void si_mc_program(struct radeon_device *rdev) if (radeon_mc_wait_for_idle(rdev)) { dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } - /* Lockout access through VGA aperture*/ - WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); + if (!ASIC_IS_NODCE(rdev)) + /* Lockout access through VGA aperture*/ + WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); /* Update configuration */ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12); @@ -3327,9 +3528,11 @@ static void si_mc_program(struct radeon_device *rdev) dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); } evergreen_mc_resume(rdev, &save); - /* we need to own VRAM, so turn off the VGA renderer here - * to stop it overwriting our objects */ - rv515_vga_render_disable(rdev); + if (!ASIC_IS_NODCE(rdev)) { + /* we need to own VRAM, so turn off the VGA renderer here + * to stop it overwriting our objects */ + rv515_vga_render_disable(rdev); + } } static void si_vram_gtt_location(struct radeon_device *rdev, @@ -3397,8 +3600,8 @@ static int si_mc_init(struct radeon_device *rdev) rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); /* size in MB on si */ - rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; - rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; + rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; + rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL; rdev->mc.visible_vram_size = rdev->mc.aper_size; si_vram_gtt_location(rdev, &rdev->mc); radeon_update_bandwidth_info(rdev); @@ -4251,8 +4454,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); WREG32(GRBM_INT_CNTL, 0); - WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + if (rdev->num_crtc >= 2) { + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + } if (rdev->num_crtc >= 4) { WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); @@ -4262,8 +4467,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); + } if (rdev->num_crtc >= 4) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); @@ -4273,21 +4480,22 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); } - WREG32(DACA_AUTODETECT_INT_CONTROL, 0); - - tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD1_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD2_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD3_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD4_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; - WREG32(DC_HPD5_INT_CONTROL, tmp); - tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; - 
WREG32(DC_HPD6_INT_CONTROL, tmp); + if (!ASIC_IS_NODCE(rdev)) { + WREG32(DACA_AUTODETECT_INT_CONTROL, 0); + tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD1_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD2_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD3_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD4_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD5_INT_CONTROL, tmp); + tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY; + WREG32(DC_HPD6_INT_CONTROL, tmp); + } } static int si_irq_init(struct radeon_device *rdev) @@ -4366,7 +4574,7 @@ int si_irq_set(struct radeon_device *rdev) u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; - u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; + u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0; u32 grbm_int_cntl = 0; u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; u32 dma_cntl, dma_cntl1; @@ -4383,12 +4591,14 @@ int si_irq_set(struct radeon_device *rdev) return 0; } - hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; - hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + if (!ASIC_IS_NODCE(rdev)) { + hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; + hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; + } dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; @@ -4479,8 +4689,10 @@ int si_irq_set(struct radeon_device *rdev) WREG32(GRBM_INT_CNTL, grbm_int_cntl); - WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); - WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); + if (rdev->num_crtc >= 2) { + WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); + WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); + } if (rdev->num_crtc >= 4) { WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); @@ -4490,8 +4702,10 @@ int si_irq_set(struct radeon_device *rdev) WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); } - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); - WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + if (rdev->num_crtc >= 2) { + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); + WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); + } if (rdev->num_crtc >= 4) { WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); @@ -4501,12 +4715,14 @@ int si_irq_set(struct radeon_device *rdev) WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); } - WREG32(DC_HPD1_INT_CONTROL, hpd1); - WREG32(DC_HPD2_INT_CONTROL, hpd2); - WREG32(DC_HPD3_INT_CONTROL, hpd3); - WREG32(DC_HPD4_INT_CONTROL, hpd4); - 
WREG32(DC_HPD5_INT_CONTROL, hpd5); - WREG32(DC_HPD6_INT_CONTROL, hpd6); + if (!ASIC_IS_NODCE(rdev)) { + WREG32(DC_HPD1_INT_CONTROL, hpd1); + WREG32(DC_HPD2_INT_CONTROL, hpd2); + WREG32(DC_HPD3_INT_CONTROL, hpd3); + WREG32(DC_HPD4_INT_CONTROL, hpd4); + WREG32(DC_HPD5_INT_CONTROL, hpd5); + WREG32(DC_HPD6_INT_CONTROL, hpd6); + } return 0; } @@ -4515,6 +4731,9 @@ static inline void si_irq_ack(struct radeon_device *rdev) { u32 tmp; + if (ASIC_IS_NODCE(rdev)) + return; + rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); @@ -5118,15 +5337,17 @@ static int si_startup(struct radeon_device *rdev) return r; } - r = rv770_uvd_resume(rdev); - if (!r) { - r = radeon_fence_driver_start_ring(rdev, - R600_RING_TYPE_UVD_INDEX); + if (rdev->has_uvd) { + r = rv770_uvd_resume(rdev); + if (!r) { + r = radeon_fence_driver_start_ring(rdev, + R600_RING_TYPE_UVD_INDEX); + if (r) + dev_err(rdev->dev, "UVD fences init error (%d).\n", r); + } if (r) - dev_err(rdev->dev, "UVD fences init error (%d).\n", r); + rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; } - if (r) - rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0; /* Enable IRQ */ r = si_irq_init(rdev); @@ -5185,16 +5406,18 @@ static int si_startup(struct radeon_device *rdev) if (r) return r; - ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - if (ring->ring_size) { - r = radeon_ring_init(rdev, ring, ring->ring_size, - R600_WB_UVD_RPTR_OFFSET, - UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, - 0, 0xfffff, RADEON_CP_PACKET2); - if (!r) - r = r600_uvd_init(rdev); - if (r) - DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); + if (rdev->has_uvd) { + ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + if (ring->ring_size) { + r = radeon_ring_init(rdev, ring, ring->ring_size, + R600_WB_UVD_RPTR_OFFSET, + UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, + 0, 0xfffff, RADEON_CP_PACKET2); + if (!r) + r = r600_uvd_init(rdev); + if (r) + DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); + } } r = radeon_ib_pool_init(rdev); @@ -5243,8 +5466,10 @@ int si_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); si_cp_enable(rdev, false); cayman_dma_stop(rdev); - r600_uvd_rbc_stop(rdev); - radeon_uvd_suspend(rdev); + if (rdev->has_uvd) { + r600_uvd_rbc_stop(rdev); + radeon_uvd_suspend(rdev); + } si_irq_suspend(rdev); radeon_wb_disable(rdev); si_pcie_gart_disable(rdev); @@ -5332,11 +5557,13 @@ int si_init(struct radeon_device *rdev) ring->ring_obj = NULL; r600_ring_init(rdev, ring, 64 * 1024); - r = radeon_uvd_init(rdev); - if (!r) { - ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; - ring->ring_obj = NULL; - r600_ring_init(rdev, ring, 4096); + if (rdev->has_uvd) { + r = radeon_uvd_init(rdev); + if (!r) { + ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; + ring->ring_obj = NULL; + r600_ring_init(rdev, ring, 4096); + } } rdev->ih.ring_obj = NULL; @@ -5384,7 +5611,8 @@ void si_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); - radeon_uvd_fini(rdev); + if (rdev->has_uvd) + radeon_uvd_fini(rdev); si_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/trunk/drivers/gpu/drm/radeon/sid.h b/trunk/drivers/gpu/drm/radeon/sid.h index 222877ba6cf5..8f2d7d4f9b28 100644 --- a/trunk/drivers/gpu/drm/radeon/sid.h +++ b/trunk/drivers/gpu/drm/radeon/sid.h @@ -28,6 +28,7 @@ #define TAHITI_GB_ADDR_CONFIG_GOLDEN 
0x12011003 #define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 +#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001 /* discrete uvd clocks */ #define CG_UPLL_FUNC_CNTL 0x634 diff --git a/trunk/drivers/gpu/host1x/drm/dc.c b/trunk/drivers/gpu/host1x/drm/dc.c index 1e2060324f02..8c04943f82e3 100644 --- a/trunk/drivers/gpu/host1x/drm/dc.c +++ b/trunk/drivers/gpu/host1x/drm/dc.c @@ -1128,11 +1128,6 @@ static int tegra_dc_probe(struct platform_device *pdev) return err; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_err(&pdev->dev, "failed to get registers\n"); - return -ENXIO; - } - dc->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(dc->regs)) return PTR_ERR(dc->regs); diff --git a/trunk/drivers/hid/hid-core.c b/trunk/drivers/hid/hid-core.c index 6961bbeab3ed..264f55099940 100644 --- a/trunk/drivers/hid/hid-core.c +++ b/trunk/drivers/hid/hid-core.c @@ -1685,6 +1685,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, @@ -2341,7 +2342,7 @@ struct hid_device *hid_allocate_device(void) init_waitqueue_head(&hdev->debug_wait); INIT_LIST_HEAD(&hdev->debug_list); - mutex_init(&hdev->debug_list_lock); + spin_lock_init(&hdev->debug_list_lock); sema_init(&hdev->driver_lock, 1); sema_init(&hdev->driver_input_lock, 1); diff --git a/trunk/drivers/hid/hid-debug.c b/trunk/drivers/hid/hid-debug.c index 7e56cb3855e3..8453214ec376 100644 --- a/trunk/drivers/hid/hid-debug.c +++ b/trunk/drivers/hid/hid-debug.c @@ -579,15 +579,16 @@ void hid_debug_event(struct hid_device *hdev, char *buf) { int i; struct hid_debug_list *list; + unsigned long flags; - mutex_lock(&hdev->debug_list_lock); + spin_lock_irqsave(&hdev->debug_list_lock, flags); list_for_each_entry(list, &hdev->debug_list, node) { for (i = 0; i < strlen(buf); i++) list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] = buf[i]; list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE; } - mutex_unlock(&hdev->debug_list_lock); + spin_unlock_irqrestore(&hdev->debug_list_lock, flags); wake_up_interruptible(&hdev->debug_wait); } @@ -977,6 +978,7 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) { int err = 0; struct hid_debug_list *list; + unsigned long flags; if (!(list = kzalloc(sizeof(struct hid_debug_list), GFP_KERNEL))) { err = -ENOMEM; @@ -992,9 +994,9 @@ static int hid_debug_events_open(struct inode *inode, struct file *file) file->private_data = list; mutex_init(&list->read_mutex); - mutex_lock(&list->hdev->debug_list_lock); + spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_add_tail(&list->node, &list->hdev->debug_list); - mutex_unlock(&list->hdev->debug_list_lock); + spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); out: return err; @@ -1088,10 +1090,11 @@ static unsigned int hid_debug_events_poll(struct file *file, poll_table *wait) static int hid_debug_events_release(struct inode *inode, struct file *file) { struct hid_debug_list *list = file->private_data; + unsigned long flags; - mutex_lock(&list->hdev->debug_list_lock); + 
spin_lock_irqsave(&list->hdev->debug_list_lock, flags); list_del(&list->node); - mutex_unlock(&list->hdev->debug_list_lock); + spin_unlock_irqrestore(&list->hdev->debug_list_lock, flags); kfree(list->hid_debug_buf); kfree(list); diff --git a/trunk/drivers/hid/hid-steelseries.c b/trunk/drivers/hid/hid-steelseries.c index 9b0efb0083fe..d16491192112 100644 --- a/trunk/drivers/hid/hid-steelseries.c +++ b/trunk/drivers/hid/hid-steelseries.c @@ -18,7 +18,8 @@ #include "hid-ids.h" -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) #define SRWS1_NUMBER_LEDS 15 struct steelseries_srws1_data { __u16 led_state; @@ -107,7 +108,8 @@ static __u8 steelseries_srws1_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) static void steelseries_srws1_set_leds(struct hid_device *hdev, __u16 leds) { struct list_head *report_list = &hdev->report_enum[HID_OUTPUT_REPORT].report_list; @@ -370,7 +372,8 @@ MODULE_DEVICE_TABLE(hid, steelseries_srws1_devices); static struct hid_driver steelseries_srws1_driver = { .name = "steelseries_srws1", .id_table = steelseries_srws1_devices, -#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE) +#if IS_BUILTIN(CONFIG_LEDS_CLASS) || \ + (IS_MODULE(CONFIG_LEDS_CLASS) && IS_MODULE(CONFIG_HID_STEELSERIES)) .probe = steelseries_srws1_probe, .remove = steelseries_srws1_remove, #endif diff --git a/trunk/drivers/hv/channel_mgmt.c b/trunk/drivers/hv/channel_mgmt.c index bad8128b283a..21ef68934a20 100644 --- a/trunk/drivers/hv/channel_mgmt.c +++ b/trunk/drivers/hv/channel_mgmt.c @@ -329,7 +329,7 @@ static u32 get_vp_index(uuid_le *type_guid) return 0; } cur_cpu = (++next_vp % max_cpus); - return cur_cpu; + return hv_context.vp_index[cur_cpu]; } /* diff --git a/trunk/drivers/hwmon/abituguru.c b/trunk/drivers/hwmon/abituguru.c index df0b69987914..2ebd6ce46108 100644 --- a/trunk/drivers/hwmon/abituguru.c +++ b/trunk/drivers/hwmon/abituguru.c @@ -1414,14 +1414,18 @@ static int abituguru_probe(struct platform_device *pdev) pr_info("found Abit uGuru\n"); /* Register sysfs hooks */ - for (i = 0; i < sysfs_attr_i; i++) - if (device_create_file(&pdev->dev, - &data->sysfs_attr[i].dev_attr)) + for (i = 0; i < sysfs_attr_i; i++) { + res = device_create_file(&pdev->dev, + &data->sysfs_attr[i].dev_attr); + if (res) goto abituguru_probe_error; - for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) - if (device_create_file(&pdev->dev, - &abituguru_sysfs_attr[i].dev_attr)) + } + for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) { + res = device_create_file(&pdev->dev, + &abituguru_sysfs_attr[i].dev_attr); + if (res) goto abituguru_probe_error; + } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (!IS_ERR(data->hwmon_dev)) diff --git a/trunk/drivers/hwmon/iio_hwmon.c b/trunk/drivers/hwmon/iio_hwmon.c index aafa4531b961..52b77afebde1 100644 --- a/trunk/drivers/hwmon/iio_hwmon.c +++ b/trunk/drivers/hwmon/iio_hwmon.c @@ -84,8 +84,10 @@ static int iio_hwmon_probe(struct platform_device *pdev) return PTR_ERR(channels); st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); - if (st == NULL) - return -ENOMEM; + if (st == NULL) { + ret = -ENOMEM; + goto error_release_channels; + } st->channels = channels; @@ -159,7 +161,7 @@ static int iio_hwmon_probe(struct platform_device *pdev) 
error_remove_group: sysfs_remove_group(&dev->kobj, &st->attr_group); error_release_channels: - iio_channel_release_all(st->channels); + iio_channel_release_all(channels); return ret; } diff --git a/trunk/drivers/hwmon/nct6775.c b/trunk/drivers/hwmon/nct6775.c index f43f5e571db9..04638aee9039 100644 --- a/trunk/drivers/hwmon/nct6775.c +++ b/trunk/drivers/hwmon/nct6775.c @@ -3705,8 +3705,10 @@ static int nct6775_probe(struct platform_device *pdev) data->have_temp |= 1 << i; data->have_temp_fixed |= 1 << i; data->reg_temp[0][i] = reg_temp_alternate[i]; - data->reg_temp[1][i] = reg_temp_over[i]; - data->reg_temp[2][i] = reg_temp_hyst[i]; + if (i < num_reg_temp) { + data->reg_temp[1][i] = reg_temp_over[i]; + data->reg_temp[2][i] = reg_temp_hyst[i]; + } data->temp_src[i] = i + 1; continue; } diff --git a/trunk/drivers/hwmon/tmp401.c b/trunk/drivers/hwmon/tmp401.c index a478454f690f..dfe6d9527efb 100644 --- a/trunk/drivers/hwmon/tmp401.c +++ b/trunk/drivers/hwmon/tmp401.c @@ -240,7 +240,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev) mutex_lock(&data->update_lock); next_update = data->last_updated + - msecs_to_jiffies(data->update_interval) + 1; + msecs_to_jiffies(data->update_interval); if (time_after(jiffies, next_update) || !data->valid) { if (data->kind != tmp432) { /* diff --git a/trunk/drivers/i2c/busses/Kconfig b/trunk/drivers/i2c/busses/Kconfig index adfee98486b1..631736e2e7ed 100644 --- a/trunk/drivers/i2c/busses/Kconfig +++ b/trunk/drivers/i2c/busses/Kconfig @@ -363,7 +363,7 @@ config I2C_BLACKFIN_TWI_CLK_KHZ config I2C_CBUS_GPIO tristate "CBUS I2C driver" - depends on GENERIC_GPIO + depends on GPIOLIB help Support for CBUS access using I2C API. Mostly relevant for Nokia Internet Tablets (770, N800 and N810). @@ -436,7 +436,7 @@ config I2C_EG20T config I2C_GPIO tristate "GPIO-based bitbanging I2C" - depends on GENERIC_GPIO + depends on GPIOLIB select I2C_ALGOBIT help This is a very simple bitbanging I2C driver utilizing the diff --git a/trunk/drivers/i2c/busses/i2c-designware-core.c b/trunk/drivers/i2c/busses/i2c-designware-core.c index 21fbb340ad66..c41ca6354fc5 100644 --- a/trunk/drivers/i2c/busses/i2c-designware-core.c +++ b/trunk/drivers/i2c/busses/i2c-designware-core.c @@ -383,7 +383,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) /* Enable the adapter */ __i2c_dw_enable(dev, true); - /* Enable interrupts */ + /* Clear and enable interrupts */ + i2c_dw_clear_int(dev); dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK); } @@ -448,8 +449,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) cmd |= BIT(9); if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { + + /* avoid rx buffer overrun */ + if (rx_limit - dev->rx_outstanding <= 0) + break; + dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); rx_limit--; + dev->rx_outstanding++; } else dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD); tx_limit--; buf_len--; @@ -502,8 +509,10 @@ i2c_dw_read(struct dw_i2c_dev *dev) rx_valid = dw_readl(dev, DW_IC_RXFLR); - for (; len > 0 && rx_valid > 0; len--, rx_valid--) + for (; len > 0 && rx_valid > 0; len--, rx_valid--) { *buf++ = dw_readl(dev, DW_IC_DATA_CMD); + dev->rx_outstanding--; + } if (len > 0) { dev->status |= STATUS_READ_IN_PROGRESS; @@ -561,6 +570,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) dev->msg_err = 0; dev->status = STATUS_IDLE; dev->abort_source = 0; + dev->rx_outstanding = 0; ret = i2c_dw_wait_bus_not_busy(dev); if (ret < 0) diff --git a/trunk/drivers/i2c/busses/i2c-designware-core.h 
b/trunk/drivers/i2c/busses/i2c-designware-core.h index 9c1840ee09c7..e761ad18dd61 100644 --- a/trunk/drivers/i2c/busses/i2c-designware-core.h +++ b/trunk/drivers/i2c/busses/i2c-designware-core.h @@ -60,6 +60,7 @@ * @adapter: i2c subsystem adapter node * @tx_fifo_depth: depth of the hardware tx fifo * @rx_fifo_depth: depth of the hardware rx fifo + * @rx_outstanding: current master-rx elements in tx fifo */ struct dw_i2c_dev { struct device *dev; @@ -88,6 +89,7 @@ struct dw_i2c_dev { u32 master_cfg; unsigned int tx_fifo_depth; unsigned int rx_fifo_depth; + int rx_outstanding; }; #define ACCESS_SWAP 0x00000001 diff --git a/trunk/drivers/i2c/busses/i2c-designware-platdrv.c b/trunk/drivers/i2c/busses/i2c-designware-platdrv.c index 8ec91335d95a..35b70a1edf57 100644 --- a/trunk/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/trunk/drivers/i2c/busses/i2c-designware-platdrv.c @@ -69,6 +69,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev) static const struct acpi_device_id dw_i2c_acpi_match[] = { { "INT33C2", 0 }, { "INT33C3", 0 }, + { "80860F41", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); diff --git a/trunk/drivers/i2c/busses/i2c-i801.c b/trunk/drivers/i2c/busses/i2c-i801.c index e1cf2e0e1f23..3a6903f63913 100644 --- a/trunk/drivers/i2c/busses/i2c-i801.c +++ b/trunk/drivers/i2c/busses/i2c-i801.c @@ -231,7 +231,11 @@ static const char *i801_feature_names[] = { static unsigned int disable_features; module_param(disable_features, uint, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(disable_features, "Disable selected driver features"); +MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n" + "\t\t 0x01 disable SMBus PEC\n" + "\t\t 0x02 disable the block buffer\n" + "\t\t 0x08 disable the I2C block read functionality\n" + "\t\t 0x10 don't use interrupts "); /* Make sure the SMBus host is ready to start transmitting. Return 0 if it is, -EBUSY if it is not. 
*/ diff --git a/trunk/drivers/i2c/busses/i2c-mv64xxx.c b/trunk/drivers/i2c/busses/i2c-mv64xxx.c index 3bbd65d35a5e..1a3abd6a0bfc 100644 --- a/trunk/drivers/i2c/busses/i2c-mv64xxx.c +++ b/trunk/drivers/i2c/busses/i2c-mv64xxx.c @@ -252,7 +252,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up_interruptible(&drv_data->waitq); + wake_up(&drv_data->waitq); break; case MV64XXX_I2C_ACTION_CONTINUE: @@ -300,7 +300,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up_interruptible(&drv_data->waitq); + wake_up(&drv_data->waitq); break; case MV64XXX_I2C_ACTION_INVALID: @@ -315,7 +315,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data) writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); drv_data->block = 0; - wake_up_interruptible(&drv_data->waitq); + wake_up(&drv_data->waitq); break; } } @@ -381,7 +381,7 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data) unsigned long flags; char abort = 0; - time_left = wait_event_interruptible_timeout(drv_data->waitq, + time_left = wait_event_timeout(drv_data->waitq, !drv_data->block, drv_data->adapter.timeout); spin_lock_irqsave(&drv_data->lock, flags); diff --git a/trunk/drivers/i2c/busses/i2c-s3c2410.c b/trunk/drivers/i2c/busses/i2c-s3c2410.c index 6e8ee92ab553..cab1c91b75a3 100644 --- a/trunk/drivers/i2c/busses/i2c-s3c2410.c +++ b/trunk/drivers/i2c/busses/i2c-s3c2410.c @@ -1082,11 +1082,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev) /* map the registers */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "cannot find IO resource\n"); - return -ENOENT; - } - i2c->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(i2c->regs)) diff --git a/trunk/drivers/i2c/busses/i2c-sirf.c b/trunk/drivers/i2c/busses/i2c-sirf.c index 5a7ad240bd26..a63c7d506836 100644 --- a/trunk/drivers/i2c/busses/i2c-sirf.c +++ b/trunk/drivers/i2c/busses/i2c-sirf.c @@ -303,12 +303,6 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev) adap->class = I2C_CLASS_HWMON; mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (mem_res == NULL) { - dev_err(&pdev->dev, "Unable to get MEM resource\n"); - err = -EINVAL; - goto out; - } - siic->base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(siic->base)) { err = PTR_ERR(siic->base); diff --git a/trunk/drivers/i2c/busses/i2c-tegra.c b/trunk/drivers/i2c/busses/i2c-tegra.c index b60ff90adc39..9aa1b60f7fdd 100644 --- a/trunk/drivers/i2c/busses/i2c-tegra.c +++ b/trunk/drivers/i2c/busses/i2c-tegra.c @@ -714,11 +714,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) int ret = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "no mem resource\n"); - return -EINVAL; - } - base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/trunk/drivers/i2c/i2c-core.c b/trunk/drivers/i2c/i2c-core.c index 6b63cc7eb71e..48e31ed69dbf 100644 --- a/trunk/drivers/i2c/i2c-core.c +++ b/trunk/drivers/i2c/i2c-core.c @@ -892,7 +892,8 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device); -static DEVICE_ATTR(delete_device, S_IWUSR, NULL, 
i2c_sysfs_delete_device); +static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL, + i2c_sysfs_delete_device); static struct attribute *i2c_adapter_attrs[] = { &dev_attr_name.attr, diff --git a/trunk/drivers/i2c/muxes/Kconfig b/trunk/drivers/i2c/muxes/Kconfig index 5faf244d2476..f7f9865b8b89 100644 --- a/trunk/drivers/i2c/muxes/Kconfig +++ b/trunk/drivers/i2c/muxes/Kconfig @@ -7,7 +7,7 @@ menu "Multiplexer I2C Chip support" config I2C_ARB_GPIO_CHALLENGE tristate "GPIO-based I2C arbitration" - depends on GENERIC_GPIO && OF + depends on GPIOLIB && OF help If you say yes to this option, support will be included for an I2C multimaster arbitration scheme using GPIOs and a challenge & @@ -19,7 +19,7 @@ config I2C_ARB_GPIO_CHALLENGE config I2C_MUX_GPIO tristate "GPIO-based I2C multiplexer" - depends on GENERIC_GPIO + depends on GPIOLIB help If you say yes to this option, support will be included for a GPIO based I2C multiplexer. This driver provides access to diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c index 0e8fab1913df..fa6964d8681a 100644 --- a/trunk/drivers/idle/intel_idle.c +++ b/trunk/drivers/idle/intel_idle.c @@ -272,6 +272,27 @@ static struct cpuidle_state hsw_cstates[CPUIDLE_STATE_MAX] = { .exit_latency = 166, .target_residency = 500, .enter = &intel_idle }, + { + .name = "C8-HSW", + .desc = "MWAIT 0x40", + .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 300, + .target_residency = 900, + .enter = &intel_idle }, + { + .name = "C9-HSW", + .desc = "MWAIT 0x50", + .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 600, + .target_residency = 1800, + .enter = &intel_idle }, + { + .name = "C10-HSW", + .desc = "MWAIT 0x60", + .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, + .exit_latency = 2600, + .target_residency = 7700, + .enter = &intel_idle }, { .enter = NULL } }; diff --git a/trunk/drivers/iio/adc/exynos_adc.c b/trunk/drivers/iio/adc/exynos_adc.c index 9f3a8ef1fb3e..b3d03d335948 100644 --- a/trunk/drivers/iio/adc/exynos_adc.c +++ b/trunk/drivers/iio/adc/exynos_adc.c @@ -390,8 +390,8 @@ static int exynos_adc_remove(struct platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int exynos_adc_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct exynos_adc *info = platform_get_drvdata(pdev); + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct exynos_adc *info = iio_priv(indio_dev); u32 con; if (info->version == ADC_V2) { @@ -413,8 +413,8 @@ static int exynos_adc_suspend(struct device *dev) static int exynos_adc_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct exynos_adc *info = platform_get_drvdata(pdev); + struct iio_dev *indio_dev = dev_get_drvdata(dev); + struct exynos_adc *info = iio_priv(indio_dev); int ret; ret = regulator_enable(info->vdd); diff --git a/trunk/drivers/iio/common/st_sensors/st_sensors_core.c b/trunk/drivers/iio/common/st_sensors/st_sensors_core.c index bd33473f8e38..ed9bc8ae9330 100644 --- a/trunk/drivers/iio/common/st_sensors/st_sensors_core.c +++ b/trunk/drivers/iio/common/st_sensors/st_sensors_core.c @@ -312,6 +312,8 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, goto read_error; *val = *val >> ch->scan_type.shift; + + err = st_sensors_set_enable(indio_dev, false); } mutex_unlock(&indio_dev->mlock); diff --git a/trunk/drivers/iio/dac/Kconfig b/trunk/drivers/iio/dac/Kconfig index 
f4a6f0838327..b61160bd935e 100644 --- a/trunk/drivers/iio/dac/Kconfig +++ b/trunk/drivers/iio/dac/Kconfig @@ -5,7 +5,7 @@ menu "Digital to analog converters" config AD5064 tristate "Analog Devices AD5064 and similar multi-channel DAC driver" - depends on (SPI_MASTER || I2C) + depends on (SPI_MASTER && I2C!=m) || I2C help Say yes here to build support for Analog Devices AD5024, AD5025, AD5044, AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668, @@ -27,7 +27,7 @@ config AD5360 config AD5380 tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver" - depends on (SPI_MASTER || I2C) + depends on (SPI_MASTER && I2C!=m) || I2C select REGMAP_I2C if I2C select REGMAP_SPI if SPI_MASTER help @@ -57,7 +57,7 @@ config AD5624R_SPI config AD5446 tristate "Analog Devices AD5446 and similar single channel DACs driver" - depends on (SPI_MASTER || I2C) + depends on (SPI_MASTER && I2C!=m) || I2C help Say yes here to build support for Analog Devices AD5300, AD5301, AD5310, AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453, diff --git a/trunk/drivers/infiniband/core/iwcm.c b/trunk/drivers/infiniband/core/iwcm.c index 0bb99bb38809..c47c2034ca71 100644 --- a/trunk/drivers/infiniband/core/iwcm.c +++ b/trunk/drivers/infiniband/core/iwcm.c @@ -878,6 +878,8 @@ static void cm_work_handler(struct work_struct *_work) } return; } + if (empty) + return; spin_lock_irqsave(&cm_id_priv->lock, flags); } spin_unlock_irqrestore(&cm_id_priv->lock, flags); diff --git a/trunk/drivers/infiniband/core/verbs.c b/trunk/drivers/infiniband/core/verbs.c index a8fdd3381405..22192deb8828 100644 --- a/trunk/drivers/infiniband/core/verbs.c +++ b/trunk/drivers/infiniband/core/verbs.c @@ -348,7 +348,8 @@ static void __ib_shared_qp_event_handler(struct ib_event *event, void *context) struct ib_qp *qp = context; list_for_each_entry(event->element.qp, &qp->open_list, open_list) - event->element.qp->event_handler(event, event->element.qp->qp_context); + if (event->element.qp->event_handler) + event->element.qp->event_handler(event, event->element.qp->qp_context); } static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp) diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c index 9c12da0cbd32..e87f2201b220 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -559,7 +559,7 @@ static int iwch_reregister_phys_mem(struct ib_mr *mr, __be64 *page_list = NULL; int shift = 0; u64 total_size; - int npages; + int npages = 0; int ret; PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); diff --git a/trunk/drivers/infiniband/hw/cxgb4/qp.c b/trunk/drivers/infiniband/hw/cxgb4/qp.c index 5b059e2d80cc..232040447e8a 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/qp.c +++ b/trunk/drivers/infiniband/hw/cxgb4/qp.c @@ -111,6 +111,16 @@ static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq) return 0; } +static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user) +{ + int ret = -ENOSYS; + if (user) + ret = alloc_oc_sq(rdev, sq); + if (ret) + ret = alloc_host_sq(rdev, sq); + return ret; +} + static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, struct c4iw_dev_ucontext *uctx) { @@ -179,15 +189,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq, goto free_sw_rq; } - if (user) { - if (alloc_oc_sq(rdev, &wq->sq) && alloc_host_sq(rdev, &wq->sq)) - goto free_hwaddr; - } else { - ret = alloc_host_sq(rdev, &wq->sq); - if (ret) - goto 
free_hwaddr; - } - + ret = alloc_sq(rdev, &wq->sq, user); + if (ret) + goto free_hwaddr; memset(wq->sq.queue, 0, wq->sq.memsize); dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr); diff --git a/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c b/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c index ea93870266eb..44ea9390417c 100644 --- a/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/trunk/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -2187,7 +2187,8 @@ int ipath_register_ib_device(struct ipath_devdata *dd) if (ret) goto err_reg; - if (ipath_verbs_register_sysfs(dev)) + ret = ipath_verbs_register_sysfs(dev); + if (ret) goto err_class; enable_timer(dd); @@ -2327,15 +2328,15 @@ static int ipath_verbs_register_sysfs(struct ib_device *dev) int i; int ret; - for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) - if (device_create_file(&dev->dev, - ipath_class_attributes[i])) { - ret = 1; + for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) { + ret = device_create_file(&dev->dev, + ipath_class_attributes[i]); + if (ret) goto bail; - } - - ret = 0; - + } + return 0; bail: + for (i = 0; i < ARRAY_SIZE(ipath_class_attributes); ++i) + device_remove_file(&dev->dev, ipath_class_attributes[i]); return ret; } diff --git a/trunk/drivers/infiniband/hw/mlx4/cq.c b/trunk/drivers/infiniband/hw/mlx4/cq.c index 73b3a7132587..d5e60f44ba5a 100644 --- a/trunk/drivers/infiniband/hw/mlx4/cq.c +++ b/trunk/drivers/infiniband/hw/mlx4/cq.c @@ -33,6 +33,7 @@ #include #include +#include #include #include "mlx4_ib.h" @@ -585,6 +586,7 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, struct mlx4_qp *mqp; struct mlx4_ib_wq *wq; struct mlx4_ib_srq *srq; + struct mlx4_srq *msrq = NULL; int is_send; int is_error; u32 g_mlpath_rqpn; @@ -653,6 +655,20 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, wc->qp = &(*cur_qp)->ibqp; + if (wc->qp->qp_type == IB_QPT_XRC_TGT) { + u32 srq_num; + g_mlpath_rqpn = be32_to_cpu(cqe->g_mlpath_rqpn); + srq_num = g_mlpath_rqpn & 0xffffff; + /* SRQ is also in the radix tree */ + msrq = mlx4_srq_lookup(to_mdev(cq->ibcq.device)->dev, + srq_num); + if (unlikely(!msrq)) { + pr_warn("CQ %06x with entry for unknown SRQN %06x\n", + cq->mcq.cqn, srq_num); + return -EINVAL; + } + } + if (is_send) { wq = &(*cur_qp)->sq; if (!(*cur_qp)->sq_signal_bits) { @@ -666,6 +682,11 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, wqe_ctr = be16_to_cpu(cqe->wqe_index); wc->wr_id = srq->wrid[wqe_ctr]; mlx4_ib_free_srq_wqe(srq, wqe_ctr); + } else if (msrq) { + srq = to_mibsrq(msrq); + wqe_ctr = be16_to_cpu(cqe->wqe_index); + wc->wr_id = srq->wrid[wqe_ctr]; + mlx4_ib_free_srq_wqe(srq, wqe_ctr); } else { wq = &(*cur_qp)->rq; tail = wq->tail & (wq->wqe_cnt - 1); diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c index 35cced2a4da8..4f10af2905b5 100644 --- a/trunk/drivers/infiniband/hw/mlx4/qp.c +++ b/trunk/drivers/infiniband/hw/mlx4/qp.c @@ -1292,6 +1292,8 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { context->sq_size_stride |= !!qp->sq_no_prefetch << 7; context->xrcd = cpu_to_be32((u32) qp->xrcdn); + if (ibqp->qp_type == IB_QPT_RAW_PACKET) + context->param3 |= cpu_to_be32(1 << 30); } if (qp->ibqp.uobject) @@ -1458,6 +1460,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, } } + if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET) + context->pri_path.ackto = (context->pri_path.ackto & 0xf8) | + MLX4_IB_LINK_TYPE_ETH; + if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD && attr_mask & 
IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify) sqd_event = 1; diff --git a/trunk/drivers/infiniband/hw/qib/qib_sysfs.c b/trunk/drivers/infiniband/hw/qib/qib_sysfs.c index 034cc821de5c..3c8e4e3caca6 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/trunk/drivers/infiniband/hw/qib/qib_sysfs.c @@ -808,10 +808,14 @@ int qib_verbs_register_sysfs(struct qib_devdata *dd) for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) { ret = device_create_file(&dev->dev, qib_attributes[i]); if (ret) - return ret; + goto bail; } return 0; +bail: + for (i = 0; i < ARRAY_SIZE(qib_attributes); ++i) + device_remove_file(&dev->dev, qib_attributes[i]); + return ret; } /* diff --git a/trunk/drivers/infiniband/hw/qib/qib_verbs.c b/trunk/drivers/infiniband/hw/qib/qib_verbs.c index 7c0ab16a2fe2..904c384aa361 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_verbs.c +++ b/trunk/drivers/infiniband/hw/qib/qib_verbs.c @@ -2234,7 +2234,8 @@ int qib_register_ib_device(struct qib_devdata *dd) if (ret) goto err_agents; - if (qib_verbs_register_sysfs(dd)) + ret = qib_verbs_register_sysfs(dd); + if (ret) goto err_class; goto bail; diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c index 554b9063da54..b6e049a3c7a8 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -830,7 +830,7 @@ static int ipoib_hard_header(struct sk_buff *skb, */ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); - return 0; + return sizeof *header; } static void ipoib_set_mcast_list(struct net_device *dev) diff --git a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c index 0ab8c9cc3a78..f19b0998a53c 100644 --- a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -82,10 +82,10 @@ module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO); int iser_debug_level = 0; -MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover " - "v" DRV_VER " (" DRV_DATE ")"); +MODULE_DESCRIPTION("iSER (iSCSI Extensions for RDMA) Datamover"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Alex Nezhinsky, Dan Bar Dov, Or Gerlitz"); +MODULE_VERSION(DRV_VER); module_param_named(debug_level, iser_debug_level, int, 0644); MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:disabled)"); @@ -370,8 +370,8 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, /* binds the iSER connection retrieved from the previously * connected ep_handle to the iSCSI layer connection. 
exchanges * connection pointers */ - iser_err("binding iscsi/iser conn %p %p to ib_conn %p\n", - conn, conn->dd_data, ib_conn); + iser_info("binding iscsi/iser conn %p %p to ib_conn %p\n", + conn, conn->dd_data, ib_conn); iser_conn = conn->dd_data; ib_conn->iser_conn = iser_conn; iser_conn->ib_conn = ib_conn; @@ -475,28 +475,28 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_HDRDGST_EN: sscanf(buf, "%d", &value); if (value) { - printk(KERN_ERR "DataDigest wasn't negotiated to None"); + iser_err("DataDigest wasn't negotiated to None"); return -EPROTO; } break; case ISCSI_PARAM_DATADGST_EN: sscanf(buf, "%d", &value); if (value) { - printk(KERN_ERR "DataDigest wasn't negotiated to None"); + iser_err("DataDigest wasn't negotiated to None"); return -EPROTO; } break; case ISCSI_PARAM_IFMARKER_EN: sscanf(buf, "%d", &value); if (value) { - printk(KERN_ERR "IFMarker wasn't negotiated to No"); + iser_err("IFMarker wasn't negotiated to No"); return -EPROTO; } break; case ISCSI_PARAM_OFMARKER_EN: sscanf(buf, "%d", &value); if (value) { - printk(KERN_ERR "OFMarker wasn't negotiated to No"); + iser_err("OFMarker wasn't negotiated to No"); return -EPROTO; } break; @@ -596,7 +596,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) ib_conn->state == ISER_CONN_DOWN)) rc = -1; - iser_err("ib conn %p rc = %d\n", ib_conn, rc); + iser_info("ib conn %p rc = %d\n", ib_conn, rc); if (rc > 0) return 1; /* success, this is the equivalent of POLLOUT */ @@ -623,7 +623,7 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) iscsi_suspend_tx(ib_conn->iser_conn->iscsi_conn); - iser_err("ib conn %p state %d\n",ib_conn, ib_conn->state); + iser_info("ib conn %p state %d\n", ib_conn, ib_conn->state); iser_conn_terminate(ib_conn); } @@ -682,7 +682,7 @@ static umode_t iser_attr_is_visible(int param_type, int param) static struct scsi_host_template iscsi_iser_sht = { .module = THIS_MODULE, - .name = "iSCSI Initiator over iSER, v." DRV_VER, + .name = "iSCSI Initiator over iSER", .queuecommand = iscsi_queuecommand, .change_queue_depth = iscsi_change_queue_depth, .sg_tablesize = ISCSI_ISER_SG_TABLESIZE, @@ -740,7 +740,7 @@ static int __init iser_init(void) iser_dbg("Starting iSER datamover...\n"); if (iscsi_max_lun < 1) { - printk(KERN_ERR "Invalid max_lun value of %u\n", iscsi_max_lun); + iser_err("Invalid max_lun value of %u\n", iscsi_max_lun); return -EINVAL; } diff --git a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h index 5babdb35bda7..06f578cde75b 100644 --- a/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/trunk/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -42,6 +42,7 @@ #include #include +#include #include #include @@ -65,20 +66,26 @@ #define DRV_NAME "iser" #define PFX DRV_NAME ": " -#define DRV_VER "0.1" -#define DRV_DATE "May 7th, 2006" +#define DRV_VER "1.1" #define iser_dbg(fmt, arg...) \ do { \ - if (iser_debug_level > 1) \ + if (iser_debug_level > 2) \ printk(KERN_DEBUG PFX "%s:" fmt,\ __func__ , ## arg); \ } while (0) #define iser_warn(fmt, arg...) \ + do { \ + if (iser_debug_level > 1) \ + pr_warn(PFX "%s:" fmt, \ + __func__ , ## arg); \ + } while (0) + +#define iser_info(fmt, arg...) 
\ do { \ if (iser_debug_level > 0) \ - printk(KERN_DEBUG PFX "%s:" fmt,\ + pr_info(PFX "%s:" fmt, \ __func__ , ## arg); \ } while (0) @@ -133,6 +140,15 @@ struct iser_hdr { __be64 read_va; } __attribute__((packed)); + +#define ISER_ZBVA_NOT_SUPPORTED 0x80 +#define ISER_SEND_W_INV_NOT_SUPPORTED 0x40 + +struct iser_cm_hdr { + u8 flags; + u8 rsvd[3]; +} __packed; + /* Constant PDU lengths calculations */ #define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr)) diff --git a/trunk/drivers/infiniband/ulp/iser/iser_memory.c b/trunk/drivers/infiniband/ulp/iser/iser_memory.c index be1edb04b085..68ebb7fe072a 100644 --- a/trunk/drivers/infiniband/ulp/iser/iser_memory.c +++ b/trunk/drivers/infiniband/ulp/iser/iser_memory.c @@ -416,8 +416,9 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task, for (i=0 ; ipage_vec->length ; i++) iser_err("page_vec[%d] = 0x%llx\n", i, (unsigned long long) ib_conn->page_vec->pages[i]); - return err; } + if (err) + return err; } return 0; } diff --git a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c index 4debadc53106..5278916c3103 100644 --- a/trunk/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/trunk/drivers/infiniband/ulp/iser/iser_verbs.c @@ -74,8 +74,9 @@ static int iser_create_device_ib_res(struct iser_device *device) struct iser_cq_desc *cq_desc; device->cqs_used = min(ISER_MAX_CQ, device->ib_device->num_comp_vectors); - iser_err("using %d CQs, device %s supports %d vectors\n", device->cqs_used, - device->ib_device->name, device->ib_device->num_comp_vectors); + iser_info("using %d CQs, device %s supports %d vectors\n", + device->cqs_used, device->ib_device->name, + device->ib_device->num_comp_vectors); device->cq_desc = kmalloc(sizeof(struct iser_cq_desc) * device->cqs_used, GFP_KERNEL); @@ -262,7 +263,7 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) min_index = index; device->cq_active_qps[min_index]++; mutex_unlock(&ig.connlist_mutex); - iser_err("cq index %d used for ib_conn %p\n", min_index, ib_conn); + iser_info("cq index %d used for ib_conn %p\n", min_index, ib_conn); init_attr.event_handler = iser_qp_event_callback; init_attr.qp_context = (void *)ib_conn; @@ -280,9 +281,9 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) goto out_err; ib_conn->qp = ib_conn->cma_id->qp; - iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n", - ib_conn, ib_conn->cma_id, - ib_conn->fmr_pool, ib_conn->cma_id->qp); + iser_info("setting conn %p cma_id %p: fmr_pool %p qp %p\n", + ib_conn, ib_conn->cma_id, + ib_conn->fmr_pool, ib_conn->cma_id->qp); return ret; out_err: @@ -299,9 +300,9 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id) int cq_index; BUG_ON(ib_conn == NULL); - iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n", - ib_conn, ib_conn->cma_id, - ib_conn->fmr_pool, ib_conn->qp); + iser_info("freeing conn %p cma_id %p fmr pool %p qp %p\n", + ib_conn, ib_conn->cma_id, + ib_conn->fmr_pool, ib_conn->qp); /* qp is created only once both addr & route are resolved */ if (ib_conn->fmr_pool != NULL) @@ -379,7 +380,7 @@ static void iser_device_try_release(struct iser_device *device) { mutex_lock(&ig.device_list_mutex); device->refcount--; - iser_err("device %p refcount %d\n",device,device->refcount); + iser_info("device %p refcount %d\n", device, device->refcount); if (!device->refcount) { iser_free_device_ib_res(device); list_del(&device->ig_list); @@ -498,6 +499,7 @@ static int iser_route_handler(struct rdma_cm_id *cma_id) { struct 
rdma_conn_param conn_param; int ret; + struct iser_cm_hdr req_hdr; ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context); if (ret) @@ -509,6 +511,12 @@ static int iser_route_handler(struct rdma_cm_id *cma_id) conn_param.retry_count = 7; conn_param.rnr_retry_count = 6; + memset(&req_hdr, 0, sizeof(req_hdr)); + req_hdr.flags = (ISER_ZBVA_NOT_SUPPORTED | + ISER_SEND_W_INV_NOT_SUPPORTED); + conn_param.private_data = (void *)&req_hdr; + conn_param.private_data_len = sizeof(struct iser_cm_hdr); + ret = rdma_connect(cma_id, &conn_param); if (ret) { iser_err("failure connecting: %d\n", ret); @@ -558,8 +566,8 @@ static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *eve { int ret = 0; - iser_err("event %d status %d conn %p id %p\n", - event->event, event->status, cma_id->context, cma_id); + iser_info("event %d status %d conn %p id %p\n", + event->event, event->status, cma_id->context, cma_id); switch (event->event) { case RDMA_CM_EVENT_ADDR_RESOLVED: @@ -619,8 +627,8 @@ int iser_connect(struct iser_conn *ib_conn, /* the device is known only --after-- address resolution */ ib_conn->device = NULL; - iser_err("connecting to: %pI4, port 0x%x\n", - &dst_addr->sin_addr, dst_addr->sin_port); + iser_info("connecting to: %pI4, port 0x%x\n", + &dst_addr->sin_addr, dst_addr->sin_port); ib_conn->state = ISER_CONN_PENDING; diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c index c09d41b1a2ff..b08ca7a9f76b 100644 --- a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1374,7 +1374,7 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) target_put_sess_cmd(ioctx->ch->sess, &ioctx->cmd); break; default: - WARN_ON("ERROR: unexpected command state"); + WARN(1, "Unexpected command state (%d)", state); break; } diff --git a/trunk/drivers/input/keyboard/Kconfig b/trunk/drivers/input/keyboard/Kconfig index 6a195d5e90ff..62a2c0e4cc99 100644 --- a/trunk/drivers/input/keyboard/Kconfig +++ b/trunk/drivers/input/keyboard/Kconfig @@ -175,7 +175,7 @@ config KEYBOARD_EP93XX config KEYBOARD_GPIO tristate "GPIO Buttons" - depends on GENERIC_GPIO + depends on GPIOLIB help This driver implements support for buttons connected to GPIO pins of various CPUs (and some other chips). @@ -190,7 +190,7 @@ config KEYBOARD_GPIO config KEYBOARD_GPIO_POLLED tristate "Polled GPIO buttons" - depends on GENERIC_GPIO + depends on GPIOLIB select INPUT_POLLDEV help This driver implements support for buttons connected @@ -241,7 +241,7 @@ config KEYBOARD_TCA8418 config KEYBOARD_MATRIX tristate "GPIO driven matrix keypad support" - depends on GENERIC_GPIO + depends on GPIOLIB select INPUT_MATRIXKMAP help Enable support for GPIO driven matrix keypad. diff --git a/trunk/drivers/input/misc/Kconfig b/trunk/drivers/input/misc/Kconfig index af80928a46b4..bb698e1f9e42 100644 --- a/trunk/drivers/input/misc/Kconfig +++ b/trunk/drivers/input/misc/Kconfig @@ -214,7 +214,7 @@ config INPUT_APANEL config INPUT_GP2A tristate "Sharp GP2AP002A00F I2C Proximity/Opto sensor driver" depends on I2C - depends on GENERIC_GPIO + depends on GPIOLIB help Say Y here if you have a Sharp GP2AP002A00F proximity/als combo-chip hooked to an I2C bus. 
@@ -224,7 +224,7 @@ config INPUT_GP2A config INPUT_GPIO_TILT_POLLED tristate "Polled GPIO tilt switch" - depends on GENERIC_GPIO + depends on GPIOLIB select INPUT_POLLDEV help This driver implements support for tilt switches connected @@ -472,7 +472,7 @@ config INPUT_PWM_BEEPER config INPUT_GPIO_ROTARY_ENCODER tristate "Rotary encoders connected to GPIO pins" - depends on GPIOLIB && GENERIC_GPIO + depends on GPIOLIB help Say Y here to add support for rotary encoders connected to GPIO lines. Check file:Documentation/input/rotary-encoder.txt for more @@ -484,7 +484,7 @@ config INPUT_GPIO_ROTARY_ENCODER config INPUT_RB532_BUTTON tristate "Mikrotik Routerboard 532 button interface" depends on MIKROTIK_RB532 - depends on GPIOLIB && GENERIC_GPIO + depends on GPIOLIB select INPUT_POLLDEV help Say Y here if you want support for the S1 button built into diff --git a/trunk/drivers/input/mouse/Kconfig b/trunk/drivers/input/mouse/Kconfig index 802bd6a72d73..effa9c5f2c5c 100644 --- a/trunk/drivers/input/mouse/Kconfig +++ b/trunk/drivers/input/mouse/Kconfig @@ -295,7 +295,7 @@ config MOUSE_VSXXXAA config MOUSE_GPIO tristate "GPIO mouse" - depends on GENERIC_GPIO + depends on GPIOLIB select INPUT_POLLDEV help This driver simulates a mouse on GPIO lines of various CPUs (and some diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c index 0bfd8cf25200..5c68e4486845 100644 --- a/trunk/drivers/input/tablet/wacom_wac.c +++ b/trunk/drivers/input/tablet/wacom_wac.c @@ -342,10 +342,10 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) | ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12); - switch (wacom->id[idx] & 0xfffff) { + switch (wacom->id[idx]) { case 0x812: /* Inking pen */ case 0x801: /* Intuos3 Inking pen */ - case 0x20802: /* Intuos4 Inking Pen */ + case 0x120802: /* Intuos4/5 Inking Pen */ case 0x012: wacom->tool[idx] = BTN_TOOL_PENCIL; break; @@ -356,11 +356,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x823: /* Intuos3 Grip Pen */ case 0x813: /* Intuos3 Classic Pen */ case 0x885: /* Intuos3 Marker Pen */ - case 0x802: /* Intuos4 General Pen */ - case 0x804: /* Intuos4 Marker Pen */ - case 0x40802: /* Intuos4 Classic Pen */ - case 0x18802: /* DTH2242 Grip Pen */ + case 0x802: /* Intuos4/5 13HD/24HD General Pen */ + case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */ case 0x022: + case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */ + case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */ + case 0x160802: /* Cintiq 13HD Pro Pen */ + case 0x180802: /* DTH2242 Pen */ wacom->tool[idx] = BTN_TOOL_PEN; break; @@ -391,10 +393,14 @@ static int wacom_intuos_inout(struct wacom_wac *wacom) case 0x82b: /* Intuos3 Grip Pen Eraser */ case 0x81b: /* Intuos3 Classic Pen Eraser */ case 0x91b: /* Intuos3 Airbrush Eraser */ - case 0x80c: /* Intuos4 Marker Pen Eraser */ - case 0x80a: /* Intuos4 General Pen Eraser */ - case 0x4080a: /* Intuos4 Classic Pen Eraser */ - case 0x90a: /* Intuos4 Airbrush Eraser */ + case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */ + case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */ + case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */ + case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */ + case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */ + case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */ + case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */ + case 0x18080a: /* DTH2242 Eraser */ wacom->tool[idx] = BTN_TOOL_RUBBER; break; @@ -402,7 +408,8 @@ static int 
wacom_intuos_inout(struct wacom_wac *wacom) case 0x912: case 0x112: case 0x913: /* Intuos3 Airbrush */ - case 0x902: /* Intuos4 Airbrush */ + case 0x902: /* Intuos4/5 13HD/24HD Airbrush */ + case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */ wacom->tool[idx] = BTN_TOOL_AIRBRUSH; break; @@ -533,10 +540,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_key(input, BTN_8, (data[3] & 0x80)); } if (data[1] | (data[2] & 0x01) | data[3]) { - input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { - input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else if (features->type == DTK) { @@ -546,6 +551,26 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_key(input, BTN_3, (data[6] & 0x08)); input_report_key(input, BTN_4, (data[6] & 0x10)); input_report_key(input, BTN_5, (data[6] & 0x20)); + if (data[6] & 0x3f) { + input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); + } else { + input_report_abs(input, ABS_MISC, 0); + } + } else if (features->type == WACOM_13HD) { + input_report_key(input, BTN_0, (data[3] & 0x01)); + input_report_key(input, BTN_1, (data[4] & 0x01)); + input_report_key(input, BTN_2, (data[4] & 0x02)); + input_report_key(input, BTN_3, (data[4] & 0x04)); + input_report_key(input, BTN_4, (data[4] & 0x08)); + input_report_key(input, BTN_5, (data[4] & 0x10)); + input_report_key(input, BTN_6, (data[4] & 0x20)); + input_report_key(input, BTN_7, (data[4] & 0x40)); + input_report_key(input, BTN_8, (data[4] & 0x80)); + if ((data[3] & 0x01) | data[4]) { + input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); + } else { + input_report_abs(input, ABS_MISC, 0); + } } else if (features->type == WACOM_24HD) { input_report_key(input, BTN_0, (data[6] & 0x01)); input_report_key(input, BTN_1, (data[6] & 0x02)); @@ -590,10 +615,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) { - input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { - input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) { @@ -618,10 +641,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) } if (data[2] | (data[3] & 0x01) | data[4] | data[5]) { - input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { - input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } else { @@ -668,10 +689,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) | data[2] | (data[3] & 0x1f) | data[4] | data[8] | (data[7] & 0x01)) { - input_report_key(input, wacom->tool[1], 1); input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); } else { - input_report_key(input, wacom->tool[1], 0); input_report_abs(input, ABS_MISC, 0); } } @@ -1301,6 +1320,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len) case INTUOS4L: case CINTIQ: case WACOM_BEE: + case WACOM_13HD: case WACOM_21UX2: case WACOM_22HD: case WACOM_24HD: @@ -1530,15 +1550,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(KEY_PROG1, input_dev->keybit); __set_bit(KEY_PROG2, input_dev->keybit); __set_bit(KEY_PROG3, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); /* fall through */ case DTK: for (i = 0; i < 6; i++) 
__set_bit(BTN_0 + i, input_dev->keybit); - input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); - input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); - __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); wacom_setup_cintiq(wacom_wac); @@ -1579,6 +1599,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, wacom_setup_cintiq(wacom_wac); break; + case WACOM_13HD: + for (i = 0; i < 9; i++) + __set_bit(BTN_0 + i, input_dev->keybit); + + input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); + __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); + wacom_setup_cintiq(wacom_wac); + break; + case INTUOS3: case INTUOS3L: __set_bit(BTN_4, input_dev->keybit); @@ -1950,6 +1979,9 @@ static const struct wacom_features wacom_features_0xC5 = static const struct wacom_features wacom_features_0xC6 = { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0x304 = + { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59552, 33848, 1023, + 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xC7 = { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -1959,6 +1991,9 @@ static const struct wacom_features wacom_features_0xCE = static const struct wacom_features wacom_features_0xF0 = { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; +static const struct wacom_features wacom_features_0x57 = + { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, + 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES}; static const struct wacom_features wacom_features_0x59 = /* Pen */ { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, @@ -1972,6 +2007,12 @@ static const struct wacom_features wacom_features_0xCC = static const struct wacom_features wacom_features_0xFA = { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0x5B = + { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, + 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e }; +static const struct wacom_features wacom_features_0x5E = + { "Wacom Cintiq 22HDT", .type = WACOM_24HDT, + .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 }; static const struct wacom_features wacom_features_0x90 = { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; @@ -2143,8 +2184,11 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x43) }, { USB_DEVICE_WACOM(0x44) }, { USB_DEVICE_WACOM(0x45) }, + { USB_DEVICE_WACOM(0x57) }, { USB_DEVICE_WACOM(0x59) }, { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, + { USB_DEVICE_WACOM(0x5B) }, + { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) }, { USB_DEVICE_WACOM(0xB0) }, { USB_DEVICE_WACOM(0xB1) }, { USB_DEVICE_WACOM(0xB2) }, @@ -2205,6 +2249,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x100) }, { USB_DEVICE_WACOM(0x101) }, { USB_DEVICE_WACOM(0x10D) }, + { USB_DEVICE_WACOM(0x304) }, { USB_DEVICE_WACOM(0x4001) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, diff --git a/trunk/drivers/input/tablet/wacom_wac.h b/trunk/drivers/input/tablet/wacom_wac.h index 5f9a7721e16c..dfc9e08e7f70 100644 --- 
a/trunk/drivers/input/tablet/wacom_wac.h +++ b/trunk/drivers/input/tablet/wacom_wac.h @@ -82,6 +82,7 @@ enum { WACOM_24HD, CINTIQ, WACOM_BEE, + WACOM_13HD, WACOM_MO, WIRELESS, BAMBOO_PT, diff --git a/trunk/drivers/input/touchscreen/egalax_ts.c b/trunk/drivers/input/touchscreen/egalax_ts.c index 17c9097f3b5d..39f3df8670c3 100644 --- a/trunk/drivers/input/touchscreen/egalax_ts.c +++ b/trunk/drivers/input/touchscreen/egalax_ts.c @@ -216,7 +216,7 @@ static int egalax_ts_probe(struct i2c_client *client, input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0); input_set_abs_params(input_dev, - ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0); + ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0); input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0); input_set_drvdata(input_dev, ts); diff --git a/trunk/drivers/isdn/capi/kcapi.c b/trunk/drivers/isdn/capi/kcapi.c index 9b1b274c7d25..c123709acf82 100644 --- a/trunk/drivers/isdn/capi/kcapi.c +++ b/trunk/drivers/isdn/capi/kcapi.c @@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr) static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr) { - if (contr - 1 >= CAPI_MAXCONTR) + if (contr < 1 || contr - 1 >= CAPI_MAXCONTR) return NULL; return capi_controller[contr - 1]; @@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid) { lockdep_assert_held(&capi_controller_lock); - if (applid - 1 >= CAPI_MAXAPPL) + if (applid < 1 || applid - 1 >= CAPI_MAXAPPL) return NULL; return capi_applications[applid - 1]; @@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid) static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid) { - if (applid - 1 >= CAPI_MAXAPPL) + if (applid < 1 || applid - 1 >= CAPI_MAXAPPL) return NULL; return rcu_dereference(capi_applications[applid - 1]); diff --git a/trunk/drivers/leds/Kconfig b/trunk/drivers/leds/Kconfig index d44806d41b44..ef992293598a 100644 --- a/trunk/drivers/leds/Kconfig +++ b/trunk/drivers/leds/Kconfig @@ -173,7 +173,7 @@ config LEDS_PCA9532_GPIO config LEDS_GPIO tristate "LED Support for GPIO connected LEDs" depends on LEDS_CLASS - depends on GENERIC_GPIO + depends on GPIOLIB help This option enables support for the LEDs connected to GPIO outputs. To be useful the particular board must have LEDs @@ -362,7 +362,7 @@ config LEDS_INTEL_SS4200 config LEDS_LT3593 tristate "LED driver for LT3593 controllers" depends on LEDS_CLASS - depends on GENERIC_GPIO + depends on GPIOLIB help This option enables support for LEDs driven by a Linear Technology LT3593 controller. This controller uses a special one-wire pulse @@ -431,7 +431,7 @@ config LEDS_ASIC3 config LEDS_RENESAS_TPU bool "LED support for Renesas TPU" - depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO + depends on LEDS_CLASS=y && HAVE_CLK && GPIOLIB help This option enables build of the LED TPU platform driver, suitable to drive any TPU channel on newer Renesas SoCs. 
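The kcapi.c hunks above add an explicit lower bound because contr and applid are u16 values: after integer promotion, applid - 1 evaluates to the signed value -1 when applid is 0, so the old test "applid - 1 >= CAPI_MAXAPPL" does not reject 0 and the lookup reads one element before the array. The following is a minimal user-space sketch of the two checks, not kernel code; MAX is an illustrative stand-in for CAPI_MAXAPPL and the function names are hypothetical.

#include <stdio.h>

#define MAX 4	/* illustrative stand-in for CAPI_MAXAPPL */

/* Old check: with a u16 id, "id - 1" is promoted to int, so id == 0
 * yields -1, which is not >= MAX, and the caller would go on to index
 * the table at -1. */
static int old_rejects(unsigned short id)
{
	return id - 1 >= MAX;
}

/* New check, as in the patch: the explicit "id < 1" catches id == 0. */
static int new_rejects(unsigned short id)
{
	return id < 1 || id - 1 >= MAX;
}

int main(void)
{
	unsigned short probe[] = { 0, 1, MAX, MAX + 1 };
	unsigned int i;

	/* Only id == 0 behaves differently: rejected by the new check,
	 * silently accepted by the old one. */
	for (i = 0; i < sizeof(probe) / sizeof(probe[0]); i++)
		printf("id=%d old_rejects=%d new_rejects=%d\n",
		       probe[i], old_rejects(probe[i]), new_rejects(probe[i]));
	return 0;
}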
diff --git a/trunk/drivers/leds/leds-gpio.c b/trunk/drivers/leds/leds-gpio.c index a0d931bcb37c..b02b679abf31 100644 --- a/trunk/drivers/leds/leds-gpio.c +++ b/trunk/drivers/leds/leds-gpio.c @@ -107,6 +107,10 @@ static int create_gpio_led(const struct gpio_led *template, return 0; } + ret = devm_gpio_request(parent, template->gpio, template->name); + if (ret < 0) + return ret; + led_dat->cdev.name = template->name; led_dat->cdev.default_trigger = template->default_trigger; led_dat->gpio = template->gpio; @@ -126,10 +130,7 @@ static int create_gpio_led(const struct gpio_led *template, if (!template->retain_state_suspended) led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; - ret = devm_gpio_request_one(parent, template->gpio, - (led_dat->active_low ^ state) ? - GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW, - template->name); + ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state); if (ret < 0) return ret; diff --git a/trunk/drivers/lguest/page_tables.c b/trunk/drivers/lguest/page_tables.c index 699187ab3800..5b9ac32801c7 100644 --- a/trunk/drivers/lguest/page_tables.c +++ b/trunk/drivers/lguest/page_tables.c @@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx) kill_guest(&lg->cpus[0], "Cannot populate switcher mapping"); } + lg->pgdirs[pgdir].last_host_cpu = -1; } } diff --git a/trunk/drivers/md/dm-bufio.c b/trunk/drivers/md/dm-bufio.c index c6083132c4b8..0387e05cdb98 100644 --- a/trunk/drivers/md/dm-bufio.c +++ b/trunk/drivers/md/dm-bufio.c @@ -319,6 +319,9 @@ static void __cache_size_refresh(void) static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, enum data_mode *data_mode) { + unsigned noio_flag; + void *ptr; + if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) { *data_mode = DATA_MODE_SLAB; return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask); @@ -332,7 +335,26 @@ static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask, } *data_mode = DATA_MODE_VMALLOC; - return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); + + /* + * __vmalloc allocates the data pages and auxiliary structures with + * gfp_flags that were specified, but pagetables are always allocated + * with GFP_KERNEL, no matter what was specified as gfp_mask. + * + * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that + * all allocations done by this process (including pagetables) are done + * as if GFP_NOIO was specified. 
+ */ + + if (gfp_mask & __GFP_NORETRY) + noio_flag = memalloc_noio_save(); + + ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL); + + if (gfp_mask & __GFP_NORETRY) + memalloc_noio_restore(noio_flag); + + return ptr; } /* diff --git a/trunk/drivers/md/dm-cache-metadata.c b/trunk/drivers/md/dm-cache-metadata.c index 83e995fece88..1af7255bbffb 100644 --- a/trunk/drivers/md/dm-cache-metadata.c +++ b/trunk/drivers/md/dm-cache-metadata.c @@ -1044,7 +1044,7 @@ void dm_cache_metadata_get_stats(struct dm_cache_metadata *cmd, struct dm_cache_statistics *stats) { down_read(&cmd->root_lock); - memcpy(stats, &cmd->stats, sizeof(*stats)); + *stats = cmd->stats; up_read(&cmd->root_lock); } @@ -1052,7 +1052,7 @@ void dm_cache_metadata_set_stats(struct dm_cache_metadata *cmd, struct dm_cache_statistics *stats) { down_write(&cmd->root_lock); - memcpy(&cmd->stats, stats, sizeof(*stats)); + cmd->stats = *stats; up_write(&cmd->root_lock); } diff --git a/trunk/drivers/md/dm-cache-policy.h b/trunk/drivers/md/dm-cache-policy.h index 558bdfdabf5f..33369ca9614f 100644 --- a/trunk/drivers/md/dm-cache-policy.h +++ b/trunk/drivers/md/dm-cache-policy.h @@ -130,8 +130,8 @@ struct dm_cache_policy { * * Must not block. * - * Returns 1 iff in cache, 0 iff not, < 0 on error (-EWOULDBLOCK - * would be typical). + * Returns 0 if in cache, -ENOENT if not, < 0 for other errors + * (-EWOULDBLOCK would be typical). */ int (*lookup)(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock); diff --git a/trunk/drivers/md/dm-cache-target.c b/trunk/drivers/md/dm-cache-target.c index 10744091e6ca..df44b60e66f2 100644 --- a/trunk/drivers/md/dm-cache-target.c +++ b/trunk/drivers/md/dm-cache-target.c @@ -205,7 +205,7 @@ struct per_bio_data { /* * writethrough fields. These MUST remain at the end of this * structure and the 'cache' member must be the first as it - * is used to determine the offsetof the writethrough fields. + * is used to determine the offset of the writethrough fields. */ struct cache *cache; dm_cblock_t cblock; @@ -393,7 +393,7 @@ static int get_cell(struct cache *cache, return r; } - /*----------------------------------------------------------------*/ +/*----------------------------------------------------------------*/ static bool is_dirty(struct cache *cache, dm_cblock_t b) { @@ -419,6 +419,7 @@ static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cbl } /*----------------------------------------------------------------*/ + static bool block_size_is_power_of_two(struct cache *cache) { return cache->sectors_per_block_shift >= 0; @@ -667,7 +668,7 @@ static void writethrough_endio(struct bio *bio, int err) /* * We can't issue this bio directly, since we're in interrupt - * context. So it get's put on a bio list for processing by the + * context. So it gets put on a bio list for processing by the * worker thread. 
*/ defer_writethrough_bio(pb->cache, bio); @@ -1445,6 +1446,7 @@ static void do_worker(struct work_struct *ws) static void do_waker(struct work_struct *ws) { struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker); + policy_tick(cache->policy); wake_worker(cache); queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD); } @@ -1809,7 +1811,37 @@ static int parse_cache_args(struct cache_args *ca, int argc, char **argv, static struct kmem_cache *migration_cache; -static int set_config_values(struct dm_cache_policy *p, int argc, const char **argv) +#define NOT_CORE_OPTION 1 + +static int process_config_option(struct cache *cache, const char *key, const char *value) +{ + unsigned long tmp; + + if (!strcasecmp(key, "migration_threshold")) { + if (kstrtoul(value, 10, &tmp)) + return -EINVAL; + + cache->migration_threshold = tmp; + return 0; + } + + return NOT_CORE_OPTION; +} + +static int set_config_value(struct cache *cache, const char *key, const char *value) +{ + int r = process_config_option(cache, key, value); + + if (r == NOT_CORE_OPTION) + r = policy_set_config_value(cache->policy, key, value); + + if (r) + DMWARN("bad config value for %s: %s", key, value); + + return r; +} + +static int set_config_values(struct cache *cache, int argc, const char **argv) { int r = 0; @@ -1819,12 +1851,9 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a } while (argc) { - r = policy_set_config_value(p, argv[0], argv[1]); - if (r) { - DMWARN("policy_set_config_value failed: key = '%s', value = '%s'", - argv[0], argv[1]); - return r; - } + r = set_config_value(cache, argv[0], argv[1]); + if (r) + break; argc -= 2; argv += 2; @@ -1836,8 +1865,6 @@ static int set_config_values(struct dm_cache_policy *p, int argc, const char **a static int create_cache_policy(struct cache *cache, struct cache_args *ca, char **error) { - int r; - cache->policy = dm_cache_policy_create(ca->policy_name, cache->cache_size, cache->origin_sectors, @@ -1847,14 +1874,7 @@ static int create_cache_policy(struct cache *cache, struct cache_args *ca, return -ENOMEM; } - r = set_config_values(cache->policy, ca->policy_argc, ca->policy_argv); - if (r) { - *error = "Error setting cache policy's config values"; - dm_cache_policy_destroy(cache->policy); - cache->policy = NULL; - } - - return r; + return 0; } /* @@ -1886,7 +1906,7 @@ static sector_t calculate_discard_block_size(sector_t cache_block_size, return discard_block_size; } -#define DEFAULT_MIGRATION_THRESHOLD (2048 * 100) +#define DEFAULT_MIGRATION_THRESHOLD 2048 static int cache_create(struct cache_args *ca, struct cache **result) { @@ -1911,7 +1931,7 @@ static int cache_create(struct cache_args *ca, struct cache **result) ti->discards_supported = true; ti->discard_zeroes_data_unsupported = true; - memcpy(&cache->features, &ca->features, sizeof(cache->features)); + cache->features = ca->features; ti->per_bio_data_size = get_per_bio_data_size(cache); cache->callbacks.congested_fn = cache_is_congested; @@ -1948,7 +1968,15 @@ static int cache_create(struct cache_args *ca, struct cache **result) r = create_cache_policy(cache, ca, error); if (r) goto bad; + cache->policy_nr_args = ca->policy_argc; + cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; + + r = set_config_values(cache, ca->policy_argc, ca->policy_argv); + if (r) { + *error = "Error setting cache policy's config values"; + goto bad; + } cmd = dm_cache_metadata_open(cache->metadata_dev->bdev, ca->block_size, may_format, @@ -1967,10 +1995,10 @@ static int 
cache_create(struct cache_args *ca, struct cache **result) INIT_LIST_HEAD(&cache->quiesced_migrations); INIT_LIST_HEAD(&cache->completed_migrations); INIT_LIST_HEAD(&cache->need_commit_migrations); - cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD; atomic_set(&cache->nr_migrations, 0); init_waitqueue_head(&cache->migration_wait); + r = -ENOMEM; cache->nr_dirty = 0; cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); if (!cache->dirty_bitset) { @@ -2517,23 +2545,6 @@ static void cache_status(struct dm_target *ti, status_type_t type, DMEMIT("Error"); } -#define NOT_CORE_OPTION 1 - -static int process_config_option(struct cache *cache, char **argv) -{ - unsigned long tmp; - - if (!strcasecmp(argv[0], "migration_threshold")) { - if (kstrtoul(argv[1], 10, &tmp)) - return -EINVAL; - - cache->migration_threshold = tmp; - return 0; - } - - return NOT_CORE_OPTION; -} - /* * Supports . * @@ -2541,17 +2552,12 @@ static int process_config_option(struct cache *cache, char **argv) */ static int cache_message(struct dm_target *ti, unsigned argc, char **argv) { - int r; struct cache *cache = ti->private; if (argc != 2) return -EINVAL; - r = process_config_option(cache, argv); - if (r == NOT_CORE_OPTION) - return policy_set_config_value(cache->policy, argv[0], argv[1]); - - return r; + return set_config_value(cache, argv[0], argv[1]); } static int cache_iterate_devices(struct dm_target *ti, @@ -2609,7 +2615,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits) static struct target_type cache_target = { .name = "cache", - .version = {1, 1, 0}, + .version = {1, 1, 1}, .module = THIS_MODULE, .ctr = cache_ctr, .dtr = cache_dtr, diff --git a/trunk/drivers/md/dm-mpath.c b/trunk/drivers/md/dm-mpath.c index 51bb81676be3..bdf26f5bd326 100644 --- a/trunk/drivers/md/dm-mpath.c +++ b/trunk/drivers/md/dm-mpath.c @@ -907,6 +907,7 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc, ti->num_flush_bios = 1; ti->num_discard_bios = 1; + ti->num_write_same_bios = 1; return 0; diff --git a/trunk/drivers/md/dm-snap.c b/trunk/drivers/md/dm-snap.c index c0e07026a8d1..c434e5aab2df 100644 --- a/trunk/drivers/md/dm-snap.c +++ b/trunk/drivers/md/dm-snap.c @@ -1121,6 +1121,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); if (!s->pending_pool) { ti->error = "Could not allocate mempool for pending exceptions"; + r = -ENOMEM; goto bad_pending_pool; } diff --git a/trunk/drivers/md/dm-stripe.c b/trunk/drivers/md/dm-stripe.c index ea5e878a30b9..d907ca6227ce 100644 --- a/trunk/drivers/md/dm-stripe.c +++ b/trunk/drivers/md/dm-stripe.c @@ -94,7 +94,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc, static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct stripe_c *sc; - sector_t width; + sector_t width, tmp_len; uint32_t stripes; uint32_t chunk_size; int r; @@ -116,15 +116,16 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv) } width = ti->len; - if (sector_div(width, chunk_size)) { + if (sector_div(width, stripes)) { ti->error = "Target length not divisible by " - "chunk size"; + "number of stripes"; return -EINVAL; } - if (sector_div(width, stripes)) { + tmp_len = width; + if (sector_div(tmp_len, chunk_size)) { ti->error = "Target length not divisible by " - "number of stripes"; + "chunk size"; return -EINVAL; } diff --git a/trunk/drivers/md/dm-table.c b/trunk/drivers/md/dm-table.c index 
e50dad0c65f4..1ff252ab7d46 100644 --- a/trunk/drivers/md/dm-table.c +++ b/trunk/drivers/md/dm-table.c @@ -1442,7 +1442,7 @@ static bool dm_table_supports_write_same(struct dm_table *t) return false; if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) + ti->type->iterate_devices(ti, device_not_write_same_capable, NULL)) return false; } diff --git a/trunk/drivers/md/dm-thin-metadata.c b/trunk/drivers/md/dm-thin-metadata.c index 00cee02f8fc9..60bce435f4fa 100644 --- a/trunk/drivers/md/dm-thin-metadata.c +++ b/trunk/drivers/md/dm-thin-metadata.c @@ -1645,12 +1645,12 @@ int dm_thin_get_highest_mapped_block(struct dm_thin_device *td, return r; } -static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) +static int __resize_space_map(struct dm_space_map *sm, dm_block_t new_count) { int r; dm_block_t old_count; - r = dm_sm_get_nr_blocks(pmd->data_sm, &old_count); + r = dm_sm_get_nr_blocks(sm, &old_count); if (r) return r; @@ -1658,11 +1658,11 @@ static int __resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) return 0; if (new_count < old_count) { - DMERR("cannot reduce size of data device"); + DMERR("cannot reduce size of space map"); return -EINVAL; } - return dm_sm_extend(pmd->data_sm, new_count - old_count); + return dm_sm_extend(sm, new_count - old_count); } int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) @@ -1671,7 +1671,19 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) down_write(&pmd->root_lock); if (!pmd->fail_io) - r = __resize_data_dev(pmd, new_count); + r = __resize_space_map(pmd->data_sm, new_count); + up_write(&pmd->root_lock); + + return r; +} + +int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_count) +{ + int r = -EINVAL; + + down_write(&pmd->root_lock); + if (!pmd->fail_io) + r = __resize_space_map(pmd->metadata_sm, new_count); up_write(&pmd->root_lock); return r; @@ -1684,3 +1696,17 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd) dm_bm_set_read_only(pmd->bm); up_write(&pmd->root_lock); } + +int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + int r; + + down_write(&pmd->root_lock); + r = dm_sm_register_threshold_callback(pmd->metadata_sm, threshold, fn, context); + up_write(&pmd->root_lock); + + return r; +} diff --git a/trunk/drivers/md/dm-thin-metadata.h b/trunk/drivers/md/dm-thin-metadata.h index 0cecc3702885..845ebbe589a9 100644 --- a/trunk/drivers/md/dm-thin-metadata.h +++ b/trunk/drivers/md/dm-thin-metadata.h @@ -8,6 +8,7 @@ #define DM_THIN_METADATA_H #include "persistent-data/dm-block-manager.h" +#include "persistent-data/dm-space-map.h" #define THIN_METADATA_BLOCK_SIZE 4096 @@ -185,6 +186,7 @@ int dm_pool_get_data_dev_size(struct dm_pool_metadata *pmd, dm_block_t *result); * blocks would be lost. 
*/ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); +int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); /* * Flicks the underlying block manager into read only mode, so you know @@ -192,6 +194,11 @@ int dm_pool_resize_data_dev(struct dm_pool_metadata *pmd, dm_block_t new_size); */ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd); +int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context); + /*----------------------------------------------------------------*/ #endif diff --git a/trunk/drivers/md/dm-thin.c b/trunk/drivers/md/dm-thin.c index 004ad1652b73..88f2f802d528 100644 --- a/trunk/drivers/md/dm-thin.c +++ b/trunk/drivers/md/dm-thin.c @@ -922,7 +922,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) return r; if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { - DMWARN("%s: reached low water mark, sending event.", + DMWARN("%s: reached low water mark for data device: sending event.", dm_device_name(pool->pool_md)); spin_lock_irqsave(&pool->lock, flags); pool->low_water_triggered = 1; @@ -1281,6 +1281,10 @@ static void process_bio_fail(struct thin_c *tc, struct bio *bio) bio_io_error(bio); } +/* + * FIXME: should we also commit due to size of transaction, measured in + * metadata blocks? + */ static int need_commit_due_to_time(struct pool *pool) { return jiffies < pool->last_commit_jiffies || @@ -1909,6 +1913,56 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, return r; } +static void metadata_low_callback(void *context) +{ + struct pool *pool = context; + + DMWARN("%s: reached low water mark for metadata device: sending event.", + dm_device_name(pool->pool_md)); + + dm_table_event(pool->ti->table); +} + +static sector_t get_metadata_dev_size(struct block_device *bdev) +{ + sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT; + char buffer[BDEVNAME_SIZE]; + + if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) { + DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", + bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS); + metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING; + } + + return metadata_dev_size; +} + +static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev) +{ + sector_t metadata_dev_size = get_metadata_dev_size(bdev); + + sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT); + + return metadata_dev_size; +} + +/* + * When a metadata threshold is crossed a dm event is triggered, and + * userland should respond by growing the metadata device. We could let + * userland set the threshold, like we do with the data threshold, but I'm + * not sure they know enough to do this well. + */ +static dm_block_t calc_metadata_threshold(struct pool_c *pt) +{ + /* + * 4M is ample for all ops with the possible exception of thin + * device deletion which is harmless if it fails (just retry the + * delete after you've grown the device). 
+ */ + dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4; + return min((dm_block_t)1024ULL /* 4M */, quarter); +} + /* * thin-pool * @@ -1931,8 +1985,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) unsigned long block_size; dm_block_t low_water_blocks; struct dm_dev *metadata_dev; - sector_t metadata_dev_size; - char b[BDEVNAME_SIZE]; + fmode_t metadata_mode; /* * FIXME Remove validation from scope of lock. @@ -1944,19 +1997,32 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) r = -EINVAL; goto out_unlock; } + as.argc = argc; as.argv = argv; - r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev); + /* + * Set default pool features. + */ + pool_features_init(&pf); + + dm_consume_args(&as, 4); + r = parse_pool_features(&as, &pf, ti); + if (r) + goto out_unlock; + + metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE); + r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev); if (r) { ti->error = "Error opening metadata block device"; goto out_unlock; } - metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT; - if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) - DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.", - bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS); + /* + * Run for the side-effect of possibly issuing a warning if the + * device is too big. + */ + (void) get_metadata_dev_size(metadata_dev->bdev); r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev); if (r) { @@ -1979,16 +2045,6 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) goto out; } - /* - * Set default pool features. - */ - pool_features_init(&pf); - - dm_consume_args(&as, 4); - r = parse_pool_features(&as, &pf, ti); - if (r) - goto out; - pt = kzalloc(sizeof(*pt), GFP_KERNEL); if (!pt) { r = -ENOMEM; @@ -2040,6 +2096,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) } ti->private = pt; + r = dm_pool_register_metadata_threshold(pt->pool->pmd, + calc_metadata_threshold(pt), + metadata_low_callback, + pool); + if (r) + goto out_free_pt; + pt->callbacks.congested_fn = pool_is_congested; dm_table_add_target_callbacks(ti->table, &pt->callbacks); @@ -2079,18 +2142,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio) return r; } -/* - * Retrieves the number of blocks of the data device from - * the superblock and compares it to the actual device size, - * thus resizing the data device in case it has grown. - * - * This both copes with opening preallocated data devices in the ctr - * being followed by a resume - * -and- - * calling the resume method individually after userspace has - * grown the data device in reaction to a table event. - */ -static int pool_preresume(struct dm_target *ti) +static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) { int r; struct pool_c *pt = ti->private; @@ -2098,12 +2150,7 @@ static int pool_preresume(struct dm_target *ti) sector_t data_size = ti->len; dm_block_t sb_data_size; - /* - * Take control of the pool object. 
- */ - r = bind_control_target(pool, ti); - if (r) - return r; + *need_commit = false; (void) sector_div(data_size, pool->sectors_per_block); @@ -2114,7 +2161,7 @@ static int pool_preresume(struct dm_target *ti) } if (data_size < sb_data_size) { - DMERR("pool target too small, is %llu blocks (expected %llu)", + DMERR("pool target (%llu blocks) too small: expected %llu", (unsigned long long)data_size, sb_data_size); return -EINVAL; @@ -2122,17 +2169,90 @@ static int pool_preresume(struct dm_target *ti) r = dm_pool_resize_data_dev(pool->pmd, data_size); if (r) { DMERR("failed to resize data device"); - /* FIXME Stricter than necessary: Rollback transaction instead here */ set_pool_mode(pool, PM_READ_ONLY); return r; } - (void) commit_or_fallback(pool); + *need_commit = true; } return 0; } +static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) +{ + int r; + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + dm_block_t metadata_dev_size, sb_metadata_dev_size; + + *need_commit = false; + + metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); + + r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); + if (r) { + DMERR("failed to retrieve data device size"); + return r; + } + + if (metadata_dev_size < sb_metadata_dev_size) { + DMERR("metadata device (%llu blocks) too small: expected %llu", + metadata_dev_size, sb_metadata_dev_size); + return -EINVAL; + + } else if (metadata_dev_size > sb_metadata_dev_size) { + r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); + if (r) { + DMERR("failed to resize metadata device"); + return r; + } + + *need_commit = true; + } + + return 0; +} + +/* + * Retrieves the number of blocks of the data device from + * the superblock and compares it to the actual device size, + * thus resizing the data device in case it has grown. + * + * This both copes with opening preallocated data devices in the ctr + * being followed by a resume + * -and- + * calling the resume method individually after userspace has + * grown the data device in reaction to a table event. + */ +static int pool_preresume(struct dm_target *ti) +{ + int r; + bool need_commit1, need_commit2; + struct pool_c *pt = ti->private; + struct pool *pool = pt->pool; + + /* + * Take control of the pool object. 
+ */ + r = bind_control_target(pool, ti); + if (r) + return r; + + r = maybe_resize_data_dev(ti, &need_commit1); + if (r) + return r; + + r = maybe_resize_metadata_dev(ti, &need_commit2); + if (r) + return r; + + if (need_commit1 || need_commit2) + (void) commit_or_fallback(pool); + + return 0; +} + static void pool_resume(struct dm_target *ti) { struct pool_c *pt = ti->private; @@ -2549,7 +2669,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 7, 0}, + .version = {1, 8, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, diff --git a/trunk/drivers/md/persistent-data/dm-space-map-disk.c b/trunk/drivers/md/persistent-data/dm-space-map-disk.c index f6d29e614ab7..e735a6d5a793 100644 --- a/trunk/drivers/md/persistent-data/dm-space-map-disk.c +++ b/trunk/drivers/md/persistent-data/dm-space-map-disk.c @@ -248,7 +248,8 @@ static struct dm_space_map ops = { .new_block = sm_disk_new_block, .commit = sm_disk_commit, .root_size = sm_disk_root_size, - .copy_root = sm_disk_copy_root + .copy_root = sm_disk_copy_root, + .register_threshold_callback = NULL }; struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, diff --git a/trunk/drivers/md/persistent-data/dm-space-map-metadata.c b/trunk/drivers/md/persistent-data/dm-space-map-metadata.c index 906cf3df71af..1c959684caef 100644 --- a/trunk/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/trunk/drivers/md/persistent-data/dm-space-map-metadata.c @@ -16,6 +16,55 @@ /*----------------------------------------------------------------*/ +/* + * An edge triggered threshold. + */ +struct threshold { + bool threshold_set; + bool value_set; + dm_block_t threshold; + dm_block_t current_value; + dm_sm_threshold_fn fn; + void *context; +}; + +static void threshold_init(struct threshold *t) +{ + t->threshold_set = false; + t->value_set = false; +} + +static void set_threshold(struct threshold *t, dm_block_t value, + dm_sm_threshold_fn fn, void *context) +{ + t->threshold_set = true; + t->threshold = value; + t->fn = fn; + t->context = context; +} + +static bool below_threshold(struct threshold *t, dm_block_t value) +{ + return t->threshold_set && value <= t->threshold; +} + +static bool threshold_already_triggered(struct threshold *t) +{ + return t->value_set && below_threshold(t, t->current_value); +} + +static void check_threshold(struct threshold *t, dm_block_t value) +{ + if (below_threshold(t, value) && + !threshold_already_triggered(t)) + t->fn(t->context); + + t->value_set = true; + t->current_value = value; +} + +/*----------------------------------------------------------------*/ + /* * Space map interface. 
* @@ -54,6 +103,8 @@ struct sm_metadata { unsigned allocated_this_transaction; unsigned nr_uncommitted; struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; + + struct threshold threshold; }; static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) @@ -144,12 +195,6 @@ static void sm_metadata_destroy(struct dm_space_map *sm) kfree(smm); } -static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) -{ - DMERR("doesn't support extend"); - return -EINVAL; -} - static int sm_metadata_get_nr_blocks(struct dm_space_map *sm, dm_block_t *count) { struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); @@ -335,9 +380,19 @@ static int sm_metadata_new_block_(struct dm_space_map *sm, dm_block_t *b) static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b) { + dm_block_t count; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + int r = sm_metadata_new_block_(sm, b); if (r) DMERR("unable to allocate new metadata block"); + + r = sm_metadata_get_nr_free(sm, &count); + if (r) + DMERR("couldn't get free block count"); + + check_threshold(&smm->threshold, count); + return r; } @@ -357,6 +412,18 @@ static int sm_metadata_commit(struct dm_space_map *sm) return 0; } +static int sm_metadata_register_threshold_callback(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + + set_threshold(&smm->threshold, threshold, fn, context); + + return 0; +} + static int sm_metadata_root_size(struct dm_space_map *sm, size_t *result) { *result = sizeof(struct disk_sm_root); @@ -382,6 +449,8 @@ static int sm_metadata_copy_root(struct dm_space_map *sm, void *where_le, size_t return 0; } +static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks); + static struct dm_space_map ops = { .destroy = sm_metadata_destroy, .extend = sm_metadata_extend, @@ -395,7 +464,8 @@ static struct dm_space_map ops = { .new_block = sm_metadata_new_block, .commit = sm_metadata_commit, .root_size = sm_metadata_root_size, - .copy_root = sm_metadata_copy_root + .copy_root = sm_metadata_copy_root, + .register_threshold_callback = sm_metadata_register_threshold_callback }; /*----------------------------------------------------------------*/ @@ -410,7 +480,7 @@ static void sm_bootstrap_destroy(struct dm_space_map *sm) static int sm_bootstrap_extend(struct dm_space_map *sm, dm_block_t extra_blocks) { - DMERR("boostrap doesn't support extend"); + DMERR("bootstrap doesn't support extend"); return -EINVAL; } @@ -450,7 +520,7 @@ static int sm_bootstrap_count_is_more_than_one(struct dm_space_map *sm, static int sm_bootstrap_set_count(struct dm_space_map *sm, dm_block_t b, uint32_t count) { - DMERR("boostrap doesn't support set_count"); + DMERR("bootstrap doesn't support set_count"); return -EINVAL; } @@ -491,7 +561,7 @@ static int sm_bootstrap_commit(struct dm_space_map *sm) static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) { - DMERR("boostrap doesn't support root_size"); + DMERR("bootstrap doesn't support root_size"); return -EINVAL; } @@ -499,7 +569,7 @@ static int sm_bootstrap_root_size(struct dm_space_map *sm, size_t *result) static int sm_bootstrap_copy_root(struct dm_space_map *sm, void *where, size_t max) { - DMERR("boostrap doesn't support copy_root"); + DMERR("bootstrap doesn't support copy_root"); return -EINVAL; } @@ -517,11 +587,42 @@ static struct dm_space_map bootstrap_ops = { .new_block = 
sm_bootstrap_new_block, .commit = sm_bootstrap_commit, .root_size = sm_bootstrap_root_size, - .copy_root = sm_bootstrap_copy_root + .copy_root = sm_bootstrap_copy_root, + .register_threshold_callback = NULL }; /*----------------------------------------------------------------*/ +static int sm_metadata_extend(struct dm_space_map *sm, dm_block_t extra_blocks) +{ + int r, i; + enum allocation_event ev; + struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); + dm_block_t old_len = smm->ll.nr_blocks; + + /* + * Flick into a mode where all blocks get allocated in the new area. + */ + smm->begin = old_len; + memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); + + /* + * Extend. + */ + r = sm_ll_extend(&smm->ll, extra_blocks); + + /* + * Switch back to normal behaviour. + */ + memcpy(&smm->sm, &ops, sizeof(smm->sm)); + for (i = old_len; !r && i < smm->begin; i++) + r = sm_ll_inc(&smm->ll, i, &ev); + + return r; +} + +/*----------------------------------------------------------------*/ + struct dm_space_map *dm_sm_metadata_init(void) { struct sm_metadata *smm; @@ -549,6 +650,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm, smm->recursion_count = 0; smm->allocated_this_transaction = 0; smm->nr_uncommitted = 0; + threshold_init(&smm->threshold); memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); @@ -590,6 +692,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm, smm->recursion_count = 0; smm->allocated_this_transaction = 0; smm->nr_uncommitted = 0; + threshold_init(&smm->threshold); memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); return 0; diff --git a/trunk/drivers/md/persistent-data/dm-space-map.h b/trunk/drivers/md/persistent-data/dm-space-map.h index 1cbfc6b1638a..3e6d1153b7c4 100644 --- a/trunk/drivers/md/persistent-data/dm-space-map.h +++ b/trunk/drivers/md/persistent-data/dm-space-map.h @@ -9,6 +9,8 @@ #include "dm-block-manager.h" +typedef void (*dm_sm_threshold_fn)(void *context); + /* * struct dm_space_map keeps a record of how many times each block in a device * is referenced. It needs to be fixed on disk as part of the transaction. @@ -59,6 +61,15 @@ struct dm_space_map { */ int (*root_size)(struct dm_space_map *sm, size_t *result); int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len); + + /* + * You can register one threshold callback which is edge-triggered + * when the free space in the space map drops below the threshold. 
+ */ + int (*register_threshold_callback)(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context); }; /*----------------------------------------------------------------*/ @@ -131,4 +142,16 @@ static inline int dm_sm_copy_root(struct dm_space_map *sm, void *copy_to_here_le return sm->copy_root(sm, copy_to_here_le, len); } +static inline int dm_sm_register_threshold_callback(struct dm_space_map *sm, + dm_block_t threshold, + dm_sm_threshold_fn fn, + void *context) +{ + if (sm->register_threshold_callback) + return sm->register_threshold_callback(sm, threshold, fn, context); + + return -EINVAL; +} + + #endif /* _LINUX_DM_SPACE_MAP_H */ diff --git a/trunk/drivers/memory/emif.c b/trunk/drivers/memory/emif.c index cadf1cc19aaf..04644e7b42b1 100644 --- a/trunk/drivers/memory/emif.c +++ b/trunk/drivers/memory/emif.c @@ -1560,12 +1560,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev) platform_set_drvdata(pdev, emif); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(emif->dev, "%s: error getting memory resource\n", - __func__); - goto error; - } - emif->base = devm_ioremap_resource(emif->dev, res); if (IS_ERR(emif->base)) goto error; diff --git a/trunk/drivers/mfd/Kconfig b/trunk/drivers/mfd/Kconfig index d9aed1593e5d..d54e985748b7 100644 --- a/trunk/drivers/mfd/Kconfig +++ b/trunk/drivers/mfd/Kconfig @@ -579,7 +579,7 @@ config AB8500_CORE config AB8500_DEBUG bool "Enable debug info via debugfs" - depends on AB8500_CORE && DEBUG_FS + depends on AB8500_GPADC && DEBUG_FS default y if DEBUG_FS help Select this option if you want debug information using the debug @@ -818,6 +818,7 @@ config MFD_TPS65910 config MFD_TPS65912 bool "TI TPS65912 Power Management chip" depends on GPIOLIB + select MFD_CORE help If you say yes here you get support for the TPS65912 series of PM chips. diff --git a/trunk/drivers/mfd/ab8500-core.c b/trunk/drivers/mfd/ab8500-core.c index 8e8a016effe9..258b367e3989 100644 --- a/trunk/drivers/mfd/ab8500-core.c +++ b/trunk/drivers/mfd/ab8500-core.c @@ -867,6 +867,15 @@ static struct resource ab8500_chargalg_resources[] = {}; #ifdef CONFIG_DEBUG_FS static struct resource ab8500_debug_resources[] = { + { + .name = "IRQ_AB8500", + /* + * Number will be filled in. NOTE: this is deliberately + * not flagged as an IRQ in order to avoid remapping using + * the irqdomain in the MFD core, so that this IRQ passes + * unremapped to the debug code.
+ */ + }, { .name = "IRQ_FIRST", .start = AB8500_INT_MAIN_EXT_CH_NOT_OK, @@ -1051,6 +1060,7 @@ static struct mfd_cell ab8500_devs[] = { }, { .name = "ab8500-gpadc", + .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), .resources = ab8500_gpadc_resources, }, @@ -1097,7 +1107,7 @@ static struct mfd_cell ab8500_devs[] = { .of_compatible = "stericsson,ab8500-denc", }, { - .name = "ab8500-gpio", + .name = "pinctrl-ab8500", .of_compatible = "stericsson,ab8500-gpio", }, { @@ -1208,6 +1218,7 @@ static struct mfd_cell ab8505_devs[] = { }, { .name = "ab8500-gpadc", + .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8505_gpadc_resources), .resources = ab8505_gpadc_resources, }, @@ -1234,7 +1245,7 @@ static struct mfd_cell ab8505_devs[] = { .name = "ab8500-leds", }, { - .name = "ab8500-gpio", + .name = "pinctrl-ab8505", }, { .name = "ab8500-usb", @@ -1271,6 +1282,7 @@ static struct mfd_cell ab8540_devs[] = { }, { .name = "ab8500-gpadc", + .of_compatible = "stericsson,ab8500-gpadc", .num_resources = ARRAY_SIZE(ab8505_gpadc_resources), .resources = ab8505_gpadc_resources, }, @@ -1302,7 +1314,7 @@ static struct mfd_cell ab8540_devs[] = { .resources = ab8500_temp_resources, }, { - .name = "ab8500-gpio", + .name = "pinctrl-ab8540", }, { .name = "ab8540-usb", @@ -1712,6 +1724,12 @@ static int ab8500_probe(struct platform_device *pdev) if (ret) return ret; +#if CONFIG_DEBUG_FS + /* Pass to debugfs */ + ab8500_debug_resources[0].start = ab8500->irq; + ab8500_debug_resources[0].end = ab8500->irq; +#endif + if (is_ab9540(ab8500)) ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs, ARRAY_SIZE(ab9540_devs), NULL, diff --git a/trunk/drivers/mfd/ab8500-debugfs.c b/trunk/drivers/mfd/ab8500-debugfs.c index b88bbbc15f1e..37b7ce4c7c3b 100644 --- a/trunk/drivers/mfd/ab8500-debugfs.c +++ b/trunk/drivers/mfd/ab8500-debugfs.c @@ -91,12 +91,10 @@ #include #endif -/* TODO: this file should not reference IRQ_DB8500_AB8500! 
*/ -#include - static u32 debug_bank; static u32 debug_address; +static int irq_ab8500; static int irq_first; static int irq_last; static u32 *irq_count; @@ -1589,7 +1587,7 @@ void ab8500_debug_register_interrupt(int line) { if (line < num_interrupt_lines) { num_interrupts[line]++; - if (suspend_test_wake_cause_interrupt_is_mine(IRQ_DB8500_AB8500)) + if (suspend_test_wake_cause_interrupt_is_mine(irq_ab8500)) num_wake_interrupts[line]++; } } @@ -2941,6 +2939,7 @@ static int ab8500_debug_probe(struct platform_device *plf) struct dentry *file; int ret = -ENOMEM; struct ab8500 *ab8500; + struct resource *res; debug_bank = AB8500_MISC; debug_address = AB8500_REV_REG & 0x00FF; @@ -2959,6 +2958,15 @@ static int ab8500_debug_probe(struct platform_device *plf) if (!event_name) goto out_freedev_attr; + res = platform_get_resource_byname(plf, 0, "IRQ_AB8500"); + if (!res) { + dev_err(&plf->dev, "AB8500 irq not found, err %d\n", + irq_first); + ret = -ENXIO; + goto out_freeevent_name; + } + irq_ab8500 = res->start; + irq_first = platform_get_irq_byname(plf, "IRQ_FIRST"); if (irq_first < 0) { dev_err(&plf->dev, "First irq not found, err %d\n", diff --git a/trunk/drivers/mfd/ab8500-gpadc.c b/trunk/drivers/mfd/ab8500-gpadc.c index 5e65b28a5d09..13f7866de46e 100644 --- a/trunk/drivers/mfd/ab8500-gpadc.c +++ b/trunk/drivers/mfd/ab8500-gpadc.c @@ -907,14 +907,17 @@ static int ab8500_gpadc_suspend(struct device *dev) static int ab8500_gpadc_resume(struct device *dev) { struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); + int ret; - regulator_enable(gpadc->regu); + ret = regulator_enable(gpadc->regu); + if (ret) + dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret); pm_runtime_mark_last_busy(gpadc->dev); pm_runtime_put_autosuspend(gpadc->dev); mutex_unlock(&gpadc->ab8500_gpadc_lock); - return 0; + return ret; } static int ab8500_gpadc_probe(struct platform_device *pdev) diff --git a/trunk/drivers/mfd/ab8500-sysctrl.c b/trunk/drivers/mfd/ab8500-sysctrl.c index fbca1ced49fa..8e0dae59844d 100644 --- a/trunk/drivers/mfd/ab8500-sysctrl.c +++ b/trunk/drivers/mfd/ab8500-sysctrl.c @@ -23,7 +23,7 @@ static struct device *sysctrl_dev; -void ab8500_power_off(void) +static void ab8500_power_off(void) { sigset_t old; sigset_t all; @@ -104,7 +104,7 @@ void ab8500_restart(char mode, const char *cmd) plat = dev_get_platdata(sysctrl_dev->parent); pdata = plat->sysctrl; - if (pdata->reboot_reason_code) + if (pdata && pdata->reboot_reason_code) reason = pdata->reboot_reason_code(cmd); else pr_warn("[%s] No reboot reason set. 
Default reason %d\n", @@ -188,14 +188,15 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev) plat = dev_get_platdata(pdev->dev.parent); - if (!(plat && plat->sysctrl)) + if (!plat) return -EINVAL; - if (plat->pm_power_off) + sysctrl_dev = &pdev->dev; + + if (!pm_power_off) pm_power_off = ab8500_power_off; pdata = plat->sysctrl; - if (pdata) { int last, ret, i, j; @@ -226,6 +227,10 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev) static int ab8500_sysctrl_remove(struct platform_device *pdev) { sysctrl_dev = NULL; + + if (pm_power_off == ab8500_power_off) + pm_power_off = NULL; + return 0; } diff --git a/trunk/drivers/mfd/abx500-core.c b/trunk/drivers/mfd/abx500-core.c index 9818afba2515..3714acb61458 100644 --- a/trunk/drivers/mfd/abx500-core.c +++ b/trunk/drivers/mfd/abx500-core.c @@ -156,7 +156,7 @@ EXPORT_SYMBOL(abx500_startup_irq_enabled); void abx500_dump_all_banks(void) { struct abx500_ops *ops; - struct device dummy_child = {0}; + struct device dummy_child = {NULL}; struct abx500_device_entry *dev_entry; list_for_each_entry(dev_entry, &abx500_list, list) { diff --git a/trunk/drivers/mfd/cros_ec_spi.c b/trunk/drivers/mfd/cros_ec_spi.c index 19193cf1e7a1..367ccb58ecb1 100644 --- a/trunk/drivers/mfd/cros_ec_spi.c +++ b/trunk/drivers/mfd/cros_ec_spi.c @@ -120,7 +120,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) { if (*ptr == EC_MSG_HEADER) { - dev_dbg(ec_dev->dev, "msg found at %ld\n", + dev_dbg(ec_dev->dev, "msg found at %zd\n", ptr - ec_dev->din); break; } @@ -154,7 +154,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, * maximum-supported transfer size. */ todo = min(need_len, 256); - dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%ld\n", + dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n", todo, need_len, ptr - ec_dev->din); memset(&trans, '\0', sizeof(trans)); @@ -178,7 +178,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev, need_len -= todo; } - dev_dbg(ec_dev->dev, "loop done, ptr=%ld\n", ptr - ec_dev->din); + dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din); return 0; } diff --git a/trunk/drivers/mfd/db8500-prcmu.c b/trunk/drivers/mfd/db8500-prcmu.c index 319b8abe742b..66f80973596b 100644 --- a/trunk/drivers/mfd/db8500-prcmu.c +++ b/trunk/drivers/mfd/db8500-prcmu.c @@ -1613,6 +1613,8 @@ static unsigned long dsiclk_rate(u8 n) if (divsel == PRCM_DSI_PLLOUT_SEL_OFF) divsel = dsiclk[n].divsel; + else + dsiclk[n].divsel = divsel; switch (divsel) { case PRCM_DSI_PLLOUT_SEL_PHI_4: @@ -3095,6 +3097,7 @@ static struct mfd_cell db8500_prcmu_devs[] = { .num_resources = ARRAY_SIZE(db8500_thsens_resources), .resources = db8500_thsens_resources, .platform_data = &db8500_thsens_data, + .pdata_size = sizeof(db8500_thsens_data), }, }; diff --git a/trunk/drivers/mfd/intel_msic.c b/trunk/drivers/mfd/intel_msic.c index 5be3b5e13855..d8d5137f9717 100644 --- a/trunk/drivers/mfd/intel_msic.c +++ b/trunk/drivers/mfd/intel_msic.c @@ -414,11 +414,6 @@ static int intel_msic_probe(struct platform_device *pdev) * the clients via intel_msic_irq_read(). 
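A note on the resource-handling hunks in this patch (emif.c earlier, the intel_msic.c hunk that continues just below, and atmel-ssc.c further down): the explicit "if (!res)" checks after platform_get_resource() are dropped because devm_ioremap_resource() validates the resource itself and returns an ERR_PTR for a missing or non-MEM resource. A minimal probe() sketch of the resulting pattern, with hypothetical foo_* names not taken from this patch:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        /* No NULL check needed: a bad res makes devm_ioremap_resource() return ERR_PTR(-EINVAL). */
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* ... program the mapped registers via "base" ... */
        return 0;
}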
*/ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "failed to get SRAM iomem resource\n"); - return -ENODEV; - } - msic->irq_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(msic->irq_base)) return PTR_ERR(msic->irq_base); diff --git a/trunk/drivers/mfd/si476x-cmd.c b/trunk/drivers/mfd/si476x-cmd.c index de48b4e88450..6f1ef63086c9 100644 --- a/trunk/drivers/mfd/si476x-cmd.c +++ b/trunk/drivers/mfd/si476x-cmd.c @@ -29,6 +29,8 @@ #include +#include + #define msb(x) ((u8)((u16) x >> 8)) #define lsb(x) ((u8)((u16) x & 0x00FF)) @@ -150,7 +152,7 @@ enum si476x_acf_status_report_bits { SI476X_ACF_SOFTMUTE_INT = (1 << 0), SI476X_ACF_SMUTE = (1 << 0), - SI476X_ACF_SMATTN = 0b11111, + SI476X_ACF_SMATTN = 0x1f, SI476X_ACF_PILOT = (1 << 7), SI476X_ACF_STBLEND = ~SI476X_ACF_PILOT, }; @@ -483,7 +485,7 @@ int si476x_core_cmd_get_property(struct si476x_core *core, u16 property) if (err < 0) return err; else - return be16_to_cpup((__be16 *)(resp + 2)); + return get_unaligned_be16(resp + 2); } EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property); @@ -772,18 +774,18 @@ int si476x_core_cmd_am_rsq_status(struct si476x_core *core, if (!report) return err; - report->snrhint = 0b00001000 & resp[1]; - report->snrlint = 0b00000100 & resp[1]; - report->rssihint = 0b00000010 & resp[1]; - report->rssilint = 0b00000001 & resp[1]; + report->snrhint = 0x08 & resp[1]; + report->snrlint = 0x04 & resp[1]; + report->rssihint = 0x02 & resp[1]; + report->rssilint = 0x01 & resp[1]; - report->bltf = 0b10000000 & resp[2]; - report->snr_ready = 0b00100000 & resp[2]; - report->rssiready = 0b00001000 & resp[2]; - report->afcrl = 0b00000010 & resp[2]; - report->valid = 0b00000001 & resp[2]; + report->bltf = 0x80 & resp[2]; + report->snr_ready = 0x20 & resp[2]; + report->rssiready = 0x08 & resp[2]; + report->afcrl = 0x02 & resp[2]; + report->valid = 0x01 & resp[2]; - report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); + report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -931,26 +933,26 @@ int si476x_core_cmd_fm_rds_status(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->rdstpptyint = 0b00010000 & resp[1]; - report->rdspiint = 0b00001000 & resp[1]; - report->rdssyncint = 0b00000010 & resp[1]; - report->rdsfifoint = 0b00000001 & resp[1]; + report->rdstpptyint = 0x10 & resp[1]; + report->rdspiint = 0x08 & resp[1]; + report->rdssyncint = 0x02 & resp[1]; + report->rdsfifoint = 0x01 & resp[1]; - report->tpptyvalid = 0b00010000 & resp[2]; - report->pivalid = 0b00001000 & resp[2]; - report->rdssync = 0b00000010 & resp[2]; - report->rdsfifolost = 0b00000001 & resp[2]; + report->tpptyvalid = 0x10 & resp[2]; + report->pivalid = 0x08 & resp[2]; + report->rdssync = 0x02 & resp[2]; + report->rdsfifolost = 0x01 & resp[2]; - report->tp = 0b00100000 & resp[3]; - report->pty = 0b00011111 & resp[3]; + report->tp = 0x20 & resp[3]; + report->pty = 0x1f & resp[3]; - report->pi = be16_to_cpup((__be16 *)(resp + 4)); + report->pi = get_unaligned_be16(resp + 4); report->rdsfifoused = resp[6]; - report->ble[V4L2_RDS_BLOCK_A] = 0b11000000 & resp[7]; - report->ble[V4L2_RDS_BLOCK_B] = 0b00110000 & resp[7]; - report->ble[V4L2_RDS_BLOCK_C] = 0b00001100 & resp[7]; - report->ble[V4L2_RDS_BLOCK_D] = 0b00000011 & resp[7]; + report->ble[V4L2_RDS_BLOCK_A] = 0xc0 & resp[7]; + report->ble[V4L2_RDS_BLOCK_B] = 0x30 & resp[7]; + report->ble[V4L2_RDS_BLOCK_C] = 0x0c & resp[7]; + report->ble[V4L2_RDS_BLOCK_D] = 0x03 & resp[7]; 
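The si476x conversions around this point do two mechanical things: compiler-specific binary literals (0b...) become plain hex masks, and be16_to_cpup() applied to a cast response pointer becomes get_unaligned_be16(), so no aligned __be16 access is implied for odd offsets into the resp[] buffer. As a hedged illustration (not code from the driver), this is the byte-wise operation get_unaligned_be16() performs:

#include <linux/types.h>

/* Open-coded equivalent of get_unaligned_be16(p): safe at any alignment. */
static inline u16 be16_bytewise(const u8 *p)
{
        return (u16)p[0] << 8 | p[1];
}

/* e.g. be16_bytewise(resp + 3) yields the same value as get_unaligned_be16(resp + 3) used for readfreq below. */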
report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A; report->rds[V4L2_RDS_BLOCK_A].msb = resp[8]; @@ -991,9 +993,9 @@ int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *core, SI476X_DEFAULT_TIMEOUT); if (!err) { - report->expected = be16_to_cpup((__be16 *)(resp + 2)); - report->received = be16_to_cpup((__be16 *)(resp + 4)); - report->uncorrectable = be16_to_cpup((__be16 *)(resp + 6)); + report->expected = get_unaligned_be16(resp + 2); + report->received = get_unaligned_be16(resp + 4); + report->uncorrectable = get_unaligned_be16(resp + 6); } return err; @@ -1005,7 +1007,7 @@ int si476x_core_cmd_fm_phase_diversity(struct si476x_core *core, { u8 resp[CMD_FM_PHASE_DIVERSITY_NRESP]; const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = { - mode & 0b111, + mode & 0x07, }; return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY, @@ -1162,7 +1164,7 @@ static int si476x_core_cmd_am_tune_freq_a20(struct si476x_core *core, const int am_freq = tuneargs->freq; u8 resp[CMD_AM_TUNE_FREQ_NRESP]; const u8 args[CMD_AM_TUNE_FREQ_NARGS] = { - (tuneargs->zifsr << 6) | (tuneargs->injside & 0b11), + (tuneargs->zifsr << 6) | (tuneargs->injside & 0x03), msb(am_freq), lsb(am_freq), }; @@ -1197,20 +1199,20 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0b10000000 & resp[1]; - report->multlint = 0b01000000 & resp[1]; - report->snrhint = 0b00001000 & resp[1]; - report->snrlint = 0b00000100 & resp[1]; - report->rssihint = 0b00000010 & resp[1]; - report->rssilint = 0b00000001 & resp[1]; + report->multhint = 0x80 & resp[1]; + report->multlint = 0x40 & resp[1]; + report->snrhint = 0x08 & resp[1]; + report->snrlint = 0x04 & resp[1]; + report->rssihint = 0x02 & resp[1]; + report->rssilint = 0x01 & resp[1]; - report->bltf = 0b10000000 & resp[2]; - report->snr_ready = 0b00100000 & resp[2]; - report->rssiready = 0b00001000 & resp[2]; - report->afcrl = 0b00000010 & resp[2]; - report->valid = 0b00000001 & resp[2]; + report->bltf = 0x80 & resp[2]; + report->snr_ready = 0x20 & resp[2]; + report->rssiready = 0x08 & resp[2]; + report->afcrl = 0x02 & resp[2]; + report->valid = 0x01 & resp[2]; - report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); + report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1218,7 +1220,7 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); + report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; @@ -1251,20 +1253,20 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0b10000000 & resp[1]; - report->multlint = 0b01000000 & resp[1]; - report->snrhint = 0b00001000 & resp[1]; - report->snrlint = 0b00000100 & resp[1]; - report->rssihint = 0b00000010 & resp[1]; - report->rssilint = 0b00000001 & resp[1]; + report->multhint = 0x80 & resp[1]; + report->multlint = 0x40 & resp[1]; + report->snrhint = 0x08 & resp[1]; + report->snrlint = 0x04 & resp[1]; + report->rssihint = 0x02 & resp[1]; + report->rssilint = 0x01 & resp[1]; - report->bltf = 0b10000000 & resp[2]; - report->snr_ready = 0b00100000 & resp[2]; - report->rssiready = 0b00001000 & resp[2]; - report->afcrl = 0b00000010 & resp[2]; - report->valid = 0b00000001 & resp[2]; + report->bltf = 0x80 & resp[2]; + 
report->snr_ready = 0x20 & resp[2]; + report->rssiready = 0x08 & resp[2]; + report->afcrl = 0x02 & resp[2]; + report->valid = 0x01 & resp[2]; - report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); + report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1272,7 +1274,7 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); + report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; @@ -1306,21 +1308,21 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, if (err < 0 || report == NULL) return err; - report->multhint = 0b10000000 & resp[1]; - report->multlint = 0b01000000 & resp[1]; - report->snrhint = 0b00001000 & resp[1]; - report->snrlint = 0b00000100 & resp[1]; - report->rssihint = 0b00000010 & resp[1]; - report->rssilint = 0b00000001 & resp[1]; - - report->bltf = 0b10000000 & resp[2]; - report->snr_ready = 0b00100000 & resp[2]; - report->rssiready = 0b00001000 & resp[2]; - report->injside = 0b00000100 & resp[2]; - report->afcrl = 0b00000010 & resp[2]; - report->valid = 0b00000001 & resp[2]; - - report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); + report->multhint = 0x80 & resp[1]; + report->multlint = 0x40 & resp[1]; + report->snrhint = 0x08 & resp[1]; + report->snrlint = 0x04 & resp[1]; + report->rssihint = 0x02 & resp[1]; + report->rssilint = 0x01 & resp[1]; + + report->bltf = 0x80 & resp[2]; + report->snr_ready = 0x20 & resp[2]; + report->rssiready = 0x08 & resp[2]; + report->injside = 0x04 & resp[2]; + report->afcrl = 0x02 & resp[2]; + report->valid = 0x01 & resp[2]; + + report->readfreq = get_unaligned_be16(resp + 3); report->freqoff = resp[5]; report->rssi = resp[6]; report->snr = resp[7]; @@ -1329,7 +1331,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, report->hassi = resp[10]; report->mult = resp[11]; report->dev = resp[12]; - report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); + report->readantcap = get_unaligned_be16(resp + 13); report->assi = resp[15]; report->usn = resp[16]; @@ -1337,7 +1339,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core, report->rdsdev = resp[18]; report->assidev = resp[19]; report->strongdev = resp[20]; - report->rdspi = be16_to_cpup((__be16 *)(resp + 21)); + report->rdspi = get_unaligned_be16(resp + 21); return err; } diff --git a/trunk/drivers/misc/atmel-ssc.c b/trunk/drivers/misc/atmel-ssc.c index c09c28f92055..1abd5ad59925 100644 --- a/trunk/drivers/misc/atmel-ssc.c +++ b/trunk/drivers/misc/atmel-ssc.c @@ -154,11 +154,6 @@ static int ssc_probe(struct platform_device *pdev) ssc->pdata = (struct atmel_ssc_platform_data *)plat_dat; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) { - dev_dbg(&pdev->dev, "no mmio resource defined\n"); - return -ENXIO; - } - ssc->regs = devm_ioremap_resource(&pdev->dev, regs); if (IS_ERR(ssc->regs)) return PTR_ERR(ssc->regs); diff --git a/trunk/drivers/misc/dummy-irq.c b/trunk/drivers/misc/dummy-irq.c index 7014167e2c61..c37eeedfe215 100644 --- a/trunk/drivers/misc/dummy-irq.c +++ b/trunk/drivers/misc/dummy-irq.c @@ -19,7 +19,7 @@ #include #include -static int irq; +static int irq = -1; static irqreturn_t dummy_interrupt(int irq, void *dev_id) { @@ -36,6 +36,10 @@ static irqreturn_t dummy_interrupt(int irq, void *dev_id) static int __init 
dummy_irq_init(void) { + if (irq < 0) { + printk(KERN_ERR "dummy-irq: no IRQ given. Use irq=N\n"); + return -EIO; + } if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) { printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq); return -EIO; diff --git a/trunk/drivers/misc/mei/bus.c b/trunk/drivers/misc/mei/bus.c index 1e935eacaa7f..9ecd49a7be1b 100644 --- a/trunk/drivers/misc/mei/bus.c +++ b/trunk/drivers/misc/mei/bus.c @@ -496,6 +496,8 @@ int mei_cl_disable_device(struct mei_cl_device *device) } } + device->event_cb = NULL; + mutex_unlock(&dev->device_lock); if (!device->ops || !device->ops->disable) diff --git a/trunk/drivers/misc/mei/main.c b/trunk/drivers/misc/mei/main.c index 7c44c8dbae42..053139f61086 100644 --- a/trunk/drivers/misc/mei/main.c +++ b/trunk/drivers/misc/mei/main.c @@ -489,11 +489,16 @@ static int mei_ioctl_connect_client(struct file *file, /* find ME client we're trying to connect to */ i = mei_me_cl_by_uuid(dev, &data->in_client_uuid); - if (i >= 0 && !dev->me_clients[i].props.fixed_address) { - cl->me_client_id = dev->me_clients[i].client_id; - cl->state = MEI_FILE_CONNECTING; + if (i < 0 || dev->me_clients[i].props.fixed_address) { + dev_dbg(&dev->pdev->dev, "Cannot connect to FW Client UUID = %pUl\n", + &data->in_client_uuid); + rets = -ENODEV; + goto end; } + cl->me_client_id = dev->me_clients[i].client_id; + cl->state = MEI_FILE_CONNECTING; + dev_dbg(&dev->pdev->dev, "Connect to FW Client ID = %d\n", cl->me_client_id); dev_dbg(&dev->pdev->dev, "FW Client - Protocol Version = %d\n", @@ -527,11 +532,6 @@ static int mei_ioctl_connect_client(struct file *file, goto end; } - if (cl->state != MEI_FILE_CONNECTING) { - rets = -ENODEV; - goto end; - } - /* prepare the output buffer */ client = &data->out_client_properties; @@ -543,7 +543,6 @@ static int mei_ioctl_connect_client(struct file *file, rets = mei_cl_connect(cl, file); end: - dev_dbg(&dev->pdev->dev, "free connect cb memory."); return rets; } diff --git a/trunk/drivers/misc/vmw_vmci/Kconfig b/trunk/drivers/misc/vmw_vmci/Kconfig index ea98f7e9ccd1..39c2ecadb273 100644 --- a/trunk/drivers/misc/vmw_vmci/Kconfig +++ b/trunk/drivers/misc/vmw_vmci/Kconfig @@ -4,7 +4,7 @@ config VMWARE_VMCI tristate "VMware VMCI Driver" - depends on X86 && PCI && NET + depends on X86 && PCI help This is VMware's Virtual Machine Communication Interface. 
It enables high-speed communication between host and guest in a virtual diff --git a/trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c b/trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c index d94245dbd765..8ff2e5ee8fb8 100644 --- a/trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/trunk/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -23,7 +23,7 @@ #include #include #include -#include +#include #include #include diff --git a/trunk/drivers/mmc/host/mmci.c b/trunk/drivers/mmc/host/mmci.c index 375c109607ff..f4f3038c1df0 100644 --- a/trunk/drivers/mmc/host/mmci.c +++ b/trunk/drivers/mmc/host/mmci.c @@ -1130,6 +1130,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct variant_data *variant = host->variant; u32 pwr = 0; unsigned long flags; + int ret; pm_runtime_get_sync(mmc_dev(mmc)); @@ -1161,8 +1162,12 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) break; case MMC_POWER_ON: if (!IS_ERR(mmc->supply.vqmmc) && - !regulator_is_enabled(mmc->supply.vqmmc)) - regulator_enable(mmc->supply.vqmmc); + !regulator_is_enabled(mmc->supply.vqmmc)) { + ret = regulator_enable(mmc->supply.vqmmc); + if (ret < 0) + dev_err(mmc_dev(mmc), + "failed to enable vqmmc regulator\n"); + } pwr |= MCI_PWR_ON; break; diff --git a/trunk/drivers/mtd/Kconfig b/trunk/drivers/mtd/Kconfig index 557bec599f4f..5fab4e6e8301 100644 --- a/trunk/drivers/mtd/Kconfig +++ b/trunk/drivers/mtd/Kconfig @@ -157,19 +157,6 @@ config MTD_BCM47XX_PARTS comment "User Modules And Translation Layers" -config MTD_CHAR - tristate "Direct char device access to MTD devices" - help - This provides a character device for each MTD device present in - the system, allowing the user to read and write directly to the - memory chips, and also use ioctl() to obtain information about - the device, or to erase parts of it. - -config HAVE_MTD_OTP - bool - help - Enable access to OTP regions using MTD_CHAR. - config MTD_BLKDEVS tristate "Common interface to block layer for MTD 'translation layers'" depends on BLOCK diff --git a/trunk/drivers/mtd/Makefile b/trunk/drivers/mtd/Makefile index 18a38e55b2f0..4cfb31e6c966 100644 --- a/trunk/drivers/mtd/Makefile +++ b/trunk/drivers/mtd/Makefile @@ -4,7 +4,7 @@ # Core functionality. obj-$(CONFIG_MTD) += mtd.o -mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o +mtd-y := mtdcore.o mtdsuper.o mtdconcat.o mtdpart.o mtdchar.o obj-$(CONFIG_MTD_OF_PARTS) += ofpart.o obj-$(CONFIG_MTD_REDBOOT_PARTS) += redboot.o @@ -15,7 +15,6 @@ obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o # 'Users' - code which presents functionality to userspace. 
-obj-$(CONFIG_MTD_CHAR) += mtdchar.o obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o obj-$(CONFIG_MTD_BLOCK) += mtdblock.o obj-$(CONFIG_MTD_BLOCK_RO) += mtdblock_ro.o diff --git a/trunk/drivers/mtd/chips/Kconfig b/trunk/drivers/mtd/chips/Kconfig index c219e3d098d9..e4696b37f3de 100644 --- a/trunk/drivers/mtd/chips/Kconfig +++ b/trunk/drivers/mtd/chips/Kconfig @@ -146,7 +146,6 @@ config MTD_CFI_I8 config MTD_OTP bool "Protection Registers aka one-time programmable (OTP) bits" depends on MTD_CFI_ADV_OPTIONS - select HAVE_MTD_OTP default n help This enables support for reading, writing and locking so called diff --git a/trunk/drivers/mtd/devices/Kconfig b/trunk/drivers/mtd/devices/Kconfig index 12311f506ca1..2a4d55e4b362 100644 --- a/trunk/drivers/mtd/devices/Kconfig +++ b/trunk/drivers/mtd/devices/Kconfig @@ -71,7 +71,6 @@ config MTD_DATAFLASH_WRITE_VERIFY config MTD_DATAFLASH_OTP bool "DataFlash OTP support (Security Register)" depends on MTD_DATAFLASH - select HAVE_MTD_OTP help Newer DataFlash chips (revisions C and D) support 128 bytes of one-time-programmable (OTP) data. The first half may be written @@ -205,69 +204,6 @@ config MTD_BLOCK2MTD comment "Disk-On-Chip Device Drivers" -config MTD_DOC2000 - tristate "M-Systems Disk-On-Chip 2000 and Millennium (DEPRECATED)" - depends on MTD_NAND - select MTD_DOCPROBE - select MTD_NAND_IDS - ---help--- - This provides an MTD device driver for the M-Systems DiskOnChip - 2000 and Millennium devices. Originally designed for the DiskOnChip - 2000, it also now includes support for the DiskOnChip Millennium. - If you have problems with this driver and the DiskOnChip Millennium, - you may wish to try the alternative Millennium driver below. To use - the alternative driver, you will need to undefine DOC_SINGLE_DRIVER - in the source code. - - If you use this device, you probably also want to enable the NFTL - 'NAND Flash Translation Layer' option below, which is used to - emulate a block device by using a kind of file system on the flash - chips. - - NOTE: This driver is deprecated and will probably be removed soon. - Please try the new DiskOnChip driver under "NAND Flash Device - Drivers". - -config MTD_DOC2001 - tristate "M-Systems Disk-On-Chip Millennium-only alternative driver (DEPRECATED)" - depends on MTD_NAND - select MTD_DOCPROBE - select MTD_NAND_IDS - ---help--- - This provides an alternative MTD device driver for the M-Systems - DiskOnChip Millennium devices. Use this if you have problems with - the combined DiskOnChip 2000 and Millennium driver above. To get - the DiskOnChip probe code to load and use this driver instead of - the other one, you will need to undefine DOC_SINGLE_DRIVER near - the beginning of . - - If you use this device, you probably also want to enable the NFTL - 'NAND Flash Translation Layer' option below, which is used to - emulate a block device by using a kind of file system on the flash - chips. - - NOTE: This driver is deprecated and will probably be removed soon. - Please try the new DiskOnChip driver under "NAND Flash Device - Drivers". - -config MTD_DOC2001PLUS - tristate "M-Systems Disk-On-Chip Millennium Plus" - depends on MTD_NAND - select MTD_DOCPROBE - select MTD_NAND_IDS - ---help--- - This provides an MTD device driver for the M-Systems DiskOnChip - Millennium Plus devices. - - If you use this device, you probably also want to enable the INFTL - 'Inverse NAND Flash Translation Layer' option below, which is used - to emulate a block device by using a kind of file system on the - flash chips. 
- - NOTE: This driver will soon be replaced by the new DiskOnChip driver - under "NAND Flash Device Drivers" (currently that driver does not - support all Millennium Plus devices). - config MTD_DOCG3 tristate "M-Systems Disk-On-Chip G3" select BCH diff --git a/trunk/drivers/mtd/devices/Makefile b/trunk/drivers/mtd/devices/Makefile index 369a1943ca25..d83bd73096f6 100644 --- a/trunk/drivers/mtd/devices/Makefile +++ b/trunk/drivers/mtd/devices/Makefile @@ -2,12 +2,7 @@ # linux/drivers/mtd/devices/Makefile # -obj-$(CONFIG_MTD_DOC2000) += doc2000.o -obj-$(CONFIG_MTD_DOC2001) += doc2001.o -obj-$(CONFIG_MTD_DOC2001PLUS) += doc2001plus.o obj-$(CONFIG_MTD_DOCG3) += docg3.o -obj-$(CONFIG_MTD_DOCPROBE) += docprobe.o -obj-$(CONFIG_MTD_DOCECC) += docecc.o obj-$(CONFIG_MTD_SLRAM) += slram.o obj-$(CONFIG_MTD_PHRAM) += phram.o obj-$(CONFIG_MTD_PMC551) += pmc551.o diff --git a/trunk/drivers/mtd/devices/bcm47xxsflash.c b/trunk/drivers/mtd/devices/bcm47xxsflash.c index 95266285acb1..18e7761137a3 100644 --- a/trunk/drivers/mtd/devices/bcm47xxsflash.c +++ b/trunk/drivers/mtd/devices/bcm47xxsflash.c @@ -10,7 +10,7 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Serial flash driver for BCMA bus"); -static const char *probes[] = { "bcm47xxpart", NULL }; +static const char * const probes[] = { "bcm47xxpart", NULL }; static int bcm47xxsflash_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) @@ -61,6 +61,17 @@ static int bcm47xxsflash_bcma_probe(struct platform_device *pdev) } sflash->priv = b47s; + b47s->bcma_cc = container_of(sflash, struct bcma_drv_cc, sflash); + + switch (b47s->bcma_cc->capabilities & BCMA_CC_CAP_FLASHT) { + case BCMA_CC_FLASHT_STSER: + b47s->type = BCM47XXSFLASH_TYPE_ST; + break; + case BCMA_CC_FLASHT_ATSER: + b47s->type = BCM47XXSFLASH_TYPE_ATMEL; + break; + } + b47s->window = sflash->window; b47s->blocksize = sflash->blocksize; b47s->numblocks = sflash->numblocks; diff --git a/trunk/drivers/mtd/devices/bcm47xxsflash.h b/trunk/drivers/mtd/devices/bcm47xxsflash.h index ebf6f710e23c..f22f8c46dfc0 100644 --- a/trunk/drivers/mtd/devices/bcm47xxsflash.h +++ b/trunk/drivers/mtd/devices/bcm47xxsflash.h @@ -3,7 +3,66 @@ #include +/* Used for ST flashes only. */ +#define OPCODE_ST_WREN 0x0006 /* Write Enable */ +#define OPCODE_ST_WRDIS 0x0004 /* Write Disable */ +#define OPCODE_ST_RDSR 0x0105 /* Read Status Register */ +#define OPCODE_ST_WRSR 0x0101 /* Write Status Register */ +#define OPCODE_ST_READ 0x0303 /* Read Data Bytes */ +#define OPCODE_ST_PP 0x0302 /* Page Program */ +#define OPCODE_ST_SE 0x02d8 /* Sector Erase */ +#define OPCODE_ST_BE 0x00c7 /* Bulk Erase */ +#define OPCODE_ST_DP 0x00b9 /* Deep Power-down */ +#define OPCODE_ST_RES 0x03ab /* Read Electronic Signature */ +#define OPCODE_ST_CSA 0x1000 /* Keep chip select asserted */ +#define OPCODE_ST_SSE 0x0220 /* Sub-sector Erase */ + +/* Used for Atmel flashes only. 
*/ +#define OPCODE_AT_READ 0x07e8 +#define OPCODE_AT_PAGE_READ 0x07d2 +#define OPCODE_AT_STATUS 0x01d7 +#define OPCODE_AT_BUF1_WRITE 0x0384 +#define OPCODE_AT_BUF2_WRITE 0x0387 +#define OPCODE_AT_BUF1_ERASE_PROGRAM 0x0283 +#define OPCODE_AT_BUF2_ERASE_PROGRAM 0x0286 +#define OPCODE_AT_BUF1_PROGRAM 0x0288 +#define OPCODE_AT_BUF2_PROGRAM 0x0289 +#define OPCODE_AT_PAGE_ERASE 0x0281 +#define OPCODE_AT_BLOCK_ERASE 0x0250 +#define OPCODE_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 +#define OPCODE_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 +#define OPCODE_AT_BUF1_LOAD 0x0253 +#define OPCODE_AT_BUF2_LOAD 0x0255 +#define OPCODE_AT_BUF1_COMPARE 0x0260 +#define OPCODE_AT_BUF2_COMPARE 0x0261 +#define OPCODE_AT_BUF1_REPROGRAM 0x0258 +#define OPCODE_AT_BUF2_REPROGRAM 0x0259 + +/* Status register bits for ST flashes */ +#define SR_ST_WIP 0x01 /* Write In Progress */ +#define SR_ST_WEL 0x02 /* Write Enable Latch */ +#define SR_ST_BP_MASK 0x1c /* Block Protect */ +#define SR_ST_BP_SHIFT 2 +#define SR_ST_SRWD 0x80 /* Status Register Write Disable */ + +/* Status register bits for Atmel flashes */ +#define SR_AT_READY 0x80 +#define SR_AT_MISMATCH 0x40 +#define SR_AT_ID_MASK 0x38 +#define SR_AT_ID_SHIFT 3 + +struct bcma_drv_cc; + +enum bcm47xxsflash_type { + BCM47XXSFLASH_TYPE_ATMEL, + BCM47XXSFLASH_TYPE_ST, +}; + struct bcm47xxsflash { + struct bcma_drv_cc *bcma_cc; + + enum bcm47xxsflash_type type; + u32 window; u32 blocksize; u16 numblocks; diff --git a/trunk/drivers/mtd/devices/doc2000.c b/trunk/drivers/mtd/devices/doc2000.c deleted file mode 100644 index a4eb8b5b85ec..000000000000 --- a/trunk/drivers/mtd/devices/doc2000.c +++ /dev/null @@ -1,1178 +0,0 @@ - -/* - * Linux driver for Disk-On-Chip 2000 and Millennium - * (c) 1999 Machine Vision Holdings, Inc. - * (c) 1999, 2000 David Woodhouse - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define DOC_SUPPORT_2000 -#define DOC_SUPPORT_2000TSOP -#define DOC_SUPPORT_MILLENNIUM - -#ifdef DOC_SUPPORT_2000 -#define DoC_is_2000(doc) (doc->ChipID == DOC_ChipID_Doc2k) -#else -#define DoC_is_2000(doc) (0) -#endif - -#if defined(DOC_SUPPORT_2000TSOP) || defined(DOC_SUPPORT_MILLENNIUM) -#define DoC_is_Millennium(doc) (doc->ChipID == DOC_ChipID_DocMil) -#else -#define DoC_is_Millennium(doc) (0) -#endif - -/* #define ECC_DEBUG */ - -/* I have no idea why some DoC chips can not use memcpy_from|to_io(). - * This may be due to the different revisions of the ASIC controller built-in or - * simplily a QA/Bug issue. Who knows ?? 
If you have trouble, please uncomment - * this: - #undef USE_MEMCPY -*/ - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf); -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len, - size_t *retlen, const u_char *buf); -static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); - -static struct mtd_info *doc2klist = NULL; - -/* Perform the required delay cycles by reading from the appropriate register */ -static void DoC_Delay(struct DiskOnChip *doc, unsigned short cycles) -{ - volatile char dummy; - int i; - - for (i = 0; i < cycles; i++) { - if (DoC_is_Millennium(doc)) - dummy = ReadDOC(doc->virtadr, NOP); - else - dummy = ReadDOC(doc->virtadr, DOCStatus); - } - -} - -/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ -static int _DoC_WaitReady(struct DiskOnChip *doc) -{ - void __iomem *docptr = doc->virtadr; - unsigned long timeo = jiffies + (HZ * 10); - - pr_debug("_DoC_WaitReady called for out-of-line wait\n"); - - /* Out-of-line routine to wait for chip response */ - while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) { - /* issue 2 read from NOP register after reading from CDSNControl register - see Software Requirement 11.4 item 2. */ - DoC_Delay(doc, 2); - - if (time_after(jiffies, timeo)) { - pr_debug("_DoC_WaitReady timed out.\n"); - return -EIO; - } - udelay(1); - cond_resched(); - } - - return 0; -} - -static inline int DoC_WaitReady(struct DiskOnChip *doc) -{ - void __iomem *docptr = doc->virtadr; - - /* This is inline, to optimise the common case, where it's ready instantly */ - int ret = 0; - - /* 4 read form NOP register should be issued in prior to the read from CDSNControl - see Software Requirement 11.4 item 2. */ - DoC_Delay(doc, 4); - - if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) - /* Call the out-of-line routine to wait */ - ret = _DoC_WaitReady(doc); - - /* issue 2 read from NOP register after reading from CDSNControl register - see Software Requirement 11.4 item 2. */ - DoC_Delay(doc, 2); - - return ret; -} - -/* DoC_Command: Send a flash command to the flash chip through the CDSN Slow IO register to - bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is - required after writing to CDSN Control register, see Software Requirement 11.4 item 3. 
*/ - -static int DoC_Command(struct DiskOnChip *doc, unsigned char command, - unsigned char xtraflags) -{ - void __iomem *docptr = doc->virtadr; - - if (DoC_is_2000(doc)) - xtraflags |= CDSN_CTRL_FLASH_IO; - - /* Assert the CLE (Command Latch Enable) line to the flash chip */ - WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - if (DoC_is_Millennium(doc)) - WriteDOC(command, docptr, CDSNSlowIO); - - /* Send the command */ - WriteDOC_(command, docptr, doc->ioreg); - if (DoC_is_Millennium(doc)) - WriteDOC(command, docptr, WritePipeTerm); - - /* Lower the CLE line */ - WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Wait for the chip to respond - Software requirement 11.4.1 (extended for any command) */ - return DoC_WaitReady(doc); -} - -/* DoC_Address: Set the current address for the flash chip through the CDSN Slow IO register to - bypass the internal pipeline. Each of 4 delay cycles (read from the NOP register) is - required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ - -static int DoC_Address(struct DiskOnChip *doc, int numbytes, unsigned long ofs, - unsigned char xtraflags1, unsigned char xtraflags2) -{ - int i; - void __iomem *docptr = doc->virtadr; - - if (DoC_is_2000(doc)) - xtraflags1 |= CDSN_CTRL_FLASH_IO; - - /* Assert the ALE (Address Latch Enable) line to the flash chip */ - WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl); - - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Send the address */ - /* Devices with 256-byte page are addressed as: - Column (bits 0-7), Page (bits 8-15, 16-23, 24-31) - * there is no device on the market with page256 - and more than 24 bits. - Devices with 512-byte page are addressed as: - Column (bits 0-7), Page (bits 9-16, 17-24, 25-31) - * 25-31 is sent only if the chip support it. - * bit 8 changes the read command to be sent - (NAND_CMD_READ0 or NAND_CMD_READ1). - */ - - if (numbytes == ADDR_COLUMN || numbytes == ADDR_COLUMN_PAGE) { - if (DoC_is_Millennium(doc)) - WriteDOC(ofs & 0xff, docptr, CDSNSlowIO); - WriteDOC_(ofs & 0xff, docptr, doc->ioreg); - } - - if (doc->page256) { - ofs = ofs >> 8; - } else { - ofs = ofs >> 9; - } - - if (numbytes == ADDR_PAGE || numbytes == ADDR_COLUMN_PAGE) { - for (i = 0; i < doc->pageadrlen; i++, ofs = ofs >> 8) { - if (DoC_is_Millennium(doc)) - WriteDOC(ofs & 0xff, docptr, CDSNSlowIO); - WriteDOC_(ofs & 0xff, docptr, doc->ioreg); - } - } - - if (DoC_is_Millennium(doc)) - WriteDOC(ofs & 0xff, docptr, WritePipeTerm); - - DoC_Delay(doc, 2); /* Needed for some slow flash chips. mf. */ - - /* FIXME: The SlowIO's for millennium could be replaced by - a single WritePipeTerm here. mf. 
*/ - - /* Lower the ALE line */ - WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, - CDSNControl); - - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Wait for the chip to respond - Software requirement 11.4.1 */ - return DoC_WaitReady(doc); -} - -/* Read a buffer from DoC, taking care of Millennium odditys */ -static void DoC_ReadBuf(struct DiskOnChip *doc, u_char * buf, int len) -{ - volatile int dummy; - int modulus = 0xffff; - void __iomem *docptr = doc->virtadr; - int i; - - if (len <= 0) - return; - - if (DoC_is_Millennium(doc)) { - /* Read the data via the internal pipeline through CDSN IO register, - see Pipelined Read Operations 11.3 */ - dummy = ReadDOC(docptr, ReadPipeInit); - - /* Millennium should use the LastDataRead register - Pipeline Reads */ - len--; - - /* This is needed for correctly ECC calculation */ - modulus = 0xff; - } - - for (i = 0; i < len; i++) - buf[i] = ReadDOC_(docptr, doc->ioreg + (i & modulus)); - - if (DoC_is_Millennium(doc)) { - buf[i] = ReadDOC(docptr, LastDataRead); - } -} - -/* Write a buffer to DoC, taking care of Millennium odditys */ -static void DoC_WriteBuf(struct DiskOnChip *doc, const u_char * buf, int len) -{ - void __iomem *docptr = doc->virtadr; - int i; - - if (len <= 0) - return; - - for (i = 0; i < len; i++) - WriteDOC_(buf[i], docptr, doc->ioreg + i); - - if (DoC_is_Millennium(doc)) { - WriteDOC(0x00, docptr, WritePipeTerm); - } -} - - -/* DoC_SelectChip: Select a given flash chip within the current floor */ - -static inline int DoC_SelectChip(struct DiskOnChip *doc, int chip) -{ - void __iomem *docptr = doc->virtadr; - - /* Software requirement 11.4.4 before writing DeviceSelect */ - /* Deassert the CE line to eliminate glitches on the FCE# outputs */ - WriteDOC(CDSN_CTRL_WP, docptr, CDSNControl); - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Select the individual flash chip requested */ - WriteDOC(chip, docptr, CDSNDeviceSelect); - DoC_Delay(doc, 4); - - /* Reassert the CE line */ - WriteDOC(CDSN_CTRL_CE | CDSN_CTRL_FLASH_IO | CDSN_CTRL_WP, docptr, - CDSNControl); - DoC_Delay(doc, 4); /* Software requirement 11.4.3 for Millennium */ - - /* Wait for it to be ready */ - return DoC_WaitReady(doc); -} - -/* DoC_SelectFloor: Select a given floor (bank of flash chips) */ - -static inline int DoC_SelectFloor(struct DiskOnChip *doc, int floor) -{ - void __iomem *docptr = doc->virtadr; - - /* Select the floor (bank) of chips required */ - WriteDOC(floor, docptr, FloorSelect); - - /* Wait for the chip to be ready */ - return DoC_WaitReady(doc); -} - -/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */ - -static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip) -{ - int mfr, id, i, j; - volatile char dummy; - - /* Page in the required floor/chip */ - DoC_SelectFloor(doc, floor); - DoC_SelectChip(doc, chip); - - /* Reset the chip */ - if (DoC_Command(doc, NAND_CMD_RESET, CDSN_CTRL_WP)) { - pr_debug("DoC_Command (reset) for %d,%d returned true\n", - floor, chip); - return 0; - } - - - /* Read the NAND chip ID: 1. Send ReadID command */ - if (DoC_Command(doc, NAND_CMD_READID, CDSN_CTRL_WP)) { - pr_debug("DoC_Command (ReadID) for %d,%d returned true\n", - floor, chip); - return 0; - } - - /* Read the NAND chip ID: 2. 
Send address byte zero */ - DoC_Address(doc, ADDR_COLUMN, 0, CDSN_CTRL_WP, 0); - - /* Read the manufacturer and device id codes from the device */ - - if (DoC_is_Millennium(doc)) { - DoC_Delay(doc, 2); - dummy = ReadDOC(doc->virtadr, ReadPipeInit); - mfr = ReadDOC(doc->virtadr, LastDataRead); - - DoC_Delay(doc, 2); - dummy = ReadDOC(doc->virtadr, ReadPipeInit); - id = ReadDOC(doc->virtadr, LastDataRead); - } else { - /* CDSN Slow IO register see Software Req 11.4 item 5. */ - dummy = ReadDOC(doc->virtadr, CDSNSlowIO); - DoC_Delay(doc, 2); - mfr = ReadDOC_(doc->virtadr, doc->ioreg); - - /* CDSN Slow IO register see Software Req 11.4 item 5. */ - dummy = ReadDOC(doc->virtadr, CDSNSlowIO); - DoC_Delay(doc, 2); - id = ReadDOC_(doc->virtadr, doc->ioreg); - } - - /* No response - return failure */ - if (mfr == 0xff || mfr == 0) - return 0; - - /* Check it's the same as the first chip we identified. - * M-Systems say that any given DiskOnChip device should only - * contain _one_ type of flash part, although that's not a - * hardware restriction. */ - if (doc->mfr) { - if (doc->mfr == mfr && doc->id == id) - return 1; /* This is the same as the first */ - else - printk(KERN_WARNING - "Flash chip at floor %d, chip %d is different:\n", - floor, chip); - } - - /* Print and store the manufacturer and ID codes. */ - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if (id == nand_flash_ids[i].id) { - /* Try to identify manufacturer */ - for (j = 0; nand_manuf_ids[j].id != 0x0; j++) { - if (nand_manuf_ids[j].id == mfr) - break; - } - printk(KERN_INFO - "Flash chip found: Manufacturer ID: %2.2X, " - "Chip ID: %2.2X (%s:%s)\n", mfr, id, - nand_manuf_ids[j].name, nand_flash_ids[i].name); - if (!doc->mfr) { - doc->mfr = mfr; - doc->id = id; - doc->chipshift = - ffs((nand_flash_ids[i].chipsize << 20)) - 1; - doc->page256 = (nand_flash_ids[i].pagesize == 256) ? 1 : 0; - doc->pageadrlen = doc->chipshift > 25 ? 3 : 2; - doc->erasesize = - nand_flash_ids[i].erasesize; - return 1; - } - return 0; - } - } - - - /* We haven't fully identified the chip. Print as much as we know. */ - printk(KERN_WARNING "Unknown flash chip found: %2.2X %2.2X\n", - id, mfr); - - printk(KERN_WARNING "Please report to dwmw2@infradead.org\n"); - return 0; -} - -/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */ - -static void DoC_ScanChips(struct DiskOnChip *this, int maxchips) -{ - int floor, chip; - int numchips[MAX_FLOORS]; - int ret = 1; - - this->numchips = 0; - this->mfr = 0; - this->id = 0; - - /* For each floor, find the number of valid chips it contains */ - for (floor = 0; floor < MAX_FLOORS; floor++) { - ret = 1; - numchips[floor] = 0; - for (chip = 0; chip < maxchips && ret != 0; chip++) { - - ret = DoC_IdentChip(this, floor, chip); - if (ret) { - numchips[floor]++; - this->numchips++; - } - } - } - - /* If there are none at all that we recognise, bail */ - if (!this->numchips) { - printk(KERN_NOTICE "No flash chips recognised.\n"); - return; - } - - /* Allocate an array to hold the information for each chip */ - this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL); - if (!this->chips) { - printk(KERN_NOTICE "No memory for allocating chip info structures\n"); - return; - } - - ret = 0; - - /* Fill out the chip array with {floor, chipno} for each - * detected chip in the device. 
*/ - for (floor = 0; floor < MAX_FLOORS; floor++) { - for (chip = 0; chip < numchips[floor]; chip++) { - this->chips[ret].floor = floor; - this->chips[ret].chip = chip; - this->chips[ret].curadr = 0; - this->chips[ret].curmode = 0x50; - ret++; - } - } - - /* Calculate and print the total size of the device */ - this->totlen = this->numchips * (1 << this->chipshift); - - printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n", - this->numchips, this->totlen >> 20); -} - -static int DoC2k_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2) -{ - int tmp1, tmp2, retval; - if (doc1->physadr == doc2->physadr) - return 1; - - /* Use the alias resolution register which was set aside for this - * purpose. If it's value is the same on both chips, they might - * be the same chip, and we write to one and check for a change in - * the other. It's unclear if this register is usuable in the - * DoC 2000 (it's in the Millennium docs), but it seems to work. */ - tmp1 = ReadDOC(doc1->virtadr, AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, AliasResolution); - if (tmp1 != tmp2) - return 0; - - WriteDOC((tmp1 + 1) % 0xff, doc1->virtadr, AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, AliasResolution); - if (tmp2 == (tmp1 + 1) % 0xff) - retval = 1; - else - retval = 0; - - /* Restore register contents. May not be necessary, but do it just to - * be safe. */ - WriteDOC(tmp1, doc1->virtadr, AliasResolution); - - return retval; -} - -/* This routine is found from the docprobe code by symbol_get(), - * which will bump the use count of this module. */ -void DoC2k_init(struct mtd_info *mtd) -{ - struct DiskOnChip *this = mtd->priv; - struct DiskOnChip *old = NULL; - int maxchips; - - /* We must avoid being called twice for the same device. */ - - if (doc2klist) - old = doc2klist->priv; - - while (old) { - if (DoC2k_is_alias(old, this)) { - printk(KERN_NOTICE - "Ignoring DiskOnChip 2000 at 0x%lX - already configured\n", - this->physadr); - iounmap(this->virtadr); - kfree(mtd); - return; - } - if (old->nextdoc) - old = old->nextdoc->priv; - else - old = NULL; - } - - - switch (this->ChipID) { - case DOC_ChipID_Doc2kTSOP: - mtd->name = "DiskOnChip 2000 TSOP"; - this->ioreg = DoC_Mil_CDSN_IO; - /* Pretend it's a Millennium */ - this->ChipID = DOC_ChipID_DocMil; - maxchips = MAX_CHIPS; - break; - case DOC_ChipID_Doc2k: - mtd->name = "DiskOnChip 2000"; - this->ioreg = DoC_2k_CDSN_IO; - maxchips = MAX_CHIPS; - break; - case DOC_ChipID_DocMil: - mtd->name = "DiskOnChip Millennium"; - this->ioreg = DoC_Mil_CDSN_IO; - maxchips = MAX_CHIPS_MIL; - break; - default: - printk("Unknown ChipID 0x%02x\n", this->ChipID); - kfree(mtd); - iounmap(this->virtadr); - return; - } - - printk(KERN_NOTICE "%s found at address 0x%lX\n", mtd->name, - this->physadr); - - mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; - mtd->writebufsize = mtd->writesize = 512; - mtd->oobsize = 16; - mtd->ecc_strength = 2; - mtd->owner = THIS_MODULE; - mtd->_erase = doc_erase; - mtd->_read = doc_read; - mtd->_write = doc_write; - mtd->_read_oob = doc_read_oob; - mtd->_write_oob = doc_write_oob; - this->curfloor = -1; - this->curchip = -1; - mutex_init(&this->lock); - - /* Ident all the chips present. 
*/ - DoC_ScanChips(this, maxchips); - - if (!this->totlen) { - kfree(mtd); - iounmap(this->virtadr); - } else { - this->nextdoc = doc2klist; - doc2klist = mtd; - mtd->size = this->totlen; - mtd->erasesize = this->erasesize; - mtd_device_register(mtd, NULL, 0); - return; - } -} -EXPORT_SYMBOL_GPL(DoC2k_init); - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t * retlen, u_char * buf) -{ - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip; - unsigned char syndrome[6], eccbuf[6]; - volatile char dummy; - int i, len256 = 0, ret=0; - size_t left = len; - - mutex_lock(&this->lock); - while (left) { - len = left; - - /* Don't allow a single read to cross a 512-byte block boundary */ - if (from + len > ((from | 0x1ff) + 1)) - len = ((from | 0x1ff) + 1) - from; - - /* The ECC will not be calculated correctly if less than 512 is read */ - if (len != 0x200) - printk(KERN_WARNING - "ECC needs a full sector read (adr: %lx size %lx)\n", - (long) from, (long) len); - - /* printk("DoC_Read (adr: %lx size %lx)\n", (long) from, (long) len); */ - - - /* Find the chip which is to be used and select it */ - mychip = &this->chips[from >> (this->chipshift)]; - - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - DoC_Command(this, - (!this->page256 - && (from & 0x100)) ? NAND_CMD_READ1 : NAND_CMD_READ0, - CDSN_CTRL_WP); - DoC_Address(this, ADDR_COLUMN_PAGE, from, CDSN_CTRL_WP, - CDSN_CTRL_ECC_IO); - - /* Prime the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, ECCConf); - WriteDOC(DOC_ECC_EN, docptr, ECCConf); - - /* treat crossing 256-byte sector for 2M x 8bits devices */ - if (this->page256 && from + len > (from | 0xff) + 1) { - len256 = (from | 0xff) + 1 - from; - DoC_ReadBuf(this, buf, len256); - - DoC_Command(this, NAND_CMD_READ0, CDSN_CTRL_WP); - DoC_Address(this, ADDR_COLUMN_PAGE, from + len256, - CDSN_CTRL_WP, CDSN_CTRL_ECC_IO); - } - - DoC_ReadBuf(this, &buf[len256], len - len256); - - /* Let the caller know we completed it */ - *retlen += len; - - /* Read the ECC data through the DiskOnChip ECC logic */ - /* Note: this will work even with 2M x 8bit devices as */ - /* they have 8 bytes of OOB per 256 page. mf. */ - DoC_ReadBuf(this, eccbuf, 6); - - /* Flush the pipeline */ - if (DoC_is_Millennium(this)) { - dummy = ReadDOC(docptr, ECCConf); - dummy = ReadDOC(docptr, ECCConf); - i = ReadDOC(docptr, ECCConf); - } else { - dummy = ReadDOC(docptr, 2k_ECCStatus); - dummy = ReadDOC(docptr, 2k_ECCStatus); - i = ReadDOC(docptr, 2k_ECCStatus); - } - - /* Check the ECC Status */ - if (i & 0x80) { - int nb_errors; - /* There was an ECC error */ -#ifdef ECC_DEBUG - printk(KERN_ERR "DiskOnChip ECC Error: Read at %lx\n", (long)from); -#endif - /* Read the ECC syndrome through the DiskOnChip ECC - logic. These syndrome will be all ZERO when there - is no error */ - for (i = 0; i < 6; i++) { - syndrome[i] = - ReadDOC(docptr, ECCSyndrome0 + i); - } - nb_errors = doc_decode_ecc(buf, syndrome); - -#ifdef ECC_DEBUG - printk(KERN_ERR "Errors corrected: %x\n", nb_errors); -#endif - if (nb_errors < 0) { - /* We return error, but have actually done the - read. 
Not that this can be told to - user-space, via sys_read(), but at least - MTD-aware stuff can know about it by - checking *retlen */ - ret = -EIO; - } - } - -#ifdef PSYCHO_DEBUG - printk(KERN_DEBUG "ECC DATA at %lxB: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long)from, eccbuf[0], eccbuf[1], eccbuf[2], - eccbuf[3], eccbuf[4], eccbuf[5]); -#endif - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr , ECCConf); - - /* according to 11.4.1, we need to wait for the busy line - * drop if we read to the end of the page. */ - if(0 == ((from + len) & 0x1ff)) - { - DoC_WaitReady(this); - } - - from += len; - left -= len; - buf += len; - } - - mutex_unlock(&this->lock); - - return ret; -} - -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t * retlen, const u_char * buf) -{ - struct DiskOnChip *this = mtd->priv; - int di; /* Yes, DI is a hangover from when I was disassembling the binary driver */ - void __iomem *docptr = this->virtadr; - unsigned char eccbuf[6]; - volatile char dummy; - int len256 = 0; - struct Nand *mychip; - size_t left = len; - int status; - - mutex_lock(&this->lock); - while (left) { - len = left; - - /* Don't allow a single write to cross a 512-byte block boundary */ - if (to + len > ((to | 0x1ff) + 1)) - len = ((to | 0x1ff) + 1) - to; - - /* The ECC will not be calculated correctly if less than 512 is written */ -/* DBB- - if (len != 0x200 && eccbuf) - printk(KERN_WARNING - "ECC needs a full sector write (adr: %lx size %lx)\n", - (long) to, (long) len); - -DBB */ - - /* printk("DoC_Write (adr: %lx size %lx)\n", (long) to, (long) len); */ - - /* Find the chip which is to be used and select it */ - mychip = &this->chips[to >> (this->chipshift)]; - - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Set device to main plane of flash */ - DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP); - DoC_Command(this, - (!this->page256 - && (to & 0x100)) ? 
NAND_CMD_READ1 : NAND_CMD_READ0, - CDSN_CTRL_WP); - - DoC_Command(this, NAND_CMD_SEQIN, 0); - DoC_Address(this, ADDR_COLUMN_PAGE, to, 0, CDSN_CTRL_ECC_IO); - - /* Prime the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, ECCConf); - WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf); - - /* treat crossing 256-byte sector for 2M x 8bits devices */ - if (this->page256 && to + len > (to | 0xff) + 1) { - len256 = (to | 0xff) + 1 - to; - DoC_WriteBuf(this, buf, len256); - - DoC_Command(this, NAND_CMD_PAGEPROG, 0); - - DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); - /* There's an implicit DoC_WaitReady() in DoC_Command */ - - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - - if (ReadDOC_(docptr, this->ioreg) & 1) { - printk(KERN_ERR "Error programming flash\n"); - /* Error in programming */ - *retlen = 0; - mutex_unlock(&this->lock); - return -EIO; - } - - DoC_Command(this, NAND_CMD_SEQIN, 0); - DoC_Address(this, ADDR_COLUMN_PAGE, to + len256, 0, - CDSN_CTRL_ECC_IO); - } - - DoC_WriteBuf(this, &buf[len256], len - len256); - - WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_CE, docptr, CDSNControl); - - if (DoC_is_Millennium(this)) { - WriteDOC(0, docptr, NOP); - WriteDOC(0, docptr, NOP); - WriteDOC(0, docptr, NOP); - } else { - WriteDOC_(0, docptr, this->ioreg); - WriteDOC_(0, docptr, this->ioreg); - WriteDOC_(0, docptr, this->ioreg); - } - - WriteDOC(CDSN_CTRL_ECC_IO | CDSN_CTRL_FLASH_IO | CDSN_CTRL_CE, docptr, - CDSNControl); - - /* Read the ECC data through the DiskOnChip ECC logic */ - for (di = 0; di < 6; di++) { - eccbuf[di] = ReadDOC(docptr, ECCSyndrome0 + di); - } - - /* Reset the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr, ECCConf); - -#ifdef PSYCHO_DEBUG - printk - ("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], - eccbuf[4], eccbuf[5]); -#endif - DoC_Command(this, NAND_CMD_PAGEPROG, 0); - - DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); - /* There's an implicit DoC_WaitReady() in DoC_Command */ - - if (DoC_is_Millennium(this)) { - ReadDOC(docptr, ReadPipeInit); - status = ReadDOC(docptr, LastDataRead); - } else { - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - status = ReadDOC_(docptr, this->ioreg); - } - - if (status & 1) { - printk(KERN_ERR "Error programming flash\n"); - /* Error in programming */ - *retlen = 0; - mutex_unlock(&this->lock); - return -EIO; - } - - /* Let the caller know we completed it */ - *retlen += len; - - { - unsigned char x[8]; - size_t dummy; - int ret; - - /* Write the ECC data to flash */ - for (di=0; di<6; di++) - x[di] = eccbuf[di]; - - x[6]=0x55; - x[7]=0x55; - - ret = doc_write_oob_nolock(mtd, to, 8, &dummy, x); - if (ret) { - mutex_unlock(&this->lock); - return ret; - } - } - - to += len; - left -= len; - buf += len; - } - - mutex_unlock(&this->lock); - return 0; -} - -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - struct DiskOnChip *this = mtd->priv; - int len256 = 0, ret; - struct Nand *mychip; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - mutex_lock(&this->lock); - - mychip = &this->chips[ofs >> this->chipshift]; - - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* update address for 2M x 8bit devices. 
OOB starts on the second */ - /* page to maintain compatibility with doc_read_ecc. */ - if (this->page256) { - if (!(ofs & 0x8)) - ofs += 0x100; - else - ofs -= 0x8; - } - - DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); - DoC_Address(this, ADDR_COLUMN_PAGE, ofs, CDSN_CTRL_WP, 0); - - /* treat crossing 8-byte OOB data for 2M x 8bit devices */ - /* Note: datasheet says it should automaticaly wrap to the */ - /* next OOB block, but it didn't work here. mf. */ - if (this->page256 && ofs + len > (ofs | 0x7) + 1) { - len256 = (ofs | 0x7) + 1 - ofs; - DoC_ReadBuf(this, buf, len256); - - DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); - DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), - CDSN_CTRL_WP, 0); - } - - DoC_ReadBuf(this, &buf[len256], len - len256); - - ops->retlen = len; - /* Reading the full OOB data drops us off of the end of the page, - * causing the flash device to go into busy mode, so we need - * to wait until ready 11.4.1 and Toshiba TC58256FT docs */ - - ret = DoC_WaitReady(this); - - mutex_unlock(&this->lock); - return ret; - -} - -static int doc_write_oob_nolock(struct mtd_info *mtd, loff_t ofs, size_t len, - size_t * retlen, const u_char * buf) -{ - struct DiskOnChip *this = mtd->priv; - int len256 = 0; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - volatile int dummy; - int status; - - // printk("doc_write_oob(%lx, %d): %2.2X %2.2X %2.2X %2.2X ... %2.2X %2.2X .. %2.2X %2.2X\n",(long)ofs, len, - // buf[0], buf[1], buf[2], buf[3], buf[8], buf[9], buf[14],buf[15]); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* disable the ECC engine */ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_DIS, docptr, ECCConf); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(this, NAND_CMD_RESET, CDSN_CTRL_WP); - - /* issue the Read2 command to set the pointer to the Spare Data Area. */ - DoC_Command(this, NAND_CMD_READOOB, CDSN_CTRL_WP); - - /* update address for 2M x 8bit devices. OOB starts on the second */ - /* page to maintain compatibility with doc_read_ecc. */ - if (this->page256) { - if (!(ofs & 0x8)) - ofs += 0x100; - else - ofs -= 0x8; - } - - /* issue the Serial Data In command to initial the Page Program process */ - DoC_Command(this, NAND_CMD_SEQIN, 0); - DoC_Address(this, ADDR_COLUMN_PAGE, ofs, 0, 0); - - /* treat crossing 8-byte OOB data for 2M x 8bit devices */ - /* Note: datasheet says it should automaticaly wrap to the */ - /* next OOB block, but it didn't work here. mf. 
*/ - if (this->page256 && ofs + len > (ofs | 0x7) + 1) { - len256 = (ofs | 0x7) + 1 - ofs; - DoC_WriteBuf(this, buf, len256); - - DoC_Command(this, NAND_CMD_PAGEPROG, 0); - DoC_Command(this, NAND_CMD_STATUS, 0); - /* DoC_WaitReady() is implicit in DoC_Command */ - - if (DoC_is_Millennium(this)) { - ReadDOC(docptr, ReadPipeInit); - status = ReadDOC(docptr, LastDataRead); - } else { - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - status = ReadDOC_(docptr, this->ioreg); - } - - if (status & 1) { - printk(KERN_ERR "Error programming oob data\n"); - /* There was an error */ - *retlen = 0; - return -EIO; - } - DoC_Command(this, NAND_CMD_SEQIN, 0); - DoC_Address(this, ADDR_COLUMN_PAGE, ofs & (~0x1ff), 0, 0); - } - - DoC_WriteBuf(this, &buf[len256], len - len256); - - DoC_Command(this, NAND_CMD_PAGEPROG, 0); - DoC_Command(this, NAND_CMD_STATUS, 0); - /* DoC_WaitReady() is implicit in DoC_Command */ - - if (DoC_is_Millennium(this)) { - ReadDOC(docptr, ReadPipeInit); - status = ReadDOC(docptr, LastDataRead); - } else { - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - status = ReadDOC_(docptr, this->ioreg); - } - - if (status & 1) { - printk(KERN_ERR "Error programming oob data\n"); - /* There was an error */ - *retlen = 0; - return -EIO; - } - - *retlen = len; - return 0; - -} - -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - struct DiskOnChip *this = mtd->priv; - int ret; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - mutex_lock(&this->lock); - ret = doc_write_oob_nolock(mtd, ofs + ops->ooboffs, ops->len, - &ops->retlen, ops->oobbuf); - - mutex_unlock(&this->lock); - return ret; -} - -static int doc_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - struct DiskOnChip *this = mtd->priv; - __u32 ofs = instr->addr; - __u32 len = instr->len; - volatile int dummy; - void __iomem *docptr = this->virtadr; - struct Nand *mychip; - int status; - - mutex_lock(&this->lock); - - if (ofs & (mtd->erasesize-1) || len & (mtd->erasesize-1)) { - mutex_unlock(&this->lock); - return -EINVAL; - } - - instr->state = MTD_ERASING; - - /* FIXME: Do this in the background. 
Use timers or schedule_task() */ - while(len) { - mychip = &this->chips[ofs >> this->chipshift]; - - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(this, mychip->floor); - DoC_SelectChip(this, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(this, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - DoC_Command(this, NAND_CMD_ERASE1, 0); - DoC_Address(this, ADDR_PAGE, ofs, 0, 0); - DoC_Command(this, NAND_CMD_ERASE2, 0); - - DoC_Command(this, NAND_CMD_STATUS, CDSN_CTRL_WP); - - if (DoC_is_Millennium(this)) { - ReadDOC(docptr, ReadPipeInit); - status = ReadDOC(docptr, LastDataRead); - } else { - dummy = ReadDOC(docptr, CDSNSlowIO); - DoC_Delay(this, 2); - status = ReadDOC_(docptr, this->ioreg); - } - - if (status & 1) { - printk(KERN_ERR "Error erasing at 0x%x\n", ofs); - /* There was an error */ - instr->state = MTD_ERASE_FAILED; - goto callback; - } - ofs += mtd->erasesize; - len -= mtd->erasesize; - } - instr->state = MTD_ERASE_DONE; - - callback: - mtd_erase_callback(instr); - - mutex_unlock(&this->lock); - return 0; -} - - -/**************************************************************************** - * - * Module stuff - * - ****************************************************************************/ - -static void __exit cleanup_doc2000(void) -{ - struct mtd_info *mtd; - struct DiskOnChip *this; - - while ((mtd = doc2klist)) { - this = mtd->priv; - doc2klist = this->nextdoc; - - mtd_device_unregister(mtd); - - iounmap(this->virtadr); - kfree(this->chips); - kfree(mtd); - } -} - -module_exit(cleanup_doc2000); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Woodhouse et al."); -MODULE_DESCRIPTION("MTD driver for DiskOnChip 2000 and Millennium"); - diff --git a/trunk/drivers/mtd/devices/doc2001.c b/trunk/drivers/mtd/devices/doc2001.c deleted file mode 100644 index f6927955dab0..000000000000 --- a/trunk/drivers/mtd/devices/doc2001.c +++ /dev/null @@ -1,824 +0,0 @@ - -/* - * Linux driver for Disk-On-Chip Millennium - * (c) 1999 Machine Vision Holdings, Inc. - * (c) 1999, 2000 David Woodhouse - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -/* #define ECC_DEBUG */ - -/* I have no idea why some DoC chips can not use memcop_form|to_io(). - * This may be due to the different revisions of the ASIC controller built-in or - * simplily a QA/Bug issue. Who knows ?? 
If you have trouble, please uncomment - * this:*/ -#undef USE_MEMCPY - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf); -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); - -static struct mtd_info *docmillist = NULL; - -/* Perform the required delay cycles by reading from the NOP register */ -static void DoC_Delay(void __iomem * docptr, unsigned short cycles) -{ - volatile char dummy; - int i; - - for (i = 0; i < cycles; i++) - dummy = ReadDOC(docptr, NOP); -} - -/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ -static int _DoC_WaitReady(void __iomem * docptr) -{ - unsigned short c = 0xffff; - - pr_debug("_DoC_WaitReady called for out-of-line wait\n"); - - /* Out-of-line routine to wait for chip response */ - while (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B) && --c) - ; - - if (c == 0) - pr_debug("_DoC_WaitReady timed out.\n"); - - return (c == 0); -} - -static inline int DoC_WaitReady(void __iomem * docptr) -{ - /* This is inline, to optimise the common case, where it's ready instantly */ - int ret = 0; - - /* 4 read form NOP register should be issued in prior to the read from CDSNControl - see Software Requirement 11.4 item 2. */ - DoC_Delay(docptr, 4); - - if (!(ReadDOC(docptr, CDSNControl) & CDSN_CTRL_FR_B)) - /* Call the out-of-line routine to wait */ - ret = _DoC_WaitReady(docptr); - - /* issue 2 read from NOP register after reading from CDSNControl register - see Software Requirement 11.4 item 2. */ - DoC_Delay(docptr, 2); - - return ret; -} - -/* DoC_Command: Send a flash command to the flash chip through the CDSN IO register - with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is - required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ - -static void DoC_Command(void __iomem * docptr, unsigned char command, - unsigned char xtraflags) -{ - /* Assert the CLE (Command Latch Enable) line to the flash chip */ - WriteDOC(xtraflags | CDSN_CTRL_CLE | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(docptr, 4); - - /* Send the command */ - WriteDOC(command, docptr, Mil_CDSN_IO); - WriteDOC(0x00, docptr, WritePipeTerm); - - /* Lower the CLE line */ - WriteDOC(xtraflags | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(docptr, 4); -} - -/* DoC_Address: Set the current address for the flash chip through the CDSN IO register - with the internal pipeline. Each of 4 delay cycles (read from the NOP register) is - required after writing to CDSN Control register, see Software Requirement 11.4 item 3. */ - -static inline void DoC_Address(void __iomem * docptr, int numbytes, unsigned long ofs, - unsigned char xtraflags1, unsigned char xtraflags2) -{ - /* Assert the ALE (Address Latch Enable) line to the flash chip */ - WriteDOC(xtraflags1 | CDSN_CTRL_ALE | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(docptr, 4); - - /* Send the address */ - switch (numbytes) - { - case 1: - /* Send single byte, bits 0-7. 
*/ - WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO); - WriteDOC(0x00, docptr, WritePipeTerm); - break; - case 2: - /* Send bits 9-16 followed by 17-23 */ - WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO); - WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO); - WriteDOC(0x00, docptr, WritePipeTerm); - break; - case 3: - /* Send 0-7, 9-16, then 17-23 */ - WriteDOC(ofs & 0xff, docptr, Mil_CDSN_IO); - WriteDOC((ofs >> 9) & 0xff, docptr, Mil_CDSN_IO); - WriteDOC((ofs >> 17) & 0xff, docptr, Mil_CDSN_IO); - WriteDOC(0x00, docptr, WritePipeTerm); - break; - default: - return; - } - - /* Lower the ALE line */ - WriteDOC(xtraflags1 | xtraflags2 | CDSN_CTRL_CE, docptr, CDSNControl); - DoC_Delay(docptr, 4); -} - -/* DoC_SelectChip: Select a given flash chip within the current floor */ -static int DoC_SelectChip(void __iomem * docptr, int chip) -{ - /* Select the individual flash chip requested */ - WriteDOC(chip, docptr, CDSNDeviceSelect); - DoC_Delay(docptr, 4); - - /* Wait for it to be ready */ - return DoC_WaitReady(docptr); -} - -/* DoC_SelectFloor: Select a given floor (bank of flash chips) */ -static int DoC_SelectFloor(void __iomem * docptr, int floor) -{ - /* Select the floor (bank) of chips required */ - WriteDOC(floor, docptr, FloorSelect); - - /* Wait for the chip to be ready */ - return DoC_WaitReady(docptr); -} - -/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */ -static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip) -{ - int mfr, id, i, j; - volatile char dummy; - - /* Page in the required floor/chip - FIXME: is this supported by Millennium ?? */ - DoC_SelectFloor(doc->virtadr, floor); - DoC_SelectChip(doc->virtadr, chip); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(doc->virtadr, NAND_CMD_RESET, CDSN_CTRL_WP); - DoC_WaitReady(doc->virtadr); - - /* Read the NAND chip ID: 1. Send ReadID command */ - DoC_Command(doc->virtadr, NAND_CMD_READID, CDSN_CTRL_WP); - - /* Read the NAND chip ID: 2. 
Send address byte zero */ - DoC_Address(doc->virtadr, 1, 0x00, CDSN_CTRL_WP, 0x00); - - /* Read the manufacturer and device id codes of the flash device through - CDSN IO register see Software Requirement 11.4 item 5.*/ - dummy = ReadDOC(doc->virtadr, ReadPipeInit); - DoC_Delay(doc->virtadr, 2); - mfr = ReadDOC(doc->virtadr, Mil_CDSN_IO); - - DoC_Delay(doc->virtadr, 2); - id = ReadDOC(doc->virtadr, Mil_CDSN_IO); - dummy = ReadDOC(doc->virtadr, LastDataRead); - - /* No response - return failure */ - if (mfr == 0xff || mfr == 0) - return 0; - - /* FIXME: to deal with multi-flash on multi-Millennium case more carefully */ - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if ( id == nand_flash_ids[i].id) { - /* Try to identify manufacturer */ - for (j = 0; nand_manuf_ids[j].id != 0x0; j++) { - if (nand_manuf_ids[j].id == mfr) - break; - } - printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, " - "Chip ID: %2.2X (%s:%s)\n", - mfr, id, nand_manuf_ids[j].name, nand_flash_ids[i].name); - doc->mfr = mfr; - doc->id = id; - doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1; - break; - } - } - - if (nand_flash_ids[i].name == NULL) - return 0; - else - return 1; -} - -/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */ -static void DoC_ScanChips(struct DiskOnChip *this) -{ - int floor, chip; - int numchips[MAX_FLOORS_MIL]; - int ret; - - this->numchips = 0; - this->mfr = 0; - this->id = 0; - - /* For each floor, find the number of valid chips it contains */ - for (floor = 0,ret = 1; floor < MAX_FLOORS_MIL; floor++) { - numchips[floor] = 0; - for (chip = 0; chip < MAX_CHIPS_MIL && ret != 0; chip++) { - ret = DoC_IdentChip(this, floor, chip); - if (ret) { - numchips[floor]++; - this->numchips++; - } - } - } - /* If there are none at all that we recognise, bail */ - if (!this->numchips) { - printk("No flash chips recognised.\n"); - return; - } - - /* Allocate an array to hold the information for each chip */ - this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL); - if (!this->chips){ - printk("No memory for allocating chip info structures\n"); - return; - } - - /* Fill out the chip array with {floor, chipno} for each - * detected chip in the device. */ - for (floor = 0, ret = 0; floor < MAX_FLOORS_MIL; floor++) { - for (chip = 0 ; chip < numchips[floor] ; chip++) { - this->chips[ret].floor = floor; - this->chips[ret].chip = chip; - this->chips[ret].curadr = 0; - this->chips[ret].curmode = 0x50; - ret++; - } - } - - /* Calculate and print the total size of the device */ - this->totlen = this->numchips * (1 << this->chipshift); - printk(KERN_INFO "%d flash chips found. Total DiskOnChip size: %ld MiB\n", - this->numchips ,this->totlen >> 20); -} - -static int DoCMil_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2) -{ - int tmp1, tmp2, retval; - - if (doc1->physadr == doc2->physadr) - return 1; - - /* Use the alias resolution register which was set aside for this - * purpose. If it's value is the same on both chips, they might - * be the same chip, and we write to one and check for a change in - * the other. It's unclear if this register is usuable in the - * DoC 2000 (it's in the Millenium docs), but it seems to work. 
*/ - tmp1 = ReadDOC(doc1->virtadr, AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, AliasResolution); - if (tmp1 != tmp2) - return 0; - - WriteDOC((tmp1+1) % 0xff, doc1->virtadr, AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, AliasResolution); - if (tmp2 == (tmp1+1) % 0xff) - retval = 1; - else - retval = 0; - - /* Restore register contents. May not be necessary, but do it just to - * be safe. */ - WriteDOC(tmp1, doc1->virtadr, AliasResolution); - - return retval; -} - -/* This routine is found from the docprobe code by symbol_get(), - * which will bump the use count of this module. */ -void DoCMil_init(struct mtd_info *mtd) -{ - struct DiskOnChip *this = mtd->priv; - struct DiskOnChip *old = NULL; - - /* We must avoid being called twice for the same device. */ - if (docmillist) - old = docmillist->priv; - - while (old) { - if (DoCMil_is_alias(this, old)) { - printk(KERN_NOTICE "Ignoring DiskOnChip Millennium at " - "0x%lX - already configured\n", this->physadr); - iounmap(this->virtadr); - kfree(mtd); - return; - } - if (old->nextdoc) - old = old->nextdoc->priv; - else - old = NULL; - } - - mtd->name = "DiskOnChip Millennium"; - printk(KERN_NOTICE "DiskOnChip Millennium found at address 0x%lX\n", - this->physadr); - - mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; - - /* FIXME: erase size is not always 8KiB */ - mtd->erasesize = 0x2000; - mtd->writebufsize = mtd->writesize = 512; - mtd->oobsize = 16; - mtd->ecc_strength = 2; - mtd->owner = THIS_MODULE; - mtd->_erase = doc_erase; - mtd->_read = doc_read; - mtd->_write = doc_write; - mtd->_read_oob = doc_read_oob; - mtd->_write_oob = doc_write_oob; - this->curfloor = -1; - this->curchip = -1; - - /* Ident all the chips present. */ - DoC_ScanChips(this); - - if (!this->totlen) { - kfree(mtd); - iounmap(this->virtadr); - } else { - this->nextdoc = docmillist; - docmillist = mtd; - mtd->size = this->totlen; - mtd_device_register(mtd, NULL, 0); - return; - } -} -EXPORT_SYMBOL_GPL(DoCMil_init); - -static int doc_read (struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - int i, ret; - volatile char dummy; - unsigned char syndrome[6], eccbuf[6]; - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[from >> (this->chipshift)]; - - /* Don't allow a single read to cross a 512-byte block boundary */ - if (from + len > ((from | 0x1ff) + 1)) - len = ((from | 0x1ff) + 1) - from; - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* issue the Read0 or Read1 command depend on which half of the page - we are accessing. Polling the Flash Ready bit after issue 3 bytes - address in Sequence Read Mode, see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, (from >> 8) & 1, CDSN_CTRL_WP); - DoC_Address(docptr, 3, from, CDSN_CTRL_WP, 0x00); - DoC_WaitReady(docptr); - - /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_EN, docptr, ECCConf); - - /* Read the data via the internal pipeline through CDSN IO register, - see Pipelined Read Operations 11.3 */ - dummy = ReadDOC(docptr, ReadPipeInit); -#ifndef USE_MEMCPY - for (i = 0; i < len-1; i++) { - /* N.B. 
you have to increase the source address in this way or the - ECC logic will not work properly */ - buf[i] = ReadDOC(docptr, Mil_CDSN_IO + (i & 0xff)); - } -#else - memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1); -#endif - buf[len - 1] = ReadDOC(docptr, LastDataRead); - - /* Let the caller know we completed it */ - *retlen = len; - ret = 0; - - /* Read the ECC data from Spare Data Area, - see Reed-Solomon EDC/ECC 11.1 */ - dummy = ReadDOC(docptr, ReadPipeInit); -#ifndef USE_MEMCPY - for (i = 0; i < 5; i++) { - /* N.B. you have to increase the source address in this way or the - ECC logic will not work properly */ - eccbuf[i] = ReadDOC(docptr, Mil_CDSN_IO + i); - } -#else - memcpy_fromio(eccbuf, docptr + DoC_Mil_CDSN_IO, 5); -#endif - eccbuf[5] = ReadDOC(docptr, LastDataRead); - - /* Flush the pipeline */ - dummy = ReadDOC(docptr, ECCConf); - dummy = ReadDOC(docptr, ECCConf); - - /* Check the ECC Status */ - if (ReadDOC(docptr, ECCConf) & 0x80) { - int nb_errors; - /* There was an ECC error */ -#ifdef ECC_DEBUG - printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); -#endif - /* Read the ECC syndrome through the DiskOnChip ECC logic. - These syndrome will be all ZERO when there is no error */ - for (i = 0; i < 6; i++) { - syndrome[i] = ReadDOC(docptr, ECCSyndrome0 + i); - } - nb_errors = doc_decode_ecc(buf, syndrome); -#ifdef ECC_DEBUG - printk("ECC Errors corrected: %x\n", nb_errors); -#endif - if (nb_errors < 0) { - /* We return error, but have actually done the read. Not that - this can be told to user-space, via sys_read(), but at least - MTD-aware stuff can know about it by checking *retlen */ - ret = -EIO; - } - } - -#ifdef PSYCHO_DEBUG - printk("ECC DATA at %lx: %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long)from, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], - eccbuf[4], eccbuf[5]); -#endif - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr , ECCConf); - - return ret; -} - -static int doc_write (struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - int i,ret = 0; - char eccbuf[6]; - volatile char dummy; - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[to >> (this->chipshift)]; - -#if 0 - /* Don't allow a single write to cross a 512-byte block boundary */ - if (to + len > ( (to | 0x1ff) + 1)) - len = ((to | 0x1ff) + 1) - to; -#else - /* Don't allow writes which aren't exactly one block */ - if (to & 0x1ff || len != 0x200) - return -EINVAL; -#endif - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Reset the chip, see Software Requirement 11.4 item 1. 
*/ - DoC_Command(docptr, NAND_CMD_RESET, 0x00); - DoC_WaitReady(docptr); - /* Set device to main plane of flash */ - DoC_Command(docptr, NAND_CMD_READ0, 0x00); - - /* issue the Serial Data In command to initial the Page Program process */ - DoC_Command(docptr, NAND_CMD_SEQIN, 0x00); - DoC_Address(docptr, 3, to, 0x00, 0x00); - DoC_WaitReady(docptr); - - /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_EN | DOC_ECC_RW, docptr, ECCConf); - - /* Write the data via the internal pipeline through CDSN IO register, - see Pipelined Write Operations 11.2 */ -#ifndef USE_MEMCPY - for (i = 0; i < len; i++) { - /* N.B. you have to increase the source address in this way or the - ECC logic will not work properly */ - WriteDOC(buf[i], docptr, Mil_CDSN_IO + i); - } -#else - memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len); -#endif - WriteDOC(0x00, docptr, WritePipeTerm); - - /* Write ECC data to flash, the ECC info is generated by the DiskOnChip ECC logic - see Reed-Solomon EDC/ECC 11.1 */ - WriteDOC(0, docptr, NOP); - WriteDOC(0, docptr, NOP); - WriteDOC(0, docptr, NOP); - - /* Read the ECC data through the DiskOnChip ECC logic */ - for (i = 0; i < 6; i++) { - eccbuf[i] = ReadDOC(docptr, ECCSyndrome0 + i); - } - - /* ignore the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr , ECCConf); - -#ifndef USE_MEMCPY - /* Write the ECC data to flash */ - for (i = 0; i < 6; i++) { - /* N.B. you have to increase the source address in this way or the - ECC logic will not work properly */ - WriteDOC(eccbuf[i], docptr, Mil_CDSN_IO + i); - } -#else - memcpy_toio(docptr + DoC_Mil_CDSN_IO, eccbuf, 6); -#endif - - /* write the block status BLOCK_USED (0x5555) at the end of ECC data - FIXME: this is only a hack for programming the IPL area for LinuxBIOS - and should be replace with proper codes in user space utilities */ - WriteDOC(0x55, docptr, Mil_CDSN_IO); - WriteDOC(0x55, docptr, Mil_CDSN_IO + 1); - - WriteDOC(0x00, docptr, WritePipeTerm); - -#ifdef PSYCHO_DEBUG - printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], - eccbuf[4], eccbuf[5]); -#endif - - /* Commit the Page Program command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00); - DoC_WaitReady(docptr); - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5.*/ - DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP); - dummy = ReadDOC(docptr, ReadPipeInit); - DoC_Delay(docptr, 2); - if (ReadDOC(docptr, Mil_CDSN_IO) & 1) { - printk("Error programming flash\n"); - /* Error in programming - FIXME: implement Bad Block Replacement (in nftl.c ??) 
*/ - ret = -EIO; - } - dummy = ReadDOC(docptr, LastDataRead); - - /* Let the caller know we completed it */ - *retlen = len; - - return ret; -} - -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ -#ifndef USE_MEMCPY - int i; -#endif - volatile char dummy; - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* disable the ECC engine */ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_DIS, docptr, ECCConf); - - /* issue the Read2 command to set the pointer to the Spare Data Area. - Polling the Flash Ready bit after issue 3 bytes address in - Sequence Read Mode, see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP); - DoC_Address(docptr, 3, ofs, CDSN_CTRL_WP, 0x00); - DoC_WaitReady(docptr); - - /* Read the data out via the internal pipeline through CDSN IO register, - see Pipelined Read Operations 11.3 */ - dummy = ReadDOC(docptr, ReadPipeInit); -#ifndef USE_MEMCPY - for (i = 0; i < len-1; i++) { - /* N.B. you have to increase the source address in this way or the - ECC logic will not work properly */ - buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i); - } -#else - memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len - 1); -#endif - buf[len - 1] = ReadDOC(docptr, LastDataRead); - - ops->retlen = len; - - return 0; -} - -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ -#ifndef USE_MEMCPY - int i; -#endif - volatile char dummy; - int ret = 0; - struct DiskOnChip *this = mtd->priv; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* disable the ECC engine */ - WriteDOC (DOC_ECC_RESET, docptr, ECCConf); - WriteDOC (DOC_ECC_DIS, docptr, ECCConf); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(docptr, NAND_CMD_RESET, CDSN_CTRL_WP); - DoC_WaitReady(docptr); - /* issue the Read2 command to set the pointer to the Spare Data Area. */ - DoC_Command(docptr, NAND_CMD_READOOB, CDSN_CTRL_WP); - - /* issue the Serial Data In command to initial the Page Program process */ - DoC_Command(docptr, NAND_CMD_SEQIN, 0x00); - DoC_Address(docptr, 3, ofs, 0x00, 0x00); - - /* Write the data via the internal pipeline through CDSN IO register, - see Pipelined Write Operations 11.2 */ -#ifndef USE_MEMCPY - for (i = 0; i < len; i++) { - /* N.B. 
you have to increase the source address in this way or the - ECC logic will not work properly */ - WriteDOC(buf[i], docptr, Mil_CDSN_IO + i); - } -#else - memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len); -#endif - WriteDOC(0x00, docptr, WritePipeTerm); - - /* Commit the Page Program command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00); - DoC_WaitReady(docptr); - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5.*/ - DoC_Command(docptr, NAND_CMD_STATUS, 0x00); - dummy = ReadDOC(docptr, ReadPipeInit); - DoC_Delay(docptr, 2); - if (ReadDOC(docptr, Mil_CDSN_IO) & 1) { - printk("Error programming oob data\n"); - /* FIXME: implement Bad Block Replacement (in nftl.c ??) */ - ops->retlen = 0; - ret = -EIO; - } - dummy = ReadDOC(docptr, LastDataRead); - - ops->retlen = len; - - return ret; -} - -int doc_erase (struct mtd_info *mtd, struct erase_info *instr) -{ - volatile char dummy; - struct DiskOnChip *this = mtd->priv; - __u32 ofs = instr->addr; - __u32 len = instr->len; - void __iomem *docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - - if (len != mtd->erasesize) - printk(KERN_WARNING "Erase not right size (%x != %x)n", - len, mtd->erasesize); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - instr->state = MTD_ERASE_PENDING; - - /* issue the Erase Setup command */ - DoC_Command(docptr, NAND_CMD_ERASE1, 0x00); - DoC_Address(docptr, 2, ofs, 0x00, 0x00); - - /* Commit the Erase Start command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_ERASE2, 0x00); - DoC_WaitReady(docptr); - - instr->state = MTD_ERASING; - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5. - FIXME: it seems that we are not wait long enough, some blocks are not - erased fully */ - DoC_Command(docptr, NAND_CMD_STATUS, CDSN_CTRL_WP); - dummy = ReadDOC(docptr, ReadPipeInit); - DoC_Delay(docptr, 2); - if (ReadDOC(docptr, Mil_CDSN_IO) & 1) { - printk("Error Erasing at 0x%x\n", ofs); - /* There was an error - FIXME: implement Bad Block Replacement (in nftl.c ??) 
*/ - instr->state = MTD_ERASE_FAILED; - } else - instr->state = MTD_ERASE_DONE; - dummy = ReadDOC(docptr, LastDataRead); - - mtd_erase_callback(instr); - - return 0; -} - -/**************************************************************************** - * - * Module stuff - * - ****************************************************************************/ - -static void __exit cleanup_doc2001(void) -{ - struct mtd_info *mtd; - struct DiskOnChip *this; - - while ((mtd=docmillist)) { - this = mtd->priv; - docmillist = this->nextdoc; - - mtd_device_unregister(mtd); - - iounmap(this->virtadr); - kfree(this->chips); - kfree(mtd); - } -} - -module_exit(cleanup_doc2001); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Woodhouse et al."); -MODULE_DESCRIPTION("Alternative driver for DiskOnChip Millennium"); diff --git a/trunk/drivers/mtd/devices/doc2001plus.c b/trunk/drivers/mtd/devices/doc2001plus.c deleted file mode 100644 index 4f2220ad8924..000000000000 --- a/trunk/drivers/mtd/devices/doc2001plus.c +++ /dev/null @@ -1,1080 +0,0 @@ -/* - * Linux driver for Disk-On-Chip Millennium Plus - * - * (c) 2002-2003 Greg Ungerer - * (c) 2002-2003 SnapGear Inc - * (c) 1999 Machine Vision Holdings, Inc. - * (c) 1999, 2000 David Woodhouse - * - * Released under GPL - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -/* #define ECC_DEBUG */ - -/* I have no idea why some DoC chips can not use memcop_form|to_io(). - * This may be due to the different revisions of the ASIC controller built-in or - * simplily a QA/Bug issue. Who knows ?? If you have trouble, please uncomment - * this:*/ -#undef USE_MEMCPY - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf); -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf); -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops); -static int doc_erase (struct mtd_info *mtd, struct erase_info *instr); - -static struct mtd_info *docmilpluslist = NULL; - - -/* Perform the required delay cycles by writing to the NOP register */ -static void DoC_Delay(void __iomem * docptr, int cycles) -{ - int i; - - for (i = 0; (i < cycles); i++) - WriteDOC(0, docptr, Mplus_NOP); -} - -#define CDSN_CTRL_FR_B_MASK (CDSN_CTRL_FR_B0 | CDSN_CTRL_FR_B1) - -/* DOC_WaitReady: Wait for RDY line to be asserted by the flash chip */ -static int _DoC_WaitReady(void __iomem * docptr) -{ - unsigned int c = 0xffff; - - pr_debug("_DoC_WaitReady called for out-of-line wait\n"); - - /* Out-of-line routine to wait for chip response */ - while (((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) && --c) - ; - - if (c == 0) - pr_debug("_DoC_WaitReady timed out.\n"); - - return (c == 0); -} - -static inline int DoC_WaitReady(void __iomem * docptr) -{ - /* This is inline, to optimise the common case, where it's ready instantly */ - int ret = 0; - - /* read form NOP register should be issued prior to the read from CDSNControl - see Software Requirement 11.4 item 2. */ - DoC_Delay(docptr, 4); - - if ((ReadDOC(docptr, Mplus_FlashControl) & CDSN_CTRL_FR_B_MASK) != CDSN_CTRL_FR_B_MASK) - /* Call the out-of-line routine to wait */ - ret = _DoC_WaitReady(docptr); - - return ret; -} - -/* For some reason the Millennium Plus seems to occasionally put itself - * into reset mode. 
For me this happens randomly, with no pattern that I - * can detect. M-systems suggest always check this on any block level - * operation and setting to normal mode if in reset mode. - */ -static inline void DoC_CheckASIC(void __iomem * docptr) -{ - /* Make sure the DoC is in normal mode */ - if ((ReadDOC(docptr, Mplus_DOCControl) & DOC_MODE_NORMAL) == 0) { - WriteDOC((DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_DOCControl); - WriteDOC(~(DOC_MODE_NORMAL | DOC_MODE_MDWREN), docptr, Mplus_CtrlConfirm); - } -} - -/* DoC_Command: Send a flash command to the flash chip through the Flash - * command register. Need 2 Write Pipeline Terminates to complete send. - */ -static void DoC_Command(void __iomem * docptr, unsigned char command, - unsigned char xtraflags) -{ - WriteDOC(command, docptr, Mplus_FlashCmd); - WriteDOC(command, docptr, Mplus_WritePipeTerm); - WriteDOC(command, docptr, Mplus_WritePipeTerm); -} - -/* DoC_Address: Set the current address for the flash chip through the Flash - * Address register. Need 2 Write Pipeline Terminates to complete send. - */ -static inline void DoC_Address(struct DiskOnChip *doc, int numbytes, - unsigned long ofs, unsigned char xtraflags1, - unsigned char xtraflags2) -{ - void __iomem * docptr = doc->virtadr; - - /* Allow for possible Mill Plus internal flash interleaving */ - ofs >>= doc->interleave; - - switch (numbytes) { - case 1: - /* Send single byte, bits 0-7. */ - WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress); - break; - case 2: - /* Send bits 9-16 followed by 17-23 */ - WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress); - WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress); - break; - case 3: - /* Send 0-7, 9-16, then 17-23 */ - WriteDOC(ofs & 0xff, docptr, Mplus_FlashAddress); - WriteDOC((ofs >> 9) & 0xff, docptr, Mplus_FlashAddress); - WriteDOC((ofs >> 17) & 0xff, docptr, Mplus_FlashAddress); - break; - default: - return; - } - - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); -} - -/* DoC_SelectChip: Select a given flash chip within the current floor */ -static int DoC_SelectChip(void __iomem * docptr, int chip) -{ - /* No choice for flash chip on Millennium Plus */ - return 0; -} - -/* DoC_SelectFloor: Select a given floor (bank of flash chips) */ -static int DoC_SelectFloor(void __iomem * docptr, int floor) -{ - WriteDOC((floor & 0x3), docptr, Mplus_DeviceSelect); - return 0; -} - -/* - * Translate the given offset into the appropriate command and offset. - * This does the mapping using the 16bit interleave layout defined by - * M-Systems, and looks like this for a sector pair: - * +-----------+-------+-------+-------+--------------+---------+-----------+ - * | 0 --- 511 |512-517|518-519|520-521| 522 --- 1033 |1034-1039|1040 - 1055| - * +-----------+-------+-------+-------+--------------+---------+-----------+ - * | Data 0 | ECC 0 |Flags0 |Flags1 | Data 1 |ECC 1 | OOB 1 + 2 | - * +-----------+-------+-------+-------+--------------+---------+-----------+ - */ -/* FIXME: This lives in INFTL not here. 
Other users of flash devices - may not want it */ -static unsigned int DoC_GetDataOffset(struct mtd_info *mtd, loff_t *from) -{ - struct DiskOnChip *this = mtd->priv; - - if (this->interleave) { - unsigned int ofs = *from & 0x3ff; - unsigned int cmd; - - if (ofs < 512) { - cmd = NAND_CMD_READ0; - ofs &= 0x1ff; - } else if (ofs < 1014) { - cmd = NAND_CMD_READ1; - ofs = (ofs & 0x1ff) + 10; - } else { - cmd = NAND_CMD_READOOB; - ofs = ofs - 1014; - } - - *from = (*from & ~0x3ff) | ofs; - return cmd; - } else { - /* No interleave */ - if ((*from) & 0x100) - return NAND_CMD_READ1; - return NAND_CMD_READ0; - } -} - -static unsigned int DoC_GetECCOffset(struct mtd_info *mtd, loff_t *from) -{ - unsigned int ofs, cmd; - - if (*from & 0x200) { - cmd = NAND_CMD_READOOB; - ofs = 10 + (*from & 0xf); - } else { - cmd = NAND_CMD_READ1; - ofs = (*from & 0xf); - } - - *from = (*from & ~0x3ff) | ofs; - return cmd; -} - -static unsigned int DoC_GetFlagsOffset(struct mtd_info *mtd, loff_t *from) -{ - unsigned int ofs, cmd; - - cmd = NAND_CMD_READ1; - ofs = (*from & 0x200) ? 8 : 6; - *from = (*from & ~0x3ff) | ofs; - return cmd; -} - -static unsigned int DoC_GetHdrOffset(struct mtd_info *mtd, loff_t *from) -{ - unsigned int ofs, cmd; - - cmd = NAND_CMD_READOOB; - ofs = (*from & 0x200) ? 24 : 16; - *from = (*from & ~0x3ff) | ofs; - return cmd; -} - -static inline void MemReadDOC(void __iomem * docptr, unsigned char *buf, int len) -{ -#ifndef USE_MEMCPY - int i; - for (i = 0; i < len; i++) - buf[i] = ReadDOC(docptr, Mil_CDSN_IO + i); -#else - memcpy_fromio(buf, docptr + DoC_Mil_CDSN_IO, len); -#endif -} - -static inline void MemWriteDOC(void __iomem * docptr, unsigned char *buf, int len) -{ -#ifndef USE_MEMCPY - int i; - for (i = 0; i < len; i++) - WriteDOC(buf[i], docptr, Mil_CDSN_IO + i); -#else - memcpy_toio(docptr + DoC_Mil_CDSN_IO, buf, len); -#endif -} - -/* DoC_IdentChip: Identify a given NAND chip given {floor,chip} */ -static int DoC_IdentChip(struct DiskOnChip *doc, int floor, int chip) -{ - int mfr, id, i, j; - volatile char dummy; - void __iomem * docptr = doc->virtadr; - - /* Page in the required floor/chip */ - DoC_SelectFloor(docptr, floor); - DoC_SelectChip(docptr, chip); - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - /* Read the NAND chip ID: 1. Send ReadID command */ - DoC_Command(docptr, NAND_CMD_READID, 0); - - /* Read the NAND chip ID: 2. 
Send address byte zero */ - DoC_Address(doc, 1, 0x00, 0, 0x00); - - WriteDOC(0, docptr, Mplus_FlashControl); - DoC_WaitReady(docptr); - - /* Read the manufacturer and device id codes of the flash device through - CDSN IO register see Software Requirement 11.4 item 5.*/ - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - - mfr = ReadDOC(docptr, Mil_CDSN_IO); - if (doc->interleave) - dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */ - - id = ReadDOC(docptr, Mil_CDSN_IO); - if (doc->interleave) - dummy = ReadDOC(docptr, Mil_CDSN_IO); /* 2 way interleave */ - - dummy = ReadDOC(docptr, Mplus_LastDataRead); - dummy = ReadDOC(docptr, Mplus_LastDataRead); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - /* No response - return failure */ - if (mfr == 0xff || mfr == 0) - return 0; - - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if (id == nand_flash_ids[i].id) { - /* Try to identify manufacturer */ - for (j = 0; nand_manuf_ids[j].id != 0x0; j++) { - if (nand_manuf_ids[j].id == mfr) - break; - } - printk(KERN_INFO "Flash chip found: Manufacturer ID: %2.2X, " - "Chip ID: %2.2X (%s:%s)\n", mfr, id, - nand_manuf_ids[j].name, nand_flash_ids[i].name); - doc->mfr = mfr; - doc->id = id; - doc->chipshift = ffs((nand_flash_ids[i].chipsize << 20)) - 1; - doc->erasesize = nand_flash_ids[i].erasesize << doc->interleave; - break; - } - } - - if (nand_flash_ids[i].name == NULL) - return 0; - return 1; -} - -/* DoC_ScanChips: Find all NAND chips present in a DiskOnChip, and identify them */ -static void DoC_ScanChips(struct DiskOnChip *this) -{ - int floor, chip; - int numchips[MAX_FLOORS_MPLUS]; - int ret; - - this->numchips = 0; - this->mfr = 0; - this->id = 0; - - /* Work out the intended interleave setting */ - this->interleave = 0; - if (this->ChipID == DOC_ChipID_DocMilPlus32) - this->interleave = 1; - - /* Check the ASIC agrees */ - if ( (this->interleave << 2) != - (ReadDOC(this->virtadr, Mplus_Configuration) & 4)) { - u_char conf = ReadDOC(this->virtadr, Mplus_Configuration); - printk(KERN_NOTICE "Setting DiskOnChip Millennium Plus interleave to %s\n", - this->interleave?"on (16-bit)":"off (8-bit)"); - conf ^= 4; - WriteDOC(conf, this->virtadr, Mplus_Configuration); - } - - /* For each floor, find the number of valid chips it contains */ - for (floor = 0,ret = 1; floor < MAX_FLOORS_MPLUS; floor++) { - numchips[floor] = 0; - for (chip = 0; chip < MAX_CHIPS_MPLUS && ret != 0; chip++) { - ret = DoC_IdentChip(this, floor, chip); - if (ret) { - numchips[floor]++; - this->numchips++; - } - } - } - /* If there are none at all that we recognise, bail */ - if (!this->numchips) { - printk("No flash chips recognised.\n"); - return; - } - - /* Allocate an array to hold the information for each chip */ - this->chips = kmalloc(sizeof(struct Nand) * this->numchips, GFP_KERNEL); - if (!this->chips){ - printk("MTD: No memory for allocating chip info structures\n"); - return; - } - - /* Fill out the chip array with {floor, chipno} for each - * detected chip in the device. */ - for (floor = 0, ret = 0; floor < MAX_FLOORS_MPLUS; floor++) { - for (chip = 0 ; chip < numchips[floor] ; chip++) { - this->chips[ret].floor = floor; - this->chips[ret].chip = chip; - this->chips[ret].curadr = 0; - this->chips[ret].curmode = 0x50; - ret++; - } - } - - /* Calculate and print the total size of the device */ - this->totlen = this->numchips * (1 << this->chipshift); - printk(KERN_INFO "%d flash chips found. 
Total DiskOnChip size: %ld MiB\n", - this->numchips ,this->totlen >> 20); -} - -static int DoCMilPlus_is_alias(struct DiskOnChip *doc1, struct DiskOnChip *doc2) -{ - int tmp1, tmp2, retval; - - if (doc1->physadr == doc2->physadr) - return 1; - - /* Use the alias resolution register which was set aside for this - * purpose. If it's value is the same on both chips, they might - * be the same chip, and we write to one and check for a change in - * the other. It's unclear if this register is usuable in the - * DoC 2000 (it's in the Millennium docs), but it seems to work. */ - tmp1 = ReadDOC(doc1->virtadr, Mplus_AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution); - if (tmp1 != tmp2) - return 0; - - WriteDOC((tmp1+1) % 0xff, doc1->virtadr, Mplus_AliasResolution); - tmp2 = ReadDOC(doc2->virtadr, Mplus_AliasResolution); - if (tmp2 == (tmp1+1) % 0xff) - retval = 1; - else - retval = 0; - - /* Restore register contents. May not be necessary, but do it just to - * be safe. */ - WriteDOC(tmp1, doc1->virtadr, Mplus_AliasResolution); - - return retval; -} - -/* This routine is found from the docprobe code by symbol_get(), - * which will bump the use count of this module. */ -void DoCMilPlus_init(struct mtd_info *mtd) -{ - struct DiskOnChip *this = mtd->priv; - struct DiskOnChip *old = NULL; - - /* We must avoid being called twice for the same device. */ - if (docmilpluslist) - old = docmilpluslist->priv; - - while (old) { - if (DoCMilPlus_is_alias(this, old)) { - printk(KERN_NOTICE "Ignoring DiskOnChip Millennium " - "Plus at 0x%lX - already configured\n", - this->physadr); - iounmap(this->virtadr); - kfree(mtd); - return; - } - if (old->nextdoc) - old = old->nextdoc->priv; - else - old = NULL; - } - - mtd->name = "DiskOnChip Millennium Plus"; - printk(KERN_NOTICE "DiskOnChip Millennium Plus found at " - "address 0x%lX\n", this->physadr); - - mtd->type = MTD_NANDFLASH; - mtd->flags = MTD_CAP_NANDFLASH; - mtd->writebufsize = mtd->writesize = 512; - mtd->oobsize = 16; - mtd->ecc_strength = 2; - mtd->owner = THIS_MODULE; - mtd->_erase = doc_erase; - mtd->_read = doc_read; - mtd->_write = doc_write; - mtd->_read_oob = doc_read_oob; - mtd->_write_oob = doc_write_oob; - this->curfloor = -1; - this->curchip = -1; - - /* Ident all the chips present. 
*/ - DoC_ScanChips(this); - - if (!this->totlen) { - kfree(mtd); - iounmap(this->virtadr); - } else { - this->nextdoc = docmilpluslist; - docmilpluslist = mtd; - mtd->size = this->totlen; - mtd->erasesize = this->erasesize; - mtd_device_register(mtd, NULL, 0); - return; - } -} -EXPORT_SYMBOL_GPL(DoCMilPlus_init); - -#if 0 -static int doc_dumpblk(struct mtd_info *mtd, loff_t from) -{ - int i; - loff_t fofs; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[from >> (this->chipshift)]; - unsigned char *bp, buf[1056]; - char c[32]; - - from &= ~0x3ff; - - /* Don't allow read past end of device */ - if (from >= this->totlen) - return -EINVAL; - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect); - - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - fofs = from; - DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0); - DoC_Address(this, 3, fofs, 0, 0x00); - WriteDOC(0, docptr, Mplus_FlashControl); - DoC_WaitReady(docptr); - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - - ReadDOC(docptr, Mplus_ReadPipeInit); - ReadDOC(docptr, Mplus_ReadPipeInit); - - /* Read the data via the internal pipeline through CDSN IO - register, see Pipelined Read Operations 11.3 */ - MemReadDOC(docptr, buf, 1054); - buf[1054] = ReadDOC(docptr, Mplus_LastDataRead); - buf[1055] = ReadDOC(docptr, Mplus_LastDataRead); - - memset(&c[0], 0, sizeof(c)); - printk("DUMP OFFSET=%x:\n", (int)from); - - for (i = 0, bp = &buf[0]; (i < 1056); i++) { - if ((i % 16) == 0) - printk("%08x: ", i); - printk(" %02x", *bp); - c[(i & 0xf)] = ((*bp >= 0x20) && (*bp <= 0x7f)) ? *bp : '.'; - bp++; - if (((i + 1) % 16) == 0) - printk(" %s\n", c); - } - printk("\n"); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - return 0; -} -#endif - -static int doc_read(struct mtd_info *mtd, loff_t from, size_t len, - size_t *retlen, u_char *buf) -{ - int ret, i; - volatile char dummy; - loff_t fofs; - unsigned char syndrome[6], eccbuf[6]; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[from >> (this->chipshift)]; - - /* Don't allow a single read to cross a 512-byte block boundary */ - if (from + len > ((from | 0x1ff) + 1)) - len = ((from | 0x1ff) + 1) - from; - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect); - - /* Reset the chip, see Software Requirement 11.4 item 1. 
*/ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - fofs = from; - DoC_Command(docptr, DoC_GetDataOffset(mtd, &fofs), 0); - DoC_Address(this, 3, fofs, 0, 0x00); - WriteDOC(0, docptr, Mplus_FlashControl); - DoC_WaitReady(docptr); - - /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - WriteDOC(DOC_ECC_EN, docptr, Mplus_ECCConf); - - /* Let the caller know we completed it */ - *retlen = len; - ret = 0; - - ReadDOC(docptr, Mplus_ReadPipeInit); - ReadDOC(docptr, Mplus_ReadPipeInit); - - /* Read the data via the internal pipeline through CDSN IO - register, see Pipelined Read Operations 11.3 */ - MemReadDOC(docptr, buf, len); - - /* Read the ECC data following raw data */ - MemReadDOC(docptr, eccbuf, 4); - eccbuf[4] = ReadDOC(docptr, Mplus_LastDataRead); - eccbuf[5] = ReadDOC(docptr, Mplus_LastDataRead); - - /* Flush the pipeline */ - dummy = ReadDOC(docptr, Mplus_ECCConf); - dummy = ReadDOC(docptr, Mplus_ECCConf); - - /* Check the ECC Status */ - if (ReadDOC(docptr, Mplus_ECCConf) & 0x80) { - int nb_errors; - /* There was an ECC error */ -#ifdef ECC_DEBUG - printk("DiskOnChip ECC Error: Read at %lx\n", (long)from); -#endif - /* Read the ECC syndrome through the DiskOnChip ECC logic. - These syndrome will be all ZERO when there is no error */ - for (i = 0; i < 6; i++) - syndrome[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); - - nb_errors = doc_decode_ecc(buf, syndrome); -#ifdef ECC_DEBUG - printk("ECC Errors corrected: %x\n", nb_errors); -#endif - if (nb_errors < 0) { - /* We return error, but have actually done the - read. Not that this can be told to user-space, via - sys_read(), but at least MTD-aware stuff can know - about it by checking *retlen */ -#ifdef ECC_DEBUG - printk("%s(%d): Millennium Plus ECC error (from=0x%x:\n", - __FILE__, __LINE__, (int)from); - printk(" syndrome= %*phC\n", 6, syndrome); - printk(" eccbuf= %*phC\n", 6, eccbuf); -#endif - ret = -EIO; - } - } - -#ifdef PSYCHO_DEBUG - printk("ECC DATA at %lx: %*ph\n", (long)from, 6, eccbuf); -#endif - /* disable the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr , Mplus_ECCConf); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - return ret; -} - -static int doc_write(struct mtd_info *mtd, loff_t to, size_t len, - size_t *retlen, const u_char *buf) -{ - int i, before, ret = 0; - loff_t fto; - volatile char dummy; - char eccbuf[6]; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[to >> (this->chipshift)]; - - /* Don't allow writes which aren't exactly one block (512 bytes) */ - if ((to & 0x1ff) || (len != 0x200)) - return -EINVAL; - - /* Determine position of OOB flags, before or after data */ - before = (this->interleave && (to & 0x200)); - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect); - - /* Reset the chip, see Software Requirement 11.4 item 1. 
*/ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - /* Set device to appropriate plane of flash */ - fto = to; - WriteDOC(DoC_GetDataOffset(mtd, &fto), docptr, Mplus_FlashCmd); - - /* On interleaved devices the flags for 2nd half 512 are before data */ - if (before) - fto -= 2; - - /* issue the Serial Data In command to initial the Page Program process */ - DoC_Command(docptr, NAND_CMD_SEQIN, 0x00); - DoC_Address(this, 3, fto, 0x00, 0x00); - - /* Disable the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - - if (before) { - /* Write the block status BLOCK_USED (0x5555) */ - WriteDOC(0x55, docptr, Mil_CDSN_IO); - WriteDOC(0x55, docptr, Mil_CDSN_IO); - } - - /* init the ECC engine, see Reed-Solomon EDC/ECC 11.1 .*/ - WriteDOC(DOC_ECC_EN | DOC_ECC_RW, docptr, Mplus_ECCConf); - - MemWriteDOC(docptr, (unsigned char *) buf, len); - - /* Write ECC data to flash, the ECC info is generated by - the DiskOnChip ECC logic see Reed-Solomon EDC/ECC 11.1 */ - DoC_Delay(docptr, 3); - - /* Read the ECC data through the DiskOnChip ECC logic */ - for (i = 0; i < 6; i++) - eccbuf[i] = ReadDOC(docptr, Mplus_ECCSyndrome0 + i); - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_DIS, docptr, Mplus_ECCConf); - - /* Write the ECC data to flash */ - MemWriteDOC(docptr, eccbuf, 6); - - if (!before) { - /* Write the block status BLOCK_USED (0x5555) */ - WriteDOC(0x55, docptr, Mil_CDSN_IO+6); - WriteDOC(0x55, docptr, Mil_CDSN_IO+7); - } - -#ifdef PSYCHO_DEBUG - printk("OOB data at %lx is %2.2X %2.2X %2.2X %2.2X %2.2X %2.2X\n", - (long) to, eccbuf[0], eccbuf[1], eccbuf[2], eccbuf[3], - eccbuf[4], eccbuf[5]); -#endif - - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - - /* Commit the Page Program command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00); - DoC_WaitReady(docptr); - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5.*/ - DoC_Command(docptr, NAND_CMD_STATUS, 0); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - DoC_Delay(docptr, 2); - if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) { - printk("MTD: Error 0x%x programming at 0x%x\n", dummy, (int)to); - /* Error in programming - FIXME: implement Bad Block Replacement (in nftl.c ??) 
*/ - ret = -EIO; - } - dummy = ReadDOC(docptr, Mplus_LastDataRead); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - /* Let the caller know we completed it */ - *retlen = len; - - return ret; -} - -static int doc_read_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - loff_t fofs, base; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - size_t i, size, got, want; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC((DOC_FLASH_CE | DOC_FLASH_WP), docptr, Mplus_FlashSelect); - - /* disable the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - DoC_WaitReady(docptr); - - /* Maximum of 16 bytes in the OOB region, so limit read to that */ - if (len > 16) - len = 16; - got = 0; - want = len; - - for (i = 0; ((i < 3) && (want > 0)); i++) { - /* Figure out which region we are accessing... */ - fofs = ofs; - base = ofs & 0xf; - if (!this->interleave) { - DoC_Command(docptr, NAND_CMD_READOOB, 0); - size = 16 - base; - } else if (base < 6) { - DoC_Command(docptr, DoC_GetECCOffset(mtd, &fofs), 0); - size = 6 - base; - } else if (base < 8) { - DoC_Command(docptr, DoC_GetFlagsOffset(mtd, &fofs), 0); - size = 8 - base; - } else { - DoC_Command(docptr, DoC_GetHdrOffset(mtd, &fofs), 0); - size = 16 - base; - } - if (size > want) - size = want; - - /* Issue read command */ - DoC_Address(this, 3, fofs, 0, 0x00); - WriteDOC(0, docptr, Mplus_FlashControl); - DoC_WaitReady(docptr); - - ReadDOC(docptr, Mplus_ReadPipeInit); - ReadDOC(docptr, Mplus_ReadPipeInit); - MemReadDOC(docptr, &buf[got], size - 2); - buf[got + size - 2] = ReadDOC(docptr, Mplus_LastDataRead); - buf[got + size - 1] = ReadDOC(docptr, Mplus_LastDataRead); - - ofs += size; - got += size; - want -= size; - } - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - ops->retlen = len; - return 0; -} - -static int doc_write_oob(struct mtd_info *mtd, loff_t ofs, - struct mtd_oob_ops *ops) -{ - volatile char dummy; - loff_t fofs, base; - struct DiskOnChip *this = mtd->priv; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - size_t i, size, got, want; - int ret = 0; - uint8_t *buf = ops->oobbuf; - size_t len = ops->len; - - BUG_ON(ops->mode != MTD_OPS_PLACE_OOB); - - ofs += ops->ooboffs; - - DoC_CheckASIC(docptr); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect); - - - /* Maximum of 16 bytes in the OOB region, so limit write to that */ - if (len > 16) - len = 16; - got = 0; - want = len; - - for (i = 0; 
((i < 3) && (want > 0)); i++) { - /* Reset the chip, see Software Requirement 11.4 item 1. */ - DoC_Command(docptr, NAND_CMD_RESET, 0); - DoC_WaitReady(docptr); - - /* Figure out which region we are accessing... */ - fofs = ofs; - base = ofs & 0x0f; - if (!this->interleave) { - WriteDOC(NAND_CMD_READOOB, docptr, Mplus_FlashCmd); - size = 16 - base; - } else if (base < 6) { - WriteDOC(DoC_GetECCOffset(mtd, &fofs), docptr, Mplus_FlashCmd); - size = 6 - base; - } else if (base < 8) { - WriteDOC(DoC_GetFlagsOffset(mtd, &fofs), docptr, Mplus_FlashCmd); - size = 8 - base; - } else { - WriteDOC(DoC_GetHdrOffset(mtd, &fofs), docptr, Mplus_FlashCmd); - size = 16 - base; - } - if (size > want) - size = want; - - /* Issue the Serial Data In command to initial the Page Program process */ - DoC_Command(docptr, NAND_CMD_SEQIN, 0x00); - DoC_Address(this, 3, fofs, 0, 0x00); - - /* Disable the ECC engine */ - WriteDOC(DOC_ECC_RESET, docptr, Mplus_ECCConf); - - /* Write the data via the internal pipeline through CDSN IO - register, see Pipelined Write Operations 11.2 */ - MemWriteDOC(docptr, (unsigned char *) &buf[got], size); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - WriteDOC(0x00, docptr, Mplus_WritePipeTerm); - - /* Commit the Page Program command and wait for ready - see Software Requirement 11.4 item 1.*/ - DoC_Command(docptr, NAND_CMD_PAGEPROG, 0x00); - DoC_WaitReady(docptr); - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5.*/ - DoC_Command(docptr, NAND_CMD_STATUS, 0x00); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - DoC_Delay(docptr, 2); - if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) { - printk("MTD: Error 0x%x programming oob at 0x%x\n", - dummy, (int)ofs); - /* FIXME: implement Bad Block Replacement */ - ops->retlen = 0; - ret = -EIO; - } - dummy = ReadDOC(docptr, Mplus_LastDataRead); - - ofs += size; - got += size; - want -= size; - } - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - ops->retlen = len; - return ret; -} - -int doc_erase(struct mtd_info *mtd, struct erase_info *instr) -{ - volatile char dummy; - struct DiskOnChip *this = mtd->priv; - __u32 ofs = instr->addr; - __u32 len = instr->len; - void __iomem * docptr = this->virtadr; - struct Nand *mychip = &this->chips[ofs >> this->chipshift]; - - DoC_CheckASIC(docptr); - - if (len != mtd->erasesize) - printk(KERN_WARNING "MTD: Erase not right size (%x != %x)n", - len, mtd->erasesize); - - /* Find the chip which is to be used and select it */ - if (this->curfloor != mychip->floor) { - DoC_SelectFloor(docptr, mychip->floor); - DoC_SelectChip(docptr, mychip->chip); - } else if (this->curchip != mychip->chip) { - DoC_SelectChip(docptr, mychip->chip); - } - this->curfloor = mychip->floor; - this->curchip = mychip->chip; - - instr->state = MTD_ERASE_PENDING; - - /* Millennium Plus bus cycle sequence as per figure 2, section 2.4 */ - WriteDOC(DOC_FLASH_CE, docptr, Mplus_FlashSelect); - - DoC_Command(docptr, NAND_CMD_RESET, 0x00); - DoC_WaitReady(docptr); - - DoC_Command(docptr, NAND_CMD_ERASE1, 0); - DoC_Address(this, 2, ofs, 0, 0x00); - DoC_Command(docptr, NAND_CMD_ERASE2, 0); - DoC_WaitReady(docptr); - instr->state = MTD_ERASING; - - /* Read the status of the flash device through CDSN IO register - see Software Requirement 11.4 item 5. 
*/ - DoC_Command(docptr, NAND_CMD_STATUS, 0); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - dummy = ReadDOC(docptr, Mplus_ReadPipeInit); - if ((dummy = ReadDOC(docptr, Mplus_LastDataRead)) & 1) { - printk("MTD: Error 0x%x erasing at 0x%x\n", dummy, ofs); - /* FIXME: implement Bad Block Replacement (in nftl.c ??) */ - instr->state = MTD_ERASE_FAILED; - } else { - instr->state = MTD_ERASE_DONE; - } - dummy = ReadDOC(docptr, Mplus_LastDataRead); - - /* Disable flash internally */ - WriteDOC(0, docptr, Mplus_FlashSelect); - - mtd_erase_callback(instr); - - return 0; -} - -/**************************************************************************** - * - * Module stuff - * - ****************************************************************************/ - -static void __exit cleanup_doc2001plus(void) -{ - struct mtd_info *mtd; - struct DiskOnChip *this; - - while ((mtd=docmilpluslist)) { - this = mtd->priv; - docmilpluslist = this->nextdoc; - - mtd_device_unregister(mtd); - - iounmap(this->virtadr); - kfree(this->chips); - kfree(mtd); - } -} - -module_exit(cleanup_doc2001plus); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Greg Ungerer et al."); -MODULE_DESCRIPTION("Driver for DiskOnChip Millennium Plus"); diff --git a/trunk/drivers/mtd/devices/docecc.c b/trunk/drivers/mtd/devices/docecc.c deleted file mode 100644 index 4a1c39b6f37d..000000000000 --- a/trunk/drivers/mtd/devices/docecc.c +++ /dev/null @@ -1,521 +0,0 @@ -/* - * ECC algorithm for M-systems disk on chip. We use the excellent Reed - * Solmon code of Phil Karn (karn@ka9q.ampr.org) available under the - * GNU GPL License. The rest is simply to convert the disk on chip - * syndrome into a standard syndome. - * - * Author: Fabrice Bellard (fabrice.bellard@netgem.com) - * Copyright (C) 2000 Netgem S.A. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define DEBUG_ECC 0 -/* need to undef it (from asm/termbits.h) */ -#undef B0 - -#define MM 10 /* Symbol size in bits */ -#define KK (1023-4) /* Number of data symbols per block */ -#define B0 510 /* First root of generator polynomial, alpha form */ -#define PRIM 1 /* power of alpha used to generate roots of generator poly */ -#define NN ((1 << MM) - 1) - -typedef unsigned short dtype; - -/* 1+x^3+x^10 */ -static const int Pp[MM+1] = { 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1 }; - -/* This defines the type used to store an element of the Galois Field - * used by the code. Make sure this is something larger than a char if - * if anything larger than GF(256) is used. - * - * Note: unsigned char will work up to GF(256) but int seems to run - * faster on the Pentium. 
- */ -typedef int gf; - -/* No legal value in index form represents zero, so - * we need a special value for this purpose - */ -#define A0 (NN) - -/* Compute x % NN, where NN is 2**MM - 1, - * without a slow divide - */ -static inline gf -modnn(int x) -{ - while (x >= NN) { - x -= NN; - x = (x >> MM) + (x & NN); - } - return x; -} - -#define CLEAR(a,n) {\ -int ci;\ -for(ci=(n)-1;ci >=0;ci--)\ -(a)[ci] = 0;\ -} - -#define COPY(a,b,n) {\ -int ci;\ -for(ci=(n)-1;ci >=0;ci--)\ -(a)[ci] = (b)[ci];\ -} - -#define COPYDOWN(a,b,n) {\ -int ci;\ -for(ci=(n)-1;ci >=0;ci--)\ -(a)[ci] = (b)[ci];\ -} - -#define Ldec 1 - -/* generate GF(2**m) from the irreducible polynomial p(X) in Pp[0]..Pp[m] - lookup tables: index->polynomial form alpha_to[] contains j=alpha**i; - polynomial form -> index form index_of[j=alpha**i] = i - alpha=2 is the primitive element of GF(2**m) - HARI's COMMENT: (4/13/94) alpha_to[] can be used as follows: - Let @ represent the primitive element commonly called "alpha" that - is the root of the primitive polynomial p(x). Then in GF(2^m), for any - 0 <= i <= 2^m-2, - @^i = a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1) - where the binary vector (a(0),a(1),a(2),...,a(m-1)) is the representation - of the integer "alpha_to[i]" with a(0) being the LSB and a(m-1) the MSB. Thus for - example the polynomial representation of @^5 would be given by the binary - representation of the integer "alpha_to[5]". - Similarly, index_of[] can be used as follows: - As above, let @ represent the primitive element of GF(2^m) that is - the root of the primitive polynomial p(x). In order to find the power - of @ (alpha) that has the polynomial representation - a(0) + a(1) @ + a(2) @^2 + ... + a(m-1) @^(m-1) - we consider the integer "i" whose binary representation with a(0) being LSB - and a(m-1) MSB is (a(0),a(1),...,a(m-1)) and locate the entry - "index_of[i]". Now, @^index_of[i] is that element whose polynomial - representation is (a(0),a(1),a(2),...,a(m-1)). - NOTE: - The element alpha_to[2^m-1] = 0 always signifying that the - representation of "@^infinity" = 0 is (0,0,0,...,0). - Similarly, the element index_of[0] = A0 always signifying - that the power of alpha which has the polynomial representation - (0,0,...,0) is "infinity". - -*/ - -static void -generate_gf(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1]) -{ - register int i, mask; - - mask = 1; - Alpha_to[MM] = 0; - for (i = 0; i < MM; i++) { - Alpha_to[i] = mask; - Index_of[Alpha_to[i]] = i; - /* If Pp[i] == 1 then, term @^i occurs in poly-repr of @^MM */ - if (Pp[i] != 0) - Alpha_to[MM] ^= mask; /* Bit-wise EXOR operation */ - mask <<= 1; /* single left-shift */ - } - Index_of[Alpha_to[MM]] = MM; - /* - * Have obtained poly-repr of @^MM. Poly-repr of @^(i+1) is given by - * poly-repr of @^i shifted left one-bit and accounting for any @^MM - * term that may occur when poly-repr of @^i is shifted. - */ - mask >>= 1; - for (i = MM + 1; i < NN; i++) { - if (Alpha_to[i - 1] >= mask) - Alpha_to[i] = Alpha_to[MM] ^ ((Alpha_to[i - 1] ^ mask) << 1); - else - Alpha_to[i] = Alpha_to[i - 1] << 1; - Index_of[Alpha_to[i]] = i; - } - Index_of[0] = A0; - Alpha_to[NN] = 0; -} - -/* - * Performs ERRORS+ERASURES decoding of RS codes. bb[] is the content - * of the feedback shift register after having processed the data and - * the ECC. - * - * Return number of symbols corrected, or -1 if codeword is illegal - * or uncorrectable. If eras_pos is non-null, the detected error locations - * are written back. NOTE! This array must be at least NN-KK elements long. 
- * The corrected data are written in eras_val[]. They must be xor with the data - * to retrieve the correct data : data[erase_pos[i]] ^= erase_val[i] . - * - * First "no_eras" erasures are declared by the calling program. Then, the - * maximum # of errors correctable is t_after_eras = floor((NN-KK-no_eras)/2). - * If the number of channel errors is not greater than "t_after_eras" the - * transmitted codeword will be recovered. Details of algorithm can be found - * in R. Blahut's "Theory ... of Error-Correcting Codes". - - * Warning: the eras_pos[] array must not contain duplicate entries; decoder failure - * will result. The decoder *could* check for this condition, but it would involve - * extra time on every decoding operation. - * */ -static int -eras_dec_rs(dtype Alpha_to[NN + 1], dtype Index_of[NN + 1], - gf bb[NN - KK + 1], gf eras_val[NN-KK], int eras_pos[NN-KK], - int no_eras) -{ - int deg_lambda, el, deg_omega; - int i, j, r,k; - gf u,q,tmp,num1,num2,den,discr_r; - gf lambda[NN-KK + 1], s[NN-KK + 1]; /* Err+Eras Locator poly - * and syndrome poly */ - gf b[NN-KK + 1], t[NN-KK + 1], omega[NN-KK + 1]; - gf root[NN-KK], reg[NN-KK + 1], loc[NN-KK]; - int syn_error, count; - - syn_error = 0; - for(i=0;i 0) { - /* Init lambda to be the erasure locator polynomial */ - lambda[1] = Alpha_to[modnn(PRIM * eras_pos[0])]; - for (i = 1; i < no_eras; i++) { - u = modnn(PRIM*eras_pos[i]); - for (j = i+1; j > 0; j--) { - tmp = Index_of[lambda[j - 1]]; - if(tmp != A0) - lambda[j] ^= Alpha_to[modnn(u + tmp)]; - } - } -#if DEBUG_ECC >= 1 - /* Test code that verifies the erasure locator polynomial just constructed - Needed only for decoder debugging. */ - - /* find roots of the erasure location polynomial */ - for(i=1;i<=no_eras;i++) - reg[i] = Index_of[lambda[i]]; - count = 0; - for (i = 1,k=NN-Ldec; i <= NN; i++,k = modnn(NN+k-Ldec)) { - q = 1; - for (j = 1; j <= no_eras; j++) - if (reg[j] != A0) { - reg[j] = modnn(reg[j] + j); - q ^= Alpha_to[reg[j]]; - } - if (q != 0) - continue; - /* store root and error location number indices */ - root[count] = i; - loc[count] = k; - count++; - } - if (count != no_eras) { - printf("\n lambda(x) is WRONG\n"); - count = -1; - goto finish; - } -#if DEBUG_ECC >= 2 - printf("\n Erasure positions as determined by roots of Eras Loc Poly:\n"); - for (i = 0; i < count; i++) - printf("%d ", loc[i]); - printf("\n"); -#endif -#endif - } - for(i=0;i 0; j--){ - if (reg[j] != A0) { - reg[j] = modnn(reg[j] + j); - q ^= Alpha_to[reg[j]]; - } - } - if (q != 0) - continue; - /* store root (index-form) and error location number */ - root[count] = i; - loc[count] = k; - /* If we've already found max possible roots, - * abort the search to save time - */ - if(++count == deg_lambda) - break; - } - if (deg_lambda != count) { - /* - * deg(lambda) unequal to number of roots => uncorrectable - * error detected - */ - count = -1; - goto finish; - } - /* - * Compute err+eras evaluator poly omega(x) = s(x)*lambda(x) (modulo - * x**(NN-KK)). in index form. Also find deg(omega). - */ - deg_omega = 0; - for (i = 0; i < NN-KK;i++){ - tmp = 0; - j = (deg_lambda < i) ? deg_lambda : i; - for(;j >= 0; j--){ - if ((s[i + 1 - j] != A0) && (lambda[j] != A0)) - tmp ^= Alpha_to[modnn(s[i + 1 - j] + lambda[j])]; - } - if(tmp != 0) - deg_omega = i; - omega[i] = Index_of[tmp]; - } - omega[NN-KK] = A0; - - /* - * Compute error values in poly-form. 
num1 = omega(inv(X(l))), num2 = - * inv(X(l))**(B0-1) and den = lambda_pr(inv(X(l))) all in poly-form - */ - for (j = count-1; j >=0; j--) { - num1 = 0; - for (i = deg_omega; i >= 0; i--) { - if (omega[i] != A0) - num1 ^= Alpha_to[modnn(omega[i] + i * root[j])]; - } - num2 = Alpha_to[modnn(root[j] * (B0 - 1) + NN)]; - den = 0; - - /* lambda[i+1] for i even is the formal derivative lambda_pr of lambda[i] */ - for (i = min(deg_lambda,NN-KK-1) & ~1; i >= 0; i -=2) { - if(lambda[i+1] != A0) - den ^= Alpha_to[modnn(lambda[i+1] + i * root[j])]; - } - if (den == 0) { -#if DEBUG_ECC >= 1 - printf("\n ERROR: denominator = 0\n"); -#endif - /* Convert to dual- basis */ - count = -1; - goto finish; - } - /* Apply error to data */ - if (num1 != 0) { - eras_val[j] = Alpha_to[modnn(Index_of[num1] + Index_of[num2] + NN - Index_of[den])]; - } else { - eras_val[j] = 0; - } - } - finish: - for(i=0;i> 2) | ((ecc1[2] & 0x0f) << 6); - bb[2] = ((ecc1[2] & 0xf0) >> 4) | ((ecc1[3] & 0x3f) << 4); - bb[3] = ((ecc1[3] & 0xc0) >> 6) | ((ecc1[0] & 0xff) << 2); - - nb_errors = eras_dec_rs(Alpha_to, Index_of, bb, - error_val, error_pos, 0); - if (nb_errors <= 0) - goto the_end; - - /* correct the errors */ - for(i=0;i= NB_DATA && pos < KK) { - nb_errors = -1; - goto the_end; - } - if (pos < NB_DATA) { - /* extract bit position (MSB first) */ - pos = 10 * (NB_DATA - 1 - pos) - 6; - /* now correct the following 10 bits. At most two bytes - can be modified since pos is even */ - index = (pos >> 3) ^ 1; - bitpos = pos & 7; - if ((index >= 0 && index < SECTOR_SIZE) || - index == (SECTOR_SIZE + 1)) { - val = error_val[i] >> (2 + bitpos); - parity ^= val; - if (index < SECTOR_SIZE) - sector[index] ^= val; - } - index = ((pos >> 3) + 1) ^ 1; - bitpos = (bitpos + 10) & 7; - if (bitpos == 0) - bitpos = 8; - if ((index >= 0 && index < SECTOR_SIZE) || - index == (SECTOR_SIZE + 1)) { - val = error_val[i] << (8 - bitpos); - parity ^= val; - if (index < SECTOR_SIZE) - sector[index] ^= val; - } - } - } - - /* use parity to test extra errors */ - if ((parity & 0xff) != 0) - nb_errors = -1; - - the_end: - kfree(Alpha_to); - kfree(Index_of); - return nb_errors; -} - -EXPORT_SYMBOL_GPL(doc_decode_ecc); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Fabrice Bellard "); -MODULE_DESCRIPTION("ECC code for correcting errors detected by DiskOnChip 2000 and Millennium ECC hardware"); diff --git a/trunk/drivers/mtd/devices/docg3.c b/trunk/drivers/mtd/devices/docg3.c index 8510ccb9c6f0..3e1b0a0ef4db 100644 --- a/trunk/drivers/mtd/devices/docg3.c +++ b/trunk/drivers/mtd/devices/docg3.c @@ -123,7 +123,7 @@ static inline void doc_flash_address(struct docg3 *docg3, u8 addr) doc_writeb(docg3, addr, DOC_FLASHADDRESS); } -static char const *part_probes[] = { "cmdlinepart", "saftlpart", NULL }; +static char const * const part_probes[] = { "cmdlinepart", "saftlpart", NULL }; static int doc_register_readb(struct docg3 *docg3, int reg) { @@ -2144,18 +2144,7 @@ static struct platform_driver g3_driver = { .remove = __exit_p(docg3_release), }; -static int __init docg3_init(void) -{ - return platform_driver_probe(&g3_driver, docg3_probe); -} -module_init(docg3_init); - - -static void __exit docg3_exit(void) -{ - platform_driver_unregister(&g3_driver); -} -module_exit(docg3_exit); +module_platform_driver_probe(g3_driver, docg3_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Robert Jarzmik "); diff --git a/trunk/drivers/mtd/devices/docprobe.c b/trunk/drivers/mtd/devices/docprobe.c deleted file mode 100644 index 88b3fd3e18a7..000000000000 --- 
a/trunk/drivers/mtd/devices/docprobe.c +++ /dev/null @@ -1,325 +0,0 @@ - -/* Linux driver for Disk-On-Chip devices */ -/* Probe routines common to all DoC devices */ -/* (C) 1999 Machine Vision Holdings, Inc. */ -/* (C) 1999-2003 David Woodhouse */ - - -/* DOC_PASSIVE_PROBE: - In order to ensure that the BIOS checksum is correct at boot time, and - hence that the onboard BIOS extension gets executed, the DiskOnChip - goes into reset mode when it is read sequentially: all registers - return 0xff until the chip is woken up again by writing to the - DOCControl register. - - Unfortunately, this means that the probe for the DiskOnChip is unsafe, - because one of the first things it does is write to where it thinks - the DOCControl register should be - which may well be shared memory - for another device. I've had machines which lock up when this is - attempted. Hence the possibility to do a passive probe, which will fail - to detect a chip in reset mode, but is at least guaranteed not to lock - the machine. - - If you have this problem, uncomment the following line: -#define DOC_PASSIVE_PROBE -*/ - - -/* DOC_SINGLE_DRIVER: - Millennium driver has been merged into DOC2000 driver. - - The old Millennium-only driver has been retained just in case there - are problems with the new code. If the combined driver doesn't work - for you, you can try the old one by undefining DOC_SINGLE_DRIVER - below and also enabling it in your configuration. If this fixes the - problems, please send a report to the MTD mailing list at - . -*/ -#define DOC_SINGLE_DRIVER - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - - -static unsigned long doc_config_location = CONFIG_MTD_DOCPROBE_ADDRESS; -module_param(doc_config_location, ulong, 0); -MODULE_PARM_DESC(doc_config_location, "Physical memory address at which to probe for DiskOnChip"); - -static unsigned long __initdata doc_locations[] = { -#if defined (__alpha__) || defined(__i386__) || defined(__x86_64__) -#ifdef CONFIG_MTD_DOCPROBE_HIGH - 0xfffc8000, 0xfffca000, 0xfffcc000, 0xfffce000, - 0xfffd0000, 0xfffd2000, 0xfffd4000, 0xfffd6000, - 0xfffd8000, 0xfffda000, 0xfffdc000, 0xfffde000, - 0xfffe0000, 0xfffe2000, 0xfffe4000, 0xfffe6000, - 0xfffe8000, 0xfffea000, 0xfffec000, 0xfffee000, -#else /* CONFIG_MTD_DOCPROBE_HIGH */ - 0xc8000, 0xca000, 0xcc000, 0xce000, - 0xd0000, 0xd2000, 0xd4000, 0xd6000, - 0xd8000, 0xda000, 0xdc000, 0xde000, - 0xe0000, 0xe2000, 0xe4000, 0xe6000, - 0xe8000, 0xea000, 0xec000, 0xee000, -#endif /* CONFIG_MTD_DOCPROBE_HIGH */ -#endif - 0xffffffff }; - -/* doccheck: Probe a given memory window to see if there's a DiskOnChip present */ - -static inline int __init doccheck(void __iomem *potential, unsigned long physadr) -{ - void __iomem *window=potential; - unsigned char tmp, tmpb, tmpc, ChipID; -#ifndef DOC_PASSIVE_PROBE - unsigned char tmp2; -#endif - - /* Routine copied from the Linux DOC driver */ - -#ifdef CONFIG_MTD_DOCPROBE_55AA - /* Check for 0x55 0xAA signature at beginning of window, - this is no longer true once we remove the IPL (for Millennium */ - if (ReadDOC(window, Sig1) != 0x55 || ReadDOC(window, Sig2) != 0xaa) - return 0; -#endif /* CONFIG_MTD_DOCPROBE_55AA */ - -#ifndef DOC_PASSIVE_PROBE - /* It's not possible to cleanly detect the DiskOnChip - the - * bootup procedure will put the device into reset mode, and - * it's not possible to talk to it without actually writing - * to the DOCControl register. 
So we store the current contents - * of the DOCControl register's location, in case we later decide - * that it's not a DiskOnChip, and want to put it back how we - * found it. - */ - tmp2 = ReadDOC(window, DOCControl); - - /* Reset the DiskOnChip ASIC */ - WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, - window, DOCControl); - WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_RESET, - window, DOCControl); - - /* Enable the DiskOnChip ASIC */ - WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, - window, DOCControl); - WriteDOC(DOC_MODE_CLR_ERR | DOC_MODE_MDWREN | DOC_MODE_NORMAL, - window, DOCControl); -#endif /* !DOC_PASSIVE_PROBE */ - - /* We need to read the ChipID register four times. For some - newer DiskOnChip 2000 units, the first three reads will - return the DiskOnChip Millennium ident. Don't ask. */ - ChipID = ReadDOC(window, ChipID); - - switch (ChipID) { - case DOC_ChipID_Doc2k: - /* Check the TOGGLE bit in the ECC register */ - tmp = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT; - tmpb = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT; - tmpc = ReadDOC(window, 2k_ECCStatus) & DOC_TOGGLE_BIT; - if (tmp != tmpb && tmp == tmpc) - return ChipID; - break; - - case DOC_ChipID_DocMil: - /* Check for the new 2000 with Millennium ASIC */ - ReadDOC(window, ChipID); - ReadDOC(window, ChipID); - if (ReadDOC(window, ChipID) != DOC_ChipID_DocMil) - ChipID = DOC_ChipID_Doc2kTSOP; - - /* Check the TOGGLE bit in the ECC register */ - tmp = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT; - tmpb = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT; - tmpc = ReadDOC(window, ECCConf) & DOC_TOGGLE_BIT; - if (tmp != tmpb && tmp == tmpc) - return ChipID; - break; - - case DOC_ChipID_DocMilPlus16: - case DOC_ChipID_DocMilPlus32: - case 0: - /* Possible Millennium+, need to do more checks */ -#ifndef DOC_PASSIVE_PROBE - /* Possibly release from power down mode */ - for (tmp = 0; (tmp < 4); tmp++) - ReadDOC(window, Mplus_Power); - - /* Reset the DiskOnChip ASIC */ - tmp = DOC_MODE_RESET | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | - DOC_MODE_BDECT; - WriteDOC(tmp, window, Mplus_DOCControl); - WriteDOC(~tmp, window, Mplus_CtrlConfirm); - - mdelay(1); - /* Enable the DiskOnChip ASIC */ - tmp = DOC_MODE_NORMAL | DOC_MODE_MDWREN | DOC_MODE_RST_LAT | - DOC_MODE_BDECT; - WriteDOC(tmp, window, Mplus_DOCControl); - WriteDOC(~tmp, window, Mplus_CtrlConfirm); - mdelay(1); -#endif /* !DOC_PASSIVE_PROBE */ - - ChipID = ReadDOC(window, ChipID); - - switch (ChipID) { - case DOC_ChipID_DocMilPlus16: - case DOC_ChipID_DocMilPlus32: - /* Check the TOGGLE bit in the toggle register */ - tmp = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT; - tmpb = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT; - tmpc = ReadDOC(window, Mplus_Toggle) & DOC_TOGGLE_BIT; - if (tmp != tmpb && tmp == tmpc) - return ChipID; - default: - break; - } - /* FALL TRHU */ - - default: - -#ifdef CONFIG_MTD_DOCPROBE_55AA - printk(KERN_DEBUG "Possible DiskOnChip with unknown ChipID %2.2X found at 0x%lx\n", - ChipID, physadr); -#endif -#ifndef DOC_PASSIVE_PROBE - /* Put back the contents of the DOCControl register, in case it's not - * actually a DiskOnChip. 
- */ - WriteDOC(tmp2, window, DOCControl); -#endif - return 0; - } - - printk(KERN_WARNING "DiskOnChip failed TOGGLE test, dropping.\n"); - -#ifndef DOC_PASSIVE_PROBE - /* Put back the contents of the DOCControl register: it's not a DiskOnChip */ - WriteDOC(tmp2, window, DOCControl); -#endif - return 0; -} - -static int docfound; - -extern void DoC2k_init(struct mtd_info *); -extern void DoCMil_init(struct mtd_info *); -extern void DoCMilPlus_init(struct mtd_info *); - -static void __init DoC_Probe(unsigned long physadr) -{ - void __iomem *docptr; - struct DiskOnChip *this; - struct mtd_info *mtd; - int ChipID; - char namebuf[15]; - char *name = namebuf; - void (*initroutine)(struct mtd_info *) = NULL; - - docptr = ioremap(physadr, DOC_IOREMAP_LEN); - - if (!docptr) - return; - - if ((ChipID = doccheck(docptr, physadr))) { - if (ChipID == DOC_ChipID_Doc2kTSOP) { - /* Remove this at your own peril. The hardware driver works but nothing prevents you from erasing bad blocks */ - printk(KERN_NOTICE "Refusing to drive DiskOnChip 2000 TSOP until Bad Block Table is correctly supported by INFTL\n"); - iounmap(docptr); - return; - } - docfound = 1; - mtd = kzalloc(sizeof(struct DiskOnChip) + sizeof(struct mtd_info), GFP_KERNEL); - if (!mtd) { - printk(KERN_WARNING "Cannot allocate memory for data structures. Dropping.\n"); - iounmap(docptr); - return; - } - - this = (struct DiskOnChip *)(&mtd[1]); - mtd->priv = this; - this->virtadr = docptr; - this->physadr = physadr; - this->ChipID = ChipID; - sprintf(namebuf, "with ChipID %2.2X", ChipID); - - switch(ChipID) { - case DOC_ChipID_Doc2kTSOP: - name="2000 TSOP"; - initroutine = symbol_request(DoC2k_init); - break; - - case DOC_ChipID_Doc2k: - name="2000"; - initroutine = symbol_request(DoC2k_init); - break; - - case DOC_ChipID_DocMil: - name="Millennium"; -#ifdef DOC_SINGLE_DRIVER - initroutine = symbol_request(DoC2k_init); -#else - initroutine = symbol_request(DoCMil_init); -#endif /* DOC_SINGLE_DRIVER */ - break; - - case DOC_ChipID_DocMilPlus16: - case DOC_ChipID_DocMilPlus32: - name="MillenniumPlus"; - initroutine = symbol_request(DoCMilPlus_init); - break; - } - - if (initroutine) { - (*initroutine)(mtd); - symbol_put_addr(initroutine); - return; - } - printk(KERN_NOTICE "Cannot find driver for DiskOnChip %s at 0x%lX\n", name, physadr); - kfree(mtd); - } - iounmap(docptr); -} - - -/**************************************************************************** - * - * Module stuff - * - ****************************************************************************/ - -static int __init init_doc(void) -{ - int i; - - if (doc_config_location) { - printk(KERN_INFO "Using configured DiskOnChip probe address 0x%lx\n", doc_config_location); - DoC_Probe(doc_config_location); - } else { - for (i=0; (doc_locations[i] != 0xffffffff); i++) { - DoC_Probe(doc_locations[i]); - } - } - /* No banner message any more. Print a message if no DiskOnChip - found, so the user knows we at least tried. 
*/ - if (!docfound) - printk(KERN_INFO "No recognised DiskOnChip devices found\n"); - return -EAGAIN; -} - -module_init(init_doc); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Woodhouse "); -MODULE_DESCRIPTION("Probe code for DiskOnChip 2000 and Millennium devices"); - diff --git a/trunk/drivers/mtd/devices/elm.c b/trunk/drivers/mtd/devices/elm.c index 2ec5da9ee248..dccef9fdc1f2 100644 --- a/trunk/drivers/mtd/devices/elm.c +++ b/trunk/drivers/mtd/devices/elm.c @@ -81,14 +81,21 @@ static u32 elm_read_reg(struct elm_info *info, int offset) * @dev: ELM device * @bch_type: Type of BCH ecc */ -void elm_config(struct device *dev, enum bch_ecc bch_type) +int elm_config(struct device *dev, enum bch_ecc bch_type) { u32 reg_val; struct elm_info *info = dev_get_drvdata(dev); + if (!info) { + dev_err(dev, "Unable to configure elm - device not probed?\n"); + return -ENODEV; + } + reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16); elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val); info->bch_type = bch_type; + + return 0; } EXPORT_SYMBOL(elm_config); diff --git a/trunk/drivers/mtd/devices/m25p80.c b/trunk/drivers/mtd/devices/m25p80.c index 5b6b0728be21..2f3d2a5ff349 100644 --- a/trunk/drivers/mtd/devices/m25p80.c +++ b/trunk/drivers/mtd/devices/m25p80.c @@ -681,6 +681,7 @@ struct flash_info { u16 flags; #define SECT_4K 0x01 /* OPCODE_BE_4K works uniformly */ #define M25P_NO_ERASE 0x02 /* No erase command needed */ +#define SST_WRITE 0x04 /* use SST byte programming */ }; #define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ @@ -728,6 +729,7 @@ static const struct spi_device_id m25p_ids[] = { { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, + { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, /* Everspin */ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) }, @@ -740,7 +742,6 @@ static const struct spi_device_id m25p_ids[] = { { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, - { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, /* Macronix */ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, @@ -753,8 +754,10 @@ static const struct spi_device_id m25p_ids[] = { { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, 0) }, /* Micron */ + { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) }, { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, @@ -781,14 +784,15 @@ static const struct spi_device_id m25p_ids[] = { { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, /* SST -- large erase sizes are "overlays", "sectors" are 4K */ - { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K) }, - { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K) }, - { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K) }, - { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K) }, - { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K) }, - { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K) }, - { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K) }, - { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K) }, + { "sst25vf040b", INFO(0xbf258d, 
0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, + { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, + { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) }, + { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) }, + { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, + { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) }, + { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) }, + { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) }, + { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, /* ST Microelectronics -- newer production may have feature updates */ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, @@ -838,6 +842,7 @@ static const struct spi_device_id m25p_ids[] = { { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, + { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, /* Catalyst / On Semiconductor -- non-JEDEC */ @@ -1000,7 +1005,7 @@ static int m25p_probe(struct spi_device *spi) } /* sst flash chips use AAI word program */ - if (JEDEC_MFR(info->jedec_id) == CFI_MFR_SST) + if (info->flags & SST_WRITE) flash->mtd._write = sst_write; else flash->mtd._write = m25p80_write; diff --git a/trunk/drivers/mtd/devices/mtd_dataflash.c b/trunk/drivers/mtd/devices/mtd_dataflash.c index 945c9f762349..28779b6dfcd9 100644 --- a/trunk/drivers/mtd/devices/mtd_dataflash.c +++ b/trunk/drivers/mtd/devices/mtd_dataflash.c @@ -105,8 +105,6 @@ static const struct of_device_id dataflash_dt_ids[] = { { .compatible = "atmel,dataflash", }, { /* sentinel */ } }; -#else -#define dataflash_dt_ids NULL #endif /* ......................................................................... */ @@ -914,7 +912,7 @@ static struct spi_driver dataflash_driver = { .driver = { .name = "mtd_dataflash", .owner = THIS_MODULE, - .of_match_table = dataflash_dt_ids, + .of_match_table = of_match_ptr(dataflash_dt_ids), }, .probe = dataflash_probe, diff --git a/trunk/drivers/mtd/maps/Kconfig b/trunk/drivers/mtd/maps/Kconfig index 3ed17c4d4358..bed9d58d5741 100644 --- a/trunk/drivers/mtd/maps/Kconfig +++ b/trunk/drivers/mtd/maps/Kconfig @@ -249,22 +249,6 @@ config MTD_LANTIQ help Support for NOR flash attached to the Lantiq SoC's External Bus Unit. -config MTD_DILNETPC - tristate "CFI Flash device mapped on DIL/Net PC" - depends on X86 && MTD_CFI_INTELEXT && BROKEN - help - MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP". - For details, see - and - -config MTD_DILNETPC_BOOTSIZE - hex "Size of DIL/Net PC flash boot partition" - depends on MTD_DILNETPC - default "0x80000" - help - The amount of space taken up by the kernel or Etherboot - on the DIL/Net PC flash chips. - config MTD_L440GX tristate "BIOS flash chip on Intel L440GX boards" depends on X86 && MTD_JEDECPROBE @@ -274,42 +258,6 @@ config MTD_L440GX BE VERY CAREFUL. -config MTD_TQM8XXL - tristate "CFI Flash device mapped on TQM8XXL" - depends on MTD_CFI && TQM8xxL - help - The TQM8xxL PowerPC board has up to two banks of CFI-compliant - chips, currently uses AMD one. This 'mapping' driver supports - that arrangement, allowing the CFI probe and command set driver - code to communicate with the chips on the TQM8xxL board. More at - . 
- -config MTD_RPXLITE - tristate "CFI Flash device mapped on RPX Lite or CLLF" - depends on MTD_CFI && (RPXCLASSIC || RPXLITE) - help - The RPXLite PowerPC board has CFI-compliant chips mapped in - a strange sparse mapping. This 'mapping' driver supports that - arrangement, allowing the CFI probe and command set driver code - to communicate with the chips on the RPXLite board. More at - . - -config MTD_MBX860 - tristate "System flash on MBX860 board" - depends on MTD_CFI && MBX - help - This enables access routines for the flash chips on the Motorola - MBX860 board. If you have one of these boards and would like - to use the flash chips on it, say 'Y'. - -config MTD_DBOX2 - tristate "CFI Flash device mapped on D-Box2" - depends on DBOX2 && MTD_CFI_INTELSTD && MTD_CFI_INTELEXT && MTD_CFI_AMDSTD - help - This enables access routines for the flash chips on the Nokia/Sagem - D-Box 2 board. If you have one of these boards and would like to use - the flash chips on it, say 'Y'. - config MTD_CFI_FLAGADM tristate "CFI Flash device mapping on FlagaDM" depends on 8xx && MTD_CFI @@ -349,15 +297,6 @@ config MTD_IXP4XX IXDP425 and Coyote. If you have an IXP4xx based board and would like to use the flash chips on it, say 'Y'. -config MTD_IXP2000 - tristate "CFI Flash device mapped on Intel IXP2000 based systems" - depends on MTD_CFI && MTD_COMPLEX_MAPPINGS && ARCH_IXP2000 - help - This enables MTD access to flash devices on platforms based - on Intel's IXP2000 family of network processors. If you have an - IXP2000 based board and would like to use the flash chips on it, - say 'Y'. - config MTD_AUTCPU12 bool "NV-RAM mapping AUTCPU12 board" depends on ARCH_AUTCPU12 @@ -372,13 +311,6 @@ config MTD_IMPA7 This enables access to the NOR Flash on the impA7 board of implementa GmbH. If you have such a board, say 'Y' here. -config MTD_H720X - tristate "Hynix evaluation board mappings" - depends on MTD_CFI && ( ARCH_H7201 || ARCH_H7202 ) - help - This enables access to the flash chips on the Hynix evaluation boards. - If you have such a board, say 'Y'. - # This needs CFI or JEDEC, depending on the cards found. config MTD_PCI tristate "PCI MTD driver" @@ -419,7 +351,7 @@ config MTD_BFIN_ASYNC config MTD_GPIO_ADDR tristate "GPIO-assisted Flash Chip Support" - depends on GENERIC_GPIO || GPIOLIB + depends on GPIOLIB depends on MTD_COMPLEX_MAPPINGS help Map driver which allows flashes to be partially physically addressed @@ -433,15 +365,6 @@ config MTD_UCLINUX help Map driver to support image based filesystems for uClinux. -config MTD_DMV182 - tristate "Map driver for Dy-4 SVME/DMV-182 board." - depends on DMV182 - select MTD_MAP_BANK_WIDTH_32 - select MTD_CFI_I8 - select MTD_CFI_AMDSTD - help - Map driver for Dy-4 SVME/DMV-182 board. 
- config MTD_INTEL_VR_NOR tristate "NOR flash on Intel Vermilion Range Expansion Bus CS0" depends on PCI diff --git a/trunk/drivers/mtd/maps/Makefile b/trunk/drivers/mtd/maps/Makefile index 4ded28711bc1..395a12444048 100644 --- a/trunk/drivers/mtd/maps/Makefile +++ b/trunk/drivers/mtd/maps/Makefile @@ -9,7 +9,6 @@ endif # Chip mappings obj-$(CONFIG_MTD_CFI_FLAGADM) += cfi_flagadm.o obj-$(CONFIG_MTD_DC21285) += dc21285.o -obj-$(CONFIG_MTD_DILNETPC) += dilnetpc.o obj-$(CONFIG_MTD_L440GX) += l440gx.o obj-$(CONFIG_MTD_AMD76XROM) += amd76xrom.o obj-$(CONFIG_MTD_ESB2ROM) += esb2rom.o @@ -17,15 +16,12 @@ obj-$(CONFIG_MTD_ICHXROM) += ichxrom.o obj-$(CONFIG_MTD_CK804XROM) += ck804xrom.o obj-$(CONFIG_MTD_TSUNAMI) += tsunami_flash.o obj-$(CONFIG_MTD_PXA2XX) += pxa2xx-flash.o -obj-$(CONFIG_MTD_MBX860) += mbx860.o obj-$(CONFIG_MTD_OCTAGON) += octagon-5066.o obj-$(CONFIG_MTD_PHYSMAP) += physmap.o obj-$(CONFIG_MTD_PHYSMAP_OF) += physmap_of.o obj-$(CONFIG_MTD_PISMO) += pismo.o obj-$(CONFIG_MTD_PMC_MSP_EVM) += pmcmsp-flash.o obj-$(CONFIG_MTD_PCMCIA) += pcmciamtd.o -obj-$(CONFIG_MTD_RPXLITE) += rpxlite.o -obj-$(CONFIG_MTD_TQM8XXL) += tqm8xxl.o obj-$(CONFIG_MTD_SA1100) += sa1100-flash.o obj-$(CONFIG_MTD_SBC_GXX) += sbc_gxx.o obj-$(CONFIG_MTD_SC520CDP) += sc520cdp.o @@ -34,7 +30,6 @@ obj-$(CONFIG_MTD_TS5500) += ts5500_flash.o obj-$(CONFIG_MTD_SUN_UFLASH) += sun_uflash.o obj-$(CONFIG_MTD_VMAX) += vmax301.o obj-$(CONFIG_MTD_SCx200_DOCFLASH)+= scx200_docflash.o -obj-$(CONFIG_MTD_DBOX2) += dbox2-flash.o obj-$(CONFIG_MTD_SOLUTIONENGINE)+= solutionengine.o obj-$(CONFIG_MTD_PCI) += pci.o obj-$(CONFIG_MTD_AUTCPU12) += autcpu12-nvram.o @@ -42,10 +37,7 @@ obj-$(CONFIG_MTD_IMPA7) += impa7.o obj-$(CONFIG_MTD_UCLINUX) += uclinux.o obj-$(CONFIG_MTD_NETtel) += nettel.o obj-$(CONFIG_MTD_SCB2_FLASH) += scb2_flash.o -obj-$(CONFIG_MTD_H720X) += h720x-flash.o obj-$(CONFIG_MTD_IXP4XX) += ixp4xx.o -obj-$(CONFIG_MTD_IXP2000) += ixp2000.o -obj-$(CONFIG_MTD_DMV182) += dmv182.o obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o diff --git a/trunk/drivers/mtd/maps/bfin-async-flash.c b/trunk/drivers/mtd/maps/bfin-async-flash.c index f833edfaab79..319b04a6c9d1 100644 --- a/trunk/drivers/mtd/maps/bfin-async-flash.c +++ b/trunk/drivers/mtd/maps/bfin-async-flash.c @@ -122,7 +122,8 @@ static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const voi switch_back(state); } -static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; +static const char * const part_probe_types[] = { + "cmdlinepart", "RedBoot", NULL }; static int bfin_flash_probe(struct platform_device *pdev) { diff --git a/trunk/drivers/mtd/maps/ck804xrom.c b/trunk/drivers/mtd/maps/ck804xrom.c index 586a1c77e48a..0455166f05fa 100644 --- a/trunk/drivers/mtd/maps/ck804xrom.c +++ b/trunk/drivers/mtd/maps/ck804xrom.c @@ -308,8 +308,7 @@ static int ck804xrom_init_one(struct pci_dev *pdev, out: /* Free any left over map structures */ - if (map) - kfree(map); + kfree(map); /* See if I have any map structures */ if (list_empty(&window->maps)) { diff --git a/trunk/drivers/mtd/maps/dbox2-flash.c b/trunk/drivers/mtd/maps/dbox2-flash.c deleted file mode 100644 index 85bdece6ab3f..000000000000 --- a/trunk/drivers/mtd/maps/dbox2-flash.c +++ /dev/null @@ -1,123 +0,0 @@ -/* - * D-Box 2 flash driver - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* partition_info gives details on the logical partitions that the split the - * 
single flash device into. If the size if zero we use up to the end of the - * device. */ -static struct mtd_partition partition_info[]= { - { - .name = "BR bootloader", - .size = 128 * 1024, - .offset = 0, - .mask_flags = MTD_WRITEABLE - }, - { - .name = "FLFS (U-Boot)", - .size = 128 * 1024, - .offset = MTDPART_OFS_APPEND, - .mask_flags = 0 - }, - { - .name = "Root (SquashFS)", - .size = 7040 * 1024, - .offset = MTDPART_OFS_APPEND, - .mask_flags = 0 - }, - { - .name = "var (JFFS2)", - .size = 896 * 1024, - .offset = MTDPART_OFS_APPEND, - .mask_flags = 0 - }, - { - .name = "Flash without bootloader", - .size = MTDPART_SIZ_FULL, - .offset = 128 * 1024, - .mask_flags = 0 - }, - { - .name = "Complete Flash", - .size = MTDPART_SIZ_FULL, - .offset = 0, - .mask_flags = MTD_WRITEABLE - } -}; - -#define NUM_PARTITIONS ARRAY_SIZE(partition_info) - -#define WINDOW_ADDR 0x10000000 -#define WINDOW_SIZE 0x800000 - -static struct mtd_info *mymtd; - - -struct map_info dbox2_flash_map = { - .name = "D-Box 2 flash memory", - .size = WINDOW_SIZE, - .bankwidth = 4, - .phys = WINDOW_ADDR, -}; - -static int __init init_dbox2_flash(void) -{ - printk(KERN_NOTICE "D-Box 2 flash driver (size->0x%X mem->0x%X)\n", WINDOW_SIZE, WINDOW_ADDR); - dbox2_flash_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE); - - if (!dbox2_flash_map.virt) { - printk("Failed to ioremap\n"); - return -EIO; - } - simple_map_init(&dbox2_flash_map); - - // Probe for dual Intel 28F320 or dual AMD - mymtd = do_map_probe("cfi_probe", &dbox2_flash_map); - if (!mymtd) { - // Probe for single Intel 28F640 - dbox2_flash_map.bankwidth = 2; - - mymtd = do_map_probe("cfi_probe", &dbox2_flash_map); - } - - if (mymtd) { - mymtd->owner = THIS_MODULE; - - /* Create MTD devices for each partition. */ - mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); - - return 0; - } - - iounmap((void *)dbox2_flash_map.virt); - return -ENXIO; -} - -static void __exit cleanup_dbox2_flash(void) -{ - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - if (dbox2_flash_map.virt) { - iounmap((void *)dbox2_flash_map.virt); - dbox2_flash_map.virt = 0; - } -} - -module_init(init_dbox2_flash); -module_exit(cleanup_dbox2_flash); - - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kári Davíðsson , Bastian Blank , Alexander Wild "); -MODULE_DESCRIPTION("MTD map driver for D-Box 2 board"); diff --git a/trunk/drivers/mtd/maps/dc21285.c b/trunk/drivers/mtd/maps/dc21285.c index 080f06053bd4..f8a7dd14cee0 100644 --- a/trunk/drivers/mtd/maps/dc21285.c +++ b/trunk/drivers/mtd/maps/dc21285.c @@ -143,9 +143,8 @@ static struct map_info dc21285_map = { .copy_from = dc21285_copy_from, }; - /* Partition stuff */ -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; +static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; static int __init init_dc21285(void) { diff --git a/trunk/drivers/mtd/maps/dilnetpc.c b/trunk/drivers/mtd/maps/dilnetpc.c deleted file mode 100644 index 3e393f0da823..000000000000 --- a/trunk/drivers/mtd/maps/dilnetpc.c +++ /dev/null @@ -1,496 +0,0 @@ -/* dilnetpc.c -- MTD map driver for SSV DIL/Net PC Boards "DNP" and "ADNP" - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA - * - * The DIL/Net PC is a tiny embedded PC board made by SSV Embedded Systems - * featuring the AMD Elan SC410 processor. There are two variants of this - * board: DNP/1486 and ADNP/1486. The DNP version has 2 megs of flash - * ROM (Intel 28F016S3) and 8 megs of DRAM, the ADNP version has 4 megs - * flash and 16 megs of RAM. - * For details, see http://www.ssv-embedded.de/ssv/pc104/p169.htm - * and http://www.ssv-embedded.de/ssv/pc104/p170.htm - */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include - -/* -** The DIL/NetPC keeps its BIOS in two distinct flash blocks. -** Destroying any of these blocks transforms the DNPC into -** a paperweight (albeit not a very useful one, considering -** it only weighs a few grams). -** -** Therefore, the BIOS blocks must never be erased or written to -** except by people who know exactly what they are doing (e.g. -** to install a BIOS update). These partitions are marked read-only -** by default, but can be made read/write by undefining -** DNPC_BIOS_BLOCKS_WRITEPROTECTED: -*/ -#define DNPC_BIOS_BLOCKS_WRITEPROTECTED - -/* -** The ID string (in ROM) is checked to determine whether we -** are running on a DNP/1486 or ADNP/1486 -*/ -#define BIOSID_BASE 0x000fe100 - -#define ID_DNPC "DNP1486" -#define ID_ADNP "ADNP1486" - -/* -** Address where the flash should appear in CPU space -*/ -#define FLASH_BASE 0x2000000 - -/* -** Chip Setup and Control (CSC) indexed register space -*/ -#define CSC_INDEX 0x22 -#define CSC_DATA 0x23 - -#define CSC_MMSWAR 0x30 /* MMS window C-F attributes register */ -#define CSC_MMSWDSR 0x31 /* MMS window C-F device select register */ - -#define CSC_RBWR 0xa7 /* GPIO Read-Back/Write Register B */ - -#define CSC_CR 0xd0 /* internal I/O device disable/Echo */ - /* Z-bus/configuration register */ - -#define CSC_PCCMDCR 0xf1 /* PC card mode and DMA control register */ - - -/* -** PC Card indexed register space: -*/ - -#define PCC_INDEX 0x3e0 -#define PCC_DATA 0x3e1 - -#define PCC_AWER_B 0x46 /* Socket B Address Window enable register */ -#define PCC_MWSAR_1_Lo 0x58 /* memory window 1 start address low register */ -#define PCC_MWSAR_1_Hi 0x59 /* memory window 1 start address high register */ -#define PCC_MWEAR_1_Lo 0x5A /* memory window 1 stop address low register */ -#define PCC_MWEAR_1_Hi 0x5B /* memory window 1 stop address high register */ -#define PCC_MWAOR_1_Lo 0x5C /* memory window 1 address offset low register */ -#define PCC_MWAOR_1_Hi 0x5D /* memory window 1 address offset high register */ - - -/* -** Access to SC4x0's Chip Setup and Control (CSC) -** and PC Card (PCC) indexed registers: -*/ -static inline void setcsc(int reg, unsigned char data) -{ - outb(reg, CSC_INDEX); - outb(data, CSC_DATA); -} - -static inline unsigned char getcsc(int reg) -{ - outb(reg, CSC_INDEX); - return(inb(CSC_DATA)); -} - -static inline void setpcc(int reg, unsigned char data) -{ - outb(reg, PCC_INDEX); - outb(data, PCC_DATA); -} - -static inline unsigned char getpcc(int reg) -{ - outb(reg, PCC_INDEX); - return(inb(PCC_DATA)); -} - 
- -/* -************************************************************ -** Enable access to DIL/NetPC's flash by mapping it into -** the SC4x0's MMS Window C. -************************************************************ -*/ -static void dnpc_map_flash(unsigned long flash_base, unsigned long flash_size) -{ - unsigned long flash_end = flash_base + flash_size - 1; - - /* - ** enable setup of MMS windows C-F: - */ - /* - enable PC Card indexed register space */ - setcsc(CSC_CR, getcsc(CSC_CR) | 0x2); - /* - set PC Card controller to operate in standard mode */ - setcsc(CSC_PCCMDCR, getcsc(CSC_PCCMDCR) & ~1); - - /* - ** Program base address and end address of window - ** where the flash ROM should appear in CPU address space - */ - setpcc(PCC_MWSAR_1_Lo, (flash_base >> 12) & 0xff); - setpcc(PCC_MWSAR_1_Hi, (flash_base >> 20) & 0x3f); - setpcc(PCC_MWEAR_1_Lo, (flash_end >> 12) & 0xff); - setpcc(PCC_MWEAR_1_Hi, (flash_end >> 20) & 0x3f); - - /* program offset of first flash location to appear in this window (0) */ - setpcc(PCC_MWAOR_1_Lo, ((0 - flash_base) >> 12) & 0xff); - setpcc(PCC_MWAOR_1_Hi, ((0 - flash_base)>> 20) & 0x3f); - - /* set attributes for MMS window C: non-cacheable, write-enabled */ - setcsc(CSC_MMSWAR, getcsc(CSC_MMSWAR) & ~0x11); - - /* select physical device ROMCS0 (i.e. flash) for MMS Window C */ - setcsc(CSC_MMSWDSR, getcsc(CSC_MMSWDSR) & ~0x03); - - /* enable memory window 1 */ - setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) | 0x02); - - /* now disable PC Card indexed register space again */ - setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2); -} - - -/* -************************************************************ -** Disable access to DIL/NetPC's flash by mapping it into -** the SC4x0's MMS Window C. -************************************************************ -*/ -static void dnpc_unmap_flash(void) -{ - /* - enable PC Card indexed register space */ - setcsc(CSC_CR, getcsc(CSC_CR) | 0x2); - - /* disable memory window 1 */ - setpcc(PCC_AWER_B, getpcc(PCC_AWER_B) & ~0x02); - - /* now disable PC Card indexed register space again */ - setcsc(CSC_CR, getcsc(CSC_CR) & ~0x2); -} - - - -/* -************************************************************ -** Enable/Disable VPP to write to flash -************************************************************ -*/ - -static DEFINE_SPINLOCK(dnpc_spin); -static int vpp_counter = 0; -/* -** This is what has to be done for the DNP board .. -*/ -static void dnp_set_vpp(struct map_info *not_used, int on) -{ - spin_lock_irq(&dnpc_spin); - - if (on) - { - if(++vpp_counter == 1) - setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x4); - } - else - { - if(--vpp_counter == 0) - setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x4); - else - BUG_ON(vpp_counter < 0); - } - spin_unlock_irq(&dnpc_spin); -} - -/* -** .. and this the ADNP version: -*/ -static void adnp_set_vpp(struct map_info *not_used, int on) -{ - spin_lock_irq(&dnpc_spin); - - if (on) - { - if(++vpp_counter == 1) - setcsc(CSC_RBWR, getcsc(CSC_RBWR) & ~0x8); - } - else - { - if(--vpp_counter == 0) - setcsc(CSC_RBWR, getcsc(CSC_RBWR) | 0x8); - else - BUG_ON(vpp_counter < 0); - } - spin_unlock_irq(&dnpc_spin); -} - - - -#define DNP_WINDOW_SIZE 0x00200000 /* DNP flash size is 2MiB */ -#define ADNP_WINDOW_SIZE 0x00400000 /* ADNP flash size is 4MiB */ -#define WINDOW_ADDR FLASH_BASE - -static struct map_info dnpc_map = { - .name = "ADNP Flash Bank", - .size = ADNP_WINDOW_SIZE, - .bankwidth = 1, - .set_vpp = adnp_set_vpp, - .phys = WINDOW_ADDR -}; - -/* -** The layout of the flash is somewhat "strange": -** -** 1. 
960 KiB (15 blocks) : Space for ROM Bootloader and user data -** 2. 64 KiB (1 block) : System BIOS -** 3. 960 KiB (15 blocks) : User Data (DNP model) or -** 3. 3008 KiB (47 blocks) : User Data (ADNP model) -** 4. 64 KiB (1 block) : System BIOS Entry -*/ - -static struct mtd_partition partition_info[]= -{ - { - .name = "ADNP boot", - .offset = 0, - .size = 0xf0000, - }, - { - .name = "ADNP system BIOS", - .offset = MTDPART_OFS_NXTBLK, - .size = 0x10000, -#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED - .mask_flags = MTD_WRITEABLE, -#endif - }, - { - .name = "ADNP file system", - .offset = MTDPART_OFS_NXTBLK, - .size = 0x2f0000, - }, - { - .name = "ADNP system BIOS entry", - .offset = MTDPART_OFS_NXTBLK, - .size = MTDPART_SIZ_FULL, -#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED - .mask_flags = MTD_WRITEABLE, -#endif - }, -}; - -#define NUM_PARTITIONS ARRAY_SIZE(partition_info) - -static struct mtd_info *mymtd; -static struct mtd_info *lowlvl_parts[NUM_PARTITIONS]; -static struct mtd_info *merged_mtd; - -/* -** "Highlevel" partition info: -** -** Using the MTD concat layer, we can re-arrange partitions to our -** liking: we construct a virtual MTD device by concatenating the -** partitions, specifying the sequence such that the boot block -** is immediately followed by the filesystem block (i.e. the stupid -** system BIOS block is mapped to a different place). When re-partitioning -** this concatenated MTD device, we can set the boot block size to -** an arbitrary (though erase block aligned) value i.e. not one that -** is dictated by the flash's physical layout. We can thus set the -** boot block to be e.g. 64 KB (which is fully sufficient if we want -** to boot an etherboot image) or to -say- 1.5 MB if we want to boot -** a large kernel image. In all cases, the remainder of the flash -** is available as file system space. -*/ - -static struct mtd_partition higlvl_partition_info[]= -{ - { - .name = "ADNP boot block", - .offset = 0, - .size = CONFIG_MTD_DILNETPC_BOOTSIZE, - }, - { - .name = "ADNP file system space", - .offset = MTDPART_OFS_NXTBLK, - .size = ADNP_WINDOW_SIZE-CONFIG_MTD_DILNETPC_BOOTSIZE-0x20000, - }, - { - .name = "ADNP system BIOS + BIOS Entry", - .offset = MTDPART_OFS_NXTBLK, - .size = MTDPART_SIZ_FULL, -#ifdef DNPC_BIOS_BLOCKS_WRITEPROTECTED - .mask_flags = MTD_WRITEABLE, -#endif - }, -}; - -#define NUM_HIGHLVL_PARTITIONS ARRAY_SIZE(higlvl_partition_info) - - -static int dnp_adnp_probe(void) -{ - char *biosid, rc = -1; - - biosid = (char*)ioremap(BIOSID_BASE, 16); - if(biosid) - { - if(!strcmp(biosid, ID_DNPC)) - rc = 1; /* this is a DNPC */ - else if(!strcmp(biosid, ID_ADNP)) - rc = 0; /* this is a ADNPC */ - } - iounmap((void *)biosid); - return(rc); -} - - -static int __init init_dnpc(void) -{ - int is_dnp; - - /* - ** determine hardware (DNP/ADNP/invalid) - */ - if((is_dnp = dnp_adnp_probe()) < 0) - return -ENXIO; - - /* - ** Things are set up for ADNP by default - ** -> modify all that needs to be different for DNP - */ - if(is_dnp) - { /* - ** Adjust window size, select correct set_vpp function. - ** The partitioning scheme is identical on both DNP - ** and ADNP except for the size of the third partition. - */ - int i; - dnpc_map.size = DNP_WINDOW_SIZE; - dnpc_map.set_vpp = dnp_set_vpp; - partition_info[2].size = 0xf0000; - - /* - ** increment all string pointers so the leading 'A' gets skipped, - ** thus turning all occurrences of "ADNP ..." into "DNP ..." 
- */ - ++dnpc_map.name; - for(i = 0; i < NUM_PARTITIONS; i++) - ++partition_info[i].name; - higlvl_partition_info[1].size = DNP_WINDOW_SIZE - - CONFIG_MTD_DILNETPC_BOOTSIZE - 0x20000; - for(i = 0; i < NUM_HIGHLVL_PARTITIONS; i++) - ++higlvl_partition_info[i].name; - } - - printk(KERN_NOTICE "DIL/Net %s flash: 0x%lx at 0x%llx\n", - is_dnp ? "DNPC" : "ADNP", dnpc_map.size, (unsigned long long)dnpc_map.phys); - - dnpc_map.virt = ioremap_nocache(dnpc_map.phys, dnpc_map.size); - - dnpc_map_flash(dnpc_map.phys, dnpc_map.size); - - if (!dnpc_map.virt) { - printk("Failed to ioremap_nocache\n"); - return -EIO; - } - simple_map_init(&dnpc_map); - - printk("FLASH virtual address: 0x%p\n", dnpc_map.virt); - - mymtd = do_map_probe("jedec_probe", &dnpc_map); - - if (!mymtd) - mymtd = do_map_probe("cfi_probe", &dnpc_map); - - /* - ** If flash probes fail, try to make flashes accessible - ** at least as ROM. Ajust erasesize in this case since - ** the default one (128M) will break our partitioning - */ - if (!mymtd) - if((mymtd = do_map_probe("map_rom", &dnpc_map))) - mymtd->erasesize = 0x10000; - - if (!mymtd) { - iounmap(dnpc_map.virt); - return -ENXIO; - } - - mymtd->owner = THIS_MODULE; - - /* - ** Supply pointers to lowlvl_parts[] array to add_mtd_partitions() - ** -> add_mtd_partitions() will _not_ register MTD devices for - ** the partitions, but will instead store pointers to the MTD - ** objects it creates into our lowlvl_parts[] array. - ** NOTE: we arrange the pointers such that the sequence of the - ** partitions gets re-arranged: partition #2 follows - ** partition #0. - */ - partition_info[0].mtdp = &lowlvl_parts[0]; - partition_info[1].mtdp = &lowlvl_parts[2]; - partition_info[2].mtdp = &lowlvl_parts[1]; - partition_info[3].mtdp = &lowlvl_parts[3]; - - mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); - - /* - ** now create a virtual MTD device by concatenating the for partitions - ** (in the sequence given by the lowlvl_parts[] array. - */ - merged_mtd = mtd_concat_create(lowlvl_parts, NUM_PARTITIONS, "(A)DNP Flash Concatenated"); - if(merged_mtd) - { /* - ** now partition the new device the way we want it. This time, - ** we do not supply mtd pointers in higlvl_partition_info, so - ** add_mtd_partitions() will register the devices. - */ - mtd_device_register(merged_mtd, higlvl_partition_info, - NUM_HIGHLVL_PARTITIONS); - } - - return 0; -} - -static void __exit cleanup_dnpc(void) -{ - if(merged_mtd) { - mtd_device_unregister(merged_mtd); - mtd_concat_destroy(merged_mtd); - } - - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - if (dnpc_map.virt) { - iounmap(dnpc_map.virt); - dnpc_unmap_flash(); - dnpc_map.virt = NULL; - } -} - -module_init(init_dnpc); -module_exit(cleanup_dnpc); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Sysgo Real-Time Solutions GmbH"); -MODULE_DESCRIPTION("MTD map driver for SSV DIL/NetPC DNP & ADNP"); diff --git a/trunk/drivers/mtd/maps/dmv182.c b/trunk/drivers/mtd/maps/dmv182.c deleted file mode 100644 index 6538ac675e00..000000000000 --- a/trunk/drivers/mtd/maps/dmv182.c +++ /dev/null @@ -1,146 +0,0 @@ - -/* - * drivers/mtd/maps/dmv182.c - * - * Flash map driver for the Dy4 SVME182 board - * - * Copyright 2003-2004, TimeSys Corporation - * - * Based on the SVME181 flash map, by Tom Nelson, Dot4, Inc. for TimeSys Corp. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * This driver currently handles only the 16MiB user flash bank 1 on the - * board. It does not provide access to bank 0 (contains the Dy4 FFW), bank 2 - * (VxWorks boot), or the optional 48MiB expansion flash. - * - * scott.wood@timesys.com: On the newer boards with 128MiB flash, it - * now supports the first 96MiB (the boot flash bank containing FFW - * is excluded). The VxWorks loader is in partition 1. - */ - -#define FLASH_BASE_ADDR 0xf0000000 -#define FLASH_BANK_SIZE (128*1024*1024) - -MODULE_AUTHOR("Scott Wood, TimeSys Corporation "); -MODULE_DESCRIPTION("User-programmable flash device on the Dy4 SVME182 board"); -MODULE_LICENSE("GPL"); - -static struct map_info svme182_map = { - .name = "Dy4 SVME182", - .bankwidth = 32, - .size = 128 * 1024 * 1024 -}; - -#define BOOTIMAGE_PART_SIZE ((6*1024*1024)-RESERVED_PART_SIZE) - -// Allow 6MiB for the kernel -#define NEW_BOOTIMAGE_PART_SIZE (6 * 1024 * 1024) -// Allow 1MiB for the bootloader -#define NEW_BOOTLOADER_PART_SIZE (1024 * 1024) -// Use the remaining 9MiB at the end of flash for the RFS -#define NEW_RFS_PART_SIZE (0x01000000 - NEW_BOOTLOADER_PART_SIZE - \ - NEW_BOOTIMAGE_PART_SIZE) - -static struct mtd_partition svme182_partitions[] = { - // The Lower PABS is only 128KiB, but the partition code doesn't - // like partitions that don't end on the largest erase block - // size of the device, even if all of the erase blocks in the - // partition are small ones. The hardware should prevent - // writes to the actual PABS areas. 
- { - name: "Lower PABS and CPU 0 bootloader or kernel", - size: 6*1024*1024, - offset: 0, - }, - { - name: "Root Filesystem", - size: 10*1024*1024, - offset: MTDPART_OFS_NXTBLK - }, - { - name: "CPU1 Bootloader", - size: 1024*1024, - offset: MTDPART_OFS_NXTBLK, - }, - { - name: "Extra", - size: 110*1024*1024, - offset: MTDPART_OFS_NXTBLK - }, - { - name: "Foundation Firmware and Upper PABS", - size: 1024*1024, - offset: MTDPART_OFS_NXTBLK, - mask_flags: MTD_WRITEABLE // read-only - } -}; - -static struct mtd_info *this_mtd; - -static int __init init_svme182(void) -{ - struct mtd_partition *partitions; - int num_parts = ARRAY_SIZE(svme182_partitions); - - partitions = svme182_partitions; - - svme182_map.virt = ioremap(FLASH_BASE_ADDR, svme182_map.size); - - if (svme182_map.virt == 0) { - printk("Failed to ioremap FLASH memory area.\n"); - return -EIO; - } - - simple_map_init(&svme182_map); - - this_mtd = do_map_probe("cfi_probe", &svme182_map); - if (!this_mtd) - { - iounmap((void *)svme182_map.virt); - return -ENXIO; - } - - printk(KERN_NOTICE "SVME182 flash device: %dMiB at 0x%08x\n", - this_mtd->size >> 20, FLASH_BASE_ADDR); - - this_mtd->owner = THIS_MODULE; - mtd_device_register(this_mtd, partitions, num_parts); - - return 0; -} - -static void __exit cleanup_svme182(void) -{ - if (this_mtd) - { - mtd_device_unregister(this_mtd); - map_destroy(this_mtd); - } - - if (svme182_map.virt) - { - iounmap((void *)svme182_map.virt); - svme182_map.virt = 0; - } - - return; -} - -module_init(init_svme182); -module_exit(cleanup_svme182); diff --git a/trunk/drivers/mtd/maps/gpio-addr-flash.c b/trunk/drivers/mtd/maps/gpio-addr-flash.c index 7b643de2500b..5ede28294f9e 100644 --- a/trunk/drivers/mtd/maps/gpio-addr-flash.c +++ b/trunk/drivers/mtd/maps/gpio-addr-flash.c @@ -157,7 +157,8 @@ static void gf_copy_to(struct map_info *map, unsigned long to, memcpy_toio(map->virt + (to % state->win_size), from, len); } -static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; +static const char * const part_probe_types[] = { + "cmdlinepart", "RedBoot", NULL }; /** * gpio_flash_probe() - setup a mapping for a GPIO assisted flash diff --git a/trunk/drivers/mtd/maps/h720x-flash.c b/trunk/drivers/mtd/maps/h720x-flash.c deleted file mode 100644 index 8ed6cb4529d8..000000000000 --- a/trunk/drivers/mtd/maps/h720x-flash.c +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Flash memory access on Hynix GMS30C7201/HMS30C7202 based - * evaluation boards - * - * (C) 2002 Jungjun Kim - * 2003 Thomas Gleixner - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -static struct mtd_info *mymtd; - -static struct map_info h720x_map = { - .name = "H720X", - .bankwidth = 4, - .size = H720X_FLASH_SIZE, - .phys = H720X_FLASH_PHYS, -}; - -static struct mtd_partition h720x_partitions[] = { - { - .name = "ArMon", - .size = 0x00080000, - .offset = 0, - .mask_flags = MTD_WRITEABLE - },{ - .name = "Env", - .size = 0x00040000, - .offset = 0x00080000, - .mask_flags = MTD_WRITEABLE - },{ - .name = "Kernel", - .size = 0x00180000, - .offset = 0x000c0000, - .mask_flags = MTD_WRITEABLE - },{ - .name = "Ramdisk", - .size = 0x00400000, - .offset = 0x00240000, - .mask_flags = MTD_WRITEABLE - },{ - .name = "jffs2", - .size = MTDPART_SIZ_FULL, - .offset = MTDPART_OFS_APPEND - } -}; - -#define NUM_PARTITIONS ARRAY_SIZE(h720x_partitions) - -/* - * Initialize FLASH support - */ -static int __init h720x_mtd_init(void) -{ - h720x_map.virt = ioremap(h720x_map.phys, h720x_map.size); 
- - if (!h720x_map.virt) { - printk(KERN_ERR "H720x-MTD: ioremap failed\n"); - return -EIO; - } - - simple_map_init(&h720x_map); - - // Probe for flash bankwidth 4 - printk (KERN_INFO "H720x-MTD probing 32bit FLASH\n"); - mymtd = do_map_probe("cfi_probe", &h720x_map); - if (!mymtd) { - printk (KERN_INFO "H720x-MTD probing 16bit FLASH\n"); - // Probe for bankwidth 2 - h720x_map.bankwidth = 2; - mymtd = do_map_probe("cfi_probe", &h720x_map); - } - - if (mymtd) { - mymtd->owner = THIS_MODULE; - - mtd_device_parse_register(mymtd, NULL, NULL, - h720x_partitions, NUM_PARTITIONS); - return 0; - } - - iounmap((void *)h720x_map.virt); - return -ENXIO; -} - -/* - * Cleanup - */ -static void __exit h720x_mtd_cleanup(void) -{ - - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - - if (h720x_map.virt) { - iounmap((void *)h720x_map.virt); - h720x_map.virt = 0; - } -} - - -module_init(h720x_mtd_init); -module_exit(h720x_mtd_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Thomas Gleixner "); -MODULE_DESCRIPTION("MTD map driver for Hynix evaluation boards"); diff --git a/trunk/drivers/mtd/maps/impa7.c b/trunk/drivers/mtd/maps/impa7.c index 834a06c56f56..49686744d93c 100644 --- a/trunk/drivers/mtd/maps/impa7.c +++ b/trunk/drivers/mtd/maps/impa7.c @@ -24,14 +24,12 @@ #define NUM_FLASHBANKS 2 #define BUSWIDTH 4 -/* can be { "cfi_probe", "jedec_probe", "map_rom", NULL } */ -#define PROBETYPES { "jedec_probe", NULL } - #define MSG_PREFIX "impA7:" /* prefix for our printk()'s */ #define MTDID "impa7-%d" /* for mtdparts= partitioning */ static struct mtd_info *impa7_mtd[NUM_FLASHBANKS]; +static const char * const rom_probe_types[] = { "jedec_probe", NULL }; static struct map_info impa7_map[NUM_FLASHBANKS] = { { @@ -60,8 +58,7 @@ static struct mtd_partition partitions[] = static int __init init_impa7(void) { - static const char *rom_probe_types[] = PROBETYPES; - const char **type; + const char * const *type; int i; static struct { u_long addr; u_long size; } pt[NUM_FLASHBANKS] = { { WINDOW_ADDR0, WINDOW_SIZE0 }, diff --git a/trunk/drivers/mtd/maps/intel_vr_nor.c b/trunk/drivers/mtd/maps/intel_vr_nor.c index b14053b25026..f581ac1cf022 100644 --- a/trunk/drivers/mtd/maps/intel_vr_nor.c +++ b/trunk/drivers/mtd/maps/intel_vr_nor.c @@ -82,9 +82,9 @@ static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) static int vr_nor_mtd_setup(struct vr_nor_mtd *p) { - static const char *probe_types[] = + static const char * const probe_types[] = { "cfi_probe", "jedec_probe", NULL }; - const char **type; + const char * const *type; for (type = probe_types; !p->info && *type; type++) p->info = do_map_probe(*type, &p->map); diff --git a/trunk/drivers/mtd/maps/ixp2000.c b/trunk/drivers/mtd/maps/ixp2000.c deleted file mode 100644 index 4a41ced0f710..000000000000 --- a/trunk/drivers/mtd/maps/ixp2000.c +++ /dev/null @@ -1,253 +0,0 @@ -/* - * drivers/mtd/maps/ixp2000.c - * - * Mapping for the Intel XScale IXP2000 based systems - * - * Copyright (C) 2002 Intel Corp. - * Copyright (C) 2003-2004 MontaVista Software, Inc. - * - * Original Author: Naeem M Afzal - * Maintainer: Deepak Saxena - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include -#include -#include - -#include - -struct ixp2000_flash_info { - struct mtd_info *mtd; - struct map_info map; - struct resource *res; -}; - -static inline unsigned long flash_bank_setup(struct map_info *map, unsigned long ofs) -{ - unsigned long (*set_bank)(unsigned long) = - (unsigned long(*)(unsigned long))map->map_priv_2; - - return (set_bank ? set_bank(ofs) : ofs); -} - -#ifdef __ARMEB__ -/* - * Rev A0 and A1 of IXP2400 silicon have a broken addressing unit which - * causes the lower address bits to be XORed with 0x11 on 8 bit accesses - * and XORed with 0x10 on 16 bit accesses. See the spec update, erratum 44. - */ -static int erratum44_workaround = 0; - -static inline unsigned long address_fix8_write(unsigned long addr) -{ - if (erratum44_workaround) { - return (addr ^ 3); - } - return addr; -} -#else - -#define address_fix8_write(x) (x) -#endif - -static map_word ixp2000_flash_read8(struct map_info *map, unsigned long ofs) -{ - map_word val; - - val.x[0] = *((u8 *)(map->map_priv_1 + flash_bank_setup(map, ofs))); - return val; -} - -/* - * We can't use the standard memcpy due to the broken SlowPort - * address translation on rev A0 and A1 silicon and the fact that - * we have banked flash. - */ -static void ixp2000_flash_copy_from(struct map_info *map, void *to, - unsigned long from, ssize_t len) -{ - from = flash_bank_setup(map, from); - while(len--) - *(__u8 *) to++ = *(__u8 *)(map->map_priv_1 + from++); -} - -static void ixp2000_flash_write8(struct map_info *map, map_word d, unsigned long ofs) -{ - *(__u8 *) (address_fix8_write(map->map_priv_1 + - flash_bank_setup(map, ofs))) = d.x[0]; -} - -static void ixp2000_flash_copy_to(struct map_info *map, unsigned long to, - const void *from, ssize_t len) -{ - to = flash_bank_setup(map, to); - while(len--) { - unsigned long tmp = address_fix8_write(map->map_priv_1 + to++); - *(__u8 *)(tmp) = *(__u8 *)(from++); - } -} - - -static int ixp2000_flash_remove(struct platform_device *dev) -{ - struct flash_platform_data *plat = dev->dev.platform_data; - struct ixp2000_flash_info *info = platform_get_drvdata(dev); - - platform_set_drvdata(dev, NULL); - - if(!info) - return 0; - - if (info->mtd) { - mtd_device_unregister(info->mtd); - map_destroy(info->mtd); - } - if (info->map.map_priv_1) - iounmap((void *) info->map.map_priv_1); - - if (info->res) { - release_resource(info->res); - kfree(info->res); - } - - if (plat->exit) - plat->exit(); - - return 0; -} - - -static int ixp2000_flash_probe(struct platform_device *dev) -{ - static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; - struct ixp2000_flash_data *ixp_data = dev->dev.platform_data; - struct flash_platform_data *plat; - struct ixp2000_flash_info *info; - unsigned long window_size; - int err = -1; - - if (!ixp_data) - return -ENODEV; - - plat = ixp_data->platform_data; - if (!plat) - return -ENODEV; - - window_size = resource_size(dev->resource); - dev_info(&dev->dev, "Probe of IXP2000 flash(%d banks x %dMiB)\n", - ixp_data->nr_banks, ((u32)window_size >> 20)); - - if (plat->width != 1) { - dev_err(&dev->dev, "IXP2000 MTD map only supports 8-bit mode, asking for %d\n", - plat->width * 8); - return -EIO; - } - - info = kzalloc(sizeof(struct ixp2000_flash_info), GFP_KERNEL); - if(!info) { - err = -ENOMEM; - goto Error; - } - - platform_set_drvdata(dev, info); - - /* - * Tell the MTD layer we're not 1:1 mapped so that it does - * not 
attempt to do a direct access on us. - */ - info->map.phys = NO_XIP; - - info->map.size = ixp_data->nr_banks * window_size; - info->map.bankwidth = 1; - - /* - * map_priv_2 is used to store a ptr to the bank_setup routine - */ - info->map.map_priv_2 = (unsigned long) ixp_data->bank_setup; - - info->map.name = dev_name(&dev->dev); - info->map.read = ixp2000_flash_read8; - info->map.write = ixp2000_flash_write8; - info->map.copy_from = ixp2000_flash_copy_from; - info->map.copy_to = ixp2000_flash_copy_to; - - info->res = request_mem_region(dev->resource->start, - resource_size(dev->resource), - dev_name(&dev->dev)); - if (!info->res) { - dev_err(&dev->dev, "Could not reserve memory region\n"); - err = -ENOMEM; - goto Error; - } - - info->map.map_priv_1 = - (unsigned long)ioremap(dev->resource->start, - resource_size(dev->resource)); - if (!info->map.map_priv_1) { - dev_err(&dev->dev, "Failed to ioremap flash region\n"); - err = -EIO; - goto Error; - } - -#if defined(__ARMEB__) - /* - * Enable erratum 44 workaround for NPUs with broken slowport - */ - - erratum44_workaround = ixp2000_has_broken_slowport(); - dev_info(&dev->dev, "Erratum 44 workaround %s\n", - erratum44_workaround ? "enabled" : "disabled"); -#endif - - info->mtd = do_map_probe(plat->map_name, &info->map); - if (!info->mtd) { - dev_err(&dev->dev, "map_probe failed\n"); - err = -ENXIO; - goto Error; - } - info->mtd->owner = THIS_MODULE; - - err = mtd_device_parse_register(info->mtd, probes, NULL, NULL, 0); - if (err) - goto Error; - - return 0; - -Error: - ixp2000_flash_remove(dev); - return err; -} - -static struct platform_driver ixp2000_flash_driver = { - .probe = ixp2000_flash_probe, - .remove = ixp2000_flash_remove, - .driver = { - .name = "IXP2000-Flash", - .owner = THIS_MODULE, - }, -}; - -module_platform_driver(ixp2000_flash_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Deepak Saxena "); -MODULE_ALIAS("platform:IXP2000-Flash"); diff --git a/trunk/drivers/mtd/maps/ixp4xx.c b/trunk/drivers/mtd/maps/ixp4xx.c index e864fc6c58f9..52b3410a105c 100644 --- a/trunk/drivers/mtd/maps/ixp4xx.c +++ b/trunk/drivers/mtd/maps/ixp4xx.c @@ -148,7 +148,7 @@ struct ixp4xx_flash_info { struct resource *res; }; -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; +static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; static int ixp4xx_flash_remove(struct platform_device *dev) { diff --git a/trunk/drivers/mtd/maps/lantiq-flash.c b/trunk/drivers/mtd/maps/lantiq-flash.c index d1da6ede3845..d7ac65d1d569 100644 --- a/trunk/drivers/mtd/maps/lantiq-flash.c +++ b/trunk/drivers/mtd/maps/lantiq-flash.c @@ -46,8 +46,7 @@ struct ltq_mtd { }; static const char ltq_map_name[] = "ltq_nor"; -static const char *ltq_probe_types[] = { - "cmdlinepart", "ofpart", NULL }; +static const char * const ltq_probe_types[] = { "cmdlinepart", "ofpart", NULL }; static map_word ltq_read16(struct map_info *map, unsigned long adr) diff --git a/trunk/drivers/mtd/maps/mbx860.c b/trunk/drivers/mtd/maps/mbx860.c deleted file mode 100644 index 93fa56c33003..000000000000 --- a/trunk/drivers/mtd/maps/mbx860.c +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Handle mapping of the flash on MBX860 boards - * - * Author: Anton Todorov - * Copyright: (C) 2001 Emness Technology - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include -#include -#include -#include -#include -#include -#include -#include - - -#define WINDOW_ADDR 0xfe000000 -#define WINDOW_SIZE 0x00200000 - -/* Flash / Partition sizing */ -#define MAX_SIZE_KiB 8192 -#define BOOT_PARTITION_SIZE_KiB 512 -#define KERNEL_PARTITION_SIZE_KiB 5632 -#define APP_PARTITION_SIZE_KiB 2048 - -#define NUM_PARTITIONS 3 - -/* partition_info gives details on the logical partitions that the split the - * single flash device into. If the size if zero we use up to the end of the - * device. */ -static struct mtd_partition partition_info[]={ - { .name = "MBX flash BOOT partition", - .offset = 0, - .size = BOOT_PARTITION_SIZE_KiB*1024 }, - { .name = "MBX flash DATA partition", - .offset = BOOT_PARTITION_SIZE_KiB*1024, - .size = (KERNEL_PARTITION_SIZE_KiB)*1024 }, - { .name = "MBX flash APPLICATION partition", - .offset = (BOOT_PARTITION_SIZE_KiB+KERNEL_PARTITION_SIZE_KiB)*1024 } -}; - - -static struct mtd_info *mymtd; - -struct map_info mbx_map = { - .name = "MBX flash", - .size = WINDOW_SIZE, - .phys = WINDOW_ADDR, - .bankwidth = 4, -}; - -static int __init init_mbx(void) -{ - printk(KERN_NOTICE "Motorola MBX flash device: 0x%x at 0x%x\n", WINDOW_SIZE*4, WINDOW_ADDR); - mbx_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); - - if (!mbx_map.virt) { - printk("Failed to ioremap\n"); - return -EIO; - } - simple_map_init(&mbx_map); - - mymtd = do_map_probe("jedec_probe", &mbx_map); - if (mymtd) { - mymtd->owner = THIS_MODULE; - mtd_device_register(mymtd, NULL, 0); - mtd_device_register(mymtd, partition_info, NUM_PARTITIONS); - return 0; - } - - iounmap((void *)mbx_map.virt); - return -ENXIO; -} - -static void __exit cleanup_mbx(void) -{ - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - if (mbx_map.virt) { - iounmap((void *)mbx_map.virt); - mbx_map.virt = 0; - } -} - -module_init(init_mbx); -module_exit(cleanup_mbx); - -MODULE_AUTHOR("Anton Todorov "); -MODULE_DESCRIPTION("MTD map driver for Motorola MBX860 board"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/mtd/maps/pci.c b/trunk/drivers/mtd/maps/pci.c index c3aebd5da5d6..c2604f8b2a5e 100644 --- a/trunk/drivers/mtd/maps/pci.c +++ b/trunk/drivers/mtd/maps/pci.c @@ -283,8 +283,7 @@ static int mtd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) if (err) goto release; - /* tsk - do_map_probe should take const char * */ - mtd = do_map_probe((char *)info->map_name, &map->map); + mtd = do_map_probe(info->map_name, &map->map); err = -ENODEV; if (!mtd) goto release; diff --git a/trunk/drivers/mtd/maps/physmap.c b/trunk/drivers/mtd/maps/physmap.c index 21b0b713cacb..e7a592c8c765 100644 --- a/trunk/drivers/mtd/maps/physmap.c +++ b/trunk/drivers/mtd/maps/physmap.c @@ -87,21 +87,18 @@ static void physmap_set_vpp(struct map_info *map, int state) spin_unlock_irqrestore(&info->vpp_lock, flags); } -static const char *rom_probe_types[] = { - "cfi_probe", - "jedec_probe", - "qinfo_probe", - "map_rom", - NULL }; -static const char *part_probe_types[] = { "cmdlinepart", "RedBoot", "afs", - NULL }; +static const char * const rom_probe_types[] = { + "cfi_probe", "jedec_probe", "qinfo_probe", "map_rom", NULL }; + +static const char * const part_probe_types[] = { + "cmdlinepart", "RedBoot", "afs", NULL }; static int physmap_flash_probe(struct platform_device *dev) { struct physmap_flash_data *physmap_data; struct physmap_flash_info *info; - const char **probe_type; - const char **part_types; + const char * const *probe_type; + const char * const *part_types; int err = 0; int i; int 
devices_found = 0; diff --git a/trunk/drivers/mtd/maps/physmap_of.c b/trunk/drivers/mtd/maps/physmap_of.c index 363939dfad05..d11109762ac5 100644 --- a/trunk/drivers/mtd/maps/physmap_of.c +++ b/trunk/drivers/mtd/maps/physmap_of.c @@ -71,6 +71,9 @@ static int of_flash_remove(struct platform_device *dev) return 0; } +static const char * const rom_probe_types[] = { + "cfi_probe", "jedec_probe", "map_rom" }; + /* Helper function to handle probing of the obsolete "direct-mapped" * compatible binding, which has an extra "probe-type" property * describing the type of flash probe necessary. */ @@ -80,8 +83,6 @@ static struct mtd_info *obsolete_probe(struct platform_device *dev, struct device_node *dp = dev->dev.of_node; const char *of_probe; struct mtd_info *mtd; - static const char *rom_probe_types[] - = { "cfi_probe", "jedec_probe", "map_rom"}; int i; dev_warn(&dev->dev, "Device tree uses obsolete \"direct-mapped\" " @@ -111,9 +112,10 @@ static struct mtd_info *obsolete_probe(struct platform_device *dev, specifies the list of partition probers to use. If none is given then the default is use. These take precedence over other device tree information. */ -static const char *part_probe_types_def[] = { "cmdlinepart", "RedBoot", - "ofpart", "ofoldpart", NULL }; -static const char **of_get_probes(struct device_node *dp) +static const char * const part_probe_types_def[] = { + "cmdlinepart", "RedBoot", "ofpart", "ofoldpart", NULL }; + +static const char * const *of_get_probes(struct device_node *dp) { const char *cp; int cplen; @@ -142,7 +144,7 @@ static const char **of_get_probes(struct device_node *dp) return res; } -static void of_free_probes(const char **probes) +static void of_free_probes(const char * const *probes) { if (probes != part_probe_types_def) kfree(probes); @@ -151,7 +153,7 @@ static void of_free_probes(const char **probes) static struct of_device_id of_flash_match[]; static int of_flash_probe(struct platform_device *dev) { - const char **part_probe_types; + const char * const *part_probe_types; const struct of_device_id *match; struct device_node *dp = dev->dev.of_node; struct resource res; diff --git a/trunk/drivers/mtd/maps/plat-ram.c b/trunk/drivers/mtd/maps/plat-ram.c index 2de66b062f0d..71fdda29594b 100644 --- a/trunk/drivers/mtd/maps/plat-ram.c +++ b/trunk/drivers/mtd/maps/plat-ram.c @@ -199,7 +199,7 @@ static int platram_probe(struct platform_device *pdev) * supplied by the platform_data struct */ if (pdata->map_probes) { - const char **map_probes = pdata->map_probes; + const char * const *map_probes = pdata->map_probes; for ( ; !info->mtd && *map_probes; map_probes++) info->mtd = do_map_probe(*map_probes , &info->map); diff --git a/trunk/drivers/mtd/maps/pxa2xx-flash.c b/trunk/drivers/mtd/maps/pxa2xx-flash.c index 43e3dbb976d9..acb1dbcf7ce5 100644 --- a/trunk/drivers/mtd/maps/pxa2xx-flash.c +++ b/trunk/drivers/mtd/maps/pxa2xx-flash.c @@ -45,9 +45,7 @@ struct pxa2xx_flash_info { struct map_info map; }; - -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; - +static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; static int pxa2xx_flash_probe(struct platform_device *pdev) { diff --git a/trunk/drivers/mtd/maps/rbtx4939-flash.c b/trunk/drivers/mtd/maps/rbtx4939-flash.c index 49c3fe715eee..ac02fbffd6df 100644 --- a/trunk/drivers/mtd/maps/rbtx4939-flash.c +++ b/trunk/drivers/mtd/maps/rbtx4939-flash.c @@ -45,14 +45,15 @@ static int rbtx4939_flash_remove(struct platform_device *dev) return 0; } -static const char *rom_probe_types[] = { "cfi_probe", 
"jedec_probe", NULL }; +static const char * const rom_probe_types[] = { + "cfi_probe", "jedec_probe", NULL }; static int rbtx4939_flash_probe(struct platform_device *dev) { struct rbtx4939_flash_data *pdata; struct rbtx4939_flash_info *info; struct resource *res; - const char **probe_type; + const char * const *probe_type; int err = 0; unsigned long size; diff --git a/trunk/drivers/mtd/maps/rpxlite.c b/trunk/drivers/mtd/maps/rpxlite.c deleted file mode 100644 index ed88225bf667..000000000000 --- a/trunk/drivers/mtd/maps/rpxlite.c +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Handle mapping of the flash on the RPX Lite and CLLF boards - */ - -#include -#include -#include -#include -#include -#include -#include - - -#define WINDOW_ADDR 0xfe000000 -#define WINDOW_SIZE 0x800000 - -static struct mtd_info *mymtd; - -static struct map_info rpxlite_map = { - .name = "RPX", - .size = WINDOW_SIZE, - .bankwidth = 4, - .phys = WINDOW_ADDR, -}; - -static int __init init_rpxlite(void) -{ - printk(KERN_NOTICE "RPX Lite or CLLF flash device: %x at %x\n", WINDOW_SIZE*4, WINDOW_ADDR); - rpxlite_map.virt = ioremap(WINDOW_ADDR, WINDOW_SIZE * 4); - - if (!rpxlite_map.virt) { - printk("Failed to ioremap\n"); - return -EIO; - } - simple_map_init(&rpxlite_map); - mymtd = do_map_probe("cfi_probe", &rpxlite_map); - if (mymtd) { - mymtd->owner = THIS_MODULE; - mtd_device_register(mymtd, NULL, 0); - return 0; - } - - iounmap((void *)rpxlite_map.virt); - return -ENXIO; -} - -static void __exit cleanup_rpxlite(void) -{ - if (mymtd) { - mtd_device_unregister(mymtd); - map_destroy(mymtd); - } - if (rpxlite_map.virt) { - iounmap((void *)rpxlite_map.virt); - rpxlite_map.virt = 0; - } -} - -module_init(init_rpxlite); -module_exit(cleanup_rpxlite); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Arnold Christensen "); -MODULE_DESCRIPTION("MTD map driver for RPX Lite and CLLF boards"); diff --git a/trunk/drivers/mtd/maps/sa1100-flash.c b/trunk/drivers/mtd/maps/sa1100-flash.c index f694417cf7e6..29e3dcaa1d90 100644 --- a/trunk/drivers/mtd/maps/sa1100-flash.c +++ b/trunk/drivers/mtd/maps/sa1100-flash.c @@ -244,7 +244,7 @@ static struct sa_info *sa1100_setup_mtd(struct platform_device *pdev, return ERR_PTR(ret); } -static const char *part_probes[] = { "cmdlinepart", "RedBoot", NULL }; +static const char * const part_probes[] = { "cmdlinepart", "RedBoot", NULL }; static int sa1100_mtd_probe(struct platform_device *pdev) { diff --git a/trunk/drivers/mtd/maps/solutionengine.c b/trunk/drivers/mtd/maps/solutionengine.c index 9d900ada6708..83a7a7091562 100644 --- a/trunk/drivers/mtd/maps/solutionengine.c +++ b/trunk/drivers/mtd/maps/solutionengine.c @@ -31,7 +31,7 @@ struct map_info soleng_flash_map = { .bankwidth = 4, }; -static const char *probes[] = { "RedBoot", "cmdlinepart", NULL }; +static const char * const probes[] = { "RedBoot", "cmdlinepart", NULL }; #ifdef CONFIG_MTD_SUPERH_RESERVE static struct mtd_partition superh_se_partitions[] = { diff --git a/trunk/drivers/mtd/maps/tqm8xxl.c b/trunk/drivers/mtd/maps/tqm8xxl.c deleted file mode 100644 index d78587990e7e..000000000000 --- a/trunk/drivers/mtd/maps/tqm8xxl.c +++ /dev/null @@ -1,249 +0,0 @@ -/* - * Handle mapping of the flash memory access routines - * on TQM8xxL based devices. 
- * - * based on rpxlite.c - * - * Copyright(C) 2001 Kirk Lee - * - * This code is GPLed - * - */ - -/* - * According to TQM8xxL hardware manual, TQM8xxL series have - * following flash memory organisations: - * | capacity | | chip type | | bank0 | | bank1 | - * 2MiB 512Kx16 2MiB 0 - * 4MiB 1Mx16 4MiB 0 - * 8MiB 1Mx16 4MiB 4MiB - * Thus, we choose CONFIG_MTD_CFI_I2 & CONFIG_MTD_CFI_B4 at - * kernel configuration. - */ -#include -#include -#include -#include -#include - -#include -#include -#include - -#include - -#define FLASH_ADDR 0x40000000 -#define FLASH_SIZE 0x00800000 -#define FLASH_BANK_MAX 4 - -// trivial struct to describe partition information -struct mtd_part_def -{ - int nums; - unsigned char *type; - struct mtd_partition* mtd_part; -}; - -//static struct mtd_info *mymtd; -static struct mtd_info* mtd_banks[FLASH_BANK_MAX]; -static struct map_info* map_banks[FLASH_BANK_MAX]; -static struct mtd_part_def part_banks[FLASH_BANK_MAX]; -static unsigned long num_banks; -static void __iomem *start_scan_addr; - -/* - * Here are partition information for all known TQM8xxL series devices. - * See include/linux/mtd/partitions.h for definition of the mtd_partition - * structure. - * - * The *_max_flash_size is the maximum possible mapped flash size which - * is not necessarily the actual flash size. It must correspond to the - * value specified in the mapping definition defined by the - * "struct map_desc *_io_desc" for the corresponding machine. - */ - -/* Currently, TQM8xxL has up to 8MiB flash */ -static unsigned long tqm8xxl_max_flash_size = 0x00800000; - -/* partition definition for first flash bank - * (cf. "drivers/char/flash_config.c") - */ -static struct mtd_partition tqm8xxl_partitions[] = { - { - .name = "ppcboot", - .offset = 0x00000000, - .size = 0x00020000, /* 128KB */ - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, - { - .name = "kernel", /* default kernel image */ - .offset = 0x00020000, - .size = 0x000e0000, - .mask_flags = MTD_WRITEABLE, /* force read-only */ - }, - { - .name = "user", - .offset = 0x00100000, - .size = 0x00100000, - }, - { - .name = "initrd", - .offset = 0x00200000, - .size = 0x00200000, - } -}; -/* partition definition for second flash bank */ -static struct mtd_partition tqm8xxl_fs_partitions[] = { - { - .name = "cramfs", - .offset = 0x00000000, - .size = 0x00200000, - }, - { - .name = "jffs", - .offset = 0x00200000, - .size = 0x00200000, - //.size = MTDPART_SIZ_FULL, - } -}; - -static int __init init_tqm_mtd(void) -{ - int idx = 0, ret = 0; - unsigned long flash_addr, flash_size, mtd_size = 0; - /* pointer to TQM8xxL board info data */ - bd_t *bd = (bd_t *)__res; - - flash_addr = bd->bi_flashstart; - flash_size = bd->bi_flashsize; - - //request maximum flash size address space - start_scan_addr = ioremap(flash_addr, flash_size); - if (!start_scan_addr) { - printk(KERN_WARNING "%s:Failed to ioremap address:0x%x\n", __func__, flash_addr); - return -EIO; - } - - for (idx = 0 ; idx < FLASH_BANK_MAX ; idx++) { - if(mtd_size >= flash_size) - break; - - printk(KERN_INFO "%s: chip probing count %d\n", __func__, idx); - - map_banks[idx] = kzalloc(sizeof(struct map_info), GFP_KERNEL); - if(map_banks[idx] == NULL) { - ret = -ENOMEM; - /* FIXME: What if some MTD devices were probed already? */ - goto error_mem; - } - - map_banks[idx]->name = kmalloc(16, GFP_KERNEL); - - if (!map_banks[idx]->name) { - ret = -ENOMEM; - /* FIXME: What if some MTD devices were probed already? 
*/ - goto error_mem; - } - sprintf(map_banks[idx]->name, "TQM8xxL%d", idx); - - map_banks[idx]->size = flash_size; - map_banks[idx]->bankwidth = 4; - - simple_map_init(map_banks[idx]); - - map_banks[idx]->virt = start_scan_addr; - map_banks[idx]->phys = flash_addr; - /* FIXME: This looks utterly bogus, but I'm trying to - preserve the behaviour of the original (shown here)... - - map_banks[idx]->map_priv_1 = - start_scan_addr + ((idx > 0) ? - (mtd_banks[idx-1] ? mtd_banks[idx-1]->size : 0) : 0); - */ - - if (idx && mtd_banks[idx-1]) { - map_banks[idx]->virt += mtd_banks[idx-1]->size; - map_banks[idx]->phys += mtd_banks[idx-1]->size; - } - - //start to probe flash chips - mtd_banks[idx] = do_map_probe("cfi_probe", map_banks[idx]); - - if (mtd_banks[idx]) { - mtd_banks[idx]->owner = THIS_MODULE; - mtd_size += mtd_banks[idx]->size; - num_banks++; - - printk(KERN_INFO "%s: bank%d, name:%s, size:%dbytes \n", __func__, num_banks, - mtd_banks[idx]->name, mtd_banks[idx]->size); - } - } - - /* no supported flash chips found */ - if (!num_banks) { - printk(KERN_NOTICE "TQM8xxL: No support flash chips found!\n"); - ret = -ENXIO; - goto error_mem; - } - - /* - * Select Static partition definitions - */ - part_banks[0].mtd_part = tqm8xxl_partitions; - part_banks[0].type = "Static image"; - part_banks[0].nums = ARRAY_SIZE(tqm8xxl_partitions); - - part_banks[1].mtd_part = tqm8xxl_fs_partitions; - part_banks[1].type = "Static file system"; - part_banks[1].nums = ARRAY_SIZE(tqm8xxl_fs_partitions); - - for(idx = 0; idx < num_banks ; idx++) { - if (part_banks[idx].nums == 0) - printk(KERN_NOTICE "TQM flash%d: no partition info available, registering whole flash at once\n", idx); - else - printk(KERN_NOTICE "TQM flash%d: Using %s partition definition\n", - idx, part_banks[idx].type); - mtd_device_register(mtd_banks[idx], part_banks[idx].mtd_part, - part_banks[idx].nums); - } - return 0; -error_mem: - for(idx = 0 ; idx < FLASH_BANK_MAX ; idx++) { - if(map_banks[idx] != NULL) { - kfree(map_banks[idx]->name); - map_banks[idx]->name = NULL; - kfree(map_banks[idx]); - map_banks[idx] = NULL; - } - } -error: - iounmap(start_scan_addr); - return ret; -} - -static void __exit cleanup_tqm_mtd(void) -{ - unsigned int idx = 0; - for(idx = 0 ; idx < num_banks ; idx++) { - /* destroy mtd_info previously allocated */ - if (mtd_banks[idx]) { - mtd_device_unregister(mtd_banks[idx]); - map_destroy(mtd_banks[idx]); - } - /* release map_info not used anymore */ - kfree(map_banks[idx]->name); - kfree(map_banks[idx]); - } - - if (start_scan_addr) { - iounmap(start_scan_addr); - start_scan_addr = 0; - } -} - -module_init(init_tqm_mtd); -module_exit(cleanup_tqm_mtd); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Kirk Lee "); -MODULE_DESCRIPTION("MTD map driver for TQM8xxL boards"); diff --git a/trunk/drivers/mtd/maps/tsunami_flash.c b/trunk/drivers/mtd/maps/tsunami_flash.c index 1de390e1c2fb..da2cdb5fd6db 100644 --- a/trunk/drivers/mtd/maps/tsunami_flash.c +++ b/trunk/drivers/mtd/maps/tsunami_flash.c @@ -82,11 +82,12 @@ static void __exit cleanup_tsunami_flash(void) tsunami_flash_mtd = 0; } +static const char * const rom_probe_types[] = { + "cfi_probe", "jedec_probe", "map_rom", NULL }; static int __init init_tsunami_flash(void) { - static const char *rom_probe_types[] = { "cfi_probe", "jedec_probe", "map_rom", NULL }; - char **type; + const char * const *type; tsunami_tig_writeb(FLASH_ENABLE_BYTE, FLASH_ENABLE_PORT); diff --git a/trunk/drivers/mtd/mtdchar.c b/trunk/drivers/mtd/mtdchar.c index dc571ebc1aa0..c719879284bd 100644 --- 
a/trunk/drivers/mtd/mtdchar.c +++ b/trunk/drivers/mtd/mtdchar.c @@ -38,6 +38,8 @@ #include +#include "mtdcore.h" + static DEFINE_MUTEX(mtd_mutex); /* @@ -365,37 +367,35 @@ static void mtdchar_erase_callback (struct erase_info *instr) wake_up((wait_queue_head_t *)instr->priv); } -#ifdef CONFIG_HAVE_MTD_OTP static int otp_select_filemode(struct mtd_file_info *mfi, int mode) { struct mtd_info *mtd = mfi->mtd; size_t retlen; - int ret = 0; - - /* - * Make a fake call to mtd_read_fact_prot_reg() to check if OTP - * operations are supported. - */ - if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == -EOPNOTSUPP) - return -EOPNOTSUPP; switch (mode) { case MTD_OTP_FACTORY: + if (mtd_read_fact_prot_reg(mtd, -1, 0, &retlen, NULL) == + -EOPNOTSUPP) + return -EOPNOTSUPP; + mfi->mode = MTD_FILE_MODE_OTP_FACTORY; break; case MTD_OTP_USER: + if (mtd_read_user_prot_reg(mtd, -1, 0, &retlen, NULL) == + -EOPNOTSUPP) + return -EOPNOTSUPP; + mfi->mode = MTD_FILE_MODE_OTP_USER; break; - default: - ret = -EINVAL; case MTD_OTP_OFF: + mfi->mode = MTD_FILE_MODE_NORMAL; break; + default: + return -EINVAL; } - return ret; + + return 0; } -#else -# define otp_select_filemode(f,m) -EOPNOTSUPP -#endif static int mtdchar_writeoob(struct file *file, struct mtd_info *mtd, uint64_t start, uint32_t length, void __user *ptr, @@ -888,7 +888,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) break; } -#ifdef CONFIG_HAVE_MTD_OTP case OTPSELECT: { int mode; @@ -944,7 +943,6 @@ static int mtdchar_ioctl(struct file *file, u_int cmd, u_long arg) ret = mtd_lock_user_prot_reg(mtd, oinfo.start, oinfo.length); break; } -#endif /* This ioctl is being deprecated - it truncates the ECC layout */ case ECCGETLAYOUT: @@ -1185,23 +1183,25 @@ static struct file_system_type mtd_inodefs_type = { }; MODULE_ALIAS_FS("mtd_inodefs"); -static int __init init_mtdchar(void) +int __init init_mtdchar(void) { int ret; ret = __register_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd", &mtd_fops); if (ret < 0) { - pr_notice("Can't allocate major number %d for " - "Memory Technology Devices.\n", MTD_CHAR_MAJOR); + pr_err("Can't allocate major number %d for MTD\n", + MTD_CHAR_MAJOR); return ret; } ret = register_filesystem(&mtd_inodefs_type); if (ret) { - pr_notice("Can't register mtd_inodefs filesystem: %d\n", ret); + pr_err("Can't register mtd_inodefs filesystem, error %d\n", + ret); goto err_unregister_chdev; } + return ret; err_unregister_chdev: @@ -1209,18 +1209,10 @@ static int __init init_mtdchar(void) return ret; } -static void __exit cleanup_mtdchar(void) +void __exit cleanup_mtdchar(void) { unregister_filesystem(&mtd_inodefs_type); __unregister_chrdev(MTD_CHAR_MAJOR, 0, 1 << MINORBITS, "mtd"); } -module_init(init_mtdchar); -module_exit(cleanup_mtdchar); - -MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("David Woodhouse "); -MODULE_DESCRIPTION("Direct character-device access to MTD devices"); MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR); diff --git a/trunk/drivers/mtd/mtdcore.c b/trunk/drivers/mtd/mtdcore.c index 322ca65b0cc5..c400c57c394a 100644 --- a/trunk/drivers/mtd/mtdcore.c +++ b/trunk/drivers/mtd/mtdcore.c @@ -42,6 +42,7 @@ #include #include "mtdcore.h" + /* * backing device capabilities for non-mappable devices (such as NAND flash) * - permits private mappings, copies are taken of the data @@ -97,11 +98,7 @@ EXPORT_SYMBOL_GPL(__mtd_next_device); static LIST_HEAD(mtd_notifiers); -#if defined(CONFIG_MTD_CHAR) || defined(CONFIG_MTD_CHAR_MODULE) #define MTD_DEVT(index) 
MKDEV(MTD_CHAR_MAJOR, (index)*2) -#else -#define MTD_DEVT(index) 0 -#endif /* REVISIT once MTD uses the driver model better, whoever allocates * the mtd_info will probably want to use the release() hook... @@ -493,7 +490,7 @@ int del_mtd_device(struct mtd_info *mtd) * * Returns zero in case of success and a negative error code in case of failure. */ -int mtd_device_parse_register(struct mtd_info *mtd, const char **types, +int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, struct mtd_part_parser_data *parser_data, const struct mtd_partition *parts, int nr_parts) @@ -1117,8 +1114,6 @@ EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to); /*====================================================================*/ /* Support for /proc/mtd */ -static struct proc_dir_entry *proc_mtd; - static int mtd_proc_show(struct seq_file *m, void *v) { struct mtd_info *mtd; @@ -1164,6 +1159,8 @@ static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name) return ret; } +static struct proc_dir_entry *proc_mtd; + static int __init init_mtd(void) { int ret; @@ -1184,11 +1181,17 @@ static int __init init_mtd(void) if (ret) goto err_bdi3; -#ifdef CONFIG_PROC_FS proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops); -#endif /* CONFIG_PROC_FS */ + + ret = init_mtdchar(); + if (ret) + goto out_procfs; + return 0; +out_procfs: + if (proc_mtd) + remove_proc_entry("mtd", NULL); err_bdi3: bdi_destroy(&mtd_bdi_ro_mappable); err_bdi2: @@ -1202,10 +1205,9 @@ static int __init init_mtd(void) static void __exit cleanup_mtd(void) { -#ifdef CONFIG_PROC_FS + cleanup_mtdchar(); if (proc_mtd) - remove_proc_entry( "mtd", NULL); -#endif /* CONFIG_PROC_FS */ + remove_proc_entry("mtd", NULL); class_unregister(&mtd_class); bdi_destroy(&mtd_bdi_unmappable); bdi_destroy(&mtd_bdi_ro_mappable); diff --git a/trunk/drivers/mtd/mtdcore.h b/trunk/drivers/mtd/mtdcore.h index 961a38408542..7b0353399a10 100644 --- a/trunk/drivers/mtd/mtdcore.h +++ b/trunk/drivers/mtd/mtdcore.h @@ -1,23 +1,21 @@ -/* linux/drivers/mtd/mtdcore.h - * - * Header file for driver private mtdcore exports - * +/* + * These are exported solely for the purpose of mtd_blkdevs.c and mtdchar.c. + * You should not use them for _anything_ else. */ -/* These are exported solely for the purpose of mtd_blkdevs.c. 
You - should not use them for _anything_ else */ - extern struct mutex mtd_table_mutex; -extern struct mtd_info *__mtd_next_device(int i); -extern int add_mtd_device(struct mtd_info *mtd); -extern int del_mtd_device(struct mtd_info *mtd); -extern int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, - int); -extern int del_mtd_partitions(struct mtd_info *); -extern int parse_mtd_partitions(struct mtd_info *master, const char **types, - struct mtd_partition **pparts, - struct mtd_part_parser_data *data); +struct mtd_info *__mtd_next_device(int i); +int add_mtd_device(struct mtd_info *mtd); +int del_mtd_device(struct mtd_info *mtd); +int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int); +int del_mtd_partitions(struct mtd_info *); +int parse_mtd_partitions(struct mtd_info *master, const char * const *types, + struct mtd_partition **pparts, + struct mtd_part_parser_data *data); + +int __init init_mtdchar(void); +void __exit cleanup_mtdchar(void); #define mtd_for_each_device(mtd) \ for ((mtd) = __mtd_next_device(0); \ diff --git a/trunk/drivers/mtd/mtdpart.c b/trunk/drivers/mtd/mtdpart.c index 70fa70a8318f..301493382cd0 100644 --- a/trunk/drivers/mtd/mtdpart.c +++ b/trunk/drivers/mtd/mtdpart.c @@ -694,7 +694,7 @@ EXPORT_SYMBOL_GPL(deregister_mtd_parser); * Do not forget to update 'parse_mtd_partitions()' kerneldoc comment if you * are changing this array! */ -static const char *default_mtd_part_types[] = { +static const char * const default_mtd_part_types[] = { "cmdlinepart", "ofpart", NULL @@ -720,7 +720,7 @@ static const char *default_mtd_part_types[] = { * o a positive number of found partitions, in which case on exit @pparts will * point to an array containing this number of &struct mtd_info objects. */ -int parse_mtd_partitions(struct mtd_info *master, const char **types, +int parse_mtd_partitions(struct mtd_info *master, const char *const *types, struct mtd_partition **pparts, struct mtd_part_parser_data *data) { diff --git a/trunk/drivers/mtd/nand/Kconfig b/trunk/drivers/mtd/nand/Kconfig index 81bf5e52601e..a60f6c17f57b 100644 --- a/trunk/drivers/mtd/nand/Kconfig +++ b/trunk/drivers/mtd/nand/Kconfig @@ -41,14 +41,6 @@ config MTD_SM_COMMON tristate default n -config MTD_NAND_MUSEUM_IDS - bool "Enable chip ids for obsolete ancient NAND devices" - default n - help - Enable this option only when your board has first generation - NAND chips (page size 256 byte, erase size 4-8KiB). The IDs - of these chips were reused by later, larger chips. - config MTD_NAND_DENALI tristate "Support Denali NAND controller" help @@ -81,15 +73,9 @@ config MTD_NAND_DENALI_SCRATCH_REG_ADDR scratch register here to enable this feature. On Intel Moorestown boards, the scratch register is at 0xFF108018. -config MTD_NAND_H1900 - tristate "iPAQ H1900 flash" - depends on ARCH_PXA && BROKEN - help - This enables the driver for the iPAQ h1900 flash. - config MTD_NAND_GPIO tristate "GPIO NAND Flash driver" - depends on GENERIC_GPIO && ARM + depends on GPIOLIB && ARM help This enables a GPIO based NAND flash driver. @@ -201,22 +187,6 @@ config MTD_NAND_BF5XX_BOOTROM_ECC If unsure, say N. 
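The recurring probe-table change in this series (mtdcore.h, mtdpart.c and the map drivers above) is purely a const-correctness fix: the added second const makes the pointer elements themselves read-only, so the whole table can live in read-only data and be passed through the new "const char * const *" parser interfaces. A minimal illustration of the difference, with hypothetical array names:

	/* Array of pointers to const char: the strings are immutable,
	 * but the pointer slots themselves could still be reassigned,
	 * so the array ends up in writable data.
	 */
	static const char *part_probes_writable[] = { "cmdlinepart", "RedBoot", NULL };

	/* Array of const pointers to const char: neither the strings nor
	 * the pointer slots can change, so the table is fully read-only.
	 */
	static const char * const part_probes_const[] = { "cmdlinepart", "RedBoot", NULL };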
-config MTD_NAND_RTC_FROM4 - tristate "Renesas Flash ROM 4-slot interface board (FROM_BOARD4)" - depends on SH_SOLUTION_ENGINE - select REED_SOLOMON - select REED_SOLOMON_DEC8 - select BITREVERSE - help - This enables the driver for the Renesas Technology AG-AND - flash interface board (FROM_BOARD4) - -config MTD_NAND_PPCHAMELEONEVB - tristate "NAND Flash device on PPChameleonEVB board" - depends on PPCHAMELEONEVB && BROKEN - help - This enables the NAND flash driver on the PPChameleon EVB Board. - config MTD_NAND_S3C2410 tristate "NAND Flash support for Samsung S3C SoCs" depends on ARCH_S3C24XX || ARCH_S3C64XX diff --git a/trunk/drivers/mtd/nand/Makefile b/trunk/drivers/mtd/nand/Makefile index d76d91205691..bb8189172f62 100644 --- a/trunk/drivers/mtd/nand/Makefile +++ b/trunk/drivers/mtd/nand/Makefile @@ -15,14 +15,11 @@ obj-$(CONFIG_MTD_NAND_DENALI_PCI) += denali_pci.o obj-$(CONFIG_MTD_NAND_DENALI_DT) += denali_dt.o obj-$(CONFIG_MTD_NAND_AU1550) += au1550nd.o obj-$(CONFIG_MTD_NAND_BF5XX) += bf5xx_nand.o -obj-$(CONFIG_MTD_NAND_PPCHAMELEONEVB) += ppchameleonevb.o obj-$(CONFIG_MTD_NAND_S3C2410) += s3c2410.o obj-$(CONFIG_MTD_NAND_DAVINCI) += davinci_nand.o obj-$(CONFIG_MTD_NAND_DISKONCHIP) += diskonchip.o obj-$(CONFIG_MTD_NAND_DOCG4) += docg4.o obj-$(CONFIG_MTD_NAND_FSMC) += fsmc_nand.o -obj-$(CONFIG_MTD_NAND_H1900) += h1910.o -obj-$(CONFIG_MTD_NAND_RTC_FROM4) += rtc_from4.o obj-$(CONFIG_MTD_NAND_SHARPSL) += sharpsl.o obj-$(CONFIG_MTD_NAND_NANDSIM) += nandsim.o obj-$(CONFIG_MTD_NAND_CS553X) += cs553x_nand.o diff --git a/trunk/drivers/mtd/nand/atmel_nand.c b/trunk/drivers/mtd/nand/atmel_nand.c index ffcbcca2fd2d..2d23d2929438 100644 --- a/trunk/drivers/mtd/nand/atmel_nand.c +++ b/trunk/drivers/mtd/nand/atmel_nand.c @@ -1737,20 +1737,7 @@ static struct platform_driver atmel_nand_driver = { }, }; -static int __init atmel_nand_init(void) -{ - return platform_driver_probe(&atmel_nand_driver, atmel_nand_probe); -} - - -static void __exit atmel_nand_exit(void) -{ - platform_driver_unregister(&atmel_nand_driver); -} - - -module_init(atmel_nand_init); -module_exit(atmel_nand_exit); +module_platform_driver_probe(atmel_nand_driver, atmel_nand_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Rick Bronson"); diff --git a/trunk/drivers/mtd/nand/bf5xx_nand.c b/trunk/drivers/mtd/nand/bf5xx_nand.c index 4271e948d1e2..776df3694f75 100644 --- a/trunk/drivers/mtd/nand/bf5xx_nand.c +++ b/trunk/drivers/mtd/nand/bf5xx_nand.c @@ -874,21 +874,7 @@ static struct platform_driver bf5xx_nand_driver = { }, }; -static int __init bf5xx_nand_init(void) -{ - printk(KERN_INFO "%s, Version %s (c) 2007 Analog Devices, Inc.\n", - DRV_DESC, DRV_VERSION); - - return platform_driver_register(&bf5xx_nand_driver); -} - -static void __exit bf5xx_nand_exit(void) -{ - platform_driver_unregister(&bf5xx_nand_driver); -} - -module_init(bf5xx_nand_init); -module_exit(bf5xx_nand_exit); +module_platform_driver(bf5xx_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR(DRV_AUTHOR); diff --git a/trunk/drivers/mtd/nand/cafe_nand.c b/trunk/drivers/mtd/nand/cafe_nand.c index 010d61266536..c34985a55101 100644 --- a/trunk/drivers/mtd/nand/cafe_nand.c +++ b/trunk/drivers/mtd/nand/cafe_nand.c @@ -303,13 +303,7 @@ static void cafe_nand_cmdfunc(struct mtd_info *mtd, unsigned command, case NAND_CMD_SEQIN: case NAND_CMD_RNDIN: case NAND_CMD_STATUS: - case NAND_CMD_DEPLETE1: case NAND_CMD_RNDOUT: - case NAND_CMD_STATUS_ERROR: - case NAND_CMD_STATUS_ERROR0: - case NAND_CMD_STATUS_ERROR1: - case NAND_CMD_STATUS_ERROR2: - case NAND_CMD_STATUS_ERROR3: 
cafe_writel(cafe, cafe->ctl2, NAND_CTRL2); return; } @@ -536,8 +530,8 @@ static int cafe_nand_write_page_lowlevel(struct mtd_info *mtd, } static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page, - int cached, int raw) + uint32_t offset, int data_len, const uint8_t *buf, + int oob_required, int page, int cached, int raw) { int status; diff --git a/trunk/drivers/mtd/nand/davinci_nand.c b/trunk/drivers/mtd/nand/davinci_nand.c index 94e17af8e450..c3e15a558173 100644 --- a/trunk/drivers/mtd/nand/davinci_nand.c +++ b/trunk/drivers/mtd/nand/davinci_nand.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -577,7 +578,6 @@ static struct davinci_nand_pdata return pdev->dev.platform_data; } #else -#define davinci_nand_of_match NULL static struct davinci_nand_pdata *nand_davinci_get_pdata(struct platform_device *pdev) { @@ -878,22 +878,12 @@ static struct platform_driver nand_davinci_driver = { .driver = { .name = "davinci_nand", .owner = THIS_MODULE, - .of_match_table = davinci_nand_of_match, + .of_match_table = of_match_ptr(davinci_nand_of_match), }, }; MODULE_ALIAS("platform:davinci_nand"); -static int __init nand_davinci_init(void) -{ - return platform_driver_probe(&nand_davinci_driver, nand_davinci_probe); -} -module_init(nand_davinci_init); - -static void __exit nand_davinci_exit(void) -{ - platform_driver_unregister(&nand_davinci_driver); -} -module_exit(nand_davinci_exit); +module_platform_driver_probe(nand_davinci_driver, nand_davinci_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Texas Instruments"); diff --git a/trunk/drivers/mtd/nand/denali_dt.c b/trunk/drivers/mtd/nand/denali_dt.c index 546f8cb5688d..92530244e2cb 100644 --- a/trunk/drivers/mtd/nand/denali_dt.c +++ b/trunk/drivers/mtd/nand/denali_dt.c @@ -42,7 +42,7 @@ static void __iomem *request_and_map(struct device *dev, } ptr = devm_ioremap_nocache(dev, res->start, resource_size(res)); - if (!res) + if (!ptr) dev_err(dev, "ioremap_nocache of %s failed!", res->name); return ptr; @@ -90,7 +90,7 @@ static int denali_dt_probe(struct platform_device *ofdev) denali->irq = platform_get_irq(ofdev, 0); if (denali->irq < 0) { dev_err(&ofdev->dev, "no irq defined\n"); - return -ENXIO; + return denali->irq; } denali->flash_reg = request_and_map(&ofdev->dev, denali_reg); @@ -146,21 +146,11 @@ static struct platform_driver denali_dt_driver = { .driver = { .name = "denali-nand-dt", .owner = THIS_MODULE, - .of_match_table = of_match_ptr(denali_nand_dt_ids), + .of_match_table = denali_nand_dt_ids, }, }; -static int __init denali_init_dt(void) -{ - return platform_driver_register(&denali_dt_driver); -} -module_init(denali_init_dt); - -static void __exit denali_exit_dt(void) -{ - platform_driver_unregister(&denali_dt_driver); -} -module_exit(denali_exit_dt); +module_platform_driver(denali_dt_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jamie Iles"); diff --git a/trunk/drivers/mtd/nand/docg4.c b/trunk/drivers/mtd/nand/docg4.c index 18fa4489e52e..fa25e7a08134 100644 --- a/trunk/drivers/mtd/nand/docg4.c +++ b/trunk/drivers/mtd/nand/docg4.c @@ -1397,18 +1397,7 @@ static struct platform_driver docg4_driver = { .remove = __exit_p(cleanup_docg4), }; -static int __init docg4_init(void) -{ - return platform_driver_probe(&docg4_driver, probe_docg4); -} - -static void __exit docg4_exit(void) -{ - platform_driver_unregister(&docg4_driver); -} - -module_init(docg4_init); -module_exit(docg4_exit); +module_platform_driver_probe(docg4_driver, probe_docg4); 
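For reference, module_platform_driver_probe() folds away exactly the kind of open-coded init/exit pair deleted above. A minimal sketch of what the one-liner is roughly equivalent to, using the docg4 names from this hunk (the generated function names are illustrative only):

	/*
	 * Roughly what module_platform_driver_probe(docg4_driver, probe_docg4)
	 * stands in for: an __init hook that binds the driver once via
	 * platform_driver_probe() and an __exit hook that unregisters it.
	 */
	static int __init docg4_driver_init(void)
	{
		return platform_driver_probe(&docg4_driver, probe_docg4);
	}
	module_init(docg4_driver_init);

	static void __exit docg4_driver_exit(void)
	{
		platform_driver_unregister(&docg4_driver);
	}
	module_exit(docg4_driver_exit);

The same transformation is applied to the atmel_nand, davinci_nand, denali_dt and fsmc_nand drivers elsewhere in this series.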
MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mike Dunn"); diff --git a/trunk/drivers/mtd/nand/fsmc_nand.c b/trunk/drivers/mtd/nand/fsmc_nand.c index 05ba3f0c2d19..911e2433fe30 100644 --- a/trunk/drivers/mtd/nand/fsmc_nand.c +++ b/trunk/drivers/mtd/nand/fsmc_nand.c @@ -1235,18 +1235,7 @@ static struct platform_driver fsmc_nand_driver = { }, }; -static int __init fsmc_nand_init(void) -{ - return platform_driver_probe(&fsmc_nand_driver, - fsmc_nand_probe); -} -module_init(fsmc_nand_init); - -static void __exit fsmc_nand_exit(void) -{ - platform_driver_unregister(&fsmc_nand_driver); -} -module_exit(fsmc_nand_exit); +module_platform_driver_probe(fsmc_nand_driver, fsmc_nand_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Vipin Kumar , Ashish Priyadarshi"); diff --git a/trunk/drivers/mtd/nand/gpio.c b/trunk/drivers/mtd/nand/gpio.c index e789e3f51710..89065dd83d64 100644 --- a/trunk/drivers/mtd/nand/gpio.c +++ b/trunk/drivers/mtd/nand/gpio.c @@ -190,7 +190,6 @@ static struct resource *gpio_nand_get_io_sync_of(struct platform_device *pdev) return r; } #else /* CONFIG_OF */ -#define gpio_nand_id_table NULL static inline int gpio_nand_get_config_of(const struct device *dev, struct gpio_nand_platdata *plat) { @@ -259,8 +258,6 @@ static int gpio_nand_remove(struct platform_device *dev) if (gpio_is_valid(gpiomtd->plat.gpio_rdy)) gpio_free(gpiomtd->plat.gpio_rdy); - kfree(gpiomtd); - return 0; } @@ -297,7 +294,7 @@ static int gpio_nand_probe(struct platform_device *dev) if (!res0) return -EINVAL; - gpiomtd = kzalloc(sizeof(*gpiomtd), GFP_KERNEL); + gpiomtd = devm_kzalloc(&dev->dev, sizeof(*gpiomtd), GFP_KERNEL); if (gpiomtd == NULL) { dev_err(&dev->dev, "failed to create NAND MTD\n"); return -ENOMEM; @@ -412,7 +409,6 @@ static int gpio_nand_probe(struct platform_device *dev) iounmap(gpiomtd->nand_chip.IO_ADDR_R); release_mem_region(res0->start, resource_size(res0)); err_map: - kfree(gpiomtd); return ret; } @@ -421,7 +417,7 @@ static struct platform_driver gpio_nand_driver = { .remove = gpio_nand_remove, .driver = { .name = "gpio-nand", - .of_match_table = gpio_nand_id_table, + .of_match_table = of_match_ptr(gpio_nand_id_table), }, }; diff --git a/trunk/drivers/mtd/nand/h1910.c b/trunk/drivers/mtd/nand/h1910.c deleted file mode 100644 index 50166e93ba96..000000000000 --- a/trunk/drivers/mtd/nand/h1910.c +++ /dev/null @@ -1,167 +0,0 @@ -/* - * drivers/mtd/nand/h1910.c - * - * Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com) - * - * Derived from drivers/mtd/nand/edb7312.c - * Copyright (C) 2002 Marius Gröger (mag@sysgo.de) - * Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Overview: - * This is a device driver for the NAND flash device found on the - * iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is - * a 128Mibit (16MiB x 8 bits) NAND flash device. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * MTD structure for EDB7312 board - */ -static struct mtd_info *h1910_nand_mtd = NULL; - -/* - * Module stuff - */ - -/* - * Define static partitions for flash device - */ -static struct mtd_partition partition_info[] = { - {name:"h1910 NAND Flash", - offset:0, - size:16 * 1024 * 1024} -}; - -#define NUM_PARTITIONS 1 - -/* - * hardware specific access to control-lines - * - * NAND_NCE: bit 0 - don't care - * NAND_CLE: bit 1 - address bit 2 - * NAND_ALE: bit 2 - address bit 3 - */ -static void h1910_hwcontrol(struct mtd_info *mtd, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = mtd->priv; - - if (cmd != NAND_CMD_NONE) - writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1)); -} - -/* - * read device ready pin - */ -#if 0 -static int h1910_device_ready(struct mtd_info *mtd) -{ - return (GPLR(55) & GPIO_bit(55)); -} -#endif - -/* - * Main initialization routine - */ -static int __init h1910_init(void) -{ - struct nand_chip *this; - void __iomem *nandaddr; - - if (!machine_is_h1900()) - return -ENODEV; - - nandaddr = ioremap(0x08000000, 0x1000); - if (!nandaddr) { - printk("Failed to ioremap nand flash.\n"); - return -ENOMEM; - } - - /* Allocate memory for MTD device structure and private data */ - h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!h1910_nand_mtd) { - printk("Unable to allocate h1910 NAND MTD device structure.\n"); - iounmap((void *)nandaddr); - return -ENOMEM; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&h1910_nand_mtd[1]); - - /* Initialize structures */ - memset(h1910_nand_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - h1910_nand_mtd->priv = this; - h1910_nand_mtd->owner = THIS_MODULE; - - /* - * Enable VPEN - */ - GPSR(37) = GPIO_bit(37); - - /* insert callbacks */ - this->IO_ADDR_R = nandaddr; - this->IO_ADDR_W = nandaddr; - this->cmd_ctrl = h1910_hwcontrol; - this->dev_ready = NULL; /* unknown whether that was correct or not so we will just do it like this */ - /* 15 us command delay time */ - this->chip_delay = 50; - this->ecc.mode = NAND_ECC_SOFT; - - /* Scan to find existence of the device */ - if (nand_scan(h1910_nand_mtd, 1)) { - printk(KERN_NOTICE "No NAND device - returning -ENXIO\n"); - kfree(h1910_nand_mtd); - iounmap((void *)nandaddr); - return -ENXIO; - } - - /* Register the partitions */ - mtd_device_parse_register(h1910_nand_mtd, NULL, NULL, partition_info, - NUM_PARTITIONS); - - /* Return happy */ - return 0; -} - -module_init(h1910_init); - -/* - * Clean up routine - */ -static void __exit h1910_cleanup(void) -{ - struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1]; - - /* Release resources, unregister device */ - nand_release(h1910_nand_mtd); - - /* Release io resource */ - iounmap((void *)this->IO_ADDR_W); - - /* Free the MTD device structure */ - kfree(h1910_nand_mtd); -} - -module_exit(h1910_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Joshua Wise "); -MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910"); diff --git a/trunk/drivers/mtd/nand/lpc32xx_mlc.c b/trunk/drivers/mtd/nand/lpc32xx_mlc.c index 0ca22ae9135c..fd1df5e13ae4 100644 --- a/trunk/drivers/mtd/nand/lpc32xx_mlc.c +++ b/trunk/drivers/mtd/nand/lpc32xx_mlc.c @@ -540,8 +540,8 @@ static int lpc32xx_write_page_lowlevel(struct mtd_info *mtd, } static int lpc32xx_write_page(struct 
mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page, - int cached, int raw) + uint32_t offset, int data_len, const uint8_t *buf, + int oob_required, int page, int cached, int raw) { int res; @@ -672,11 +672,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev) } rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (rc == NULL) { - dev_err(&pdev->dev, "No memory resource found for device!\r\n"); - return -ENXIO; - } - host->io_base = devm_ioremap_resource(&pdev->dev, rc); if (IS_ERR(host->io_base)) return PTR_ERR(host->io_base); diff --git a/trunk/drivers/mtd/nand/nand_base.c b/trunk/drivers/mtd/nand/nand_base.c index 42c63927609d..dfcd0a565c5b 100644 --- a/trunk/drivers/mtd/nand/nand_base.c +++ b/trunk/drivers/mtd/nand/nand_base.c @@ -4,7 +4,6 @@ * Overview: * This is the generic MTD driver for NAND flash devices. It should be * capable of working with almost all NAND chips currently available. - * Basic support for AG-AND chips is provided. * * Additional technical information is available on * http://www.linux-mtd.infradead.org/doc/nand.html @@ -22,8 +21,6 @@ * Enable cached programming for 2k page size chips * Check, if mtd->ecctype should be set to MTD_ECC_HW * if we have HW ECC support. - * The AG-AND chips have nice features for speed improvement, - * which are not supported yet. Read / program 4 pages in one go. * BBT table is not serialized, has to be fixed * * This program is free software; you can redistribute it and/or modify @@ -515,7 +512,7 @@ EXPORT_SYMBOL_GPL(nand_wait_ready); * @page_addr: the page address for this command, -1 if none * * Send command to NAND device. This function is used for small page devices - * (256/512 Bytes per page). + * (512 Bytes per page). */ static void nand_command(struct mtd_info *mtd, unsigned int command, int column, int page_addr) @@ -631,8 +628,7 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, } /* Command latch cycle */ - chip->cmd_ctrl(mtd, command & 0xff, - NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); + chip->cmd_ctrl(mtd, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); if (column != -1 || page_addr != -1) { int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE; @@ -671,16 +667,6 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, case NAND_CMD_SEQIN: case NAND_CMD_RNDIN: case NAND_CMD_STATUS: - case NAND_CMD_DEPLETE1: - return; - - case NAND_CMD_STATUS_ERROR: - case NAND_CMD_STATUS_ERROR0: - case NAND_CMD_STATUS_ERROR1: - case NAND_CMD_STATUS_ERROR2: - case NAND_CMD_STATUS_ERROR3: - /* Read error status commands require only a short delay */ - udelay(chip->chip_delay); return; case NAND_CMD_RESET: @@ -836,10 +822,7 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) */ ndelay(100); - if ((state == FL_ERASING) && (chip->options & NAND_IS_AND)) - chip->cmdfunc(mtd, NAND_CMD_STATUS_MULTI, -1, -1); - else - chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); if (in_interrupt() || oops_in_progress) panic_nand_wait(mtd, chip, timeo); @@ -1127,7 +1110,7 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, } /** - * nand_read_subpage - [REPLACEABLE] software ECC based sub-page read function + * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function * @mtd: mtd info structure * @chip: nand chip info structure * @data_offs: offset of requested data within the page @@ -1995,6 +1978,67 @@ static int nand_write_page_hwecc(struct mtd_info *mtd, struct 
nand_chip *chip, return 0; } + +/** + * nand_write_subpage_hwecc - [REPLACABLE] hardware ECC based subpage write + * @mtd: mtd info structure + * @chip: nand chip info structure + * @column: column address of subpage within the page + * @data_len: data length + * @oob_required: must write chip->oob_poi to OOB + */ +static int nand_write_subpage_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, uint32_t offset, + uint32_t data_len, const uint8_t *data_buf, + int oob_required) +{ + uint8_t *oob_buf = chip->oob_poi; + uint8_t *ecc_calc = chip->buffers->ecccalc; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int ecc_steps = chip->ecc.steps; + uint32_t *eccpos = chip->ecc.layout->eccpos; + uint32_t start_step = offset / ecc_size; + uint32_t end_step = (offset + data_len - 1) / ecc_size; + int oob_bytes = mtd->oobsize / ecc_steps; + int step, i; + + for (step = 0; step < ecc_steps; step++) { + /* configure controller for WRITE access */ + chip->ecc.hwctl(mtd, NAND_ECC_WRITE); + + /* write data (untouched subpages already masked by 0xFF) */ + chip->write_buf(mtd, data_buf, ecc_size); + + /* mask ECC of un-touched subpages by padding 0xFF */ + if ((step < start_step) || (step > end_step)) + memset(ecc_calc, 0xff, ecc_bytes); + else + chip->ecc.calculate(mtd, data_buf, ecc_calc); + + /* mask OOB of un-touched subpages by padding 0xFF */ + /* if oob_required, preserve OOB metadata of written subpage */ + if (!oob_required || (step < start_step) || (step > end_step)) + memset(oob_buf, 0xff, oob_bytes); + + data_buf += ecc_size; + ecc_calc += ecc_bytes; + oob_buf += oob_bytes; + } + + /* copy calculated ECC for whole page to chip->buffer->oob */ + /* this include masked-value(0xFF) for unwritten subpages */ + ecc_calc = chip->buffers->ecccalc; + for (i = 0; i < chip->ecc.total; i++) + chip->oob_poi[eccpos[i]] = ecc_calc[i]; + + /* write OOB buffer to NAND device */ + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + + /** * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write * @mtd: mtd info structure @@ -2047,6 +2091,8 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, * nand_write_page - [REPLACEABLE] write one page * @mtd: MTD device structure * @chip: NAND chip descriptor + * @offset: address offset within the page + * @data_len: length of actual data to be written * @buf: the data to write * @oob_required: must write chip->oob_poi to OOB * @page: page number to write @@ -2054,15 +2100,25 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, * @raw: use _raw version of write_page */ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page, - int cached, int raw) + uint32_t offset, int data_len, const uint8_t *buf, + int oob_required, int page, int cached, int raw) { - int status; + int status, subpage; + + if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && + chip->ecc.write_subpage) + subpage = offset || (data_len < mtd->writesize); + else + subpage = 0; chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); if (unlikely(raw)) - status = chip->ecc.write_page_raw(mtd, chip, buf, oob_required); + status = chip->ecc.write_page_raw(mtd, chip, buf, + oob_required); + else if (subpage) + status = chip->ecc.write_subpage(mtd, chip, offset, data_len, + buf, oob_required); else status = chip->ecc.write_page(mtd, chip, buf, oob_required); @@ -2075,7 +2131,7 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, */ cached = 0; - if (!cached || 
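The core of the new nand_write_subpage_hwecc() is that the whole page is still streamed to the chip, but the ECC (and, unless requested, the OOB) bytes belonging to ECC steps outside the [offset, offset + data_len) window are left at 0xFF so the untouched subpages remain in the erased state. A standalone sketch of the step and masking arithmetic, with illustrative geometry rather than real chip parameters:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative geometry: 2048-byte page, 512-byte ECC step, 3 ECC bytes/step. */
#define PAGE_SIZE 2048
#define ECC_SIZE  512
#define ECC_BYTES 3
#define ECC_STEPS (PAGE_SIZE / ECC_SIZE)

int main(void)
{
	uint32_t offset = 512, data_len = 512;              /* write only subpage 1 */
	uint32_t start_step = offset / ECC_SIZE;
	uint32_t end_step = (offset + data_len - 1) / ECC_SIZE;
	uint8_t ecc_calc[ECC_STEPS][ECC_BYTES];
	int step;

	for (step = 0; step < ECC_STEPS; step++) {
		if (step < (int)start_step || step > (int)end_step) {
			/* untouched subpage: pad its ECC with 0xFF (erased state) */
			memset(ecc_calc[step], 0xff, ECC_BYTES);
		} else {
			/* written subpage: this is where ecc.calculate() would run */
			memset(ecc_calc[step], 0xa5, ECC_BYTES);  /* placeholder value */
		}
		printf("step %d: %s\n", step,
		       ecc_calc[step][0] == 0xff ? "masked" : "ECC computed");
	}
	return 0;
}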
!(chip->options & NAND_CACHEPRG)) { + if (!cached || !NAND_HAS_CACHEPROG(chip)) { chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); status = chip->waitfunc(mtd, chip); @@ -2176,7 +2232,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, uint8_t *oob = ops->oobbuf; uint8_t *buf = ops->datbuf; - int ret, subpage; + int ret; int oob_required = oob ? 1 : 0; ops->retlen = 0; @@ -2191,10 +2247,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, } column = to & (mtd->writesize - 1); - subpage = column || (writelen & (mtd->writesize - 1)); - - if (subpage && oob) - return -EINVAL; chipnr = (int)(to >> chip->chip_shift); chip->select_chip(mtd, chipnr); @@ -2243,9 +2295,9 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, /* We still need to erase leftover OOB data */ memset(chip->oob_poi, 0xff, mtd->oobsize); } - - ret = chip->write_page(mtd, chip, wbuf, oob_required, page, - cached, (ops->mode == MTD_OPS_RAW)); + ret = chip->write_page(mtd, chip, column, bytes, wbuf, + oob_required, page, cached, + (ops->mode == MTD_OPS_RAW)); if (ret) break; @@ -2480,24 +2532,6 @@ static void single_erase_cmd(struct mtd_info *mtd, int page) chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); } -/** - * multi_erase_cmd - [GENERIC] AND specific block erase command function - * @mtd: MTD device structure - * @page: the page address of the block which will be erased - * - * AND multi block erase command function. Erase 4 consecutive blocks. - */ -static void multi_erase_cmd(struct mtd_info *mtd, int page) -{ - struct nand_chip *chip = mtd->priv; - /* Send commands to erase a block */ - chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++); - chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++); - chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page++); - chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); - chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); -} - /** * nand_erase - [MTD Interface] erase block(s) * @mtd: MTD device structure @@ -2510,7 +2544,6 @@ static int nand_erase(struct mtd_info *mtd, struct erase_info *instr) return nand_erase_nand(mtd, instr, 0); } -#define BBT_PAGE_MASK 0xffffff3f /** * nand_erase_nand - [INTERN] erase block(s) * @mtd: MTD device structure @@ -2524,8 +2557,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, { int page, status, pages_per_block, ret, chipnr; struct nand_chip *chip = mtd->priv; - loff_t rewrite_bbt[NAND_MAX_CHIPS] = {0}; - unsigned int bbt_masked_page = 0xffffffff; loff_t len; pr_debug("%s: start = 0x%012llx, len = %llu\n", @@ -2556,15 +2587,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, goto erase_exit; } - /* - * If BBT requires refresh, set the BBT page mask to see if the BBT - * should be rewritten. Otherwise the mask is set to 0xffffffff which - * can not be matched. This is also done when the bbt is actually - * erased to avoid recursive updates. - */ - if (chip->options & BBT_AUTO_REFRESH && !allowbbt) - bbt_masked_page = chip->bbt_td->pages[chipnr] & BBT_PAGE_MASK; - /* Loop through the pages */ len = instr->len; @@ -2610,15 +2632,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, goto erase_exit; } - /* - * If BBT requires refresh, set the BBT rewrite flag to the - * page being erased. 
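With the column and byte count now passed all the way down to chip->write_page(), nand_do_write_ops() no longer has to reject partial-page writes that also carry OOB data; the decision is made per page in nand_write_page(), which takes the subpage path only when the chip allows it, a handler is registered, and the request is not a full aligned page. A standalone sketch of that predicate (plain C, illustrative page size):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define WRITESIZE 2048   /* illustrative page size */

/* Subpage path only if the chip permits it, a write_subpage handler
 * exists, and the request does not cover one full aligned page. */
static bool use_subpage_write(bool chip_allows_subpage, bool has_handler,
			      uint32_t offset, int data_len)
{
	if (!chip_allows_subpage || !has_handler)
		return false;
	return offset != 0 || data_len < WRITESIZE;
}

int main(void)
{
	printf("%d\n", use_subpage_write(true, true, 0, WRITESIZE)); /* 0: full page */
	printf("%d\n", use_subpage_write(true, true, 512, 512));     /* 1: partial page */
	printf("%d\n", use_subpage_write(false, true, 512, 512));    /* 0: subpage writes disallowed */
	return 0;
}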
- */ - if (bbt_masked_page != 0xffffffff && - (page & BBT_PAGE_MASK) == bbt_masked_page) - rewrite_bbt[chipnr] = - ((loff_t)page << chip->page_shift); - /* Increment page address and decrement length */ len -= (1 << chip->phys_erase_shift); page += pages_per_block; @@ -2628,15 +2641,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, chipnr++; chip->select_chip(mtd, -1); chip->select_chip(mtd, chipnr); - - /* - * If BBT requires refresh and BBT-PERCHIP, set the BBT - * page mask to see if this BBT should be rewritten. - */ - if (bbt_masked_page != 0xffffffff && - (chip->bbt_td->options & NAND_BBT_PERCHIP)) - bbt_masked_page = chip->bbt_td->pages[chipnr] & - BBT_PAGE_MASK; } } instr->state = MTD_ERASE_DONE; @@ -2653,23 +2657,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, if (!ret) mtd_erase_callback(instr); - /* - * If BBT requires refresh and erase was successful, rewrite any - * selected bad block tables. - */ - if (bbt_masked_page == 0xffffffff || ret) - return ret; - - for (chipnr = 0; chipnr < chip->numchips; chipnr++) { - if (!rewrite_bbt[chipnr]) - continue; - /* Update the BBT for chip */ - pr_debug("%s: nand_update_bbt (%d:0x%0llx 0x%0x)\n", - __func__, chipnr, rewrite_bbt[chipnr], - chip->bbt_td->pages[chipnr]); - nand_update_bbt(mtd, rewrite_bbt[chipnr]); - } - /* Return more or less happy */ return ret; } @@ -2905,8 +2892,6 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip, chip->onfi_version = 20; else if (val & (1 << 1)) chip->onfi_version = 10; - else - chip->onfi_version = 0; if (!chip->onfi_version) { pr_info("%s: unsupported ONFI version: %d\n", __func__, val); @@ -3171,6 +3156,30 @@ static void nand_decode_bbm_options(struct mtd_info *mtd, chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; } +static inline bool is_full_id_nand(struct nand_flash_dev *type) +{ + return type->id_len; +} + +static bool find_full_id_nand(struct mtd_info *mtd, struct nand_chip *chip, + struct nand_flash_dev *type, u8 *id_data, int *busw) +{ + if (!strncmp(type->id, id_data, type->id_len)) { + mtd->writesize = type->pagesize; + mtd->erasesize = type->erasesize; + mtd->oobsize = type->oobsize; + + chip->cellinfo = id_data[2]; + chip->chipsize = (uint64_t)type->chipsize << 20; + chip->options |= type->options; + + *busw = type->options & NAND_BUSWIDTH_16; + + return true; + } + return false; +} + /* * Get the flash and manufacturer id and lookup if the type is supported. */ @@ -3222,9 +3231,14 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, if (!type) type = nand_flash_ids; - for (; type->name != NULL; type++) - if (*dev_id == type->id) - break; + for (; type->name != NULL; type++) { + if (is_full_id_nand(type)) { + if (find_full_id_nand(mtd, chip, type, id_data, &busw)) + goto ident_done; + } else if (*dev_id == type->dev_id) { + break; + } + } chip->onfi_version = 0; if (!type->name || !type->pagesize) { @@ -3302,12 +3316,7 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd, } chip->badblockbits = 8; - - /* Check for AND chips with 4 page planes */ - if (chip->options & NAND_4PAGE_ARRAY) - chip->erase_cmd = multi_erase_cmd; - else - chip->erase_cmd = single_erase_cmd; + chip->erase_cmd = single_erase_cmd; /* Do not replace user supplied command function! 
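find_full_id_nand() above compares the first id_len bytes read from the device against a table entry and, on a match, takes page, OOB and eraseblock geometry directly from that entry instead of decoding extended-ID bytes. A standalone sketch of the prefix match; the table values are hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct full_id_entry {
	const char *name;
	uint8_t id[8];
	int id_len;
	unsigned int pagesize, oobsize, erasesize;
};

/* One hypothetical full-ID entry, in the spirit of the Toshiba parts above. */
static const struct full_id_entry table[] = {
	{ "example 4G 3.3V 8-bit",
	  { 0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08 }, 8,
	  4096, 224, 256 * 1024 },
};

static const struct full_id_entry *find_full_id(const uint8_t *id_data)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!memcmp(table[i].id, id_data, table[i].id_len))
			return &table[i];
	return NULL;
}

int main(void)
{
	uint8_t id[8] = { 0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08 };
	const struct full_id_entry *e = find_full_id(id);

	if (e)
		printf("%s: page %u, oob %u, block %u\n",
		       e->name, e->pagesize, e->oobsize, e->erasesize);
	return 0;
}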
*/ if (mtd->writesize > 512 && chip->cmdfunc == nand_command) @@ -3474,6 +3483,10 @@ int nand_scan_tail(struct mtd_info *mtd) chip->ecc.read_oob = nand_read_oob_std; if (!chip->ecc.write_oob) chip->ecc.write_oob = nand_write_oob_std; + if (!chip->ecc.read_subpage) + chip->ecc.read_subpage = nand_read_subpage; + if (!chip->ecc.write_subpage) + chip->ecc.write_subpage = nand_write_subpage_hwecc; case NAND_ECC_HW_SYNDROME: if ((!chip->ecc.calculate || !chip->ecc.correct || diff --git a/trunk/drivers/mtd/nand/nand_bbt.c b/trunk/drivers/mtd/nand/nand_bbt.c index 916d6e9c0ab1..267264320e06 100644 --- a/trunk/drivers/mtd/nand/nand_bbt.c +++ b/trunk/drivers/mtd/nand/nand_bbt.c @@ -1240,15 +1240,6 @@ int nand_update_bbt(struct mtd_info *mtd, loff_t offs) */ static uint8_t scan_ff_pattern[] = { 0xff, 0xff }; -static uint8_t scan_agand_pattern[] = { 0x1C, 0x71, 0xC7, 0x1C, 0x71, 0xC7 }; - -static struct nand_bbt_descr agand_flashbased = { - .options = NAND_BBT_SCANEMPTY | NAND_BBT_SCANALLPAGES, - .offs = 0x20, - .len = 6, - .pattern = scan_agand_pattern -}; - /* Generic flash bbt descriptors */ static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; @@ -1333,22 +1324,6 @@ int nand_default_bbt(struct mtd_info *mtd) { struct nand_chip *this = mtd->priv; - /* - * Default for AG-AND. We must use a flash based bad block table as the - * devices have factory marked _good_ blocks. Erasing those blocks - * leads to loss of the good / bad information, so we _must_ store this - * information in a good / bad table during startup. - */ - if (this->options & NAND_IS_AND) { - /* Use the default pattern descriptors */ - if (!this->bbt_td) { - this->bbt_td = &bbt_main_descr; - this->bbt_md = &bbt_mirror_descr; - } - this->bbt_options |= NAND_BBT_USE_FLASH; - return nand_scan_bbt(mtd, &agand_flashbased); - } - /* Is a flash based bad block table requested? */ if (this->bbt_options & NAND_BBT_USE_FLASH) { /* Use the default pattern descriptors */ diff --git a/trunk/drivers/mtd/nand/nand_ids.c b/trunk/drivers/mtd/nand/nand_ids.c index 9c612388e5de..683813a46a90 100644 --- a/trunk/drivers/mtd/nand/nand_ids.c +++ b/trunk/drivers/mtd/nand/nand_ids.c @@ -10,163 +10,153 @@ */ #include #include -/* -* Chip ID list -* -* Name. 
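The nand_scan_tail() hunk follows the usual pattern for the core's [REPLACEABLE] hooks: the generic subpage read/write implementations are installed only when the controller driver left the callbacks NULL, so drivers with their own subpage handling are not overridden. A minimal sketch of that defaulting pattern, with illustrative names:

#include <stdio.h>

struct ecc_ops {
	void (*write_subpage)(void);   /* illustrative hook */
};

static void generic_write_subpage(void) { puts("generic implementation"); }
static void driver_write_subpage(void)  { puts("driver implementation"); }

/* Install a default only if the driver did not supply its own hook. */
static void fill_defaults(struct ecc_ops *ops)
{
	if (!ops->write_subpage)
		ops->write_subpage = generic_write_subpage;
}

int main(void)
{
	struct ecc_ops a = { 0 };
	struct ecc_ops b = { driver_write_subpage };

	fill_defaults(&a);
	fill_defaults(&b);
	a.write_subpage();   /* generic implementation */
	b.write_subpage();   /* driver implementation kept */
	return 0;
}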
ID code, pagesize, chipsize in MegaByte, eraseblock size, -* options -* -* Pagesize; 0, 256, 512 -* 0 get this information from the extended chip ID -+ 256 256 Byte page size -* 512 512 Byte page size -*/ -struct nand_flash_dev nand_flash_ids[] = { +#include + +#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS +#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) + #define SP_OPTIONS NAND_NEED_READRDY #define SP_OPTIONS16 (SP_OPTIONS | NAND_BUSWIDTH_16) -#ifdef CONFIG_MTD_NAND_MUSEUM_IDS - {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, SP_OPTIONS}, - {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, SP_OPTIONS}, - {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, SP_OPTIONS}, - - {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, SP_OPTIONS}, - {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, SP_OPTIONS16}, - {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, SP_OPTIONS16}, -#endif - - {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, SP_OPTIONS}, - {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, SP_OPTIONS}, - {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, SP_OPTIONS16}, - {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, SP_OPTIONS16}, - - {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, SP_OPTIONS}, - {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, SP_OPTIONS}, - {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, SP_OPTIONS16}, - {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, SP_OPTIONS16}, - - {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, SP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, SP_OPTIONS}, - {"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, SP_OPTIONS16}, - {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, SP_OPTIONS16}, - - {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 1,8V 8-bit", 0x39, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, SP_OPTIONS}, - {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 1,8V 16-bit", 0x49, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, SP_OPTIONS16}, - {"NAND 128MiB 3,3V 16-bit", 0x59, 512, 128, 0x4000, SP_OPTIONS16}, - - {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, SP_OPTIONS}, +/* + * The chip ID list: + * name, device ID, page size, chip size in MiB, eraseblock size, options + * + * If page size and eraseblock size are 0, the sizes are taken from the + * extended chip ID. + */ +struct nand_flash_dev nand_flash_ids[] = { + /* + * Some incompatible NAND chips share device ID's and so must be + * listed by full ID. We list them first so that we can easily identify + * the most specific match. 
+ */ + {"TC58NVG2S0F 4G 3.3V 8-bit", + { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} }, + SZ_4K, SZ_512, SZ_256K, 0, 8, 224}, + {"TC58NVG3S0F 8G 3.3V 8-bit", + { .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} }, + SZ_4K, SZ_1K, SZ_256K, 0, 8, 232}, + {"TC58NVG5D2 32G 3.3V 8-bit", + { .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} }, + SZ_8K, SZ_4K, SZ_1M, 0, 8, 640}, + {"TC58NVG6D2 64G 3.3V 8-bit", + { .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} }, + SZ_8K, SZ_8K, SZ_2M, 0, 8, 640}, + + LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS), + + LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16), + + LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16), + + LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16), + + LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS), + LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16), + LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16), + + LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS), /* - * These are the new chips with large page size. The pagesize and the - * erasesize is determined from the extended id bytes + * These are the new chips with large page size. Their page size and + * eraseblock size are determined from the extended ID bytes. 
*/ -#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS -#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) /* 512 Megabit */ - {"NAND 64MiB 1,8V 8-bit", 0xA2, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 1,8V 8-bit", 0xA0, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0xF2, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0xD0, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 3,3V 8-bit", 0xF0, 0, 64, 0, LP_OPTIONS}, - {"NAND 64MiB 1,8V 16-bit", 0xB2, 0, 64, 0, LP_OPTIONS16}, - {"NAND 64MiB 1,8V 16-bit", 0xB0, 0, 64, 0, LP_OPTIONS16}, - {"NAND 64MiB 3,3V 16-bit", 0xC2, 0, 64, 0, LP_OPTIONS16}, - {"NAND 64MiB 3,3V 16-bit", 0xC0, 0, 64, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16), /* 1 Gigabit */ - {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, LP_OPTIONS}, - {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, LP_OPTIONS}, - {"NAND 128MiB 3,3V 8-bit", 0xD1, 0, 128, 0, LP_OPTIONS}, - {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, LP_OPTIONS16}, - {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, LP_OPTIONS16}, - {"NAND 128MiB 1,8V 16-bit", 0xAD, 0, 128, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16), /* 2 Gigabit */ - {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, LP_OPTIONS}, - {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, LP_OPTIONS}, - {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, LP_OPTIONS16}, - {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16), /* 4 Gigabit */ - {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, LP_OPTIONS}, - {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, LP_OPTIONS}, - {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, LP_OPTIONS16}, - {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16), /* 8 Gigabit */ - {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, LP_OPTIONS}, - {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 1024, 0, LP_OPTIONS}, - {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, LP_OPTIONS16}, - {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS), + 
EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16), /* 16 Gigabit */ - {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, LP_OPTIONS}, - {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, LP_OPTIONS}, - {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, LP_OPTIONS16}, - {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16), /* 32 Gigabit */ - {"NAND 4GiB 1,8V 8-bit", 0xA7, 0, 4096, 0, LP_OPTIONS}, - {"NAND 4GiB 3,3V 8-bit", 0xD7, 0, 4096, 0, LP_OPTIONS}, - {"NAND 4GiB 1,8V 16-bit", 0xB7, 0, 4096, 0, LP_OPTIONS16}, - {"NAND 4GiB 3,3V 16-bit", 0xC7, 0, 4096, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16), /* 64 Gigabit */ - {"NAND 8GiB 1,8V 8-bit", 0xAE, 0, 8192, 0, LP_OPTIONS}, - {"NAND 8GiB 3,3V 8-bit", 0xDE, 0, 8192, 0, LP_OPTIONS}, - {"NAND 8GiB 1,8V 16-bit", 0xBE, 0, 8192, 0, LP_OPTIONS16}, - {"NAND 8GiB 3,3V 16-bit", 0xCE, 0, 8192, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16), /* 128 Gigabit */ - {"NAND 16GiB 1,8V 8-bit", 0x1A, 0, 16384, 0, LP_OPTIONS}, - {"NAND 16GiB 3,3V 8-bit", 0x3A, 0, 16384, 0, LP_OPTIONS}, - {"NAND 16GiB 1,8V 16-bit", 0x2A, 0, 16384, 0, LP_OPTIONS16}, - {"NAND 16GiB 3,3V 16-bit", 0x4A, 0, 16384, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16), /* 256 Gigabit */ - {"NAND 32GiB 1,8V 8-bit", 0x1C, 0, 32768, 0, LP_OPTIONS}, - {"NAND 32GiB 3,3V 8-bit", 0x3C, 0, 32768, 0, LP_OPTIONS}, - {"NAND 32GiB 1,8V 16-bit", 0x2C, 0, 32768, 0, LP_OPTIONS16}, - {"NAND 32GiB 3,3V 16-bit", 0x4C, 0, 32768, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16), /* 512 Gigabit */ - {"NAND 64GiB 1,8V 8-bit", 0x1E, 0, 65536, 0, LP_OPTIONS}, - {"NAND 64GiB 3,3V 8-bit", 0x3E, 0, 65536, 0, LP_OPTIONS}, - {"NAND 64GiB 1,8V 16-bit", 0x2E, 0, 65536, 0, LP_OPTIONS16}, - {"NAND 64GiB 3,3V 16-bit", 0x4E, 0, 65536, 0, LP_OPTIONS16}, + EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS), + EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16), + EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16), - /* - * Renesas AND 1 Gigabit. Those chips do not support extended id and - * have a strange page/block layout ! 
The chosen minimum erasesize is - * 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page - * planes 1 block = 2 pages, but due to plane arrangement the blocks - * 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 Anyway JFFS2 would - * increase the eraseblock size so we chose a combined one which can be - * erased in one go There are more speed improvements for reads and - * writes possible, but not implemented now - */ - {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, - NAND_IS_AND | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH}, - - {NULL,} + {NULL} }; -/* -* Manufacturer ID list -*/ +/* Manufacturer IDs */ struct nand_manufacturers nand_manuf_ids[] = { {NAND_MFR_TOSHIBA, "Toshiba"}, {NAND_MFR_SAMSUNG, "Samsung"}, diff --git a/trunk/drivers/mtd/nand/nandsim.c b/trunk/drivers/mtd/nand/nandsim.c index 891c52a30e6a..cb38f3d94218 100644 --- a/trunk/drivers/mtd/nand/nandsim.c +++ b/trunk/drivers/mtd/nand/nandsim.c @@ -218,7 +218,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " #define STATE_CMD_READOOB 0x00000005 /* read OOB area */ #define STATE_CMD_ERASE1 0x00000006 /* sector erase first command */ #define STATE_CMD_STATUS 0x00000007 /* read status */ -#define STATE_CMD_STATUS_M 0x00000008 /* read multi-plane status (isn't implemented) */ #define STATE_CMD_SEQIN 0x00000009 /* sequential data input */ #define STATE_CMD_READID 0x0000000A /* read ID */ #define STATE_CMD_ERASE2 0x0000000B /* sector erase second command */ @@ -263,14 +262,13 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " #define NS_OPER_STATES 6 /* Maximum number of states in operation */ #define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */ -#define OPT_PAGE256 0x00000001 /* 256-byte page chips */ #define OPT_PAGE512 0x00000002 /* 512-byte page chips */ #define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ #define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ #define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ #define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ -#define OPT_SMALLPAGE (OPT_PAGE256 | OPT_PAGE512) /* 256 and 512-byte page chips */ +#define OPT_SMALLPAGE (OPT_PAGE512) /* 512-byte page chips */ /* Remove action bits from state */ #define NS_STATE(x) ((x) & ~ACTION_MASK) @@ -406,8 +404,6 @@ static struct nandsim_operations { {OPT_ANY, {STATE_CMD_ERASE1, STATE_ADDR_SEC, STATE_CMD_ERASE2 | ACTION_SECERASE, STATE_READY}}, /* Read status */ {OPT_ANY, {STATE_CMD_STATUS, STATE_DATAOUT_STATUS, STATE_READY}}, - /* Read multi-plane status */ - {OPT_SMARTMEDIA, {STATE_CMD_STATUS_M, STATE_DATAOUT_STATUS_M, STATE_READY}}, /* Read ID */ {OPT_ANY, {STATE_CMD_READID, STATE_ADDR_ZERO, STATE_DATAOUT_ID, STATE_READY}}, /* Large page devices read page */ @@ -699,10 +695,7 @@ static int init_nandsim(struct mtd_info *mtd) ns->geom.secszoob = ns->geom.secsz + ns->geom.oobsz * ns->geom.pgsec; ns->options = 0; - if (ns->geom.pgsz == 256) { - ns->options |= OPT_PAGE256; - } - else if (ns->geom.pgsz == 512) { + if (ns->geom.pgsz == 512) { ns->options |= OPT_PAGE512; if (ns->busw == 8) ns->options |= OPT_PAGE512_8BIT; @@ -769,9 +762,9 @@ static int init_nandsim(struct mtd_info *mtd) } /* Detect how many ID bytes the NAND chip outputs */ - for (i = 0; nand_flash_ids[i].name != NULL; i++) { - if (second_id_byte != nand_flash_ids[i].id) - continue; + for (i = 0; nand_flash_ids[i].name != NULL; i++) { + if (second_id_byte != 
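Because the flash-ID entry now carries both a single legacy device ID and an optional full multi-byte ID, the old one-byte field is renamed to dev_id; that is why nandsim here, and pxa3xx further down, switch from .id to .dev_id. A simplified sketch of the resulting entry shape (layout abbreviated, not the exact kernel definition):

#include <stdint.h>
#include <stdio.h>

struct flash_dev_sketch {
	const char *name;
	union {
		struct {
			uint8_t mfr_id;
			uint8_t dev_id;
		};
		uint8_t id[8];    /* full ID, matched over id_len bytes */
	};
	uint16_t id_len;
};

int main(void)
{
	struct flash_dev_sketch d = { .name = "example", .id = { 0x98, 0xdc } };

	/* dev_id aliases the second full-ID byte, so legacy single-byte
	 * matching keeps working alongside full-ID matching. */
	printf("%s: mfr 0x%02x, dev 0x%02x\n", d.name, d.mfr_id, d.dev_id);
	return 0;
}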
nand_flash_ids[i].dev_id) + continue; } if (ns->busw == 16) @@ -1079,8 +1072,6 @@ static char *get_state_name(uint32_t state) return "STATE_CMD_ERASE1"; case STATE_CMD_STATUS: return "STATE_CMD_STATUS"; - case STATE_CMD_STATUS_M: - return "STATE_CMD_STATUS_M"; case STATE_CMD_SEQIN: return "STATE_CMD_SEQIN"; case STATE_CMD_READID: @@ -1145,7 +1136,6 @@ static int check_command(int cmd) case NAND_CMD_RNDOUTSTART: return 0; - case NAND_CMD_STATUS_MULTI: default: return 1; } @@ -1171,8 +1161,6 @@ static uint32_t get_state_by_command(unsigned command) return STATE_CMD_ERASE1; case NAND_CMD_STATUS: return STATE_CMD_STATUS; - case NAND_CMD_STATUS_MULTI: - return STATE_CMD_STATUS_M; case NAND_CMD_SEQIN: return STATE_CMD_SEQIN; case NAND_CMD_READID: @@ -2306,7 +2294,7 @@ static int __init ns_init_module(void) nand->geom.idbytes = 2; nand->regs.status = NS_STATUS_OK(nand); nand->nxstate = STATE_UNKNOWN; - nand->options |= OPT_PAGE256; /* temporary value */ + nand->options |= OPT_PAGE512; /* temporary value */ nand->ids[0] = first_id_byte; nand->ids[1] = second_id_byte; nand->ids[2] = third_id_byte; diff --git a/trunk/drivers/mtd/nand/nuc900_nand.c b/trunk/drivers/mtd/nand/nuc900_nand.c index a6191198d259..cd6be2ed53a8 100644 --- a/trunk/drivers/mtd/nand/nuc900_nand.c +++ b/trunk/drivers/mtd/nand/nuc900_nand.c @@ -177,15 +177,6 @@ static void nuc900_nand_command_lp(struct mtd_info *mtd, unsigned int command, case NAND_CMD_SEQIN: case NAND_CMD_RNDIN: case NAND_CMD_STATUS: - case NAND_CMD_DEPLETE1: - return; - - case NAND_CMD_STATUS_ERROR: - case NAND_CMD_STATUS_ERROR0: - case NAND_CMD_STATUS_ERROR1: - case NAND_CMD_STATUS_ERROR2: - case NAND_CMD_STATUS_ERROR3: - udelay(chip->chip_delay); return; case NAND_CMD_RESET: diff --git a/trunk/drivers/mtd/nand/omap2.c b/trunk/drivers/mtd/nand/omap2.c index 8e820ddf4e08..81b80af55872 100644 --- a/trunk/drivers/mtd/nand/omap2.c +++ b/trunk/drivers/mtd/nand/omap2.c @@ -1023,9 +1023,9 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip) int status, state = this->state; if (state == FL_ERASING) - timeo += (HZ * 400) / 1000; + timeo += msecs_to_jiffies(400); else - timeo += (HZ * 20) / 1000; + timeo += msecs_to_jiffies(20); writeb(NAND_CMD_STATUS & 0xFF, info->reg.gpmc_nand_command); while (time_before(jiffies, timeo)) { @@ -1701,8 +1701,9 @@ static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt) elm_node = of_find_node_by_phandle(be32_to_cpup(parp)); pdev = of_find_device_by_node(elm_node); info->elm_dev = &pdev->dev; - elm_config(info->elm_dev, bch_type); - info->is_elm_used = true; + + if (elm_config(info->elm_dev, bch_type) == 0) + info->is_elm_used = true; } if (info->is_elm_used && (mtd->writesize <= 4096)) { diff --git a/trunk/drivers/mtd/nand/orion_nand.c b/trunk/drivers/mtd/nand/orion_nand.c index cd72b9299f6b..8fbd00208610 100644 --- a/trunk/drivers/mtd/nand/orion_nand.c +++ b/trunk/drivers/mtd/nand/orion_nand.c @@ -231,18 +231,7 @@ static struct platform_driver orion_nand_driver = { }, }; -static int __init orion_nand_init(void) -{ - return platform_driver_probe(&orion_nand_driver, orion_nand_probe); -} - -static void __exit orion_nand_exit(void) -{ - platform_driver_unregister(&orion_nand_driver); -} - -module_init(orion_nand_init); -module_exit(orion_nand_exit); +module_platform_driver_probe(orion_nand_driver, orion_nand_probe); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Tzachi Perelstein"); diff --git a/trunk/drivers/mtd/nand/ppchameleonevb.c b/trunk/drivers/mtd/nand/ppchameleonevb.c deleted file mode 100644 index 
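The orion_nand hunk is pure boilerplate reduction: module_platform_driver_probe() expands to the same module_init/module_exit pair wrapped around platform_driver_probe(), which suits drivers whose devices are registered before the driver loads. A hedged sketch of the idiom; the bar_* names are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Sketch of the module_platform_driver_probe() idiom used by the patch. */
static int bar_probe(struct platform_device *pdev)
{
	/* one-shot probe; the device must already exist when the driver loads */
	return 0;
}

static struct platform_driver bar_driver = {
	.driver = {
		.name = "bar",
		.owner = THIS_MODULE,
	},
};

/* Replaces hand-written init/exit wrappers around
 * platform_driver_probe()/platform_driver_unregister(). */
module_platform_driver_probe(bar_driver, bar_probe);

MODULE_LICENSE("GPL");

The neighbouring omap2 change is similar in spirit: msecs_to_jiffies(400) states the timeout directly instead of open-coding the (HZ * 400) / 1000 arithmetic, which also stays correct for any HZ value.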
0ddd90e5788f..000000000000 --- a/trunk/drivers/mtd/nand/ppchameleonevb.c +++ /dev/null @@ -1,403 +0,0 @@ -/* - * drivers/mtd/nand/ppchameleonevb.c - * - * Copyright (C) 2003 DAVE Srl (info@wawnet.biz) - * - * Derived from drivers/mtd/nand/edb7312.c - * - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Overview: - * This is a device driver for the NAND flash devices found on the - * PPChameleon/PPChameleonEVB system. - * PPChameleon options (autodetected): - * - BA model: no NAND - * - ME model: 32MB (Samsung K9F5608U0B) - * - HI model: 128MB (Samsung K9F1G08UOM) - * PPChameleonEVB options: - * - 32MB (Samsung K9F5608U0B) - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#undef USE_READY_BUSY_PIN -#define USE_READY_BUSY_PIN -/* see datasheets (tR) */ -#define NAND_BIG_DELAY_US 25 -#define NAND_SMALL_DELAY_US 10 - -/* handy sizes */ -#define SZ_4M 0x00400000 -#define NAND_SMALL_SIZE 0x02000000 -#define NAND_MTD_NAME "ppchameleon-nand" -#define NAND_EVB_MTD_NAME "ppchameleonevb-nand" - -/* GPIO pins used to drive NAND chip mounted on processor module */ -#define NAND_nCE_GPIO_PIN (0x80000000 >> 1) -#define NAND_CLE_GPIO_PIN (0x80000000 >> 2) -#define NAND_ALE_GPIO_PIN (0x80000000 >> 3) -#define NAND_RB_GPIO_PIN (0x80000000 >> 4) -/* GPIO pins used to drive NAND chip mounted on EVB */ -#define NAND_EVB_nCE_GPIO_PIN (0x80000000 >> 14) -#define NAND_EVB_CLE_GPIO_PIN (0x80000000 >> 15) -#define NAND_EVB_ALE_GPIO_PIN (0x80000000 >> 16) -#define NAND_EVB_RB_GPIO_PIN (0x80000000 >> 31) - -/* - * MTD structure for PPChameleonEVB board - */ -static struct mtd_info *ppchameleon_mtd = NULL; -static struct mtd_info *ppchameleonevb_mtd = NULL; - -/* - * Module stuff - */ -static unsigned long ppchameleon_fio_pbase = CFG_NAND0_PADDR; -static unsigned long ppchameleonevb_fio_pbase = CFG_NAND1_PADDR; - -#ifdef MODULE -module_param(ppchameleon_fio_pbase, ulong, 0); -module_param(ppchameleonevb_fio_pbase, ulong, 0); -#else -__setup("ppchameleon_fio_pbase=", ppchameleon_fio_pbase); -__setup("ppchameleonevb_fio_pbase=", ppchameleonevb_fio_pbase); -#endif - -/* - * Define static partitions for flash devices - */ -static struct mtd_partition partition_info_hi[] = { - { .name = "PPChameleon HI Nand Flash", - .offset = 0, - .size = 128 * 1024 * 1024 - } -}; - -static struct mtd_partition partition_info_me[] = { - { .name = "PPChameleon ME Nand Flash", - .offset = 0, - .size = 32 * 1024 * 1024 - } -}; - -static struct mtd_partition partition_info_evb[] = { - { .name = "PPChameleonEVB Nand Flash", - .offset = 0, - .size = 32 * 1024 * 1024 - } -}; - -#define NUM_PARTITIONS 1 - -/* - * hardware specific access to control-lines - */ -static void ppchameleon_hwcontrol(struct mtd_info *mtdinfo, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = mtd->priv; - - if (ctrl & NAND_CTRL_CHANGE) { -#error Missing headerfiles. No way to fix this. 
-tglx - switch (cmd) { - case NAND_CTL_SETCLE: - MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_CLRCLE: - MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_SETALE: - MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_CLRALE: - MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_SETNCE: - MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND0_PADDR); - break; - case NAND_CTL_CLRNCE: - MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND0_PADDR); - break; - } - } - if (cmd != NAND_CMD_NONE) - writeb(cmd, chip->IO_ADDR_W); -} - -static void ppchameleonevb_hwcontrol(struct mtd_info *mtdinfo, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = mtd->priv; - - if (ctrl & NAND_CTRL_CHANGE) { -#error Missing headerfiles. No way to fix this. -tglx - switch (cmd) { - case NAND_CTL_SETCLE: - MACRO_NAND_CTL_SETCLE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_CLRCLE: - MACRO_NAND_CTL_CLRCLE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_SETALE: - MACRO_NAND_CTL_SETALE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_CLRALE: - MACRO_NAND_CTL_CLRALE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_SETNCE: - MACRO_NAND_ENABLE_CE((unsigned long)CFG_NAND1_PADDR); - break; - case NAND_CTL_CLRNCE: - MACRO_NAND_DISABLE_CE((unsigned long)CFG_NAND1_PADDR); - break; - } - } - if (cmd != NAND_CMD_NONE) - writeb(cmd, chip->IO_ADDR_W); -} - -#ifdef USE_READY_BUSY_PIN -/* - * read device ready pin - */ -static int ppchameleon_device_ready(struct mtd_info *minfo) -{ - if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_RB_GPIO_PIN) - return 1; - return 0; -} - -static int ppchameleonevb_device_ready(struct mtd_info *minfo) -{ - if (in_be32((volatile unsigned *)GPIO0_IR) & NAND_EVB_RB_GPIO_PIN) - return 1; - return 0; -} -#endif - -/* - * Main initialization routine - */ -static int __init ppchameleonevb_init(void) -{ - struct nand_chip *this; - void __iomem *ppchameleon_fio_base; - void __iomem *ppchameleonevb_fio_base; - - /********************************* - * Processor module NAND (if any) * - *********************************/ - /* Allocate memory for MTD device structure and private data */ - ppchameleon_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!ppchameleon_mtd) { - printk("Unable to allocate PPChameleon NAND MTD device structure.\n"); - return -ENOMEM; - } - - /* map physical address */ - ppchameleon_fio_base = ioremap(ppchameleon_fio_pbase, SZ_4M); - if (!ppchameleon_fio_base) { - printk("ioremap PPChameleon NAND flash failed\n"); - kfree(ppchameleon_mtd); - return -EIO; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&ppchameleon_mtd[1]); - - /* Initialize structures */ - memset(ppchameleon_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - ppchameleon_mtd->priv = this; - ppchameleon_mtd->owner = THIS_MODULE; - - /* Initialize GPIOs */ - /* Pin mapping for NAND chip */ - /* - CE GPIO_01 - CLE GPIO_02 - ALE GPIO_03 - R/B GPIO_04 - */ - /* output select */ - out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xC0FFFFFF); - /* three-state select */ - out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xC0FFFFFF); - /* enable output driver */ - out_be32((volatile unsigned *)GPIO0_TCR, - in_be32((volatile unsigned *)GPIO0_TCR) | NAND_nCE_GPIO_PIN | 
NAND_CLE_GPIO_PIN | NAND_ALE_GPIO_PIN); -#ifdef USE_READY_BUSY_PIN - /* three-state select */ - out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFF3FFFFF); - /* high-impedecence */ - out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & (~NAND_RB_GPIO_PIN)); - /* input select */ - out_be32((volatile unsigned *)GPIO0_ISR1H, - (in_be32((volatile unsigned *)GPIO0_ISR1H) & 0xFF3FFFFF) | 0x00400000); -#endif - - /* insert callbacks */ - this->IO_ADDR_R = ppchameleon_fio_base; - this->IO_ADDR_W = ppchameleon_fio_base; - this->cmd_ctrl = ppchameleon_hwcontrol; -#ifdef USE_READY_BUSY_PIN - this->dev_ready = ppchameleon_device_ready; -#endif - this->chip_delay = NAND_BIG_DELAY_US; - /* ECC mode */ - this->ecc.mode = NAND_ECC_SOFT; - - /* Scan to find existence of the device (it could not be mounted) */ - if (nand_scan(ppchameleon_mtd, 1)) { - iounmap((void *)ppchameleon_fio_base); - ppchameleon_fio_base = NULL; - kfree(ppchameleon_mtd); - goto nand_evb_init; - } -#ifndef USE_READY_BUSY_PIN - /* Adjust delay if necessary */ - if (ppchameleon_mtd->size == NAND_SMALL_SIZE) - this->chip_delay = NAND_SMALL_DELAY_US; -#endif - - ppchameleon_mtd->name = "ppchameleon-nand"; - - /* Register the partitions */ - mtd_device_parse_register(ppchameleon_mtd, NULL, NULL, - ppchameleon_mtd->size == NAND_SMALL_SIZE ? - partition_info_me : partition_info_hi, - NUM_PARTITIONS); - - nand_evb_init: - /**************************** - * EVB NAND (always present) * - ****************************/ - /* Allocate memory for MTD device structure and private data */ - ppchameleonevb_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!ppchameleonevb_mtd) { - printk("Unable to allocate PPChameleonEVB NAND MTD device structure.\n"); - if (ppchameleon_fio_base) - iounmap(ppchameleon_fio_base); - return -ENOMEM; - } - - /* map physical address */ - ppchameleonevb_fio_base = ioremap(ppchameleonevb_fio_pbase, SZ_4M); - if (!ppchameleonevb_fio_base) { - printk("ioremap PPChameleonEVB NAND flash failed\n"); - kfree(ppchameleonevb_mtd); - if (ppchameleon_fio_base) - iounmap(ppchameleon_fio_base); - return -EIO; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&ppchameleonevb_mtd[1]); - - /* Initialize structures */ - memset(ppchameleonevb_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - ppchameleonevb_mtd->priv = this; - - /* Initialize GPIOs */ - /* Pin mapping for NAND chip */ - /* - CE GPIO_14 - CLE GPIO_15 - ALE GPIO_16 - R/B GPIO_31 - */ - /* output select */ - out_be32((volatile unsigned *)GPIO0_OSRH, in_be32((volatile unsigned *)GPIO0_OSRH) & 0xFFFFFFF0); - out_be32((volatile unsigned *)GPIO0_OSRL, in_be32((volatile unsigned *)GPIO0_OSRL) & 0x3FFFFFFF); - /* three-state select */ - out_be32((volatile unsigned *)GPIO0_TSRH, in_be32((volatile unsigned *)GPIO0_TSRH) & 0xFFFFFFF0); - out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0x3FFFFFFF); - /* enable output driver */ - out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) | NAND_EVB_nCE_GPIO_PIN | - NAND_EVB_CLE_GPIO_PIN | NAND_EVB_ALE_GPIO_PIN); -#ifdef USE_READY_BUSY_PIN - /* three-state select */ - out_be32((volatile unsigned *)GPIO0_TSRL, in_be32((volatile unsigned *)GPIO0_TSRL) & 0xFFFFFFFC); - /* high-impedecence */ - out_be32((volatile unsigned *)GPIO0_TCR, in_be32((volatile unsigned *)GPIO0_TCR) & 
(~NAND_EVB_RB_GPIO_PIN)); - /* input select */ - out_be32((volatile unsigned *)GPIO0_ISR1L, - (in_be32((volatile unsigned *)GPIO0_ISR1L) & 0xFFFFFFFC) | 0x00000001); -#endif - - /* insert callbacks */ - this->IO_ADDR_R = ppchameleonevb_fio_base; - this->IO_ADDR_W = ppchameleonevb_fio_base; - this->cmd_ctrl = ppchameleonevb_hwcontrol; -#ifdef USE_READY_BUSY_PIN - this->dev_ready = ppchameleonevb_device_ready; -#endif - this->chip_delay = NAND_SMALL_DELAY_US; - - /* ECC mode */ - this->ecc.mode = NAND_ECC_SOFT; - - /* Scan to find existence of the device */ - if (nand_scan(ppchameleonevb_mtd, 1)) { - iounmap((void *)ppchameleonevb_fio_base); - kfree(ppchameleonevb_mtd); - if (ppchameleon_fio_base) - iounmap(ppchameleon_fio_base); - return -ENXIO; - } - - ppchameleonevb_mtd->name = NAND_EVB_MTD_NAME; - - /* Register the partitions */ - mtd_device_parse_register(ppchameleonevb_mtd, NULL, NULL, - ppchameleon_mtd->size == NAND_SMALL_SIZE ? - partition_info_me : partition_info_hi, - NUM_PARTITIONS); - - /* Return happy */ - return 0; -} - -module_init(ppchameleonevb_init); - -/* - * Clean up routine - */ -static void __exit ppchameleonevb_cleanup(void) -{ - struct nand_chip *this; - - /* Release resources, unregister device(s) */ - nand_release(ppchameleon_mtd); - nand_release(ppchameleonevb_mtd); - - /* Release iomaps */ - this = (struct nand_chip *) &ppchameleon_mtd[1]; - iounmap((void *) this->IO_ADDR_R); - this = (struct nand_chip *) &ppchameleonevb_mtd[1]; - iounmap((void *) this->IO_ADDR_R); - - /* Free the MTD device structure */ - kfree (ppchameleon_mtd); - kfree (ppchameleonevb_mtd); -} -module_exit(ppchameleonevb_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("DAVE Srl "); -MODULE_DESCRIPTION("MTD map driver for DAVE Srl PPChameleonEVB board"); diff --git a/trunk/drivers/mtd/nand/pxa3xx_nand.c b/trunk/drivers/mtd/nand/pxa3xx_nand.c index 37ee75c7bacb..dec80ca6a5ce 100644 --- a/trunk/drivers/mtd/nand/pxa3xx_nand.c +++ b/trunk/drivers/mtd/nand/pxa3xx_nand.c @@ -989,7 +989,7 @@ static int pxa3xx_nand_scan(struct mtd_info *mtd) } pxa3xx_flash_ids[0].name = f->name; - pxa3xx_flash_ids[0].id = (f->chip_id >> 8) & 0xffff; + pxa3xx_flash_ids[0].dev_id = (f->chip_id >> 8) & 0xffff; pxa3xx_flash_ids[0].pagesize = f->page_size; chipsize = (uint64_t)f->num_blocks * f->page_per_block * f->page_size; pxa3xx_flash_ids[0].chipsize = chipsize >> 20; diff --git a/trunk/drivers/mtd/nand/rtc_from4.c b/trunk/drivers/mtd/nand/rtc_from4.c deleted file mode 100644 index e55b5cfbe145..000000000000 --- a/trunk/drivers/mtd/nand/rtc_from4.c +++ /dev/null @@ -1,624 +0,0 @@ -/* - * drivers/mtd/nand/rtc_from4.c - * - * Copyright (C) 2004 Red Hat, Inc. - * - * Derived from drivers/mtd/nand/spia.c - * Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Overview: - * This is a device driver for the AG-AND flash device found on the - * Renesas Technology Corp. Flash ROM 4-slot interface board (FROM_BOARD4), - * which utilizes the Renesas HN29V1G91T-30 part. - * This chip is a 1 GBibit (128MiB x 8 bits) AG-AND flash device. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* - * MTD structure for Renesas board - */ -static struct mtd_info *rtc_from4_mtd = NULL; - -#define RTC_FROM4_MAX_CHIPS 2 - -/* HS77x9 processor register defines */ -#define SH77X9_BCR1 ((volatile unsigned short *)(0xFFFFFF60)) -#define SH77X9_BCR2 ((volatile unsigned short *)(0xFFFFFF62)) -#define SH77X9_WCR1 ((volatile unsigned short *)(0xFFFFFF64)) -#define SH77X9_WCR2 ((volatile unsigned short *)(0xFFFFFF66)) -#define SH77X9_MCR ((volatile unsigned short *)(0xFFFFFF68)) -#define SH77X9_PCR ((volatile unsigned short *)(0xFFFFFF6C)) -#define SH77X9_FRQCR ((volatile unsigned short *)(0xFFFFFF80)) - -/* - * Values specific to the Renesas Technology Corp. FROM_BOARD4 (used with HS77x9 processor) - */ -/* Address where flash is mapped */ -#define RTC_FROM4_FIO_BASE 0x14000000 - -/* CLE and ALE are tied to address lines 5 & 4, respectively */ -#define RTC_FROM4_CLE (1 << 5) -#define RTC_FROM4_ALE (1 << 4) - -/* address lines A24-A22 used for chip selection */ -#define RTC_FROM4_NAND_ADDR_SLOT3 (0x00800000) -#define RTC_FROM4_NAND_ADDR_SLOT4 (0x00C00000) -#define RTC_FROM4_NAND_ADDR_FPGA (0x01000000) -/* mask address lines A24-A22 used for chip selection */ -#define RTC_FROM4_NAND_ADDR_MASK (RTC_FROM4_NAND_ADDR_SLOT3 | RTC_FROM4_NAND_ADDR_SLOT4 | RTC_FROM4_NAND_ADDR_FPGA) - -/* FPGA status register for checking device ready (bit zero) */ -#define RTC_FROM4_FPGA_SR (RTC_FROM4_NAND_ADDR_FPGA | 0x00000002) -#define RTC_FROM4_DEVICE_READY 0x0001 - -/* FPGA Reed-Solomon ECC Control register */ - -#define RTC_FROM4_RS_ECC_CTL (RTC_FROM4_NAND_ADDR_FPGA | 0x00000050) -#define RTC_FROM4_RS_ECC_CTL_CLR (1 << 7) -#define RTC_FROM4_RS_ECC_CTL_GEN (1 << 6) -#define RTC_FROM4_RS_ECC_CTL_FD_E (1 << 5) - -/* FPGA Reed-Solomon ECC code base */ -#define RTC_FROM4_RS_ECC (RTC_FROM4_NAND_ADDR_FPGA | 0x00000060) -#define RTC_FROM4_RS_ECCN (RTC_FROM4_NAND_ADDR_FPGA | 0x00000080) - -/* FPGA Reed-Solomon ECC check register */ -#define RTC_FROM4_RS_ECC_CHK (RTC_FROM4_NAND_ADDR_FPGA | 0x00000070) -#define RTC_FROM4_RS_ECC_CHK_ERROR (1 << 7) - -#define ERR_STAT_ECC_AVAILABLE 0x20 - -/* Undefine for software ECC */ -#define RTC_FROM4_HWECC 1 - -/* Define as 1 for no virtual erase blocks (in JFFS2) */ -#define RTC_FROM4_NO_VIRTBLOCKS 0 - -/* - * Module stuff - */ -static void __iomem *rtc_from4_fio_base = (void *)P2SEGADDR(RTC_FROM4_FIO_BASE); - -static const struct mtd_partition partition_info[] = { - { - .name = "Renesas flash partition 1", - .offset = 0, - .size = MTDPART_SIZ_FULL}, -}; - -#define NUM_PARTITIONS 1 - -/* - * hardware specific flash bbt decriptors - * Note: this is to allow debugging by disabling - * NAND_BBT_CREATE and/or NAND_BBT_WRITE - * - */ -static uint8_t bbt_pattern[] = { 'B', 'b', 't', '0' }; -static uint8_t mirror_pattern[] = { '1', 't', 'b', 'B' }; - -static struct nand_bbt_descr rtc_from4_bbt_main_descr = { - .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE - | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, - .offs = 40, - .len = 4, - .veroffs = 44, - .maxblocks = 4, - .pattern = bbt_pattern -}; - -static struct nand_bbt_descr rtc_from4_bbt_mirror_descr = { - .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE - | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, - .offs = 40, - .len = 4, - .veroffs = 44, - .maxblocks = 4, - .pattern = mirror_pattern -}; - -#ifdef RTC_FROM4_HWECC - -/* the Reed Solomon control structure */ -static 
struct rs_control *rs_decoder; - -/* - * hardware specific Out Of Band information - */ -static struct nand_ecclayout rtc_from4_nand_oobinfo = { - .eccbytes = 32, - .eccpos = { - 0, 1, 2, 3, 4, 5, 6, 7, - 8, 9, 10, 11, 12, 13, 14, 15, - 16, 17, 18, 19, 20, 21, 22, 23, - 24, 25, 26, 27, 28, 29, 30, 31}, - .oobfree = {{32, 32}} -}; - -#endif - -/* - * rtc_from4_hwcontrol - hardware specific access to control-lines - * @mtd: MTD device structure - * @cmd: hardware control command - * - * Address lines (A5 and A4) are used to control Command and Address Latch - * Enable on this board, so set the read/write address appropriately. - * - * Chip Enable is also controlled by the Chip Select (CS5) and - * Address lines (A24-A22), so no action is required here. - * - */ -static void rtc_from4_hwcontrol(struct mtd_info *mtd, int cmd, - unsigned int ctrl) -{ - struct nand_chip *chip = (mtd->priv); - - if (cmd == NAND_CMD_NONE) - return; - - if (ctrl & NAND_CLE) - writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_CLE); - else - writeb(cmd, chip->IO_ADDR_W | RTC_FROM4_ALE); -} - -/* - * rtc_from4_nand_select_chip - hardware specific chip select - * @mtd: MTD device structure - * @chip: Chip to select (0 == slot 3, 1 == slot 4) - * - * The chip select is based on address lines A24-A22. - * This driver uses flash slots 3 and 4 (A23-A22). - * - */ -static void rtc_from4_nand_select_chip(struct mtd_info *mtd, int chip) -{ - struct nand_chip *this = mtd->priv; - - this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R & ~RTC_FROM4_NAND_ADDR_MASK); - this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W & ~RTC_FROM4_NAND_ADDR_MASK); - - switch (chip) { - - case 0: /* select slot 3 chip */ - this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT3); - this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT3); - break; - case 1: /* select slot 4 chip */ - this->IO_ADDR_R = (void __iomem *)((unsigned long)this->IO_ADDR_R | RTC_FROM4_NAND_ADDR_SLOT4); - this->IO_ADDR_W = (void __iomem *)((unsigned long)this->IO_ADDR_W | RTC_FROM4_NAND_ADDR_SLOT4); - break; - - } -} - -/* - * rtc_from4_nand_device_ready - hardware specific ready/busy check - * @mtd: MTD device structure - * - * This board provides the Ready/Busy state in the status register - * of the FPGA. Bit zero indicates the RDY(1)/BSY(0) signal. - * - */ -static int rtc_from4_nand_device_ready(struct mtd_info *mtd) -{ - unsigned short status; - - status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_FPGA_SR)); - - return (status & RTC_FROM4_DEVICE_READY); - -} - -/* - * deplete - code to perform device recovery in case there was a power loss - * @mtd: MTD device structure - * @chip: Chip to select (0 == slot 3, 1 == slot 4) - * - * If there was a sudden loss of power during an erase operation, a - * "device recovery" operation must be performed when power is restored - * to ensure correct operation. This routine performs the required steps - * for the requested chip. - * - * See page 86 of the data sheet for details. 
- * - */ -static void deplete(struct mtd_info *mtd, int chip) -{ - struct nand_chip *this = mtd->priv; - - /* wait until device is ready */ - while (!this->dev_ready(mtd)) ; - - this->select_chip(mtd, chip); - - /* Send the commands for device recovery, phase 1 */ - this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0000); - this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1); - - /* Send the commands for device recovery, phase 2 */ - this->cmdfunc(mtd, NAND_CMD_DEPLETE1, 0x0000, 0x0004); - this->cmdfunc(mtd, NAND_CMD_DEPLETE2, -1, -1); - -} - -#ifdef RTC_FROM4_HWECC -/* - * rtc_from4_enable_hwecc - hardware specific hardware ECC enable function - * @mtd: MTD device structure - * @mode: I/O mode; read or write - * - * enable hardware ECC for data read or write - * - */ -static void rtc_from4_enable_hwecc(struct mtd_info *mtd, int mode) -{ - volatile unsigned short *rs_ecc_ctl = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CTL); - unsigned short status; - - switch (mode) { - case NAND_ECC_READ: - status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_FD_E; - - *rs_ecc_ctl = status; - break; - - case NAND_ECC_READSYN: - status = 0x00; - - *rs_ecc_ctl = status; - break; - - case NAND_ECC_WRITE: - status = RTC_FROM4_RS_ECC_CTL_CLR | RTC_FROM4_RS_ECC_CTL_GEN | RTC_FROM4_RS_ECC_CTL_FD_E; - - *rs_ecc_ctl = status; - break; - - default: - BUG(); - break; - } - -} - -/* - * rtc_from4_calculate_ecc - hardware specific code to read ECC code - * @mtd: MTD device structure - * @dat: buffer containing the data to generate ECC codes - * @ecc_code ECC codes calculated - * - * The ECC code is calculated by the FPGA. All we have to do is read the values - * from the FPGA registers. - * - * Note: We read from the inverted registers, since data is inverted before - * the code is calculated. So all 0xff data (blank page) results in all 0xff rs code - * - */ -static void rtc_from4_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) -{ - volatile unsigned short *rs_eccn = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECCN); - unsigned short value; - int i; - - for (i = 0; i < 8; i++) { - value = *rs_eccn; - ecc_code[i] = (unsigned char)value; - rs_eccn++; - } - ecc_code[7] |= 0x0f; /* set the last four bits (not used) */ -} - -/* - * rtc_from4_correct_data - hardware specific code to correct data using ECC code - * @mtd: MTD device structure - * @buf: buffer containing the data to generate ECC codes - * @ecc1 ECC codes read - * @ecc2 ECC codes calculated - * - * The FPGA tells us fast, if there's an error or not. If no, we go back happy - * else we read the ecc results from the fpga and call the rs library to decode - * and hopefully correct the error. 
- * - */ -static int rtc_from4_correct_data(struct mtd_info *mtd, const u_char *buf, u_char *ecc1, u_char *ecc2) -{ - int i, j, res; - unsigned short status; - uint16_t par[6], syn[6]; - uint8_t ecc[8]; - volatile unsigned short *rs_ecc; - - status = *((volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC_CHK)); - - if (!(status & RTC_FROM4_RS_ECC_CHK_ERROR)) { - return 0; - } - - /* Read the syndrome pattern from the FPGA and correct the bitorder */ - rs_ecc = (volatile unsigned short *)(rtc_from4_fio_base + RTC_FROM4_RS_ECC); - for (i = 0; i < 8; i++) { - ecc[i] = bitrev8(*rs_ecc); - rs_ecc++; - } - - /* convert into 6 10bit syndrome fields */ - par[5] = rs_decoder->index_of[(((uint16_t) ecc[0] >> 0) & 0x0ff) | (((uint16_t) ecc[1] << 8) & 0x300)]; - par[4] = rs_decoder->index_of[(((uint16_t) ecc[1] >> 2) & 0x03f) | (((uint16_t) ecc[2] << 6) & 0x3c0)]; - par[3] = rs_decoder->index_of[(((uint16_t) ecc[2] >> 4) & 0x00f) | (((uint16_t) ecc[3] << 4) & 0x3f0)]; - par[2] = rs_decoder->index_of[(((uint16_t) ecc[3] >> 6) & 0x003) | (((uint16_t) ecc[4] << 2) & 0x3fc)]; - par[1] = rs_decoder->index_of[(((uint16_t) ecc[5] >> 0) & 0x0ff) | (((uint16_t) ecc[6] << 8) & 0x300)]; - par[0] = (((uint16_t) ecc[6] >> 2) & 0x03f) | (((uint16_t) ecc[7] << 6) & 0x3c0); - - /* Convert to computable syndrome */ - for (i = 0; i < 6; i++) { - syn[i] = par[0]; - for (j = 1; j < 6; j++) - if (par[j] != rs_decoder->nn) - syn[i] ^= rs_decoder->alpha_to[rs_modnn(rs_decoder, par[j] + i * j)]; - - /* Convert to index form */ - syn[i] = rs_decoder->index_of[syn[i]]; - } - - /* Let the library code do its magic. */ - res = decode_rs8(rs_decoder, (uint8_t *) buf, par, 512, syn, 0, NULL, 0xff, NULL); - if (res > 0) { - pr_debug("rtc_from4_correct_data: " "ECC corrected %d errors on read\n", res); - } - return res; -} - -/** - * rtc_from4_errstat - perform additional error status checks - * @mtd: MTD device structure - * @this: NAND chip structure - * @state: state or the operation - * @status: status code returned from read status - * @page: startpage inside the chip, must be called with (page & this->pagemask) - * - * Perform additional error status checks on erase and write failures - * to determine if errors are correctable. For this device, correctable - * 1-bit errors on erase and write are considered acceptable. - * - * note: see pages 34..37 of data sheet for details. 
- * - */ -static int rtc_from4_errstat(struct mtd_info *mtd, struct nand_chip *this, - int state, int status, int page) -{ - int er_stat = 0; - int rtn, retlen; - size_t len; - uint8_t *buf; - int i; - - this->cmdfunc(mtd, NAND_CMD_STATUS_CLEAR, -1, -1); - - if (state == FL_ERASING) { - - for (i = 0; i < 4; i++) { - if (!(status & 1 << (i + 1))) - continue; - this->cmdfunc(mtd, (NAND_CMD_STATUS_ERROR + i + 1), - -1, -1); - rtn = this->read_byte(mtd); - this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1); - - /* err_ecc_not_avail */ - if (!(rtn & ERR_STAT_ECC_AVAILABLE)) - er_stat |= 1 << (i + 1); - } - - } else if (state == FL_WRITING) { - - unsigned long corrected = mtd->ecc_stats.corrected; - - /* single bank write logic */ - this->cmdfunc(mtd, NAND_CMD_STATUS_ERROR, -1, -1); - rtn = this->read_byte(mtd); - this->cmdfunc(mtd, NAND_CMD_STATUS_RESET, -1, -1); - - if (!(rtn & ERR_STAT_ECC_AVAILABLE)) { - /* err_ecc_not_avail */ - er_stat |= 1 << 1; - goto out; - } - - len = mtd->writesize; - buf = kmalloc(len, GFP_KERNEL); - if (!buf) { - er_stat = 1; - goto out; - } - - /* recovery read */ - rtn = nand_do_read(mtd, page, len, &retlen, buf); - - /* if read failed or > 1-bit error corrected */ - if (rtn || (mtd->ecc_stats.corrected - corrected) > 1) - er_stat |= 1 << 1; - kfree(buf); - } -out: - rtn = status; - if (er_stat == 0) { /* if ECC is available */ - rtn = (status & ~NAND_STATUS_FAIL); /* clear the error bit */ - } - - return rtn; -} -#endif - -/* - * Main initialization routine - */ -static int __init rtc_from4_init(void) -{ - struct nand_chip *this; - unsigned short bcr1, bcr2, wcr2; - int i; - int ret; - - /* Allocate memory for MTD device structure and private data */ - rtc_from4_mtd = kmalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); - if (!rtc_from4_mtd) { - printk("Unable to allocate Renesas NAND MTD device structure.\n"); - return -ENOMEM; - } - - /* Get pointer to private data */ - this = (struct nand_chip *)(&rtc_from4_mtd[1]); - - /* Initialize structures */ - memset(rtc_from4_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - - /* Link the private data with the MTD structure */ - rtc_from4_mtd->priv = this; - rtc_from4_mtd->owner = THIS_MODULE; - - /* set area 5 as PCMCIA mode to clear the spec of tDH(Data hold time;9ns min) */ - bcr1 = *SH77X9_BCR1 & ~0x0002; - bcr1 |= 0x0002; - *SH77X9_BCR1 = bcr1; - - /* set */ - bcr2 = *SH77X9_BCR2 & ~0x0c00; - bcr2 |= 0x0800; - *SH77X9_BCR2 = bcr2; - - /* set area 5 wait states */ - wcr2 = *SH77X9_WCR2 & ~0x1c00; - wcr2 |= 0x1c00; - *SH77X9_WCR2 = wcr2; - - /* Set address of NAND IO lines */ - this->IO_ADDR_R = rtc_from4_fio_base; - this->IO_ADDR_W = rtc_from4_fio_base; - /* Set address of hardware control function */ - this->cmd_ctrl = rtc_from4_hwcontrol; - /* Set address of chip select function */ - this->select_chip = rtc_from4_nand_select_chip; - /* command delay time (in us) */ - this->chip_delay = 100; - /* return the status of the Ready/Busy line */ - this->dev_ready = rtc_from4_nand_device_ready; - -#ifdef RTC_FROM4_HWECC - printk(KERN_INFO "rtc_from4_init: using hardware ECC detection.\n"); - - this->ecc.mode = NAND_ECC_HW_SYNDROME; - this->ecc.size = 512; - this->ecc.bytes = 8; - this->ecc.strength = 3; - /* return the status of extra status and ECC checks */ - this->errstat = rtc_from4_errstat; - /* set the nand_oobinfo to support FPGA H/W error detection */ - this->ecc.layout = &rtc_from4_nand_oobinfo; - this->ecc.hwctl = rtc_from4_enable_hwecc; - this->ecc.calculate = 
rtc_from4_calculate_ecc; - this->ecc.correct = rtc_from4_correct_data; - - /* We could create the decoder on demand, if memory is a concern. - * This way we have it handy, if an error happens - * - * Symbolsize is 10 (bits) - * Primitve polynomial is x^10+x^3+1 - * first consecutive root is 0 - * primitve element to generate roots = 1 - * generator polinomial degree = 6 - */ - rs_decoder = init_rs(10, 0x409, 0, 1, 6); - if (!rs_decoder) { - printk(KERN_ERR "Could not create a RS decoder\n"); - ret = -ENOMEM; - goto err_1; - } -#else - printk(KERN_INFO "rtc_from4_init: using software ECC detection.\n"); - - this->ecc.mode = NAND_ECC_SOFT; -#endif - - /* set the bad block tables to support debugging */ - this->bbt_td = &rtc_from4_bbt_main_descr; - this->bbt_md = &rtc_from4_bbt_mirror_descr; - - /* Scan to find existence of the device */ - if (nand_scan(rtc_from4_mtd, RTC_FROM4_MAX_CHIPS)) { - ret = -ENXIO; - goto err_2; - } - - /* Perform 'device recovery' for each chip in case there was a power loss. */ - for (i = 0; i < this->numchips; i++) { - deplete(rtc_from4_mtd, i); - } - -#if RTC_FROM4_NO_VIRTBLOCKS - /* use a smaller erase block to minimize wasted space when a block is bad */ - /* note: this uses eight times as much RAM as using the default and makes */ - /* mounts take four times as long. */ - rtc_from4_mtd->flags |= MTD_NO_VIRTBLOCKS; -#endif - - /* Register the partitions */ - ret = mtd_device_register(rtc_from4_mtd, partition_info, - NUM_PARTITIONS); - if (ret) - goto err_3; - - /* Return happy */ - return 0; -err_3: - nand_release(rtc_from4_mtd); -err_2: - free_rs(rs_decoder); -err_1: - kfree(rtc_from4_mtd); - return ret; -} - -module_init(rtc_from4_init); - -/* - * Clean up routine - */ -static void __exit rtc_from4_cleanup(void) -{ - /* Release resource, unregister partitions */ - nand_release(rtc_from4_mtd); - - /* Free the MTD device structure */ - kfree(rtc_from4_mtd); - -#ifdef RTC_FROM4_HWECC - /* Free the reed solomon resources */ - if (rs_decoder) { - free_rs(rs_decoder); - } -#endif -} - -module_exit(rtc_from4_cleanup); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("d.marlin #include #include +#include #include "sm_common.h" static struct nand_ecclayout nand_oob_sm = { @@ -67,44 +68,37 @@ static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) return error; } - static struct nand_flash_dev nand_smartmedia_flash_ids[] = { - {"SmartMedia 1MiB 5V", 0x6e, 256, 1, 0x1000, 0}, - {"SmartMedia 1MiB 3,3V", 0xe8, 256, 1, 0x1000, 0}, - {"SmartMedia 1MiB 3,3V", 0xec, 256, 1, 0x1000, 0}, - {"SmartMedia 2MiB 3,3V", 0xea, 256, 2, 0x1000, 0}, - {"SmartMedia 2MiB 5V", 0x64, 256, 2, 0x1000, 0}, - {"SmartMedia 2MiB 3,3V ROM", 0x5d, 512, 2, 0x2000, NAND_ROM}, - {"SmartMedia 4MiB 3,3V", 0xe3, 512, 4, 0x2000, 0}, - {"SmartMedia 4MiB 3,3/5V", 0xe5, 512, 4, 0x2000, 0}, - {"SmartMedia 4MiB 5V", 0x6b, 512, 4, 0x2000, 0}, - {"SmartMedia 4MiB 3,3V ROM", 0xd5, 512, 4, 0x2000, NAND_ROM}, - {"SmartMedia 8MiB 3,3V", 0xe6, 512, 8, 0x2000, 0}, - {"SmartMedia 8MiB 3,3V ROM", 0xd6, 512, 8, 0x2000, NAND_ROM}, - {"SmartMedia 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, - {"SmartMedia 16MiB 3,3V ROM", 0x57, 512, 16, 0x4000, NAND_ROM}, - {"SmartMedia 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, - {"SmartMedia 32MiB 3,3V ROM", 0x58, 512, 32, 0x4000, NAND_ROM}, - {"SmartMedia 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, - {"SmartMedia 64MiB 3,3V ROM", 0xd9, 512, 64, 0x4000, NAND_ROM}, - {"SmartMedia 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, - {"SmartMedia 128MiB 3,3V ROM", 0xda, 512, 128, 0x4000, NAND_ROM}, - {"SmartMedia 
256MiB 3,3V", 0x71, 512, 256, 0x4000 }, - {"SmartMedia 256MiB 3,3V ROM", 0x5b, 512, 256, 0x4000, NAND_ROM}, - {NULL,} + LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0), + LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 0xe5, 4, SZ_8K, 0), + LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0), + LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0), + LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM), + LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0), + LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM), + {NULL} }; static struct nand_flash_dev nand_xd_flash_ids[] = { - - {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, - {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, - {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, - {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, - {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD}, - {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD}, - {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD}, - {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD}, - {NULL,} + LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0), + LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0), + LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0), + LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0), + LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD), + LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD), + LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD), + LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD), + {NULL} }; int sm_register_device(struct mtd_info *mtd, int smartmedia) diff --git a/trunk/drivers/mtd/nand/txx9ndfmc.c b/trunk/drivers/mtd/nand/txx9ndfmc.c index e1e8748aa47b..7ed654c68b08 100644 --- a/trunk/drivers/mtd/nand/txx9ndfmc.c +++ b/trunk/drivers/mtd/nand/txx9ndfmc.c @@ -427,18 +427,7 @@ static struct platform_driver txx9ndfmc_driver = { }, }; -static int __init txx9ndfmc_init(void) -{ - return platform_driver_probe(&txx9ndfmc_driver, txx9ndfmc_probe); -} - -static void __exit txx9ndfmc_exit(void) -{ - platform_driver_unregister(&txx9ndfmc_driver); -} - -module_init(txx9ndfmc_init); -module_exit(txx9ndfmc_exit); +module_platform_driver_probe(txx9ndfmc_driver, txx9ndfmc_probe); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver"); diff --git a/trunk/drivers/mtd/ofpart.c b/trunk/drivers/mtd/ofpart.c index 30bd907a260a..553d6d6d5603 100644 --- a/trunk/drivers/mtd/ofpart.c +++ b/trunk/drivers/mtd/ofpart.c @@ -55,6 +55,7 @@ static int parse_ofpart_partitions(struct mtd_info *master, while ((pp = of_get_next_child(node, pp))) { const __be32 *reg; int len; + int a_cells, s_cells; reg = of_get_property(pp, "reg", &len); if (!reg) { @@ -62,8 +63,10 @@ static int parse_ofpart_partitions(struct 
mtd_info *master, continue; } - (*pparts)[i].offset = be32_to_cpu(reg[0]); - (*pparts)[i].size = be32_to_cpu(reg[1]); + a_cells = of_n_addr_cells(pp); + s_cells = of_n_size_cells(pp); + (*pparts)[i].offset = of_read_number(reg, a_cells); + (*pparts)[i].size = of_read_number(reg + a_cells, s_cells); partname = of_get_property(pp, "label", &len); if (!partname) diff --git a/trunk/drivers/mtd/onenand/Kconfig b/trunk/drivers/mtd/onenand/Kconfig index 91467bb03634..ab2607273e80 100644 --- a/trunk/drivers/mtd/onenand/Kconfig +++ b/trunk/drivers/mtd/onenand/Kconfig @@ -40,7 +40,6 @@ config MTD_ONENAND_SAMSUNG config MTD_ONENAND_OTP bool "OneNAND OTP Support" - select HAVE_MTD_OTP help One Block of the NAND Flash Array memory is reserved as a One-Time Programmable Block memory area. @@ -68,10 +67,4 @@ config MTD_ONENAND_2X_PROGRAM And more recent chips -config MTD_ONENAND_SIM - tristate "OneNAND simulator support" - help - The simulator may simulate various OneNAND flash chips for the - OneNAND MTD layer. - endif # MTD_ONENAND diff --git a/trunk/drivers/mtd/onenand/Makefile b/trunk/drivers/mtd/onenand/Makefile index 2b7884c7577e..9d6540e8b3d2 100644 --- a/trunk/drivers/mtd/onenand/Makefile +++ b/trunk/drivers/mtd/onenand/Makefile @@ -10,7 +10,4 @@ obj-$(CONFIG_MTD_ONENAND_GENERIC) += generic.o obj-$(CONFIG_MTD_ONENAND_OMAP2) += omap2.o obj-$(CONFIG_MTD_ONENAND_SAMSUNG) += samsung.o -# Simulator -obj-$(CONFIG_MTD_ONENAND_SIM) += onenand_sim.o - onenand-objs = onenand_base.o onenand_bbt.o diff --git a/trunk/drivers/mtd/onenand/omap2.c b/trunk/drivers/mtd/onenand/omap2.c index eec2aedb4ab8..d98b198edd53 100644 --- a/trunk/drivers/mtd/onenand/omap2.c +++ b/trunk/drivers/mtd/onenand/omap2.c @@ -832,19 +832,7 @@ static struct platform_driver omap2_onenand_driver = { }, }; -static int __init omap2_onenand_init(void) -{ - printk(KERN_INFO "OneNAND driver initializing\n"); - return platform_driver_register(&omap2_onenand_driver); -} - -static void __exit omap2_onenand_exit(void) -{ - platform_driver_unregister(&omap2_onenand_driver); -} - -module_init(omap2_onenand_init); -module_exit(omap2_onenand_exit); +module_platform_driver(omap2_onenand_driver); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/mtd/onenand/onenand_sim.c b/trunk/drivers/mtd/onenand/onenand_sim.c deleted file mode 100644 index 85399e3accda..000000000000 --- a/trunk/drivers/mtd/onenand/onenand_sim.c +++ /dev/null @@ -1,564 +0,0 @@ -/* - * linux/drivers/mtd/onenand/onenand_sim.c - * - * The OneNAND simulator - * - * Copyright © 2005-2007 Samsung Electronics - * Kyungmin Park - * - * Vishak G , Rohit Hagargundgi - * Flex-OneNAND simulator support - * Copyright (C) Samsung Electronics, 2008 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -#ifndef CONFIG_ONENAND_SIM_MANUFACTURER -#define CONFIG_ONENAND_SIM_MANUFACTURER 0xec -#endif - -#ifndef CONFIG_ONENAND_SIM_DEVICE_ID -#define CONFIG_ONENAND_SIM_DEVICE_ID 0x04 -#endif - -#define CONFIG_FLEXONENAND ((CONFIG_ONENAND_SIM_DEVICE_ID >> 9) & 1) - -#ifndef CONFIG_ONENAND_SIM_VERSION_ID -#define CONFIG_ONENAND_SIM_VERSION_ID 0x1e -#endif - -#ifndef CONFIG_ONENAND_SIM_TECHNOLOGY_ID -#define CONFIG_ONENAND_SIM_TECHNOLOGY_ID CONFIG_FLEXONENAND -#endif - -/* Initial boundary values for Flex-OneNAND Simulator */ -#ifndef CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY -#define CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY 0x01 -#endif - -#ifndef CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY -#define CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY 0x01 -#endif - -static int manuf_id = CONFIG_ONENAND_SIM_MANUFACTURER; -static int device_id = CONFIG_ONENAND_SIM_DEVICE_ID; -static int version_id = CONFIG_ONENAND_SIM_VERSION_ID; -static int technology_id = CONFIG_ONENAND_SIM_TECHNOLOGY_ID; -static int boundary[] = { - CONFIG_FLEXONENAND_SIM_DIE0_BOUNDARY, - CONFIG_FLEXONENAND_SIM_DIE1_BOUNDARY, -}; - -struct onenand_flash { - void __iomem *base; - void __iomem *data; -}; - -#define ONENAND_CORE(flash) (flash->data) -#define ONENAND_CORE_SPARE(flash, this, offset) \ - ((flash->data) + (this->chipsize) + (offset >> 5)) - -#define ONENAND_MAIN_AREA(this, offset) \ - (this->base + ONENAND_DATARAM + offset) - -#define ONENAND_SPARE_AREA(this, offset) \ - (this->base + ONENAND_SPARERAM + offset) - -#define ONENAND_GET_WP_STATUS(this) \ - (readw(this->base + ONENAND_REG_WP_STATUS)) - -#define ONENAND_SET_WP_STATUS(v, this) \ - (writew(v, this->base + ONENAND_REG_WP_STATUS)) - -/* It has all 0xff chars */ -#define MAX_ONENAND_PAGESIZE (4096 + 128) -static unsigned char *ffchars; - -#if CONFIG_FLEXONENAND -#define PARTITION_NAME "Flex-OneNAND simulator partition" -#else -#define PARTITION_NAME "OneNAND simulator partition" -#endif - -static struct mtd_partition os_partitions[] = { - { - .name = PARTITION_NAME, - .offset = 0, - .size = MTDPART_SIZ_FULL, - }, -}; - -/* - * OneNAND simulator mtd - */ -struct onenand_info { - struct mtd_info mtd; - struct mtd_partition *parts; - struct onenand_chip onenand; - struct onenand_flash flash; -}; - -static struct onenand_info *info; - -#define DPRINTK(format, args...) \ -do { \ - printk(KERN_DEBUG "%s[%d]: " format "\n", __func__, \ - __LINE__, ##args); \ -} while (0) - -/** - * onenand_lock_handle - Handle Lock scheme - * @this: OneNAND device structure - * @cmd: The command to be sent - * - * Send lock command to OneNAND device. - * The lock scheme depends on chip type. 
- */ -static void onenand_lock_handle(struct onenand_chip *this, int cmd) -{ - int block_lock_scheme; - int status; - - status = ONENAND_GET_WP_STATUS(this); - block_lock_scheme = !(this->options & ONENAND_HAS_CONT_LOCK); - - switch (cmd) { - case ONENAND_CMD_UNLOCK: - case ONENAND_CMD_UNLOCK_ALL: - if (block_lock_scheme) - ONENAND_SET_WP_STATUS(ONENAND_WP_US, this); - else - ONENAND_SET_WP_STATUS(status | ONENAND_WP_US, this); - break; - - case ONENAND_CMD_LOCK: - if (block_lock_scheme) - ONENAND_SET_WP_STATUS(ONENAND_WP_LS, this); - else - ONENAND_SET_WP_STATUS(status | ONENAND_WP_LS, this); - break; - - case ONENAND_CMD_LOCK_TIGHT: - if (block_lock_scheme) - ONENAND_SET_WP_STATUS(ONENAND_WP_LTS, this); - else - ONENAND_SET_WP_STATUS(status | ONENAND_WP_LTS, this); - break; - - default: - break; - } -} - -/** - * onenand_bootram_handle - Handle BootRAM area - * @this: OneNAND device structure - * @cmd: The command to be sent - * - * Emulate BootRAM area. It is possible to do basic operation using BootRAM. - */ -static void onenand_bootram_handle(struct onenand_chip *this, int cmd) -{ - switch (cmd) { - case ONENAND_CMD_READID: - writew(manuf_id, this->base); - writew(device_id, this->base + 2); - writew(version_id, this->base + 4); - break; - - default: - /* REVIST: Handle other commands */ - break; - } -} - -/** - * onenand_update_interrupt - Set interrupt register - * @this: OneNAND device structure - * @cmd: The command to be sent - * - * Update interrupt register. The status depends on command. - */ -static void onenand_update_interrupt(struct onenand_chip *this, int cmd) -{ - int interrupt = ONENAND_INT_MASTER; - - switch (cmd) { - case ONENAND_CMD_READ: - case ONENAND_CMD_READOOB: - interrupt |= ONENAND_INT_READ; - break; - - case ONENAND_CMD_PROG: - case ONENAND_CMD_PROGOOB: - interrupt |= ONENAND_INT_WRITE; - break; - - case ONENAND_CMD_ERASE: - interrupt |= ONENAND_INT_ERASE; - break; - - case ONENAND_CMD_RESET: - interrupt |= ONENAND_INT_RESET; - break; - - default: - break; - } - - writew(interrupt, this->base + ONENAND_REG_INTERRUPT); -} - -/** - * onenand_check_overwrite - Check if over-write happened - * @dest: The destination pointer - * @src: The source pointer - * @count: The length to be check - * - * Returns: 0 on same, otherwise 1 - * - * Compare the source with destination - */ -static int onenand_check_overwrite(void *dest, void *src, size_t count) -{ - unsigned int *s = (unsigned int *) src; - unsigned int *d = (unsigned int *) dest; - int i; - - count >>= 2; - for (i = 0; i < count; i++) - if ((*s++ ^ *d++) != 0) - return 1; - - return 0; -} - -/** - * onenand_data_handle - Handle OneNAND Core and DataRAM - * @this: OneNAND device structure - * @cmd: The command to be sent - * @dataram: Which dataram used - * @offset: The offset to OneNAND Core - * - * Copy data from OneNAND Core to DataRAM (read) - * Copy data from DataRAM to OneNAND Core (write) - * Erase the OneNAND Core (erase) - */ -static void onenand_data_handle(struct onenand_chip *this, int cmd, - int dataram, unsigned int offset) -{ - struct mtd_info *mtd = &info->mtd; - struct onenand_flash *flash = this->priv; - int main_offset, spare_offset, die = 0; - void __iomem *src; - void __iomem *dest; - unsigned int i; - static int pi_operation; - int erasesize, rgn; - - if (dataram) { - main_offset = mtd->writesize; - spare_offset = mtd->oobsize; - } else { - main_offset = 0; - spare_offset = 0; - } - - if (pi_operation) { - die = readw(this->base + ONENAND_REG_START_ADDRESS2); - die >>= ONENAND_DDP_SHIFT; - 
} - - switch (cmd) { - case FLEXONENAND_CMD_PI_ACCESS: - pi_operation = 1; - break; - - case ONENAND_CMD_RESET: - pi_operation = 0; - break; - - case ONENAND_CMD_READ: - src = ONENAND_CORE(flash) + offset; - dest = ONENAND_MAIN_AREA(this, main_offset); - if (pi_operation) { - writew(boundary[die], this->base + ONENAND_DATARAM); - break; - } - memcpy(dest, src, mtd->writesize); - /* Fall through */ - - case ONENAND_CMD_READOOB: - src = ONENAND_CORE_SPARE(flash, this, offset); - dest = ONENAND_SPARE_AREA(this, spare_offset); - memcpy(dest, src, mtd->oobsize); - break; - - case ONENAND_CMD_PROG: - src = ONENAND_MAIN_AREA(this, main_offset); - dest = ONENAND_CORE(flash) + offset; - if (pi_operation) { - boundary[die] = readw(this->base + ONENAND_DATARAM); - break; - } - /* To handle partial write */ - for (i = 0; i < (1 << mtd->subpage_sft); i++) { - int off = i * this->subpagesize; - if (!memcmp(src + off, ffchars, this->subpagesize)) - continue; - if (memcmp(dest + off, ffchars, this->subpagesize) && - onenand_check_overwrite(dest + off, src + off, this->subpagesize)) - printk(KERN_ERR "over-write happened at 0x%08x\n", offset); - memcpy(dest + off, src + off, this->subpagesize); - } - /* Fall through */ - - case ONENAND_CMD_PROGOOB: - src = ONENAND_SPARE_AREA(this, spare_offset); - /* Check all data is 0xff chars */ - if (!memcmp(src, ffchars, mtd->oobsize)) - break; - - dest = ONENAND_CORE_SPARE(flash, this, offset); - if (memcmp(dest, ffchars, mtd->oobsize) && - onenand_check_overwrite(dest, src, mtd->oobsize)) - printk(KERN_ERR "OOB: over-write happened at 0x%08x\n", - offset); - memcpy(dest, src, mtd->oobsize); - break; - - case ONENAND_CMD_ERASE: - if (pi_operation) - break; - - if (FLEXONENAND(this)) { - rgn = flexonenand_region(mtd, offset); - erasesize = mtd->eraseregions[rgn].erasesize; - } else - erasesize = mtd->erasesize; - - memset(ONENAND_CORE(flash) + offset, 0xff, erasesize); - memset(ONENAND_CORE_SPARE(flash, this, offset), 0xff, - (erasesize >> 5)); - break; - - default: - break; - } -} - -/** - * onenand_command_handle - Handle command - * @this: OneNAND device structure - * @cmd: The command to be sent - * - * Emulate OneNAND command. - */ -static void onenand_command_handle(struct onenand_chip *this, int cmd) -{ - unsigned long offset = 0; - int block = -1, page = -1, bufferram = -1; - int dataram = 0; - - switch (cmd) { - case ONENAND_CMD_UNLOCK: - case ONENAND_CMD_LOCK: - case ONENAND_CMD_LOCK_TIGHT: - case ONENAND_CMD_UNLOCK_ALL: - onenand_lock_handle(this, cmd); - break; - - case ONENAND_CMD_BUFFERRAM: - /* Do nothing */ - return; - - default: - block = (int) readw(this->base + ONENAND_REG_START_ADDRESS1); - if (block & (1 << ONENAND_DDP_SHIFT)) { - block &= ~(1 << ONENAND_DDP_SHIFT); - /* The half of chip block */ - block += this->chipsize >> (this->erase_shift + 1); - } - if (cmd == ONENAND_CMD_ERASE) - break; - - page = (int) readw(this->base + ONENAND_REG_START_ADDRESS8); - page = (page >> ONENAND_FPA_SHIFT); - bufferram = (int) readw(this->base + ONENAND_REG_START_BUFFER); - bufferram >>= ONENAND_BSA_SHIFT; - bufferram &= ONENAND_BSA_DATARAM1; - dataram = (bufferram == ONENAND_BSA_DATARAM1) ? 
1 : 0; - break; - } - - if (block != -1) - offset = onenand_addr(this, block); - - if (page != -1) - offset += page << this->page_shift; - - onenand_data_handle(this, cmd, dataram, offset); - - onenand_update_interrupt(this, cmd); -} - -/** - * onenand_writew - [OneNAND Interface] Emulate write operation - * @value: value to write - * @addr: address to write - * - * Write OneNAND register with value - */ -static void onenand_writew(unsigned short value, void __iomem * addr) -{ - struct onenand_chip *this = info->mtd.priv; - - /* BootRAM handling */ - if (addr < this->base + ONENAND_DATARAM) { - onenand_bootram_handle(this, value); - return; - } - /* Command handling */ - if (addr == this->base + ONENAND_REG_COMMAND) - onenand_command_handle(this, value); - - writew(value, addr); -} - -/** - * flash_init - Initialize OneNAND simulator - * @flash: OneNAND simulator data strucutres - * - * Initialize OneNAND simulator. - */ -static int __init flash_init(struct onenand_flash *flash) -{ - int density, size; - int buffer_size; - - flash->base = kzalloc(131072, GFP_KERNEL); - if (!flash->base) { - printk(KERN_ERR "Unable to allocate base address.\n"); - return -ENOMEM; - } - - density = device_id >> ONENAND_DEVICE_DENSITY_SHIFT; - density &= ONENAND_DEVICE_DENSITY_MASK; - size = ((16 << 20) << density); - - ONENAND_CORE(flash) = vmalloc(size + (size >> 5)); - if (!ONENAND_CORE(flash)) { - printk(KERN_ERR "Unable to allocate nand core address.\n"); - kfree(flash->base); - return -ENOMEM; - } - - memset(ONENAND_CORE(flash), 0xff, size + (size >> 5)); - - /* Setup registers */ - writew(manuf_id, flash->base + ONENAND_REG_MANUFACTURER_ID); - writew(device_id, flash->base + ONENAND_REG_DEVICE_ID); - writew(version_id, flash->base + ONENAND_REG_VERSION_ID); - writew(technology_id, flash->base + ONENAND_REG_TECHNOLOGY); - - if (density < 2 && (!CONFIG_FLEXONENAND)) - buffer_size = 0x0400; /* 1KiB page */ - else - buffer_size = 0x0800; /* 2KiB page */ - writew(buffer_size, flash->base + ONENAND_REG_DATA_BUFFER_SIZE); - - return 0; -} - -/** - * flash_exit - Clean up OneNAND simulator - * @flash: OneNAND simulator data structures - * - * Clean up OneNAND simulator. 
- */ -static void flash_exit(struct onenand_flash *flash) -{ - vfree(ONENAND_CORE(flash)); - kfree(flash->base); -} - -static int __init onenand_sim_init(void) -{ - /* Allocate all 0xff chars pointer */ - ffchars = kmalloc(MAX_ONENAND_PAGESIZE, GFP_KERNEL); - if (!ffchars) { - printk(KERN_ERR "Unable to allocate ff chars.\n"); - return -ENOMEM; - } - memset(ffchars, 0xff, MAX_ONENAND_PAGESIZE); - - /* Allocate OneNAND simulator mtd pointer */ - info = kzalloc(sizeof(struct onenand_info), GFP_KERNEL); - if (!info) { - printk(KERN_ERR "Unable to allocate core structures.\n"); - kfree(ffchars); - return -ENOMEM; - } - - /* Override write_word function */ - info->onenand.write_word = onenand_writew; - - if (flash_init(&info->flash)) { - printk(KERN_ERR "Unable to allocate flash.\n"); - kfree(ffchars); - kfree(info); - return -ENOMEM; - } - - info->parts = os_partitions; - - info->onenand.base = info->flash.base; - info->onenand.priv = &info->flash; - - info->mtd.name = "OneNAND simulator"; - info->mtd.priv = &info->onenand; - info->mtd.owner = THIS_MODULE; - - if (onenand_scan(&info->mtd, 1)) { - flash_exit(&info->flash); - kfree(ffchars); - kfree(info); - return -ENXIO; - } - - mtd_device_register(&info->mtd, info->parts, - ARRAY_SIZE(os_partitions)); - - return 0; -} - -static void __exit onenand_sim_exit(void) -{ - struct onenand_chip *this = info->mtd.priv; - struct onenand_flash *flash = this->priv; - - onenand_release(&info->mtd); - flash_exit(flash); - kfree(ffchars); - kfree(info); -} - -module_init(onenand_sim_init); -module_exit(onenand_sim_exit); - -MODULE_AUTHOR("Kyungmin Park "); -MODULE_DESCRIPTION("The OneNAND flash simulator"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/bonding/bond_3ad.c b/trunk/drivers/net/bonding/bond_3ad.c index fc58d118d844..390061d09693 100644 --- a/trunk/drivers/net/bonding/bond_3ad.c +++ b/trunk/drivers/net/bonding/bond_3ad.c @@ -2360,14 +2360,15 @@ int bond_3ad_set_carrier(struct bonding *bond) } /** - * bond_3ad_get_active_agg_info - get information of the active aggregator + * __bond_3ad_get_active_agg_info - get information of the active aggregator * @bond: bonding struct to work on * @ad_info: ad_info struct to fill with the bond's info * * Returns: 0 on success * < 0 on error */ -int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) +int __bond_3ad_get_active_agg_info(struct bonding *bond, + struct ad_info *ad_info) { struct aggregator *aggregator = NULL; struct port *port; @@ -2391,6 +2392,18 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) return -1; } +/* Wrapper used to hold bond->lock so no slave manipulation can occur */ +int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) +{ + int ret; + + read_lock(&bond->lock); + ret = __bond_3ad_get_active_agg_info(bond, ad_info); + read_unlock(&bond->lock); + + return ret; +} + int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) { struct slave *slave, *start_at; @@ -2402,8 +2415,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) struct ad_info ad_info; int res = 1; - if (bond_3ad_get_active_agg_info(bond, &ad_info)) { - pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n", + if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { + pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n", dev->name); goto out; } diff --git a/trunk/drivers/net/bonding/bond_3ad.h b/trunk/drivers/net/bonding/bond_3ad.h index 0cfaa4afdece..5d91ad0cc041 100644 --- 
a/trunk/drivers/net/bonding/bond_3ad.h +++ b/trunk/drivers/net/bonding/bond_3ad.h @@ -273,6 +273,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave); void bond_3ad_adapter_duplex_changed(struct slave *slave); void bond_3ad_handle_link_change(struct slave *slave, char link); int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); +int __bond_3ad_get_active_agg_info(struct bonding *bond, + struct ad_info *ad_info); int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, struct slave *slave); diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index d0aade04e49a..29b846cbfb48 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -1362,6 +1362,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev, slave->dev->features, mask); } + features = netdev_add_tso_features(features, mask); out: read_unlock(&bond->lock); @@ -2555,8 +2556,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_ { struct sk_buff *skb; - pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op, - slave_dev->name, dest_ip, src_ip, vlan_id); + pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op, + slave_dev->name, &dest_ip, &src_ip, vlan_id); skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, NULL, slave_dev->dev_addr, NULL); @@ -2588,7 +2589,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave) __be32 addr; if (!targets[i]) break; - pr_debug("basa: target %x\n", targets[i]); + pr_debug("basa: target %pI4\n", &targets[i]); if (!bond_vlan_used(bond)) { pr_debug("basa: empty vlan: arp_send\n"); addr = bond_confirm_addr(bond->dev, targets[i], 0); @@ -4470,7 +4471,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl) static int bond_check_params(struct bond_params *params) { - int arp_validate_value, fail_over_mac_value, primary_reselect_value; + int arp_validate_value, fail_over_mac_value, primary_reselect_value, i; /* * Convert string parameters. 
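Note on the bond_3ad hunks above: the aggregator query is split into a lockless __bond_3ad_get_active_agg_info() for callers that already run under bond->lock and a wrapper that takes the lock itself, and the pr_debug calls switch to %pI4, which expects a pointer to the __be32 address (hence the new &dest_ip / &targets[i] arguments). A minimal sketch of that locked-wrapper convention, using made-up names rather than the driver's own structures:

/*
 * Illustrative only: __foo_get_active() assumes the caller holds
 * foo->lock; foo_get_active() is for contexts that do not.
 */
#include <linux/spinlock.h>

struct foo {
	rwlock_t lock;
	int active_id;
};

/* Caller must hold f->lock (read or write). */
static int __foo_get_active(struct foo *f, int *id)
{
	if (f->active_id < 0)
		return -1;

	*id = f->active_id;
	return 0;
}

/* Wrapper for callers outside the locked paths. */
static int foo_get_active(struct foo *f, int *id)
{
	int ret;

	read_lock(&f->lock);
	ret = __foo_get_active(f, id);
	read_unlock(&f->lock);

	return ret;
}

The double-underscore prefix is the kernel's usual signal that locking is the caller's responsibility; the procfs and xmit paths in the patch call the __ variant precisely because they already execute under bond->lock.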
@@ -4650,19 +4651,18 @@ static int bond_check_params(struct bond_params *params) arp_interval = BOND_LINK_ARP_INTERV; } - for (arp_ip_count = 0; - (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count]; - arp_ip_count++) { + for (arp_ip_count = 0, i = 0; + (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) { /* not complete check, but should be good enough to catch mistakes */ - __be32 ip = in_aton(arp_ip_target[arp_ip_count]); - if (!isdigit(arp_ip_target[arp_ip_count][0]) || - ip == 0 || ip == htonl(INADDR_BROADCAST)) { + __be32 ip = in_aton(arp_ip_target[i]); + if (!isdigit(arp_ip_target[i][0]) || ip == 0 || + ip == htonl(INADDR_BROADCAST)) { pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", - arp_ip_target[arp_ip_count]); + arp_ip_target[i]); arp_interval = 0; } else { - arp_target[arp_ip_count] = ip; + arp_target[arp_ip_count++] = ip; } } @@ -4696,8 +4696,6 @@ static int bond_check_params(struct bond_params *params) if (miimon) { pr_info("MII link monitoring set to %d ms\n", miimon); } else if (arp_interval) { - int i; - pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):", arp_interval, arp_validate_tbl[arp_validate_value].modename, diff --git a/trunk/drivers/net/bonding/bond_procfs.c b/trunk/drivers/net/bonding/bond_procfs.c index 94d06f1307b8..4060d41f0ee7 100644 --- a/trunk/drivers/net/bonding/bond_procfs.c +++ b/trunk/drivers/net/bonding/bond_procfs.c @@ -130,7 +130,7 @@ static void bond_info_show_master(struct seq_file *seq) seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", ad_select_tbl[bond->params.ad_select].modename); - if (bond_3ad_get_active_agg_info(bond, &ad_info)) { + if (__bond_3ad_get_active_agg_info(bond, &ad_info)) { seq_printf(seq, "bond %s has no active aggregator\n", bond->dev->name); } else { diff --git a/trunk/drivers/net/bonding/bond_sysfs.c b/trunk/drivers/net/bonding/bond_sysfs.c index ea7a388f4843..d7434e0a610e 100644 --- a/trunk/drivers/net/bonding/bond_sysfs.c +++ b/trunk/drivers/net/bonding/bond_sysfs.c @@ -316,6 +316,9 @@ static ssize_t bonding_store_mode(struct device *d, int new_value, ret = count; struct bonding *bond = to_bond(d); + if (!rtnl_trylock()) + return restart_syscall(); + if (bond->dev->flags & IFF_UP) { pr_err("unable to update mode of %s because interface is up.\n", bond->dev->name); @@ -352,6 +355,7 @@ static ssize_t bonding_store_mode(struct device *d, bond->dev->name, bond_mode_tbl[new_value].modename, new_value); out: + rtnl_unlock(); return ret; } static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, @@ -1315,7 +1319,6 @@ static ssize_t bonding_show_mii_status(struct device *d, } static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); - /* * Show current 802.3ad aggregator ID. */ @@ -1329,7 +1332,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - (bond_3ad_get_active_agg_info(bond, &ad_info)) + bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.aggregator_id); } @@ -1351,7 +1354,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - (bond_3ad_get_active_agg_info(bond, &ad_info)) + bond_3ad_get_active_agg_info(bond, &ad_info) ? 
0 : ad_info.ports); } @@ -1373,7 +1376,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - (bond_3ad_get_active_agg_info(bond, &ad_info)) + bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.actor_key); } @@ -1395,7 +1398,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d, if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", - (bond_3ad_get_active_agg_info(bond, &ad_info)) + bond_3ad_get_active_agg_info(bond, &ad_info) ? 0 : ad_info.partner_key); } diff --git a/trunk/drivers/net/caif/Kconfig b/trunk/drivers/net/caif/Kconfig index 7ffc756131a2..547098086773 100644 --- a/trunk/drivers/net/caif/Kconfig +++ b/trunk/drivers/net/caif/Kconfig @@ -43,7 +43,7 @@ config CAIF_HSI config CAIF_VIRTIO tristate "CAIF virtio transport driver" - depends on CAIF + depends on CAIF && HAS_DMA select VHOST_RING select VIRTIO select GENERIC_ALLOCATOR diff --git a/trunk/drivers/net/ethernet/3com/3c59x.c b/trunk/drivers/net/ethernet/3com/3c59x.c index de570a8f8967..072c6f14e8fc 100644 --- a/trunk/drivers/net/ethernet/3com/3c59x.c +++ b/trunk/drivers/net/ethernet/3com/3c59x.c @@ -632,7 +632,6 @@ struct vortex_private { pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ open:1, medialock:1, - must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ large_frames:1, /* accept large frames */ handling_irq:1; /* private in_irq indicator */ /* {get|set}_wol operations are already serialized by rtnl. @@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev, if (rc < 0) goto out; + rc = pci_request_regions(pdev, DRV_NAME); + if (rc < 0) { + pci_disable_device(pdev); + goto out; + } + unit = vortex_cards_found; if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { @@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev, if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { + pci_release_regions(pdev); pci_disable_device(pdev); rc = -ENOMEM; goto out; @@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev, ent->driver_data, unit); if (rc < 0) { pci_iounmap(pdev, ioaddr); + pci_release_regions(pdev); pci_disable_device(pdev); goto out; } @@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, /* PCI-only startup logic */ if (pdev) { - /* EISA resources already marked, so only PCI needs to do this here */ - /* Ignore return value, because Cardbus drivers already allocate for us */ - if (request_region(dev->base_addr, vci->io_size, print_name) != NULL) - vp->must_free_region = 1; - /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master(pdev); @@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, &vp->rx_ring_dma); retval = -ENOMEM; if (!vp->rx_ring) - goto free_region; + goto free_device; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; @@ -1484,9 +1486,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); -free_region: - if (vp->must_free_region) - release_region(dev->base_addr, vci->io_size); +free_device: free_netdev(dev); pr_err(PFX "vortex_probe1 fails. 
Returns %d\n", retval); out: @@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev) + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); - if (vp->must_free_region) - release_region(dev->base_addr, vp->io_size); + + pci_release_regions(pdev); + free_netdev(dev); } diff --git a/trunk/drivers/net/ethernet/adi/bfin_mac.c b/trunk/drivers/net/ethernet/adi/bfin_mac.c index ee705771bd2c..dada66bfe0d6 100644 --- a/trunk/drivers/net/ethernet/adi/bfin_mac.c +++ b/trunk/drivers/net/ethernet/adi/bfin_mac.c @@ -1700,7 +1700,8 @@ static int bfin_mac_probe(struct platform_device *pdev) } bfin_mac_hwtstamp_init(ndev); - if (bfin_phc_init(ndev, &pdev->dev)) { + rc = bfin_phc_init(ndev, &pdev->dev); + if (rc) { dev_err(&pdev->dev, "Cannot register PHC device!\n"); goto out_err_phc; } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index b8fbe266ab68..be59ec4b2c30 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -3313,6 +3313,7 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data, */ static void bnx2x_set_pbd_gso(struct sk_buff *skb, struct eth_tx_parse_bd_e1x *pbd, + struct eth_tx_start_bd *tx_start_bd, u32 xmit_type) { pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); @@ -3326,11 +3327,14 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb, ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); - } else + /* GSO on 57710/57711 needs FW to calculate IP checksum */ + tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM; + } else { pbd->tcp_pseudo_csum = bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0)); + } pbd->global_data |= cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); @@ -3814,7 +3818,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev) bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, xmit_type); else - bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); + bnx2x_set_pbd_gso(skb, pbd_e1x, tx_start_bd, + xmit_type); } /* Set the PBD's parsing_data field if not zero diff --git a/trunk/drivers/net/ethernet/broadcom/tg3.c b/trunk/drivers/net/ethernet/broadcom/tg3.c index 728d42ab2a76..1f2dd928888a 100644 --- a/trunk/drivers/net/ethernet/broadcom/tg3.c +++ b/trunk/drivers/net/ethernet/broadcom/tg3.c @@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits) #define DRV_MODULE_NAME "tg3" #define TG3_MAJ_NUM 3 -#define TG3_MIN_NUM 131 +#define TG3_MIN_NUM 132 #define DRV_MODULE_VERSION \ __stringify(TG3_MAJ_NUM) "." 
__stringify(TG3_MIN_NUM) -#define DRV_MODULE_RELDATE "April 09, 2013" +#define DRV_MODULE_RELDATE "May 21, 2013" #define RESET_KIND_SHUTDOWN 0 #define RESET_KIND_INIT 1 @@ -2957,6 +2957,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed) return 0; } +static bool tg3_phy_power_bug(struct tg3 *tp) +{ + switch (tg3_asic_rev(tp)) { + case ASIC_REV_5700: + case ASIC_REV_5704: + return true; + case ASIC_REV_5780: + if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) + return true; + return false; + case ASIC_REV_5717: + if (!tp->pci_fn) + return true; + return false; + case ASIC_REV_5719: + case ASIC_REV_5720: + if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) && + !tp->pci_fn) + return true; + return false; + } + + return false; +} + static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) { u32 val; @@ -3016,12 +3041,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) /* The PHY should not be powered down on some chips because * of bugs. */ - if (tg3_asic_rev(tp) == ASIC_REV_5700 || - tg3_asic_rev(tp) == ASIC_REV_5704 || - (tg3_asic_rev(tp) == ASIC_REV_5780 && - (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) || - (tg3_asic_rev(tp) == ASIC_REV_5717 && - !tp->pci_fn)) + if (tg3_phy_power_bug(tp)) return; if (tg3_chip_rev(tp) == CHIPREV_5784_AX || @@ -7428,6 +7448,20 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len) return (base > 0xffffdcc0) && (base + len + 8 < base); } +/* Test for TSO DMA buffers that cross into regions which are within MSS bytes + * of any 4GB boundaries: 4G, 8G, etc + */ +static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping, + u32 len, u32 mss) +{ + if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) { + u32 base = (u32) mapping & 0xffffffff; + + return ((base + len + (mss & 0x3fff)) < base); + } + return 0; +} + /* Test for DMA addresses > 40-bit */ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, int len) @@ -7464,6 +7498,9 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget, if (tg3_4g_overflow_test(map, len)) hwbug = true; + if (tg3_4g_tso_overflow_test(tp, map, len, mss)) + hwbug = true; + if (tg3_40bit_overflow_test(tp, map, len)) hwbug = true; @@ -8874,6 +8911,10 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_halt_cpu(tp, RX_CPU_BASE); } + err = tg3_poll_fw(tp); + if (err) + return err; + tw32(GRC_MODE, tp->grc_mode); if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { @@ -8904,10 +8945,6 @@ static int tg3_chip_reset(struct tg3 *tp) tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); - err = tg3_poll_fw(tp); - if (err) - return err; - tg3_mdio_start(tp); if (tg3_flag(tp, PCI_EXPRESS) && diff --git a/trunk/drivers/net/ethernet/brocade/bna/bnad.c b/trunk/drivers/net/ethernet/brocade/bna/bnad.c index ce4a030d3d0c..07f7ef05c3f2 100644 --- a/trunk/drivers/net/ethernet/brocade/bna/bnad.c +++ b/trunk/drivers/net/ethernet/brocade/bna/bnad.c @@ -3236,9 +3236,10 @@ bnad_init(struct bnad *bnad, sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); bnad->work_q = create_singlethread_workqueue(bnad->wq_name); - - if (!bnad->work_q) + if (!bnad->work_q) { + iounmap(bnad->bar0); return -ENOMEM; + } return 0; } diff --git a/trunk/drivers/net/ethernet/cadence/Kconfig b/trunk/drivers/net/ethernet/cadence/Kconfig index 1194446f859a..768285ec10f4 100644 --- a/trunk/drivers/net/ethernet/cadence/Kconfig +++ b/trunk/drivers/net/ethernet/cadence/Kconfig @@ -22,7 +22,7 @@ if NET_CADENCE config ARM_AT91_ETHER tristate "AT91RM9200 Ethernet support" - depends on GENERIC_HARDIRQS + depends 
on GENERIC_HARDIRQS && HAS_DMA select NET_CORE select MACB ---help--- @@ -31,6 +31,7 @@ config ARM_AT91_ETHER config MACB tristate "Cadence MACB/GEM support" + depends on HAS_DMA select PHYLIB ---help--- The Cadence MACB ethernet interface is found on many Atmel AT32 and diff --git a/trunk/drivers/net/ethernet/cadence/macb.c b/trunk/drivers/net/ethernet/cadence/macb.c index 6be513deb17f..c89aa41dd448 100644 --- a/trunk/drivers/net/ethernet/cadence/macb.c +++ b/trunk/drivers/net/ethernet/cadence/macb.c @@ -485,7 +485,8 @@ static void macb_tx_interrupt(struct macb *bp) status = macb_readl(bp, TSR); macb_writel(bp, TSR, status); - macb_writel(bp, ISR, MACB_BIT(TCOMP)); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(TCOMP)); netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", (unsigned long)status); @@ -738,7 +739,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id) * now. */ macb_writel(bp, IDR, MACB_RX_INT_FLAGS); - macb_writel(bp, ISR, MACB_BIT(RCOMP)); + if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) + macb_writel(bp, ISR, MACB_BIT(RCOMP)); if (napi_schedule_prep(&bp->napi)) { netdev_vdbg(bp->dev, "scheduling RX softirq\n"); @@ -1062,6 +1064,17 @@ static void macb_configure_dma(struct macb *bp) } } +/* + * Configure peripheral capacities according to integration options used + */ +static void macb_configure_caps(struct macb *bp) +{ + if (macb_is_gem(bp)) { + if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0) + bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE; + } +} + static void macb_init_hw(struct macb *bp) { u32 config; @@ -1084,6 +1097,7 @@ static void macb_init_hw(struct macb *bp) bp->duplex = DUPLEX_HALF; macb_configure_dma(bp); + macb_configure_caps(bp); /* Initialize TX and RX buffers */ macb_writel(bp, RBQP, bp->rx_ring_dma); diff --git a/trunk/drivers/net/ethernet/cadence/macb.h b/trunk/drivers/net/ethernet/cadence/macb.h index 993d70380688..548c0ecae869 100644 --- a/trunk/drivers/net/ethernet/cadence/macb.h +++ b/trunk/drivers/net/ethernet/cadence/macb.h @@ -300,6 +300,8 @@ #define MACB_REV_SIZE 16 /* Bitfields in DCFG1. 
*/ +#define GEM_IRQCOR_OFFSET 23 +#define GEM_IRQCOR_SIZE 1 #define GEM_DBWDEF_OFFSET 25 #define GEM_DBWDEF_SIZE 3 @@ -323,6 +325,9 @@ #define MACB_MAN_READ 2 #define MACB_MAN_CODE 2 +/* Capability mask bits */ +#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x1 + /* Bit manipulation macros */ #define MACB_BIT(name) \ (1 << MACB_##name##_OFFSET) @@ -574,6 +579,8 @@ struct macb { unsigned int speed; unsigned int duplex; + u32 caps; + phy_interface_t phy_interface; /* AT91RM9200 transmit */ diff --git a/trunk/drivers/net/ethernet/calxeda/Kconfig b/trunk/drivers/net/ethernet/calxeda/Kconfig index aba435c3d4ae..184a063bed5f 100644 --- a/trunk/drivers/net/ethernet/calxeda/Kconfig +++ b/trunk/drivers/net/ethernet/calxeda/Kconfig @@ -1,6 +1,6 @@ config NET_CALXEDA_XGMAC tristate "Calxeda 1G/10G XGMAC Ethernet driver" - depends on HAS_IOMEM + depends on HAS_IOMEM && HAS_DMA select CRC32 help This is the driver for the XGMAC Ethernet IP block found on Calxeda diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c index e1e5bb9d9054..a236ecd27cf3 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2640,9 +2640,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, req = get_mac_list_cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, - OPCODE_COMMON_GET_MAC_LIST, sizeof(*req), - wrb, &get_mac_list_cmd); - + OPCODE_COMMON_GET_MAC_LIST, + get_mac_list_cmd.size, wrb, &get_mac_list_cmd); req->hdr.domain = domain; req->mac_type = MAC_ADDRESS_TYPE_NETWORK; req->perm_override = 1; @@ -2977,22 +2976,17 @@ static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count, for (i = 0; i < desc_count; i++) { desc->desc_len = desc->desc_len ? 
: RESOURCE_DESC_SIZE; if (((void *)desc + desc->desc_len) > - (void *)(buf + max_buf_size)) { - desc = NULL; - break; - } + (void *)(buf + max_buf_size)) + return NULL; if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1) - break; + return desc; desc = (void *)desc + desc->desc_len; } - if (!desc || i == MAX_RESOURCE_DESC) - return NULL; - - return desc; + return NULL; } /* Uses Mbox */ diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c index 6c52a60dcdb7..ca2967b0f18b 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c @@ -780,26 +780,18 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, if (unlikely(!skb)) return skb; - if (vlan_tx_tag_present(skb)) { + if (vlan_tx_tag_present(skb)) vlan_tag = be_get_tx_vlan_tag(adapter, skb); - skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); - if (skb) - skb->vlan_tci = 0; - } - - if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { - if (!vlan_tag) - vlan_tag = adapter->pvid; - if (skip_hw_vlan) - *skip_hw_vlan = true; - } + else if (qnq_async_evt_rcvd(adapter) && adapter->pvid) + vlan_tag = adapter->pvid; if (vlan_tag) { skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); if (unlikely(!skb)) return skb; - skb->vlan_tci = 0; + if (skip_hw_vlan) + *skip_hw_vlan = true; } /* Insert the outer VLAN, if any */ @@ -1827,7 +1819,7 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo) mdelay(1); } else { be_rx_compl_discard(rxo, rxcp); - be_cq_notify(adapter, rx_cq->id, true, 1); + be_cq_notify(adapter, rx_cq->id, false, 1); if (rxcp->num_rcvd == 0) break; } @@ -2533,11 +2525,6 @@ static void be_rx_qs_destroy(struct be_adapter *adapter) q = &rxo->q; if (q->created) { be_cmd_rxq_destroy(adapter, q); - /* After the rxq is invalidated, wait for a grace time - * of 1ms for all dma to end and the flush compl to - * arrive - */ - mdelay(1); be_rx_cq_clean(rxo); } be_queue_free(adapter, q); @@ -2564,6 +2551,7 @@ static int be_close(struct net_device *netdev) * all tx skbs are freed. 
*/ be_tx_compl_clean(adapter); + netif_tx_disable(netdev); be_rx_qs_destroy(adapter); @@ -2672,6 +2660,7 @@ static int be_open(struct net_device *netdev) if (!status) be_link_status_update(adapter, link_status); + netif_tx_start_all_queues(netdev); be_roce_dev_open(adapter); return 0; err: @@ -2783,6 +2772,8 @@ static void be_vf_clear(struct be_adapter *adapter) goto done; } + pci_disable_sriov(adapter->pdev); + for_all_vfs(adapter, vf_cfg, vf) { if (lancer_chip(adapter)) be_cmd_set_mac_list(adapter, NULL, 0, vf + 1); @@ -2792,7 +2783,6 @@ static void be_vf_clear(struct be_adapter *adapter) be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1); } - pci_disable_sriov(adapter->pdev); done: kfree(adapter->vf_cfg); adapter->num_vfs = 0; @@ -2889,13 +2879,8 @@ static int be_vf_setup(struct be_adapter *adapter) dev_info(dev, "Device supports %d VFs and not %d\n", adapter->dev_num_vfs, num_vfs); adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs); - - status = pci_enable_sriov(adapter->pdev, num_vfs); - if (status) { - dev_err(dev, "SRIOV enable failed\n"); - adapter->num_vfs = 0; + if (!adapter->num_vfs) return 0; - } } status = be_vf_setup_init(adapter); @@ -2944,6 +2929,15 @@ static int be_vf_setup(struct be_adapter *adapter) be_cmd_enable_vf(adapter, vf + 1); } + + if (!old_vfs) { + status = pci_enable_sriov(adapter->pdev, adapter->num_vfs); + if (status) { + dev_err(dev, "SRIOV enable failed\n"); + adapter->num_vfs = 0; + goto err; + } + } return 0; err: dev_err(dev, "VF setup failed\n"); @@ -3198,7 +3192,7 @@ static int be_setup(struct be_adapter *adapter) be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); - if (be_physfn(adapter) && num_vfs) { + if (be_physfn(adapter)) { if (adapter->dev_num_vfs) be_vf_setup(adapter); else diff --git a/trunk/drivers/net/ethernet/freescale/fec.h b/trunk/drivers/net/ethernet/freescale/fec.h index ceb4d43c132d..9ce5b7185fda 100644 --- a/trunk/drivers/net/ethernet/freescale/fec.h +++ b/trunk/drivers/net/ethernet/freescale/fec.h @@ -198,6 +198,11 @@ struct bufdesc_ex { #define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) +struct fec_enet_delayed_work { + struct delayed_work delay_work; + bool timeout; +}; + /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and * tx_bd_base always point to the base of the buffer descriptors. The * cur_rx and cur_tx point to the currently available buffer. 
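Note on the fec changes: the new fec_enet_delayed_work wrapper added above pairs a delayed_work with a timeout flag, and, as the fec_main.c hunks further down show, fec_timeout() now only sets that flag and schedules the work, while fec_enet_work() performs the sleeping fec_restart() in process context. A minimal sketch of that deferral pattern, with hypothetical names rather than the driver's own:

/*
 * Sketch of deferring a controller restart from the tx watchdog
 * (atomic context) to a workqueue (process context).
 */
#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_priv {
	struct net_device *ndev;
	struct delayed_work restart_work;
	bool restart_pending;
};

/* Runs from a workqueue; sleeping (napi_disable, BH locks) is allowed. */
static void my_restart_work(struct work_struct *work)
{
	struct my_priv *priv = container_of(work, struct my_priv,
					    restart_work.work);

	if (priv->restart_pending) {
		priv->restart_pending = false;
		/* ...stop, reset and reprogram the MAC here... */
		netif_wake_queue(priv->ndev);
	}
}

/* ndo_tx_timeout runs in atomic context, so only schedule the work. */
static void my_tx_timeout(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	priv->restart_pending = true;
	schedule_delayed_work(&priv->restart_work, 0);
}

The work item is set up with INIT_DELAYED_WORK() at probe time and torn down with cancel_delayed_work_sync() on removal, matching the fec_probe() and fec_drv_remove() hunks below.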
@@ -232,9 +237,6 @@ struct fec_enet_private { /* The ring entries to be free()ed */ struct bufdesc *dirty_tx; - /* hold while accessing the HW like ringbuffer for tx/rx but not MAC */ - spinlock_t hw_lock; - struct platform_device *pdev; int opened; @@ -269,7 +271,7 @@ struct fec_enet_private { int hwts_rx_en; int hwts_tx_en; struct timer_list time_keep; - + struct fec_enet_delayed_work delay_work; }; void fec_ptp_init(struct net_device *ndev, struct platform_device *pdev); diff --git a/trunk/drivers/net/ethernet/freescale/fec_main.c b/trunk/drivers/net/ethernet/freescale/fec_main.c index e25bf832e6b3..85a06037b242 100644 --- a/trunk/drivers/net/ethernet/freescale/fec_main.c +++ b/trunk/drivers/net/ethernet/freescale/fec_main.c @@ -87,6 +87,8 @@ #define FEC_QUIRK_HAS_GBIT (1 << 3) /* Controller has extend desc buffer */ #define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) +/* Controller has hardware checksum support */ +#define FEC_QUIRK_HAS_CSUM (1 << 5) static struct platform_device_id fec_devtype[] = { { @@ -105,9 +107,9 @@ static struct platform_device_id fec_devtype[] = { }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | - FEC_QUIRK_HAS_BUFDESC_EX, + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM, }, { - .name = "mvf-fec", + .name = "mvf600-fec", .driver_data = FEC_QUIRK_ENET_MAC, }, { /* sentinel */ @@ -120,7 +122,7 @@ enum imx_fec_type { IMX27_FEC, /* runs on i.mx27/35/51 */ IMX28_FEC, IMX6Q_FEC, - MVF_FEC, + MVF600_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -128,7 +130,7 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, - { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], }, + { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -445,6 +447,13 @@ fec_restart(struct net_device *ndev, int duplex) u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ + if (netif_running(ndev)) { + netif_device_detach(ndev); + napi_disable(&fep->napi); + netif_stop_queue(ndev); + netif_tx_lock_bh(ndev); + } + /* Whack a reset. We should wait for this. 
*/ writel(1, fep->hwp + FEC_ECNTRL); udelay(10); @@ -605,6 +614,13 @@ fec_restart(struct net_device *ndev, int duplex) /* Enable interrupts we wish to service */ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); + + if (netif_running(ndev)) { + netif_tx_unlock_bh(ndev); + netif_wake_queue(ndev); + napi_enable(&fep->napi); + netif_device_attach(ndev); + } } static void @@ -644,8 +660,22 @@ fec_timeout(struct net_device *ndev) ndev->stats.tx_errors++; - fec_restart(ndev, fep->full_duplex); - netif_wake_queue(ndev); + fep->delay_work.timeout = true; + schedule_delayed_work(&(fep->delay_work.delay_work), 0); +} + +static void fec_enet_work(struct work_struct *work) +{ + struct fec_enet_private *fep = + container_of(work, + struct fec_enet_private, + delay_work.delay_work.work); + + if (fep->delay_work.timeout) { + fep->delay_work.timeout = false; + fec_restart(fep->netdev, fep->full_duplex); + netif_wake_queue(fep->netdev); + } } static void @@ -1024,16 +1054,12 @@ static void fec_enet_adjust_link(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); struct phy_device *phy_dev = fep->phy_dev; - unsigned long flags; - int status_change = 0; - spin_lock_irqsave(&fep->hw_lock, flags); - /* Prevent a state halted on mii error */ if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { phy_dev->state = PHY_RESUMING; - goto spin_unlock; + return; } if (phy_dev->link) { @@ -1061,9 +1087,6 @@ static void fec_enet_adjust_link(struct net_device *ndev) } } -spin_unlock: - spin_unlock_irqrestore(&fep->hw_lock, flags); - if (status_change) phy_print_status(phy_dev); } @@ -1723,6 +1746,8 @@ static const struct net_device_ops fec_netdev_ops = { static int fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); + const struct platform_device_id *id_entry = + platform_get_device_id(fep->pdev); struct bufdesc *cbd_base; /* Allocate memory for buffer descriptors. 
*/ @@ -1732,7 +1757,6 @@ static int fec_enet_init(struct net_device *ndev) return -ENOMEM; memset(cbd_base, 0, PAGE_SIZE); - spin_lock_init(&fep->hw_lock); fep->netdev = ndev; @@ -1755,12 +1779,14 @@ static int fec_enet_init(struct net_device *ndev) writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); - /* enable hw accelerator */ - ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { + /* enable hw accelerator */ + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + } fec_restart(ndev, 0); @@ -1952,6 +1978,7 @@ fec_probe(struct platform_device *pdev) if (fep->bufdesc_ex && fep->ptp_clock) netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); + INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work); return 0; failed_register: @@ -1984,6 +2011,7 @@ fec_drv_remove(struct platform_device *pdev) struct fec_enet_private *fep = netdev_priv(ndev); int i; + cancel_delayed_work_sync(&(fep->delay_work.delay_work)); unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); diff --git a/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c b/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c index 576e4b858fce..083ea2b4d20a 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -524,6 +524,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) return 0; no_clock: + iounmap(etsects->regs); no_ioremap: release_resource(etsects->rsrc); no_resource: diff --git a/trunk/drivers/net/ethernet/ibm/emac/core.c b/trunk/drivers/net/ethernet/ibm/emac/core.c index 4989481c19f0..d300a0c0eafc 100644 --- a/trunk/drivers/net/ethernet/ibm/emac/core.c +++ b/trunk/drivers/net/ethernet/ibm/emac/core.c @@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance *dev) } #ifdef CONFIG_PPC_DCR_NATIVE - /* Enable internal clock source */ - if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) - dcri_clrset(SDR0, SDR0_ETH_CFG, - 0, SDR0_ETH_CFG_ECS << dev->cell_index); + /* + * PPC460EX/GT Embedded Processor Advanced User's Manual + * section 28.10.1 Mode Register 0 (EMACx_MR0) states: + * Note: The PHY must provide a TX Clk in order to perform a soft reset + * of the EMAC. If none is present, select the internal clock + * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1). + * After a soft reset, select the external clock. 
+ */ + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { + if (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff) { + /* No PHY: select internal loop clock before reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + 0, SDR0_ETH_CFG_ECS << dev->cell_index); + } else { + /* PHY present: select external clock before reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); + } + } #endif out_be32(&p->mr0, EMAC_MR0_SRST); @@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev) --n; #ifdef CONFIG_PPC_DCR_NATIVE - /* Enable external clock source */ - if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) - dcri_clrset(SDR0, SDR0_ETH_CFG, - SDR0_ETH_CFG_ECS << dev->cell_index, 0); + if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) { + if (dev->phy_address == 0xffffffff && + dev->phy_map == 0xffffffff) { + /* No PHY: restore external clock source after reset */ + dcri_clrset(SDR0, SDR0_ETH_CFG, + SDR0_ETH_CFG_ECS << dev->cell_index, 0); + } + } #endif if (n) { diff --git a/trunk/drivers/net/ethernet/icplus/ipg.h b/trunk/drivers/net/ethernet/icplus/ipg.h index 6ce027355fcf..abb300a31912 100644 --- a/trunk/drivers/net/ethernet/icplus/ipg.h +++ b/trunk/drivers/net/ethernet/icplus/ipg.h @@ -195,57 +195,57 @@ enum ipg_regs { /* TFD data structure masks. */ /* TFDList, TFC */ -#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF -#define IPG_TFC_FRAMEID 0x000000000000FFFF -#define IPG_TFC_WORDALIGN 0x0000000000030000 -#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 -#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 -#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 -#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 -#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 -#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 -#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 -#define IPG_TFC_TXINDICATE 0x0000000000400000 -#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 -#define IPG_TFC_FRAGCOUNT 0x000000000F000000 -#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 -#define IPG_TFC_TFDDONE 0x0000000080000000 -#define IPG_TFC_VID 0x00000FFF00000000 -#define IPG_TFC_CFI 0x0000100000000000 -#define IPG_TFC_USERPRIORITY 0x0000E00000000000 +#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL +#define IPG_TFC_FRAMEID 0x000000000000FFFFULL +#define IPG_TFC_WORDALIGN 0x0000000000030000ULL +#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL +#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL +#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL +#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL +#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL +#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL +#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL +#define IPG_TFC_TXINDICATE 0x0000000000400000ULL +#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL +#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL +#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL +#define IPG_TFC_TFDDONE 0x0000000080000000ULL +#define IPG_TFC_VID 0x00000FFF00000000ULL +#define IPG_TFC_CFI 0x0000100000000000ULL +#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL /* TFDList, FragInfo */ -#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF -#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF -#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL +#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL +#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL +#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL /* RFD data structure masks. 
 */

 /* RFDList, RFS */
-#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
-#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
-#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
-#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
-#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
-#define IPG_RFS_RXFCSERROR 0x0000000000080000
-#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
-#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
-#define IPG_RFS_VLANDETECTED 0x0000000000400000
-#define IPG_RFS_TCPDETECTED 0x0000000000800000
-#define IPG_RFS_TCPERROR 0x0000000001000000
-#define IPG_RFS_UDPDETECTED 0x0000000002000000
-#define IPG_RFS_UDPERROR 0x0000000004000000
-#define IPG_RFS_IPDETECTED 0x0000000008000000
-#define IPG_RFS_IPERROR 0x0000000010000000
-#define IPG_RFS_FRAMESTART 0x0000000020000000
-#define IPG_RFS_FRAMEEND 0x0000000040000000
-#define IPG_RFS_RFDDONE 0x0000000080000000
-#define IPG_RFS_TCI 0x0000FFFF00000000
+#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
+#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
+#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
+#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
+#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
+#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
+#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
+#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
+#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
+#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
+#define IPG_RFS_TCPERROR 0x0000000001000000ULL
+#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
+#define IPG_RFS_UDPERROR 0x0000000004000000ULL
+#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
+#define IPG_RFS_IPERROR 0x0000000010000000ULL
+#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
+#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
+#define IPG_RFS_RFDDONE 0x0000000080000000ULL
+#define IPG_RFS_TCI 0x0000FFFF00000000ULL

 /* RFDList, FragInfo */
-#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
-#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
-#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
+#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
+#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
+#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL

 /* I/O Register masks.
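The ipg.h change above is purely mechanical: every 64-bit descriptor mask gains a ULL suffix, so each constant is unsigned 64-bit on every target instead of whatever integer type the compiler selects for an unsuffixed (or LL-suffixed) literal. That can silence "constant is too large" style warnings on 32-bit builds and avoids sign-extension surprises when a mask is complemented or shifted. A small, driver-independent illustration (FOO_* names are invented):

#include <linux/types.h>

#define FOO_RSVD_MASK   0x0000FFFFFFFFFFFFULL  /* explicitly unsigned 64-bit */
#define FOO_FRAGLEN     0xFFFF000000000000ULL

static inline u64 foo_frag_len(u64 desc)
{
        /* Mask and shift are well defined for the full 64 bits. */
        return (desc & FOO_FRAGLEN) >> 48;
}

static inline u64 foo_clear_reserved(u64 desc)
{
        /* ~FOO_RSVD_MASK stays unsigned, so no sign extension creeps in. */
        return desc & ~FOO_RSVD_MASK;
}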
*/ diff --git a/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c b/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c index d0afeea181fb..2ad1494efbb3 100644 --- a/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/trunk/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -867,7 +867,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); int reclaimed; - __netif_tx_lock(nq, smp_processor_id()); + __netif_tx_lock_bh(nq); reclaimed = 0; while (reclaimed < budget && txq->tx_desc_count > 0) { @@ -913,7 +913,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force) dev_kfree_skb(skb); } - __netif_tx_unlock(nq); + __netif_tx_unlock_bh(nq); if (reclaimed < budget) mp->work_tx &= ~(1 << txq->index); @@ -2745,7 +2745,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev) INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); - netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); + netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT); init_timer(&mp->rx_oom); mp->rx_oom.data = (unsigned long)mp; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index bcf4d118e98c..c9e6b62dd000 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -889,7 +889,7 @@ static int mlx4_en_flow_replace(struct net_device *dev, .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, }; rule.port = priv->port; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index a69a908614e6..b35f94700093 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -127,7 +127,7 @@ static void mlx4_en_filter_work(struct work_struct *work) .queue_mode = MLX4_NET_TRANS_Q_LIFO, .exclusive = 1, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .port = priv->port, .priority = MLX4_DOMAIN_RFS, }; @@ -448,7 +448,7 @@ static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, .allow_loopback = 1, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .priority = MLX4_DOMAIN_NIC, }; @@ -795,7 +795,7 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port, priv->base_qpn, - MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); if (err) en_err(priv, "Failed enabling promiscuous mode\n"); priv->flags |= MLX4_EN_FLAG_MC_PROMISC; @@ -858,7 +858,7 @@ static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, case MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); if (err) en_err(priv, "Failed disabling promiscuous mode\n"); priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; @@ -919,7 +919,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, err = mlx4_flow_steer_promisc_add(mdev->dev, priv->port, priv->base_qpn, - MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); break; case MLX4_STEERING_MODE_B0: @@ -942,7 +942,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, case MLX4_STEERING_MODE_DEVICE_MANAGED: err = mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - 
MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); break; case MLX4_STEERING_MODE_B0: @@ -1621,10 +1621,10 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) MLX4_EN_FLAG_MC_PROMISC); mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_UPLINK); + MLX4_FS_ALL_DEFAULT); mlx4_flow_steer_promisc_remove(mdev->dev, priv->port, - MLX4_FS_PROMISC_ALL_MULTI); + MLX4_FS_MC_DEFAULT); } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { priv->flags &= ~MLX4_EN_FLAG_PROMISC; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c index 91f2b2c43c12..d3f508697a3d 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_resources.c @@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; if (user_prio >= 0) { context->pri_path.sched_queue |= user_prio << 3; - context->pri_path.feup = 1 << 6; + context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP; } context->pri_path.counter_index = 0xff; context->cqn_send = cpu_to_be32(cqn); diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c index 8e3123a1df88..6000342f9725 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/eq.c @@ -497,8 +497,8 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq) break; case MLX4_EVENT_TYPE_SRQ_LIMIT: - mlx4_warn(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", - __func__); + mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n", + __func__); case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR: if (mlx4_is_master(dev)) { /* forward only to slave owning the SRQ */ diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c index b147bdd40768..58a8e535d698 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [2] = "RSS XOR Hash Function support", [3] = "Device manage flow steering support", [4] = "Automatic MAC reassignment support", - [5] = "Time stamping support" + [5] = "Time stamping support", + [6] = "VST (control vlan insertion/stripping) support", + [7] = "FSM (MAC anti-spoofing) support" }; int i; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c b/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c index ffc78d2cb0cf..f3e804f2a35f 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -645,25 +645,37 @@ static int find_entry(struct mlx4_dev *dev, u8 port, return err; } +static const u8 __promisc_mode[] = { + [MLX4_FS_REGULAR] = 0x0, + [MLX4_FS_ALL_DEFAULT] = 0x1, + [MLX4_FS_MC_DEFAULT] = 0x3, + [MLX4_FS_UC_SNIFFER] = 0x4, + [MLX4_FS_MC_SNIFFER] = 0x5, +}; + +int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev, + enum mlx4_net_trans_promisc_mode flow_type) +{ + if (flow_type >= MLX4_FS_MODE_NUM || flow_type < 0) { + mlx4_err(dev, "Invalid flow type. 
type = %d\n", flow_type); + return -EINVAL; + } + return __promisc_mode[flow_type]; +} +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode); + static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, struct mlx4_net_trans_rule_hw_ctrl *hw) { - static const u8 __promisc_mode[] = { - [MLX4_FS_PROMISC_NONE] = 0x0, - [MLX4_FS_PROMISC_UPLINK] = 0x1, - [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2, - [MLX4_FS_PROMISC_ALL_MULTI] = 0x3, - }; - - u32 dw = 0; - - dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; - dw |= ctrl->exclusive ? (1 << 2) : 0; - dw |= ctrl->allow_loopback ? (1 << 3) : 0; - dw |= __promisc_mode[ctrl->promisc_mode] << 8; - dw |= ctrl->priority << 16; - - hw->ctrl = cpu_to_be32(dw); + u8 flags = 0; + + flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; + flags |= ctrl->exclusive ? (1 << 2) : 0; + flags |= ctrl->allow_loopback ? (1 << 3) : 0; + + hw->flags = flags; + hw->type = __promisc_mode[ctrl->promisc_mode]; + hw->prio = cpu_to_be16(ctrl->priority); hw->port = ctrl->port; hw->qpn = cpu_to_be32(ctrl->qpn); } @@ -677,29 +689,51 @@ const u16 __sw_id_hw[] = { [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 }; +int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) +{ + if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + return __sw_id_hw[id]; +} +EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id); + +static const int __rule_hw_sz[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = + sizeof(struct mlx4_net_trans_rule_hw_eth), + [MLX4_NET_TRANS_RULE_ID_IB] = + sizeof(struct mlx4_net_trans_rule_hw_ib), + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, + [MLX4_NET_TRANS_RULE_ID_IPV4] = + sizeof(struct mlx4_net_trans_rule_hw_ipv4), + [MLX4_NET_TRANS_RULE_ID_TCP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_UDP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) +}; + +int mlx4_hw_rule_sz(struct mlx4_dev *dev, + enum mlx4_net_trans_rule_id id) +{ + if (id >= MLX4_NET_TRANS_RULE_NUM || id < 0) { + mlx4_err(dev, "Invalid network rule id. id = %d\n", id); + return -EINVAL; + } + + return __rule_hw_sz[id]; +} +EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz); + static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, struct _rule_hw *rule_hw) { - static const size_t __rule_hw_sz[] = { - [MLX4_NET_TRANS_RULE_ID_ETH] = - sizeof(struct mlx4_net_trans_rule_hw_eth), - [MLX4_NET_TRANS_RULE_ID_IB] = - sizeof(struct mlx4_net_trans_rule_hw_ib), - [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, - [MLX4_NET_TRANS_RULE_ID_IPV4] = - sizeof(struct mlx4_net_trans_rule_hw_ipv4), - [MLX4_NET_TRANS_RULE_ID_TCP] = - sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), - [MLX4_NET_TRANS_RULE_ID_UDP] = - sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) - }; - if (spec->id >= MLX4_NET_TRANS_RULE_NUM) { - mlx4_err(dev, "Invalid network rule id. 
id = %d\n", spec->id); + if (mlx4_hw_rule_sz(dev, spec->id) < 0) return -EINVAL; - } - memset(rule_hw, 0, __rule_hw_sz[spec->id]); + memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id)); rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); - rule_hw->size = __rule_hw_sz[spec->id] >> 2; + rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2; switch (spec->id) { case MLX4_NET_TRANS_RULE_ID_ETH: @@ -713,12 +747,12 @@ static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, rule_hw->eth.ether_type_enable = 1; rule_hw->eth.ether_type = spec->eth.ether_type; } - rule_hw->eth.vlan_id = spec->eth.vlan_id; - rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk; + rule_hw->eth.vlan_tag = spec->eth.vlan_id; + rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk; break; case MLX4_NET_TRANS_RULE_ID_IB: - rule_hw->ib.qpn = spec->ib.r_qpn; + rule_hw->ib.l3_qpn = spec->ib.l3_qpn; rule_hw->ib.qpn_mask = spec->ib.qpn_msk; memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); @@ -1136,7 +1170,7 @@ int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, struct mlx4_net_trans_rule rule = { .queue_mode = MLX4_NET_TRANS_Q_FIFO, .exclusive = 0, - .promisc_mode = MLX4_FS_PROMISC_NONE, + .promisc_mode = MLX4_FS_REGULAR, .priority = MLX4_DOMAIN_NIC, }; @@ -1229,11 +1263,10 @@ int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, u64 *regid_p; switch (mode) { - case MLX4_FS_PROMISC_UPLINK: - case MLX4_FS_PROMISC_FUNCTION_PORT: + case MLX4_FS_ALL_DEFAULT: regid_p = &dev->regid_promisc_array[port]; break; - case MLX4_FS_PROMISC_ALL_MULTI: + case MLX4_FS_MC_DEFAULT: regid_p = &dev->regid_allmulti_array[port]; break; default: @@ -1260,11 +1293,10 @@ int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, u64 *regid_p; switch (mode) { - case MLX4_FS_PROMISC_UPLINK: - case MLX4_FS_PROMISC_FUNCTION_PORT: + case MLX4_FS_ALL_DEFAULT: regid_p = &dev->regid_promisc_array[port]; break; - case MLX4_FS_PROMISC_ALL_MULTI: + case MLX4_FS_MC_DEFAULT: regid_p = &dev->regid_allmulti_array[port]; break; default: diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h index eac3dae10efe..df15bb6631cc 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -730,85 +730,6 @@ struct mlx4_steer { struct list_head steer_entries[MLX4_NUM_STEERS]; }; -struct mlx4_net_trans_rule_hw_ctrl { - __be32 ctrl; - u8 rsvd1; - u8 funcid; - u8 vep; - u8 port; - __be32 qpn; - __be32 rsvd2; -}; - -struct mlx4_net_trans_rule_hw_ib { - u8 size; - u8 rsvd1; - __be16 id; - u32 rsvd2; - __be32 qpn; - __be32 qpn_mask; - u8 dst_gid[16]; - u8 dst_gid_msk[16]; -} __packed; - -struct mlx4_net_trans_rule_hw_eth { - u8 size; - u8 rsvd; - __be16 id; - u8 rsvd1[6]; - u8 dst_mac[6]; - u16 rsvd2; - u8 dst_mac_msk[6]; - u16 rsvd3; - u8 src_mac[6]; - u16 rsvd4; - u8 src_mac_msk[6]; - u8 rsvd5; - u8 ether_type_enable; - __be16 ether_type; - __be16 vlan_id_msk; - __be16 vlan_id; -} __packed; - -struct mlx4_net_trans_rule_hw_tcp_udp { - u8 size; - u8 rsvd; - __be16 id; - __be16 rsvd1[3]; - __be16 dst_port; - __be16 rsvd2; - __be16 dst_port_msk; - __be16 rsvd3; - __be16 src_port; - __be16 rsvd4; - __be16 src_port_msk; -} __packed; - -struct mlx4_net_trans_rule_hw_ipv4 { - u8 size; - u8 rsvd; - __be16 id; - __be32 rsvd1; - __be32 dst_ip; - __be32 dst_ip_msk; - __be32 src_ip; - __be32 src_ip_msk; -} __packed; - -struct _rule_hw { - union { - struct { - u8 size; - u8 
rsvd; - __be16 id; - }; - struct mlx4_net_trans_rule_hw_eth eth; - struct mlx4_net_trans_rule_hw_ib ib; - struct mlx4_net_trans_rule_hw_ipv4 ipv4; - struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; - }; -}; - enum { MLX4_PCI_DEV_IS_VF = 1 << 0, MLX4_PCI_DEV_FORCE_SENSE_PORT = 1 << 1, diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index e12e0d2e0ee0..1157f028a90f 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev, if (MLX4_QP_ST_RC == qp_type) return -EINVAL; + /* force strip vlan by clear vsd */ + qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN); + if (0 != vp_oper->state.default_vlan) { + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED; + } else { /* priority tagged */ + qpc->pri_path.vlan_control = + MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED | + MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED; + } + + qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN; qpc->pri_path.vlan_index = vp_oper->vlan_idx; - qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/ - qpc->pri_path.feup |= 1 << 3; /* set fvl bit */ + qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN; + qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN; qpc->pri_path.sched_queue &= 0xC7; qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; - mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n", - be32_to_cpu(qpc->local_qpn) & 0xffffff, port, - (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan, - vp_oper->vlan_idx, (int)(qpc->pri_path.feup), - (int)(qpc->pri_path.fl)); } if (vp_oper->state.spoofchk) { - qpc->pri_path.feup |= 1 << 5; /* set fsm bit */; + qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; - mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n", - be32_to_cpu(qpc->local_qpn) & 0xffffff, port, - (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc, - vp_oper->mac_idx); } return 0; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/srq.c b/trunk/drivers/net/ethernet/mellanox/mlx4/srq.c index e329fe1f11b7..79fd269e2c54 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/srq.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/srq.c @@ -298,3 +298,18 @@ void mlx4_cleanup_srq_table(struct mlx4_dev *dev) return; mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap); } + +struct mlx4_srq *mlx4_srq_lookup(struct mlx4_dev *dev, u32 srqn) +{ + struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table; + struct mlx4_srq *srq; + unsigned long flags; + + spin_lock_irqsave(&srq_table->lock, flags); + srq = radix_tree_lookup(&srq_table->tree, + srqn & (dev->caps.num_srqs - 1)); + spin_unlock_irqrestore(&srq_table->lock, flags); + + return srq; +} +EXPORT_SYMBOL_GPL(mlx4_srq_lookup); diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index 90c253b145ef..c1b693cb3df3 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -429,6 +429,7 @@ struct qlcnic_hardware_context { u16 port_type; u16 board_type; + u16 supported_type; u16 link_speed; u16 link_duplex; @@ -906,8 +907,11 @@ struct 
qlcnic_ipaddr { #define QLCNIC_FW_HANG 0x4000 #define QLCNIC_FW_LRO_MSS_CAP 0x8000 #define QLCNIC_TX_INTR_SHARED 0x10000 +#define QLCNIC_APP_CHANGED_FLAGS 0x20000 #define QLCNIC_IS_MSI_FAMILY(adapter) \ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) +#define QLCNIC_IS_TSO_CAPABLE(adapter) \ + ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) #define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 #define QLCNIC_MSIX_TBL_SPACE 8192 @@ -1033,6 +1037,7 @@ struct qlcnic_adapter { spinlock_t rx_mac_learn_lock; u32 file_prd_off; /*File fw product offset*/ u32 fw_version; + u32 offload_flags; const struct firmware *fw; }; @@ -1514,6 +1519,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter); void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); +int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); @@ -1540,6 +1546,8 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16); int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); int qlcnic_read_mac_addr(struct qlcnic_adapter *); int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); +void qlcnic_set_netdev_features(struct qlcnic_adapter *, + struct qlcnic_esw_func_cfg *); void qlcnic_sriov_vf_schedule_multi(struct net_device *); void qlcnic_vf_add_mc_list(struct net_device *, u16); diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index ea790a93ee7c..b4ff1e35a11d 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c @@ -696,15 +696,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter) return 1; } -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time) { u32 data; - unsigned long wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; /* wait for mailbox completion */ do { data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); - if (++wait_time > QLCNIC_MBX_TIMEOUT) { + if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) { data = QLCNIC_RCODE_TIMEOUT; break; } @@ -720,8 +719,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, u16 opcode; u8 mbx_err_code; unsigned long flags; - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd; struct qlcnic_hardware_context *ahw = adapter->ahw; + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0; opcode = LSW(cmd->req.arg[0]); if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { @@ -754,15 +753,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter, /* Signal FW about the impending command */ QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); poll: - rsp = qlcnic_83xx_mbx_poll(adapter); + rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); if (rsp != QLCNIC_RCODE_TIMEOUT) { /* Get the FW response data */ fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - if (mbx_val) - goto poll; + goto poll; } mbx_err_code = QLCNIC_MBX_STATUS(fw_data); rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); @@ -1276,11 +1273,13 @@ int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *adapter, return err; } -static int 
qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) +static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test, + int num_sds_ring) { struct qlcnic_adapter *adapter = netdev_priv(netdev); struct qlcnic_host_sds_ring *sds_ring; struct qlcnic_host_rds_ring *rds_ring; + u16 adapter_state = adapter->is_up; u8 ring; int ret; @@ -1304,6 +1303,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) ret = qlcnic_fw_create_ctx(adapter); if (ret) { qlcnic_detach(adapter); + if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) { + adapter->max_sds_rings = num_sds_ring; + qlcnic_attach(adapter); + } netif_device_attach(netdev); return ret; } @@ -1596,7 +1599,8 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode) if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EBUSY; - ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST, + max_sds_rings); if (ret) goto fail_diag_alloc; @@ -2830,6 +2834,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) break; } config = cmd.rsp.arg[3]; + if (QLC_83XX_SFP_PRESENT(config)) { + switch (ahw->module_type) { + case LINKEVENT_MODULE_OPTICAL_UNKNOWN: + case LINKEVENT_MODULE_OPTICAL_SRLR: + case LINKEVENT_MODULE_OPTICAL_LRM: + case LINKEVENT_MODULE_OPTICAL_SFP_1G: + ahw->supported_type = PORT_FIBRE; + break; + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE: + case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN: + case LINKEVENT_MODULE_TWINAX: + ahw->supported_type = PORT_TP; + break; + default: + ahw->supported_type = PORT_OTHER; + } + } if (config & 1) err = 1; } @@ -2838,7 +2859,8 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter) return config; } -int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) +int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) { u32 config = 0; int status = 0; @@ -2851,6 +2873,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); /* hard code until there is a way to get it from flash */ ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; + + if (netif_running(adapter->netdev) && ahw->has_link_events) { + ethtool_cmd_speed_set(ecmd, ahw->link_speed); + ecmd->duplex = ahw->link_duplex; + ecmd->autoneg = ahw->link_autoneg; + } else { + ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN); + ecmd->duplex = DUPLEX_UNKNOWN; + ecmd->autoneg = AUTONEG_DISABLE; + } + + if (ahw->port_type == QLCNIC_XGBE) { + ecmd->supported = SUPPORTED_1000baseT_Full; + ecmd->advertising = ADVERTISED_1000baseT_Full; + } else { + ecmd->supported = (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_1000baseT_Half | + SUPPORTED_1000baseT_Full); + ecmd->advertising = (ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full | + ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full); + } + + switch (ahw->supported_type) { + case PORT_FIBRE: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_FIBRE; + ecmd->transceiver = XCVR_EXTERNAL; + break; + case PORT_TP: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; + ecmd->port = PORT_TP; + ecmd->transceiver = XCVR_INTERNAL; + break; + default: + ecmd->supported |= SUPPORTED_FIBRE; + ecmd->advertising |= ADVERTISED_FIBRE; + ecmd->port = PORT_OTHER; + ecmd->transceiver = XCVR_EXTERNAL; + break; + } + ecmd->phy_address = 
ahw->physical_port; return status; } @@ -3046,7 +3116,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) return -EIO; - ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); + ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST, + max_sds_rings); if (ret) goto fail_diag_irq; diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h index 1f1d85e6f2af..f5db67fc9f55 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h @@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *); void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); -int qlcnic_83xx_get_settings(struct qlcnic_adapter *); +int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *); int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, struct ethtool_pauseparam *); @@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *); int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); -u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *); +u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *); void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); #endif diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index ab1d8d99cbd5..5e7fb1dfb97b 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -382,8 +382,6 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter) clear_bit(__QLCNIC_RESETTING, &adapter->state); dev_err(&adapter->pdev->dev, "%s:\n", __func__); - adapter->netdev->trans_start = jiffies; - return 0; } @@ -435,10 +433,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter) } done: netif_device_attach(netdev); - if (netif_running(netdev)) { - netif_carrier_on(netdev); - netif_wake_queue(netdev); - } } static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, @@ -642,15 +636,21 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) { + struct qlcnic_hardware_context *ahw = adapter->ahw; + qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); - clear_bit(__QLCNIC_RESETTING, &adapter->state); set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); - adapter->ahw->idc.quiesce_req = 0; - adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; - adapter->ahw->idc.err_code = 0; - adapter->ahw->idc.collect_dump = 0; + + ahw->idc.quiesce_req = 0; + ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; + ahw->idc.err_code = 0; + ahw->idc.collect_dump = 0; + ahw->reset_context = 0; + adapter->tx_timeo_cnt = 0; + + clear_bit(__QLCNIC_RESETTING, &adapter->state); } /** @@ -851,6 +851,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter) /* 
Check for soft reset request */ if (ahw->reset_context && !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { + adapter->ahw->reset_context = 0; qlcnic_83xx_idc_tx_soft_reset(adapter); return ret; } @@ -914,6 +915,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter) static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) { dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); + clear_bit(__QLCNIC_RESETTING, &adapter->state); adapter->ahw->idc.err_code = -EIO; return 0; diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index 08efb4635007..f67652de5a63 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = { "ctx_lro_pkt_cnt", "ctx_ip_csum_error", "ctx_rx_pkts_wo_ctx", - "ctx_rx_pkts_dropped_wo_sts", + "ctx_rx_pkts_drop_wo_sds_on_card", + "ctx_rx_pkts_drop_wo_sds_on_host", "ctx_rx_osized_pkts", "ctx_rx_pkts_dropped_wo_rds", "ctx_rx_unexpected_mcast_pkts", "ctx_invalid_mac_address", - "ctx_rx_rds_ring_prim_attemoted", + "ctx_rx_rds_ring_prim_attempted", "ctx_rx_rds_ring_prim_success", "ctx_num_lro_flows_added", "ctx_num_lro_flows_removed", @@ -251,6 +252,18 @@ static int qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct qlcnic_adapter *adapter = netdev_priv(dev); + + if (qlcnic_82xx_check(adapter)) + return qlcnic_82xx_get_settings(adapter, ecmd); + else if (qlcnic_83xx_check(adapter)) + return qlcnic_83xx_get_settings(adapter, ecmd); + + return -EIO; +} + +int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter, + struct ethtool_cmd *ecmd) +{ struct qlcnic_hardware_context *ahw = adapter->ahw; u32 speed, reg; int check_sfp_module = 0; @@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) } else if (adapter->ahw->port_type == QLCNIC_XGBE) { u32 val = 0; - if (qlcnic_83xx_check(adapter)) - qlcnic_83xx_get_settings(adapter); - else - val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); + val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR); if (val == QLCNIC_PORT_MODE_802_3_AP) { ecmd->supported = SUPPORTED_1000baseT_Full; @@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ecmd->advertising = ADVERTISED_10000baseT_Full; } - if (netif_running(dev) && adapter->ahw->has_link_events) { - if (qlcnic_82xx_check(adapter)) { - reg = QLCRD32(adapter, - P3P_LINK_SPEED_REG(pcifn)); - speed = P3P_LINK_SPEED_VAL(pcifn, reg); - ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; - } - ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed); - ecmd->autoneg = adapter->ahw->link_autoneg; - ecmd->duplex = adapter->ahw->link_duplex; + if (netif_running(adapter->netdev) && ahw->has_link_events) { + reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn)); + speed = P3P_LINK_SPEED_VAL(pcifn, reg); + ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; + ethtool_cmd_speed_set(ecmd, ahw->link_speed); + ecmd->autoneg = ahw->link_autoneg; + ecmd->duplex = ahw->link_duplex; goto skip; } @@ -340,8 +347,8 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: ecmd->advertising |= ADVERTISED_TP; ecmd->supported |= SUPPORTED_TP; - check_sfp_module = netif_running(dev) && - adapter->ahw->has_link_events; + check_sfp_module = netif_running(adapter->netdev) && + ahw->has_link_events; case 
QLCNIC_BRDTYPE_P3P_10G_XFP: ecmd->supported |= SUPPORTED_FIBRE; ecmd->advertising |= ADVERTISED_FIBRE; @@ -355,8 +362,8 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && - adapter->ahw->has_link_events; + check_sfp_module = netif_running(adapter->netdev) && + ahw->has_link_events; } else { ecmd->autoneg = AUTONEG_ENABLE; ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); @@ -365,13 +372,6 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ecmd->port = PORT_TP; } break; - case QLCNIC_BRDTYPE_83XX_10G: - ecmd->autoneg = AUTONEG_DISABLE; - ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP); - ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP); - ecmd->port = PORT_FIBRE; - check_sfp_module = netif_running(dev) && ahw->has_link_events; - break; default: dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", adapter->ahw->board_type); diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c index 6a6512ba9f38..106a12f2a02f 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c @@ -973,16 +973,57 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu) return rc; } +static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter, + netdev_features_t features) +{ + u32 offload_flags = adapter->offload_flags; + + if (offload_flags & BIT_0) { + features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM; + adapter->rx_csum = 1; + if (QLCNIC_IS_TSO_CAPABLE(adapter)) { + if (!(offload_flags & BIT_1)) + features &= ~NETIF_F_TSO; + else + features |= NETIF_F_TSO; + + if (!(offload_flags & BIT_2)) + features &= ~NETIF_F_TSO6; + else + features |= NETIF_F_TSO6; + } + } else { + features &= ~(NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM); + + if (QLCNIC_IS_TSO_CAPABLE(adapter)) + features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + adapter->rx_csum = 0; + } + + return features; +} netdev_features_t qlcnic_fix_features(struct net_device *netdev, netdev_features_t features) { struct qlcnic_adapter *adapter = netdev_priv(netdev); + netdev_features_t changed; - if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) && - qlcnic_82xx_check(adapter)) { - netdev_features_t changed = features ^ netdev->features; - features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); + if (qlcnic_82xx_check(adapter) && + (adapter->flags & QLCNIC_ESWITCH_ENABLED)) { + if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) { + features = qlcnic_process_flags(adapter, features); + } else { + changed = features ^ netdev->features; + features ^= changed & (NETIF_F_RXCSUM | + NETIF_F_IP_CSUM | + NETIF_F_IPV6_CSUM | + NETIF_F_TSO | + NETIF_F_TSO6); + } } if (!(features & NETIF_F_RXCSUM)) diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h index 95b1b5732838..b6818f4356b9 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h @@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata { #define QLCNIC_SET_OWNER 1 #define QLCNIC_CLR_OWNER 0 -#define QLCNIC_MBX_TIMEOUT 10000 +#define QLCNIC_MBX_TIMEOUT 5000 #define QLCNIC_MBX_RSP_OK 1 #define QLCNIC_MBX_PORT_RSP_OK 0x1a diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c 
b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 264d5a4f8153..aeb26a850679 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); int qlcnic_use_msi = 1; -MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); +MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)"); module_param_named(use_msi, qlcnic_use_msi, int, 0444); int qlcnic_use_msi_x = 1; -MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); +MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)"); module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); int qlcnic_auto_fw_reset = 1; -MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); +MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)"); module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); int qlcnic_load_fw_file; -MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); +MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)"); module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); int qlcnic_config_npars; module_param(qlcnic_config_npars, int, 0444); -MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); +MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)"); static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); static void qlcnic_remove(struct pci_dev *pdev); @@ -84,14 +84,9 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *); static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); static int qlcnicvf_start_firmware(struct qlcnic_adapter *); -static void qlcnic_set_netdev_features(struct qlcnic_adapter *, - struct qlcnic_esw_func_cfg *); static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16); static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16); -#define QLCNIC_IS_TSO_CAPABLE(adapter) \ - ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO) - static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) { struct qlcnic_hardware_context *ahw = adapter->ahw; @@ -308,6 +303,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter) return 0; } +static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter) +{ + struct qlcnic_mac_list_s *cur; + struct list_head *head; + + list_for_each(head, &adapter->mac_list) { + cur = list_entry(head, struct qlcnic_mac_list_s, list); + if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) { + qlcnic_sre_macaddr_change(adapter, cur->mac_addr, + 0, QLCNIC_MAC_DEL); + list_del(&cur->list); + kfree(cur); + return; + } + } +} + static int qlcnic_set_mac(struct net_device *netdev, void *p) { struct qlcnic_adapter *adapter = netdev_priv(netdev); @@ -322,11 +334,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) return -EINVAL; + if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN)) + return 0; + if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { netif_device_detach(netdev); qlcnic_napi_disable(adapter); } + qlcnic_delete_adapter_mac(adapter); memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 
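qlcnic_set_mac() above now bails out early when the address is unchanged and calls qlcnic_delete_adapter_mac() so the previously programmed address is dropped from the driver's filter list before the new one is learned. A compact sketch of that cleanup pattern, with invented foo_* names and a plain list of filter entries:

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/slab.h>

struct foo_mac_entry {
        struct list_head list;
        u8 mac_addr[ETH_ALEN];
};

struct foo_adapter {
        struct list_head mac_list;      /* filters already programmed */
        u8 mac_addr[ETH_ALEN];          /* currently active address */
};

/* Stand-in for the firmware call that removes a programmed MAC filter. */
static void foo_fw_del_mac(struct foo_adapter *adapter, const u8 *mac)
{
}

static void foo_delete_adapter_mac(struct foo_adapter *adapter)
{
        struct foo_mac_entry *cur, *tmp;

        list_for_each_entry_safe(cur, tmp, &adapter->mac_list, list) {
                if (ether_addr_equal(adapter->mac_addr, cur->mac_addr)) {
                        foo_fw_del_mac(adapter, cur->mac_addr);
                        list_del(&cur->list);
                        kfree(cur);
                        return;
                }
        }
}

The hunk itself uses list_for_each() plus list_entry(); list_for_each_entry_safe() above is simply the more idiomatic form when the matched node is deleted inside the loop.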
qlcnic_set_multi(adapter->netdev); @@ -1053,8 +1069,6 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter, if (!esw_cfg->promisc_mode) adapter->flags |= QLCNIC_PROMISC_DISABLED; - - qlcnic_set_netdev_features(adapter, esw_cfg); } int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) @@ -1069,51 +1083,23 @@ int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) return -EIO; qlcnic_set_vlan_config(adapter, &esw_cfg); qlcnic_set_eswitch_port_features(adapter, &esw_cfg); + qlcnic_set_netdev_features(adapter, &esw_cfg); return 0; } -static void -qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, - struct qlcnic_esw_func_cfg *esw_cfg) +void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, + struct qlcnic_esw_func_cfg *esw_cfg) { struct net_device *netdev = adapter->netdev; - unsigned long features, vlan_features; if (qlcnic_83xx_check(adapter)) return; - features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | - NETIF_F_IPV6_CSUM | NETIF_F_GRO); - vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM); - - if (QLCNIC_IS_TSO_CAPABLE(adapter)) { - features |= (NETIF_F_TSO | NETIF_F_TSO6); - vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6); - } - - if (netdev->features & NETIF_F_LRO) - features |= NETIF_F_LRO; - - if (esw_cfg->offload_flags & BIT_0) { - netdev->features |= features; - adapter->rx_csum = 1; - if (!(esw_cfg->offload_flags & BIT_1)) { - netdev->features &= ~NETIF_F_TSO; - features &= ~NETIF_F_TSO; - } - if (!(esw_cfg->offload_flags & BIT_2)) { - netdev->features &= ~NETIF_F_TSO6; - features &= ~NETIF_F_TSO6; - } - } else { - netdev->features &= ~features; - features &= ~features; - adapter->rx_csum = 0; - } - - netdev->vlan_features = (features & vlan_features); + adapter->offload_flags = esw_cfg->offload_flags; + adapter->flags |= QLCNIC_APP_CHANGED_FLAGS; + netdev_update_features(netdev); + adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS; } static int @@ -1995,8 +1981,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_enable_pcie_error_reporting(pdev); ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL); - if (!ahw) + if (!ahw) { + err = -ENOMEM; goto err_out_free_res; + } switch (ent->device) { case PCI_DEVICE_ID_QLOGIC_QLE824X: @@ -2032,6 +2020,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic"); if (adapter->qlcnic_wq == NULL) { + err = -ENOMEM; dev_err(&pdev->dev, "Failed to create workqueue\n"); goto err_out_free_netdev; } @@ -2112,6 +2101,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out_disable_msi; } + err = qlcnic_get_act_pci_func(adapter); + if (err) + goto err_out_disable_mbx_intr; + err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); if (err) goto err_out_disable_mbx_intr; @@ -2141,9 +2134,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) break; } - if (qlcnic_get_act_pci_func(adapter)) - goto err_out_disable_mbx_intr; - if (adapter->drv_mac_learn) qlcnic_alloc_lb_filters_mem(adapter); @@ -2481,12 +2471,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev) if (test_bit(__QLCNIC_RESETTING, &adapter->state)) return; - dev_err(&netdev->dev, "transmit timeout, resetting.\n"); - - if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) - adapter->need_fw_reset = 1; - else + if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) { + netdev_info(netdev, "Tx timeout, reset the adapter.\n"); + if 
(qlcnic_82xx_check(adapter)) + adapter->need_fw_reset = 1; + else if (qlcnic_83xx_check(adapter)) + qlcnic_83xx_idc_request_reset(adapter, + QLCNIC_FORCE_FW_DUMP_KEY); + } else { + netdev_info(netdev, "Tx timeout, reset adapter context.\n"); adapter->ahw->reset_context = 1; + } } static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) @@ -3123,10 +3118,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter) if (adapter->need_fw_reset) goto detach; - if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) { + if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) qlcnic_reset_hw_context(adapter); - adapter->netdev->trans_start = jiffies; - } return 0; } diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c index 44d547d78b84..196b2d100407 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c @@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter) static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, u32 *pay, u8 pci_func, u8 size) { + u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0; struct qlcnic_hardware_context *ahw = adapter->ahw; unsigned long flags; - u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val; u16 opcode; u8 mbx_err_code; int i, j; @@ -330,15 +330,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, * assume something is wrong. */ poll: - rsp = qlcnic_83xx_mbx_poll(adapter); + rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time); if (rsp != QLCNIC_RCODE_TIMEOUT) { /* Get the FW response data */ fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { __qlcnic_83xx_process_aen(adapter); - mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); - if (mbx_val) - goto poll; + goto poll; } mbx_err_code = QLCNIC_MBX_STATUS(fw_data); rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); @@ -1736,7 +1734,6 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter) if (!qlcnic_sriov_vf_reinit_driver(adapter)) { qlcnic_sriov_vf_attach(adapter); - adapter->netdev->trans_start = jiffies; adapter->tx_timeo_cnt = 0; adapter->reset_ctx_cnt = 0; adapter->fw_fail_cnt = 0; diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index c81be2da119b..1a66ccded235 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1133,9 +1133,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf, if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) return -EINVAL; - if (!(cmd->req.arg[1] & BIT_8)) - return -EINVAL; - return 0; } diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 4e22e794a186..e7a2fe21b649 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c @@ -544,6 +544,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file, switch (esw_cfg[i].op_mode) { case QLCNIC_PORT_DEFAULTS: qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); + rtnl_lock(); + qlcnic_set_netdev_features(adapter, &esw_cfg[i]); + rtnl_unlock(); break; case QLCNIC_ADD_VLAN: qlcnic_set_vlan_config(adapter, &esw_cfg[i]); diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c 
b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 87463bc701a6..50235d201592 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring, if (pci_dma_mapping_error(qdev->pdev, map)) { __free_pages(rx_ring->pg_chunk.page, qdev->lbq_buf_order); + rx_ring->pg_chunk.page = NULL; netif_err(qdev, drv, qdev->ndev, "PCI mapping failed.\n"); return -ENOMEM; @@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring curr_idx = 0; } + if (rx_ring->pg_chunk.page) { + pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map, + ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE); + put_page(rx_ring->pg_chunk.page); + rx_ring->pg_chunk.page = NULL; + } } static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) diff --git a/trunk/drivers/net/ethernet/realtek/8139cp.c b/trunk/drivers/net/ethernet/realtek/8139cp.c index 7d1fb9ad1296..03523459c406 100644 --- a/trunk/drivers/net/ethernet/realtek/8139cp.c +++ b/trunk/drivers/net/ethernet/realtek/8139cp.c @@ -1136,6 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp) cp->dev->stats.tx_dropped++; } } + netdev_reset_queue(cp->dev); memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c index 79c520b64fdd..393f961a013c 100644 --- a/trunk/drivers/net/ethernet/realtek/r8169.c +++ b/trunk/drivers/net/ethernet/realtek/r8169.c @@ -5856,7 +5856,20 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, return -EIO; } -static inline void rtl8169_tso_csum(struct rtl8169_private *tp, +static bool rtl_skb_pad(struct sk_buff *skb) +{ + if (skb_padto(skb, ETH_ZLEN)) + return false; + skb_put(skb, ETH_ZLEN - skb->len); + return true; +} + +static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb) +{ + return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34; +} + +static inline bool rtl8169_tso_csum(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; @@ -5869,13 +5882,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp, } else if (skb->ip_summed == CHECKSUM_PARTIAL) { const struct iphdr *ip = ip_hdr(skb); + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb); + if (ip->protocol == IPPROTO_TCP) opts[offset] |= info->checksum.tcp; else if (ip->protocol == IPPROTO_UDP) opts[offset] |= info->checksum.udp; else WARN_ON_ONCE(1); + } else { + if (unlikely(rtl_test_hw_pad_bug(tp, skb))) + return rtl_skb_pad(skb); } + return true; } static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, @@ -5896,17 +5916,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, goto err_stop_0; } - /* 8168evl does not automatically pad to minimum length. 
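The r8169 hunks above fold the 8168evl minimum-length workaround into rtl8169_tso_csum(): when the hardware cannot be trusted to pad (and, for checksummed frames, the checksum is finished in software first), the skb is padded to ETH_ZLEN before any offload bits are set. The core padding step looks like this in isolation; it is the stock skb_padto()/skb_put() idiom rather than the driver's exact code:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

/* Pad a short frame to the 60-byte Ethernet minimum in software.
 * Returns false if the frame had to be dropped because expansion failed.
 */
static bool foo_pad_to_min(struct sk_buff *skb)
{
        if (skb->len >= ETH_ZLEN)
                return true;
        if (skb_padto(skb, ETH_ZLEN))   /* may reallocate; frees skb on error */
                return false;
        skb_put(skb, ETH_ZLEN - skb->len);      /* account for the pad bytes */
        return true;
}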
*/ - if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 && - skb->len < ETH_ZLEN)) { - if (skb_padto(skb, ETH_ZLEN)) - goto err_update_stats; - skb_put(skb, ETH_ZLEN - skb->len); - } - if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) goto err_stop_0; + opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); + opts[0] = DescOwn; + + if (!rtl8169_tso_csum(tp, skb, opts)) + goto err_update_stats; + len = skb_headlen(skb); mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(d, mapping))) { @@ -5918,11 +5936,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->tx_skb[entry].len = len; txd->addr = cpu_to_le64(mapping); - opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb)); - opts[0] = DescOwn; - - rtl8169_tso_csum(tp, skb, opts); - frags = rtl8169_xmit_frags(tp, skb, opts); if (frags < 0) goto err_dma_1; diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.c b/trunk/drivers/net/ethernet/renesas/sh_eth.c index 33dc6f2418f2..42e9dd05c936 100644 --- a/trunk/drivers/net/ethernet/renesas/sh_eth.c +++ b/trunk/drivers/net/ethernet/renesas/sh_eth.c @@ -2745,11 +2745,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev) if (mdp->cd->tsu) { struct resource *rtsu; rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!rtsu) { - dev_err(&pdev->dev, "Not found TSU resource\n"); - ret = -ENODEV; - goto out_release; - } mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); if (IS_ERR(mdp->tsu_addr)) { ret = PTR_ERR(mdp->tsu_addr); diff --git a/trunk/drivers/net/ethernet/sfc/efx.c b/trunk/drivers/net/ethernet/sfc/efx.c index 01b99206139a..39e4cb39de29 100644 --- a/trunk/drivers/net/ethernet/sfc/efx.c +++ b/trunk/drivers/net/ethernet/sfc/efx.c @@ -638,14 +638,16 @@ static void efx_start_datapath(struct efx_nic *efx) EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + efx->type->rx_buffer_padding); rx_buf_len = (sizeof(struct efx_rx_page_state) + - EFX_PAGE_IP_ALIGN + efx->rx_dma_len); + NET_IP_ALIGN + efx->rx_dma_len); if (rx_buf_len <= PAGE_SIZE) { efx->rx_scatter = false; efx->rx_buffer_order = 0; } else if (efx->type->can_rx_scatter) { + BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES); BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + - EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE > - PAGE_SIZE / 2); + 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE, + EFX_RX_BUF_ALIGNMENT) > + PAGE_SIZE); efx->rx_scatter = true; efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; efx->rx_buffer_order = 0; diff --git a/trunk/drivers/net/ethernet/sfc/net_driver.h b/trunk/drivers/net/ethernet/sfc/net_driver.h index 9bd433a095c5..39d6bd77f015 100644 --- a/trunk/drivers/net/ethernet/sfc/net_driver.h +++ b/trunk/drivers/net/ethernet/sfc/net_driver.h @@ -72,8 +72,20 @@ /* Maximum possible MTU the driver supports */ #define EFX_MAX_MTU (9 * 1024) -/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */ -#define EFX_RX_USR_BUF_SIZE 1824 +/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page, + * and should be a multiple of the cache line size. + */ +#define EFX_RX_USR_BUF_SIZE (2048 - 256) + +/* If possible, we should ensure cache line alignment at start and end + * of every buffer. Otherwise, we just need to ensure 4-byte + * alignment of the network header. + */ +#if NET_IP_ALIGN == 0 +#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES +#else +#define EFX_RX_BUF_ALIGNMENT 4 +#endif /* Forward declare Precision Time Protocol (PTP) support structure. 
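The sfc changes above grow EFX_RX_USR_BUF_SIZE to 2048 - 256 and introduce EFX_RX_BUF_ALIGNMENT so each RX buffer step can be cache-line aligned when NET_IP_ALIGN permits, with a BUILD_BUG_ON proving two aligned buffers plus the page state still fit in one page. A reduced sketch of that sizing arithmetic; the foo_* names and the stub page-state struct are placeholders, only the macro values mirror the hunk:

#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/skbuff.h>       /* NET_IP_ALIGN */

#define FOO_RX_USR_BUF_SIZE     (2048 - 256)    /* mirrors EFX_RX_USR_BUF_SIZE */

/* Prefer cache-line alignment of each buffer; if NET_IP_ALIGN is non-zero
 * that is not possible, so fall back to 4-byte alignment of the network
 * header (mirrors EFX_RX_BUF_ALIGNMENT).
 */
#if NET_IP_ALIGN == 0
#define FOO_RX_BUF_ALIGNMENT    L1_CACHE_BYTES
#else
#define FOO_RX_BUF_ALIGNMENT    4
#endif

struct foo_page_state { unsigned int refcnt; }; /* per-page bookkeeping stub */

static unsigned int foo_rx_page_buf_step(unsigned int rx_dma_len)
{
        /* Two of these steps plus the page state must fit in one page. */
        BUILD_BUG_ON(sizeof(struct foo_page_state) +
                     2 * ALIGN(NET_IP_ALIGN + FOO_RX_USR_BUF_SIZE,
                               FOO_RX_BUF_ALIGNMENT) > PAGE_SIZE);

        return ALIGN(rx_dma_len + NET_IP_ALIGN, FOO_RX_BUF_ALIGNMENT);
}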
*/ struct efx_ptp_data; @@ -467,25 +479,12 @@ enum nic_state { STATE_RECOVERY = 3, /* device recovering from PCI error */ }; -/* - * Alignment of page-allocated RX buffers - * - * Controls the number of bytes inserted at the start of an RX buffer. - * This is the equivalent of NET_IP_ALIGN [which controls the alignment - * of the skb->head for hardware DMA]. - */ -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#define EFX_PAGE_IP_ALIGN 0 -#else -#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN -#endif - /* * Alignment of the skb->head which wraps a page-allocated RX buffer * * The skb allocated to wrap an rx_buffer can have this alignment. Since * the data is memcpy'd from the rx_buf, it does not need to be equal to - * EFX_PAGE_IP_ALIGN. + * NET_IP_ALIGN. */ #define EFX_PAGE_SKB_ALIGN 2 diff --git a/trunk/drivers/net/ethernet/sfc/ptp.c b/trunk/drivers/net/ethernet/sfc/ptp.c index 07f6baa15c0c..9a95abf2dedf 100644 --- a/trunk/drivers/net/ethernet/sfc/ptp.c +++ b/trunk/drivers/net/ethernet/sfc/ptp.c @@ -912,8 +912,10 @@ static int efx_ptp_probe_channel(struct efx_channel *channel) ptp->phc_clock = ptp_clock_register(&ptp->phc_clock_info, &efx->pci_dev->dev); - if (!ptp->phc_clock) + if (IS_ERR(ptp->phc_clock)) { + rc = PTR_ERR(ptp->phc_clock); goto fail3; + } INIT_WORK(&ptp->pps_work, efx_ptp_pps_worker); ptp->pps_workwq = create_singlethread_workqueue("sfc_pps"); diff --git a/trunk/drivers/net/ethernet/sfc/rx.c b/trunk/drivers/net/ethernet/sfc/rx.c index e73e30bac10e..a7dfe36cabf4 100644 --- a/trunk/drivers/net/ethernet/sfc/rx.c +++ b/trunk/drivers/net/ethernet/sfc/rx.c @@ -93,8 +93,8 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx, void efx_rx_config_page_split(struct efx_nic *efx) { - efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN, - L1_CACHE_BYTES); + efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN, + EFX_RX_BUF_ALIGNMENT); efx->rx_bufs_per_page = efx->rx_buffer_order ? 
1 : ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / efx->rx_page_buf_step); @@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue) do { index = rx_queue->added_count & rx_queue->ptr_mask; rx_buf = efx_rx_buffer(rx_queue, index); - rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; + rx_buf->dma_addr = dma_addr + NET_IP_ALIGN; rx_buf->page = page; - rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; + rx_buf->page_offset = page_offset + NET_IP_ALIGN; rx_buf->len = efx->rx_dma_len; rx_buf->flags = 0; ++rx_queue->added_count; diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig b/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig index f695a50bac47..43c1f3223322 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -1,6 +1,6 @@ config STMMAC_ETH tristate "STMicroelectronics 10/100/1000 Ethernet driver" - depends on HAS_IOMEM + depends on HAS_IOMEM && HAS_DMA select NET_CORE select MII select PHYLIB diff --git a/trunk/drivers/net/ethernet/tile/tilegx.c b/trunk/drivers/net/ethernet/tile/tilegx.c index 66e025ad5df1..f3c2d034b32c 100644 --- a/trunk/drivers/net/ethernet/tile/tilegx.c +++ b/trunk/drivers/net/ethernet/tile/tilegx.c @@ -930,7 +930,7 @@ static int tile_net_setup_interrupts(struct net_device *dev) if (info->has_iqueue) { gxio_mpipe_request_notif_ring_interrupt( &context, cpu_x(cpu), cpu_y(cpu), - 1, ingress_irq, info->iqueue.ring); + KERNEL_PL, ingress_irq, info->iqueue.ring); } } diff --git a/trunk/drivers/net/ethernet/toshiba/spider_net.c b/trunk/drivers/net/ethernet/toshiba/spider_net.c index c655fe60121e..5734480c1ecf 100644 --- a/trunk/drivers/net/ethernet/toshiba/spider_net.c +++ b/trunk/drivers/net/ethernet/toshiba/spider_net.c @@ -1990,7 +1990,8 @@ spider_net_open(struct net_device *netdev) goto alloc_rx_failed; /* Allocate rx skbs */ - if (spider_net_alloc_rx_skbs(card)) + result = spider_net_alloc_rx_skbs(card); + if (result) goto alloc_skbs_failed; spider_net_set_multi(netdev); diff --git a/trunk/drivers/net/irda/bfin_sir.c b/trunk/drivers/net/irda/bfin_sir.c index a06fca61c9a0..22b4527321b1 100644 --- a/trunk/drivers/net/irda/bfin_sir.c +++ b/trunk/drivers/net/irda/bfin_sir.c @@ -609,7 +609,7 @@ static int bfin_sir_open(struct net_device *dev) { struct bfin_sir_self *self = netdev_priv(dev); struct bfin_sir_port *port = self->sir_port; - int err = -ENOMEM; + int err; self->newspeed = 0; self->speed = 9600; @@ -623,8 +623,10 @@ static int bfin_sir_open(struct net_device *dev) bfin_sir_set_speed(port, 9600); self->irlap = irlap_open(dev, &self->qos, DRIVER_NAME); - if (!self->irlap) + if (!self->irlap) { + err = -ENOMEM; goto err_irlap; + } INIT_WORK(&self->work, bfin_sir_send_work); diff --git a/trunk/drivers/net/macvlan.c b/trunk/drivers/net/macvlan.c index d5a141c7c4e7..1c502bb0c916 100644 --- a/trunk/drivers/net/macvlan.c +++ b/trunk/drivers/net/macvlan.c @@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) } if (port->passthru) - vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); + vlan = list_first_or_null_rcu(&port->vlans, + struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) @@ -814,7 +815,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (err < 0) goto upper_dev_unlink; - list_add_tail(&vlan->list, &port->vlans); + list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; @@ -842,7 +843,7 
@@ void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); - list_del(&vlan->list); + list_del_rcu(&vlan->list); unregister_netdevice_queue(dev, head); netdev_upper_dev_unlink(vlan->lowerdev, dev); } diff --git a/trunk/drivers/net/ntb_netdev.c b/trunk/drivers/net/ntb_netdev.c index ed947dd76fbd..f3cdf64997d6 100644 --- a/trunk/drivers/net/ntb_netdev.c +++ b/trunk/drivers/net/ntb_netdev.c @@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev) if (dev == NULL) return; + list_del(&dev->list); + ndev = dev->ndev; unregister_netdev(ndev); diff --git a/trunk/drivers/net/phy/Kconfig b/trunk/drivers/net/phy/Kconfig index 450345261bd3..1e11f2bfd9ce 100644 --- a/trunk/drivers/net/phy/Kconfig +++ b/trunk/drivers/net/phy/Kconfig @@ -126,7 +126,7 @@ config MDIO_BITBANG config MDIO_GPIO tristate "Support for GPIO lib-based bitbanged MDIO buses" - depends on MDIO_BITBANG && GENERIC_GPIO + depends on MDIO_BITBANG && GPIOLIB ---help--- Supports GPIO lib-based MDIO busses. diff --git a/trunk/drivers/net/usb/cdc_ether.c b/trunk/drivers/net/usb/cdc_ether.c index 24fbec27a22a..078795fe6e31 100644 --- a/trunk/drivers/net/usb/cdc_ether.c +++ b/trunk/drivers/net/usb/cdc_ether.c @@ -613,6 +613,13 @@ static const struct usb_device_id products [] = { .driver_info = 0, }, +/* Dell Wireless 5804 (Novatel E371) - handled by qmi_wwan */ +{ + USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x819b, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* AnyDATA ADU960S - handled by qmi_wwan */ { USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c index 834e405fb57a..86adfa0a912e 100644 --- a/trunk/drivers/net/usb/qmi_wwan.c +++ b/trunk/drivers/net/usb/qmi_wwan.c @@ -501,6 +501,13 @@ static const struct usb_device_id products[] = { USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&qmi_wwan_info, }, + { /* Dell Wireless 5804 (Novatel E371) */ + USB_DEVICE_AND_INTERFACE_INFO(0x413C, 0x819b, + USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&qmi_wwan_info, + }, { /* ADU960S */ USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM, @@ -575,6 +582,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ + {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */ /* 4. 
Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ diff --git a/trunk/drivers/net/usb/rtl8150.c b/trunk/drivers/net/usb/rtl8150.c index a491d3a95393..6cbdac67f3a0 100644 --- a/trunk/drivers/net/usb/rtl8150.c +++ b/trunk/drivers/net/usb/rtl8150.c @@ -130,19 +130,23 @@ struct rtl8150 { struct usb_device *udev; struct tasklet_struct tl; struct net_device *netdev; - struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb; + struct urb *rx_urb, *tx_urb, *intr_urb; struct sk_buff *tx_skb, *rx_skb; struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE]; spinlock_t rx_pool_lock; struct usb_ctrlrequest dr; int intr_interval; - __le16 rx_creg; u8 *intr_buff; u8 phy; }; typedef struct rtl8150 rtl8150_t; +struct async_req { + struct usb_ctrlrequest dr; + u16 rx_creg; +}; + static const char driver_name [] = "rtl8150"; /* @@ -164,51 +168,47 @@ static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) indx, 0, data, size, 500); } -static void ctrl_callback(struct urb *urb) +static void async_set_reg_cb(struct urb *urb) { - rtl8150_t *dev; + struct async_req *req = (struct async_req *)urb->context; int status = urb->status; - switch (status) { - case 0: - break; - case -EINPROGRESS: - break; - case -ENOENT: - break; - default: - if (printk_ratelimit()) - dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status); - } - dev = urb->context; - clear_bit(RX_REG_SET, &dev->flags); + if (status < 0) + dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status); + kfree(req); + usb_free_urb(urb); } -static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size) +static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg) { - int ret; - - if (test_bit(RX_REG_SET, &dev->flags)) - return -EAGAIN; + int res = -ENOMEM; + struct urb *async_urb; + struct async_req *req; - dev->dr.bRequestType = RTL8150_REQT_WRITE; - dev->dr.bRequest = RTL8150_REQ_SET_REGS; - dev->dr.wValue = cpu_to_le16(indx); - dev->dr.wIndex = 0; - dev->dr.wLength = cpu_to_le16(size); - dev->ctrl_urb->transfer_buffer_length = size; - usb_fill_control_urb(dev->ctrl_urb, dev->udev, - usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr, - &dev->rx_creg, size, ctrl_callback, dev); - if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) { - if (ret == -ENODEV) + req = kmalloc(sizeof(struct async_req), GFP_ATOMIC); + if (req == NULL) + return res; + async_urb = usb_alloc_urb(0, GFP_ATOMIC); + if (async_urb == NULL) { + kfree(req); + return res; + } + req->rx_creg = cpu_to_le16(reg); + req->dr.bRequestType = RTL8150_REQT_WRITE; + req->dr.bRequest = RTL8150_REQ_SET_REGS; + req->dr.wIndex = 0; + req->dr.wValue = cpu_to_le16(indx); + req->dr.wLength = cpu_to_le16(size); + usb_fill_control_urb(async_urb, dev->udev, + usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr, + &req->rx_creg, size, async_set_reg_cb, req); + res = usb_submit_urb(async_urb, GFP_ATOMIC); + if (res) { + if (res == -ENODEV) netif_device_detach(dev->netdev); - dev_err(&dev->udev->dev, - "control request submission failed: %d\n", ret); - } else - set_bit(RX_REG_SET, &dev->flags); - - return ret; + dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res); + } + return res; } static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg) @@ -330,13 +330,6 @@ static int alloc_all_urbs(rtl8150_t * dev) usb_free_urb(dev->tx_urb); return 0; } - dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!dev->ctrl_urb) { - usb_free_urb(dev->rx_urb); - usb_free_urb(dev->tx_urb); - usb_free_urb(dev->intr_urb); - return 0; - } return 1; } 
@@ -346,7 +339,6 @@ static void free_all_urbs(rtl8150_t * dev) usb_free_urb(dev->rx_urb); usb_free_urb(dev->tx_urb); usb_free_urb(dev->intr_urb); - usb_free_urb(dev->ctrl_urb); } static void unlink_all_urbs(rtl8150_t * dev) @@ -354,7 +346,6 @@ static void unlink_all_urbs(rtl8150_t * dev) usb_kill_urb(dev->rx_urb); usb_kill_urb(dev->tx_urb); usb_kill_urb(dev->intr_urb); - usb_kill_urb(dev->ctrl_urb); } static inline struct sk_buff *pull_skb(rtl8150_t *dev) @@ -629,7 +620,6 @@ static int enable_net_traffic(rtl8150_t * dev) } /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */ rcr = 0x9e; - dev->rx_creg = cpu_to_le16(rcr); tcr = 0xd8; cr = 0x0c; if (!(rcr & 0x80)) @@ -662,20 +652,22 @@ static void rtl8150_tx_timeout(struct net_device *netdev) static void rtl8150_set_multicast(struct net_device *netdev) { rtl8150_t *dev = netdev_priv(netdev); + u16 rx_creg = 0x9e; + netif_stop_queue(netdev); if (netdev->flags & IFF_PROMISC) { - dev->rx_creg |= cpu_to_le16(0x0001); + rx_creg |= 0x0001; dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { - dev->rx_creg &= cpu_to_le16(0xfffe); - dev->rx_creg |= cpu_to_le16(0x0002); + rx_creg &= 0xfffe; + rx_creg |= 0x0002; dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); } else { /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ - dev->rx_creg &= cpu_to_le16(0x00fc); + rx_creg &= 0x00fc; } - async_set_registers(dev, RCR, 2); + async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg); netif_wake_queue(netdev); } diff --git a/trunk/drivers/net/usb/sierra_net.c b/trunk/drivers/net/usb/sierra_net.c index a923d61c6fc5..a79e9d334928 100644 --- a/trunk/drivers/net/usb/sierra_net.c +++ b/trunk/drivers/net/usb/sierra_net.c @@ -426,6 +426,13 @@ static void sierra_net_dosync(struct usbnet *dev) dev_dbg(&dev->udev->dev, "%s", __func__); + /* The SIERRA_NET_HIP_MSYNC_ID command appears to request that the + * firmware restart itself. After restarting, the modem will respond + * with the SIERRA_NET_HIP_RESTART_ID indication. 
The driver continues + * sending MSYNC commands every few seconds until it receives the + * RESTART event from the firmware + */ + /* tell modem we are ready */ status = sierra_net_send_sync(dev); if (status < 0) @@ -704,6 +711,9 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) /* set context index initially to 0 - prepares tx hdr template */ sierra_net_set_ctx_index(priv, 0); + /* prepare sync message template */ + memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); + /* decrease the rx_urb_size and max_tx_size to 4k on USB 1.1 */ dev->rx_urb_size = SIERRA_NET_RX_URB_SIZE; if (dev->udev->speed != USB_SPEED_HIGH) @@ -739,11 +749,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) kfree(priv); return -ENODEV; } - /* prepare sync message from template */ - memcpy(priv->sync_msg, sync_tmplate, sizeof(priv->sync_msg)); - - /* initiate the sync sequence */ - sierra_net_dosync(dev); return 0; } @@ -766,8 +771,9 @@ static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf) netdev_err(dev->net, "usb_control_msg failed, status %d\n", status); - sierra_net_set_private(dev, NULL); + usbnet_status_stop(dev); + sierra_net_set_private(dev, NULL); kfree(priv); } @@ -908,6 +914,24 @@ static const struct driver_info sierra_net_info_direct_ip = { .tx_fixup = sierra_net_tx_fixup, }; +static int +sierra_net_probe(struct usb_interface *udev, const struct usb_device_id *prod) +{ + int ret; + + ret = usbnet_probe(udev, prod); + if (ret == 0) { + struct usbnet *dev = usb_get_intfdata(udev); + + ret = usbnet_status_start(dev, GFP_KERNEL); + if (ret == 0) { + /* Interrupt URB now set up; initiate sync sequence */ + sierra_net_dosync(dev); + } + } + return ret; +} + #define DIRECT_IP_DEVICE(vend, prod) \ {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ @@ -930,7 +954,7 @@ MODULE_DEVICE_TABLE(usb, products); static struct usb_driver sierra_net_driver = { .name = "sierra_net", .id_table = products, - .probe = usbnet_probe, + .probe = sierra_net_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, diff --git a/trunk/drivers/net/usb/usbnet.c b/trunk/drivers/net/usb/usbnet.c index 1e5a9b72650e..06ee82f557d4 100644 --- a/trunk/drivers/net/usb/usbnet.c +++ b/trunk/drivers/net/usb/usbnet.c @@ -252,6 +252,70 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf) return 0; } +/* Submit the interrupt URB if not previously submitted, increasing refcount */ +int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags) +{ + int ret = 0; + + WARN_ON_ONCE(dev->interrupt == NULL); + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + + if (++dev->interrupt_count == 1) + ret = usb_submit_urb(dev->interrupt, mem_flags); + + dev_dbg(&dev->udev->dev, "incremented interrupt URB count to %d\n", + dev->interrupt_count); + mutex_unlock(&dev->interrupt_mutex); + } + return ret; +} +EXPORT_SYMBOL_GPL(usbnet_status_start); + +/* For resume; submit interrupt URB if previously submitted */ +static int __usbnet_status_start_force(struct usbnet *dev, gfp_t mem_flags) +{ + int ret = 0; + + mutex_lock(&dev->interrupt_mutex); + if (dev->interrupt_count) { + ret = usb_submit_urb(dev->interrupt, mem_flags); + dev_dbg(&dev->udev->dev, + "submitted interrupt URB for resume\n"); + } + mutex_unlock(&dev->interrupt_mutex); + return ret; +} + +/* Kill the interrupt URB if all submitters want it killed */ +void usbnet_status_stop(struct usbnet *dev) 
+{ + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + WARN_ON(dev->interrupt_count == 0); + + if (dev->interrupt_count && --dev->interrupt_count == 0) + usb_kill_urb(dev->interrupt); + + dev_dbg(&dev->udev->dev, + "decremented interrupt URB count to %d\n", + dev->interrupt_count); + mutex_unlock(&dev->interrupt_mutex); + } +} +EXPORT_SYMBOL_GPL(usbnet_status_stop); + +/* For suspend; always kill interrupt URB */ +static void __usbnet_status_stop_force(struct usbnet *dev) +{ + if (dev->interrupt) { + mutex_lock(&dev->interrupt_mutex); + usb_kill_urb(dev->interrupt); + dev_dbg(&dev->udev->dev, "killed interrupt URB for suspend\n"); + mutex_unlock(&dev->interrupt_mutex); + } +} + /* Passes this packet up the stack, updating its accounting. * Some link protocols batch packets, so their rx_fixup paths * can return clones as well as just modify the original skb. @@ -725,7 +789,7 @@ int usbnet_stop (struct net_device *net) if (!(info->flags & FLAG_AVOID_UNLINK_URBS)) usbnet_terminate_urbs(dev); - usb_kill_urb(dev->interrupt); + usbnet_status_stop(dev); usbnet_purge_paused_rxq(dev); @@ -787,7 +851,7 @@ int usbnet_open (struct net_device *net) /* start any status interrupt transfer */ if (dev->interrupt) { - retval = usb_submit_urb (dev->interrupt, GFP_KERNEL); + retval = usbnet_status_start(dev, GFP_KERNEL); if (retval < 0) { netif_err(dev, ifup, dev->net, "intr submit %d\n", retval); @@ -1413,7 +1477,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) /* usbnet already took usb runtime pm, so have to enable the feature * for usb interface, otherwise usb_autopm_get_interface may return - * failure if USB_SUSPEND(RUNTIME_PM) is enabled. + * failure if RUNTIME_PM is enabled. */ if (!driver->supports_autosuspend) { driver->supports_autosuspend = 1; @@ -1458,6 +1522,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) dev->delay.data = (unsigned long) dev; init_timer (&dev->delay); mutex_init (&dev->phy_mutex); + mutex_init(&dev->interrupt_mutex); + dev->interrupt_count = 0; dev->net = net; strcpy (net->name, "usb%d"); @@ -1593,7 +1659,7 @@ int usbnet_suspend (struct usb_interface *intf, pm_message_t message) */ netif_device_detach (dev->net); usbnet_terminate_urbs(dev); - usb_kill_urb(dev->interrupt); + __usbnet_status_stop_force(dev); /* * reattach so runtime management can use and @@ -1613,9 +1679,8 @@ int usbnet_resume (struct usb_interface *intf) int retval; if (!--dev->suspend_count) { - /* resume interrupt URBs */ - if (dev->interrupt && test_bit(EVENT_DEV_OPEN, &dev->flags)) - usb_submit_urb(dev->interrupt, GFP_NOIO); + /* resume interrupt URB if it was previously submitted */ + __usbnet_status_start_force(dev, GFP_NOIO); spin_lock_irq(&dev->txq.lock); while ((res = usb_get_from_anchor(&dev->deferred))) { diff --git a/trunk/drivers/net/virtio_net.c b/trunk/drivers/net/virtio_net.c index 3c23fdc27bf0..c9e00387d999 100644 --- a/trunk/drivers/net/virtio_net.c +++ b/trunk/drivers/net/virtio_net.c @@ -28,7 +28,7 @@ #include #include -static int napi_weight = 128; +static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); static bool csum = true, gso = true; @@ -636,10 +636,11 @@ static int virtnet_open(struct net_device *dev) struct virtnet_info *vi = netdev_priv(dev); int i; - for (i = 0; i < vi->curr_queue_pairs; i++) { - /* Make sure we have some buffers: if oom use wq. 
*/ - if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); + for (i = 0; i < vi->max_queue_pairs; i++) { + if (i < vi->curr_queue_pairs) + /* Make sure we have some buffers: if oom use wq. */ + if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) + schedule_delayed_work(&vi->refill, 0); virtnet_napi_enable(&vi->rq[i]); } diff --git a/trunk/drivers/net/vxlan.c b/trunk/drivers/net/vxlan.c index ba81f3c39a83..3b1d2ee7156b 100644 --- a/trunk/drivers/net/vxlan.c +++ b/trunk/drivers/net/vxlan.c @@ -301,7 +301,7 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan, } /* Look up Ethernet address in forwarding table */ -static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, +static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan, const u8 *mac) { @@ -316,6 +316,18 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, return NULL; } +static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, + const u8 *mac) +{ + struct vxlan_fdb *f; + + f = __vxlan_find_mac(vxlan, mac); + if (f) + f->used = jiffies; + + return f; +} + /* Add/update destinations for multicast */ static int vxlan_fdb_append(struct vxlan_fdb *f, __be32 ip, __be16 port, __u32 vni, __u32 ifindex) @@ -353,7 +365,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan, struct vxlan_fdb *f; int notify = 0; - f = vxlan_find_mac(vxlan, mac); + f = __vxlan_find_mac(vxlan, mac); if (f) { if (flags & NLM_F_EXCL) { netdev_dbg(vxlan->dev, @@ -563,7 +575,6 @@ static void vxlan_snoop(struct net_device *dev, f = vxlan_find_mac(vxlan, src_mac); if (likely(f)) { - f->used = jiffies; if (likely(f->remote.remote_ip == src_ip)) return; diff --git a/trunk/drivers/net/wireless/ath/ath5k/base.c b/trunk/drivers/net/wireless/ath/ath5k/base.c index 9b20d9ee2719..7f702fe3ecc2 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/base.c +++ b/trunk/drivers/net/wireless/ath/ath5k/base.c @@ -2369,6 +2369,9 @@ ath5k_tx_complete_poll_work(struct work_struct *work) int i; bool needreset = false; + if (!test_bit(ATH_STAT_STARTED, ah->status)) + return; + mutex_lock(&ah->lock); for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { @@ -2676,6 +2679,7 @@ int ath5k_start(struct ieee80211_hw *hw) mmiowb(); mutex_unlock(&ah->lock); + set_bit(ATH_STAT_STARTED, ah->status); ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); @@ -2737,6 +2741,7 @@ void ath5k_stop(struct ieee80211_hw *hw) ath5k_stop_tasklets(ah); + clear_bit(ATH_STAT_STARTED, ah->status); cancel_delayed_work_sync(&ah->tx_complete_work); if (!ath5k_modparam_no_hw_rfkill_switch) diff --git a/trunk/drivers/net/wireless/ath/ath9k/Kconfig b/trunk/drivers/net/wireless/ath/ath9k/Kconfig index 17507dc8a1e7..f3dc124c60c7 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/Kconfig +++ b/trunk/drivers/net/wireless/ath/ath9k/Kconfig @@ -17,7 +17,7 @@ config ATH9K_BTCOEX_SUPPORT config ATH9K tristate "Atheros 802.11n wireless cards support" - depends on MAC80211 + depends on MAC80211 && HAS_DMA select ATH9K_HW select MAC80211_LEDS select LEDS_CLASS diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c index 639ba7d18ea4..6988e1d081f2 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_calib.c @@ -965,7 +965,7 @@ static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah, { int i; - if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah)) + if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && 
!AR_SREV_9485(ah)) return; for (i = 0; i < AR9300_MAX_CHAINS; i++) { diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9485_initvals.h index 712f415b8c08..88ff1d7b53ab 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9485_initvals.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9485_initvals.h @@ -1020,7 +1020,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = { {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, - {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, + {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18}, {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h index 0c2ac0c6dc89..e85a8b076c22 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h @@ -233,9 +233,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = { {0x00009d10, 0x01834061}, {0x00009d14, 0x00c00400}, {0x00009d18, 0x00000000}, - {0x00009e08, 0x0078230c}, - {0x00009e24, 0x990bb515}, - {0x00009e28, 0x126f0000}, + {0x00009e08, 0x0038230c}, + {0x00009e24, 0x9907b515}, + {0x00009e28, 0x126f0600}, {0x00009e30, 0x06336f77}, {0x00009e34, 0x6af6532f}, {0x00009e38, 0x0cc80c00}, @@ -337,7 +337,7 @@ static const u32 ar9565_1p0_baseband_core[][2] = { static const u32 ar9565_1p0_baseband_postamble[][5] = { /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ - {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d}, + {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009}, {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81}, @@ -345,9 +345,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = { {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, - {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, - {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, - {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, + {0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020}, + {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, + {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e}, {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, @@ -450,6 +450,8 @@ static const u32 ar9565_1p0_soc_postamble[][5] = { static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { /* Addr allmodes */ + {0x00004050, 0x00300300}, + {0x0000406c, 0x00100000}, {0x0000a000, 0x00010000}, {0x0000a004, 0x00030002}, {0x0000a008, 0x00050004}, @@ -498,27 +500,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { {0x0000a0b4, 0x00000000}, {0x0000a0b8, 0x00000000}, {0x0000a0bc, 0x00000000}, - {0x0000a0c0, 0x001f0000}, - {0x0000a0c4, 0x01000101}, - {0x0000a0c8, 0x011e011f}, - {0x0000a0cc, 0x011c011d}, - {0x0000a0d0, 0x02030204}, - {0x0000a0d4, 
0x02010202}, - {0x0000a0d8, 0x021f0200}, - {0x0000a0dc, 0x0302021e}, - {0x0000a0e0, 0x03000301}, - {0x0000a0e4, 0x031e031f}, - {0x0000a0e8, 0x0402031d}, - {0x0000a0ec, 0x04000401}, - {0x0000a0f0, 0x041e041f}, - {0x0000a0f4, 0x0502041d}, - {0x0000a0f8, 0x05000501}, - {0x0000a0fc, 0x051e051f}, - {0x0000a100, 0x06010602}, - {0x0000a104, 0x061f0600}, - {0x0000a108, 0x061d061e}, - {0x0000a10c, 0x07020703}, - {0x0000a110, 0x07000701}, + {0x0000a0c0, 0x00bf00a0}, + {0x0000a0c4, 0x11a011a1}, + {0x0000a0c8, 0x11be11bf}, + {0x0000a0cc, 0x11bc11bd}, + {0x0000a0d0, 0x22632264}, + {0x0000a0d4, 0x22612262}, + {0x0000a0d8, 0x227f2260}, + {0x0000a0dc, 0x4322227e}, + {0x0000a0e0, 0x43204321}, + {0x0000a0e4, 0x433e433f}, + {0x0000a0e8, 0x4462433d}, + {0x0000a0ec, 0x44604461}, + {0x0000a0f0, 0x447e447f}, + {0x0000a0f4, 0x5582447d}, + {0x0000a0f8, 0x55805581}, + {0x0000a0fc, 0x559e559f}, + {0x0000a100, 0x66816682}, + {0x0000a104, 0x669f6680}, + {0x0000a108, 0x669d669e}, + {0x0000a10c, 0x77627763}, + {0x0000a110, 0x77607761}, {0x0000a114, 0x00000000}, {0x0000a118, 0x00000000}, {0x0000a11c, 0x00000000}, @@ -530,27 +532,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { {0x0000a134, 0x00000000}, {0x0000a138, 0x00000000}, {0x0000a13c, 0x00000000}, - {0x0000a140, 0x001f0000}, - {0x0000a144, 0x01000101}, - {0x0000a148, 0x011e011f}, - {0x0000a14c, 0x011c011d}, - {0x0000a150, 0x02030204}, - {0x0000a154, 0x02010202}, - {0x0000a158, 0x021f0200}, - {0x0000a15c, 0x0302021e}, - {0x0000a160, 0x03000301}, - {0x0000a164, 0x031e031f}, - {0x0000a168, 0x0402031d}, - {0x0000a16c, 0x04000401}, - {0x0000a170, 0x041e041f}, - {0x0000a174, 0x0502041d}, - {0x0000a178, 0x05000501}, - {0x0000a17c, 0x051e051f}, - {0x0000a180, 0x06010602}, - {0x0000a184, 0x061f0600}, - {0x0000a188, 0x061d061e}, - {0x0000a18c, 0x07020703}, - {0x0000a190, 0x07000701}, + {0x0000a140, 0x00bf00a0}, + {0x0000a144, 0x11a011a1}, + {0x0000a148, 0x11be11bf}, + {0x0000a14c, 0x11bc11bd}, + {0x0000a150, 0x22632264}, + {0x0000a154, 0x22612262}, + {0x0000a158, 0x227f2260}, + {0x0000a15c, 0x4322227e}, + {0x0000a160, 0x43204321}, + {0x0000a164, 0x433e433f}, + {0x0000a168, 0x4462433d}, + {0x0000a16c, 0x44604461}, + {0x0000a170, 0x447e447f}, + {0x0000a174, 0x5582447d}, + {0x0000a178, 0x55805581}, + {0x0000a17c, 0x559e559f}, + {0x0000a180, 0x66816682}, + {0x0000a184, 0x669f6680}, + {0x0000a188, 0x669d669e}, + {0x0000a18c, 0x77e677e7}, + {0x0000a190, 0x77e477e5}, {0x0000a194, 0x00000000}, {0x0000a198, 0x00000000}, {0x0000a19c, 0x00000000}, @@ -770,7 +772,7 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = { static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = { /* Addr allmodes */ - {0x00018c00, 0x18213ede}, + {0x00018c00, 0x18212ede}, {0x00018c04, 0x000801d8}, {0x00018c08, 0x0003780c}, }; @@ -889,8 +891,8 @@ static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = { {0x0000a180, 0x66816682}, {0x0000a184, 0x669f6680}, {0x0000a188, 0x669d669e}, - {0x0000a18c, 0x77627763}, - {0x0000a190, 0x77607761}, + {0x0000a18c, 0x77e677e7}, + {0x0000a190, 0x77e477e5}, {0x0000a194, 0x00000000}, {0x0000a198, 0x00000000}, {0x0000a19c, 0x00000000}, @@ -1114,7 +1116,7 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, 
{0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004}, @@ -1140,13 +1142,13 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = { {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5}, {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9}, {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb}, - {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, - {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, + {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0}, + {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1}, + {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2}, + {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3}, + {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4}, + {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, + {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, @@ -1174,7 +1176,7 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, - {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, + {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df}, {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, @@ -1200,13 +1202,13 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = { {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, - {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, - {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, + {0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0}, + {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1}, + {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2}, + {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3}, + {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4}, + {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, + {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6}, {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, diff --git a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h 
b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h index 8a1888d02070..366002f266f8 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ath9k.h @@ -254,6 +254,7 @@ struct ath_atx_tid { int sched; int paused; u8 state; + bool stop_cb; }; struct ath_node { @@ -351,7 +352,8 @@ void ath_tx_tasklet(struct ath_softc *sc); void ath_tx_edma_tasklet(struct ath_softc *sc); int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid, u16 *ssn); -void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); +bool ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid, + bool flush); void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an); diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.c b/trunk/drivers/net/wireless/ath/ath9k/debug.c index e6307b86363a..b37eb8d38811 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.c +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.c @@ -2008,6 +2008,14 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw, WARN_ON(i != ATH9K_SSTATS_LEN); } +void ath9k_deinit_debug(struct ath_softc *sc) +{ + if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { + relay_close(sc->rfs_chan_spec_scan); + sc->rfs_chan_spec_scan = NULL; + } +} + int ath9k_init_debug(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.h b/trunk/drivers/net/wireless/ath/ath9k/debug.h index 794a7ec83a24..9d49aab8b989 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.h +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.h @@ -304,6 +304,7 @@ struct ath9k_debug { }; int ath9k_init_debug(struct ath_hw *ah); +void ath9k_deinit_debug(struct ath_softc *sc); void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, @@ -339,6 +340,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah) return 0; } +static inline void ath9k_deinit_debug(struct ath_softc *sc) +{ +} + static inline void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status) { diff --git a/trunk/drivers/net/wireless/ath/ath9k/init.c b/trunk/drivers/net/wireless/ath/ath9k/init.c index 0237b2868961..aba415103f94 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/init.c +++ b/trunk/drivers/net/wireless/ath/ath9k/init.c @@ -906,7 +906,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, if (!ath_is_world_regd(reg)) { error = regulatory_hint(hw->wiphy, reg->alpha2); if (error) - goto unregister; + goto debug_cleanup; } ath_init_leds(sc); @@ -914,6 +914,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, return 0; +debug_cleanup: + ath9k_deinit_debug(sc); unregister: ieee80211_unregister_hw(hw); rx_cleanup: @@ -942,11 +944,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc) sc->dfs_detector->exit(sc->dfs_detector); ath9k_eeprom_release(sc); - - if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) { - relay_close(sc->rfs_chan_spec_scan); - sc->rfs_chan_spec_scan = NULL; - } } void ath9k_deinit_device(struct ath_softc *sc) @@ -960,6 +957,7 @@ void ath9k_deinit_device(struct ath_softc *sc) ath9k_ps_restore(sc); + ath9k_deinit_debug(sc); ieee80211_unregister_hw(hw); ath_rx_cleanup(sc); ath9k_deinit_softc(sc); diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index 
6963862a1872..2382d1262e7f 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -227,13 +227,13 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start) if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) goto work; - ath9k_set_beacon(sc); - if (ah->opmode == NL80211_IFTYPE_STATION && test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { spin_lock_irqsave(&sc->sc_pm_lock, flags); sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; spin_unlock_irqrestore(&sc->sc_pm_lock, flags); + } else { + ath9k_set_beacon(sc); } work: ath_restart_work(sc); @@ -1332,6 +1332,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { }; + int key; ath_node_attach(sc, sta, vif); @@ -1339,7 +1340,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, vif->type != NL80211_IFTYPE_AP_VLAN) return 0; - an->ps_key = ath_key_config(common, vif, sta, &ps_key); + key = ath_key_config(common, vif, sta, &ps_key); + if (key > 0) + an->ps_key = key; return 0; } @@ -1356,6 +1359,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc, return; ath_key_delete(common, &ps_key); + an->ps_key = 0; } static int ath9k_sta_remove(struct ieee80211_hw *hw, @@ -1683,6 +1687,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, u16 tid, u16 *ssn, u8 buf_size) { struct ath_softc *sc = hw->priv; + bool flush = false; int ret = 0; local_bh_disable(); @@ -1699,12 +1704,13 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw, ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); ath9k_ps_restore(sc); break; - case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: + flush = true; + case IEEE80211_AMPDU_TX_STOP_CONT: ath9k_ps_wakeup(sc); - ath_tx_aggr_stop(sc, sta, tid); - ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); + if (ath_tx_aggr_stop(sc, sta, tid, flush)) + ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); ath9k_ps_restore(sc); break; case IEEE80211_AMPDU_TX_OPERATIONAL: diff --git a/trunk/drivers/net/wireless/ath/ath9k/xmit.c b/trunk/drivers/net/wireless/ath/ath9k/xmit.c index eab0fcb7ded6..14bb3354ea64 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/xmit.c +++ b/trunk/drivers/net/wireless/ath/ath9k/xmit.c @@ -164,7 +164,20 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta, ARRAY_SIZE(bf->rates)); } -static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +static void ath_tx_clear_tid(struct ath_softc *sc, struct ath_atx_tid *tid) +{ + tid->state &= ~AGGR_ADDBA_COMPLETE; + tid->state &= ~AGGR_CLEANUP; + if (!tid->stop_cb) + return; + + ieee80211_start_tx_ba_cb_irqsafe(tid->an->vif, tid->an->sta->addr, + tid->tidno); + tid->stop_cb = false; +} + +static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid, + bool flush_packets) { struct ath_txq *txq = tid->ac->txq; struct sk_buff *skb; @@ -181,16 +194,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) while ((skb = __skb_dequeue(&tid->buf_q))) { fi = get_frame_info(skb); bf = fi->bf; + if (!bf && !flush_packets) + bf = ath_tx_setup_buffer(sc, txq, tid, skb); if (!bf) { - bf = ath_tx_setup_buffer(sc, txq, tid, skb); - if (!bf) { - ieee80211_free_txskb(sc->hw, skb); - continue; - } + ieee80211_free_txskb(sc->hw, skb); + continue; } - if (fi->retries) { + if (fi->retries || flush_packets) { 
list_add_tail(&bf->list, &bf_head); ath_tx_update_baw(sc, tid, bf->bf_state.seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); @@ -201,12 +213,10 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) } } - if (tid->baw_head == tid->baw_tail) { - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_CLEANUP; - } + if (tid->baw_head == tid->baw_tail) + ath_tx_clear_tid(sc, tid); - if (sendbar) { + if (sendbar && !flush_packets) { ath_txq_unlock(sc, txq); ath_send_bar(tid, tid->seq_start); ath_txq_lock(sc, txq); @@ -277,9 +287,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq, list_add_tail(&bf->list, &bf_head); - if (fi->retries) - ath_tx_update_baw(sc, tid, bf->bf_state.seqno); - + ath_tx_update_baw(sc, tid, bf->bf_state.seqno); ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); } @@ -602,7 +610,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, } if (tid->state & AGGR_CLEANUP) - ath_tx_flush_tid(sc, tid); + ath_tx_flush_tid(sc, tid, false); rcu_read_unlock(); @@ -620,6 +628,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, struct ath_tx_status *ts, struct ath_buf *bf, struct list_head *bf_head) { + struct ieee80211_tx_info *info; bool txok, flush; txok = !(ts->ts_status & ATH9K_TXERR_MASK); @@ -631,8 +640,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, txq->axq_ampdu_depth--; if (!bf_isampdu(bf)) { - if (!flush) + if (!flush) { + info = IEEE80211_SKB_CB(bf->bf_mpdu); + memcpy(info->control.rates, bf->rates, + sizeof(info->control.rates)); ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); + } ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); } else ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); @@ -676,7 +689,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf, skb = bf->bf_mpdu; tx_info = IEEE80211_SKB_CB(skb); - rates = tx_info->control.rates; + rates = bf->rates; /* * Find the lowest frame length among the rate series that will have a @@ -1256,18 +1269,23 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, return 0; } -void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) +bool ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid, + bool flush) { struct ath_node *an = (struct ath_node *)sta->drv_priv; struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); struct ath_txq *txq = txtid->ac->txq; + bool ret = !flush; + + if (flush) + txtid->stop_cb = false; if (txtid->state & AGGR_CLEANUP) - return; + return false; if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { txtid->state &= ~AGGR_ADDBA_PROGRESS; - return; + return ret; } ath_txq_lock(sc, txq); @@ -1279,13 +1297,17 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) * TID can only be reused after all in-progress subframes have been * completed. 
*/ - if (txtid->baw_head != txtid->baw_tail) + if (txtid->baw_head != txtid->baw_tail) { txtid->state |= AGGR_CLEANUP; - else + ret = false; + txtid->stop_cb = !flush; + } else { txtid->state &= ~AGGR_ADDBA_COMPLETE; + } - ath_tx_flush_tid(sc, txtid); + ath_tx_flush_tid(sc, txtid, flush); ath_txq_unlock_complete(sc, txq); + return ret; } void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, @@ -2415,6 +2437,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) tid->ac = &an->ac[acno]; tid->state &= ~AGGR_ADDBA_COMPLETE; tid->state &= ~AGGR_ADDBA_PROGRESS; + tid->stop_cb = false; } for (acno = 0, ac = &an->ac[acno]; @@ -2451,8 +2474,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an) } ath_tid_drain(sc, txq, tid); - tid->state &= ~AGGR_ADDBA_COMPLETE; - tid->state &= ~AGGR_CLEANUP; + ath_tx_clear_tid(sc, tid); ath_txq_unlock(sc, txq); } diff --git a/trunk/drivers/net/wireless/b43/dma.c b/trunk/drivers/net/wireless/b43/dma.c index 523355b87659..f7c70b3a6ea9 100644 --- a/trunk/drivers/net/wireless/b43/dma.c +++ b/trunk/drivers/net/wireless/b43/dma.c @@ -1728,6 +1728,25 @@ static void dma_rx(struct b43_dmaring *ring, int *slot) sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); } +void b43_dma_handle_rx_overflow(struct b43_dmaring *ring) +{ + int current_slot, previous_slot; + + B43_WARN_ON(ring->tx); + + /* Device has filled all buffers, drop all packets and let TCP + * decrease speed. + * Decrement RX index by one will let the device to see all slots + * as free again + */ + /* + *TODO: How to increase rx_drop in mac80211? + */ + current_slot = ring->ops->get_current_rxslot(ring); + previous_slot = prev_slot(ring, current_slot); + ring->ops->set_current_rxslot(ring, previous_slot); +} + void b43_dma_rx(struct b43_dmaring *ring) { const struct b43_dma_ops *ops = ring->ops; diff --git a/trunk/drivers/net/wireless/b43/dma.h b/trunk/drivers/net/wireless/b43/dma.h index 9fdd1983079c..df8c8cdcbdb5 100644 --- a/trunk/drivers/net/wireless/b43/dma.h +++ b/trunk/drivers/net/wireless/b43/dma.h @@ -9,7 +9,7 @@ /* DMA-Interrupt reasons. */ #define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ | (1 << 14) | (1 << 15)) -#define B43_DMAIRQ_NONFATALMASK (1 << 13) +#define B43_DMAIRQ_RDESC_UFLOW (1 << 13) #define B43_DMAIRQ_RX_DONE (1 << 16) /*** 32-bit DMA Engine. 
***/ @@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev, void b43_dma_handle_txstatus(struct b43_wldev *dev, const struct b43_txstatus *status); +void b43_dma_handle_rx_overflow(struct b43_dmaring *ring); + void b43_dma_rx(struct b43_dmaring *ring); void b43_dma_direct_fifo_rx(struct b43_wldev *dev, diff --git a/trunk/drivers/net/wireless/b43/main.c b/trunk/drivers/net/wireless/b43/main.c index d377f77d30b5..6dd07e2ec595 100644 --- a/trunk/drivers/net/wireless/b43/main.c +++ b/trunk/drivers/net/wireless/b43/main.c @@ -1902,30 +1902,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) } } - if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | - B43_DMAIRQ_NONFATALMASK))) { - if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { - b43err(dev->wl, "Fatal DMA error: " - "0x%08X, 0x%08X, 0x%08X, " - "0x%08X, 0x%08X, 0x%08X\n", - dma_reason[0], dma_reason[1], - dma_reason[2], dma_reason[3], - dma_reason[4], dma_reason[5]); - b43err(dev->wl, "This device does not support DMA " + if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) { + b43err(dev->wl, + "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n", + dma_reason[0], dma_reason[1], + dma_reason[2], dma_reason[3], + dma_reason[4], dma_reason[5]); + b43err(dev->wl, "This device does not support DMA " "on your system. It will now be switched to PIO.\n"); - /* Fall back to PIO transfers if we get fatal DMA errors! */ - dev->use_pio = true; - b43_controller_restart(dev, "DMA error"); - return; - } - if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) { - b43err(dev->wl, "DMA error: " - "0x%08X, 0x%08X, 0x%08X, " - "0x%08X, 0x%08X, 0x%08X\n", - dma_reason[0], dma_reason[1], - dma_reason[2], dma_reason[3], - dma_reason[4], dma_reason[5]); - } + /* Fall back to PIO transfers if we get fatal DMA errors! */ + dev->use_pio = true; + b43_controller_restart(dev, "DMA error"); + return; } if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) @@ -1944,6 +1932,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev) handle_irq_noise(dev); /* Check the DMA reason registers for received data. 
*/ + if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) { + if (B43_DEBUG) + b43warn(dev->wl, "RX descriptor underrun\n"); + b43_dma_handle_rx_overflow(dev->dma.rx_ring); + } if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { if (b43_using_pio_transfers(dev)) b43_pio_rx(dev->pio.rx_queue); @@ -2001,7 +1994,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev) return IRQ_NONE; dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) - & 0x0001DC00; + & 0x0001FC00; dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) & 0x0000DC00; dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) @@ -3130,7 +3123,7 @@ static int b43_chip_init(struct b43_wldev *dev) b43_write32(dev, 0x018C, 0x02000000); } b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); - b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); + b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00); b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 6d758f285352..761f501959a9 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -4140,6 +4140,10 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = { .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO) }, + { + .max = 1, + .types = BIT(NL80211_IFTYPE_P2P_DEVICE) + } }; static const struct ieee80211_iface_combination brcmf_iface_combos[] = { { @@ -4197,7 +4201,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO); + BIT(NL80211_IFTYPE_P2P_GO) | + BIT(NL80211_IFTYPE_P2P_DEVICE); wiphy->iface_combinations = brcmf_iface_combos; wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-mac.c b/trunk/drivers/net/wireless/iwlegacy/4965-mac.c index b8f82e688c72..9a95045c97b6 100644 --- a/trunk/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/trunk/drivers/net/wireless/iwlegacy/4965-mac.c @@ -5741,8 +5741,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length) hw->flags = IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | - IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | - IEEE80211_HW_SUPPORTS_DYNAMIC_PS; + IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS; if (il->cfg->sku & IL_SKU_N) hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | diff --git a/trunk/drivers/net/wireless/iwlegacy/common.c b/trunk/drivers/net/wireless/iwlegacy/common.c index 592d0aa634a8..e9a3cbc409ae 100644 --- a/trunk/drivers/net/wireless/iwlegacy/common.c +++ b/trunk/drivers/net/wireless/iwlegacy/common.c @@ -1423,7 +1423,7 @@ il_setup_rx_scan_handlers(struct il_priv *il) } EXPORT_SYMBOL(il_setup_rx_scan_handlers); -inline u16 +u16 il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, u8 n_probes) { diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/trunk/drivers/net/wireless/iwlwifi/mvm/fw-api.h index 191dcae8ba47..c6384555aab4 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/fw-api.h +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/fw-api.h @@ -173,6 +173,8 @@ enum { 
REPLY_DEBUG_CMD = 0xf0, DEBUG_LOG_MSG = 0xf7, + MCAST_FILTER_CMD = 0xd0, + /* D3 commands/notifications */ D3_CONFIG_CMD = 0xd3, PROT_OFFLOAD_CONFIG_CMD = 0xd4, @@ -948,4 +950,29 @@ struct iwl_set_calib_default_cmd { u8 data[0]; } __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ +#define MAX_PORT_ID_NUM 2 + +/** + * struct iwl_mcast_filter_cmd - configure multicast filter. + * @filter_own: Set 1 to filter out multicast packets sent by station itself + * @port_id: Multicast MAC addresses array specifier. This is a strange way + * to identify network interface adopted in host-device IF. + * It is used by FW as index in array of addresses. This array has + * MAX_PORT_ID_NUM members. + * @count: Number of MAC addresses in the array + * @pass_all: Set 1 to pass all multicast packets. + * @bssid: current association BSSID. + * @addr_list: Place holder for array of MAC addresses. + * IMPORTANT: add padding if necessary to ensure DWORD alignment. + */ +struct iwl_mcast_filter_cmd { + u8 filter_own; + u8 port_id; + u8 count; + u8 pass_all; + u8 bssid[6]; + u8 reserved[2]; + u8 addr_list[0]; +} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */ + #endif /* __fw_api_h__ */ diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/trunk/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c index e6eca4d66f6c..b2cc3d98e0f7 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c @@ -586,10 +586,12 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm, */ static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_mac_data_sta *ctxt_sta) + struct iwl_mac_data_sta *ctxt_sta, + bool force_assoc_off) { /* We need the dtim_period to set the MAC as associated */ - if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) { + if (vif->bss_conf.assoc && vif->bss_conf.dtim_period && + !force_assoc_off) { u32 dtim_offs; /* @@ -659,7 +661,8 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm, cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON); /* Fill the data specific for station mode */ - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta); + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta, + action == FW_CTXT_ACTION_ADD); return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } @@ -677,7 +680,8 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm, iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); /* Fill the data specific for station mode */ - iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta); + iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta, + action == FW_CTXT_ACTION_ADD); cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & IEEE80211_P2P_OPPPS_CTWINDOW_MASK); diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/trunk/drivers/net/wireless/iwlwifi/mvm/mac80211.c index dd158ec571fb..a5eb8c82f16a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/mac80211.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/mac80211.c @@ -701,6 +701,20 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, *total_flags = 0; } +static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm, + struct ieee80211_vif *vif) +{ + struct iwl_mcast_filter_cmd mcast_filter_cmd = { + .pass_all = 1, + }; + + memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN); + + return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC, + sizeof(mcast_filter_cmd), + &mcast_filter_cmd); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct 
ieee80211_bss_conf *bss_conf, @@ -722,6 +736,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, return; } iwl_mvm_bt_coex_vif_assoc(mvm, vif); + iwl_mvm_configure_mcast_filter(mvm, vif); } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { /* remove AP station now that the MAC is unassoc */ ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); @@ -931,7 +946,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, switch (cmd) { case STA_NOTIFY_SLEEP: - if (atomic_read(&mvmsta->pending_frames) > 0) + if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0) ieee80211_sta_block_awake(hw, sta, true); /* * The fw updates the STA to be asleep. Tx packets on the Tx diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h b/trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h index 8269bc562951..9f46b23801bc 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/mvm.h @@ -292,6 +292,7 @@ struct iwl_mvm { struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; struct work_struct sta_drained_wk; unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; + atomic_t pending_frames[IWL_MVM_STATION_COUNT]; /* configured by mac80211 */ u32 rts_threshold; diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/ops.c b/trunk/drivers/net/wireless/iwlwifi/mvm/ops.c index fe031d304d1e..b29c31a41594 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/ops.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/ops.c @@ -292,6 +292,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = { CMD(BT_COEX_PROT_ENV), CMD(BT_PROFILE_NOTIFICATION), CMD(BT_CONFIG), + CMD(MCAST_FILTER_CMD), }; #undef CMD diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/scan.c b/trunk/drivers/net/wireless/iwlwifi/mvm/scan.c index 2157b0f8ced5..2476e43799d5 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -298,6 +298,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm, else cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); + /* + * TODO: This is a WA due to a bug in the FW AUX framework that does not + * properly handle time events that fail to be scheduled + */ + cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); + cmd->repeats = cpu_to_le32(1); /* diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/sta.c b/trunk/drivers/net/wireless/iwlwifi/mvm/sta.c index 0fd96e4da461..5c664ed54400 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/sta.c @@ -219,7 +219,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; /* HW restart, don't assume the memory has been zeroed */ - atomic_set(&mvm_sta->pending_frames, 0); + atomic_set(&mvm->pending_frames[sta_id], 0); mvm_sta->tid_disable_agg = 0; mvm_sta->tfd_queue_msk = 0; for (i = 0; i < IEEE80211_NUM_ACS; i++) @@ -406,15 +406,22 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; } + /* + * Make sure that the tx response code sees the station as -EBUSY and + * calls the drain worker. + */ + spin_lock_bh(&mvm_sta->lock); /* * There are frames pending on the AC queues for this station. * We need to wait until all the frames are drained... 
*/ - if (atomic_read(&mvm_sta->pending_frames)) { - ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); + if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], ERR_PTR(-EBUSY)); + spin_unlock_bh(&mvm_sta->lock); + ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); } else { + spin_unlock_bh(&mvm_sta->lock); ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); } diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/sta.h b/trunk/drivers/net/wireless/iwlwifi/mvm/sta.h index 12abd2d71835..a4ddce77aaae 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/sta.h +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/sta.h @@ -274,7 +274,6 @@ struct iwl_mvm_tid_data { * @bt_reduced_txpower: is reduced tx power enabled for this station * @lock: lock to protect the whole struct. Since %tid_data is access from Tx * and from Tx response flow, it needs a spinlock. - * @pending_frames: number of frames for this STA on the shared Tx queues. * @tid_data: per tid data. Look at %iwl_mvm_tid_data. * * When mac80211 creates a station it reserves some space (hw->sta_data_size) @@ -290,7 +289,6 @@ struct iwl_mvm_sta { u8 max_agg_bufsize; bool bt_reduced_txpower; spinlock_t lock; - atomic_t pending_frames; struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; struct iwl_lq_sta lq_sta; struct ieee80211_vif *vif; diff --git a/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c b/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c index 479074303bd7..f212f16502ff 100644 --- a/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/trunk/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -416,9 +416,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, spin_unlock(&mvmsta->lock); - if (mvmsta->vif->type == NL80211_IFTYPE_AP && - txq_id < IWL_MVM_FIRST_AGG_QUEUE) - atomic_inc(&mvmsta->pending_frames); + if (txq_id < IWL_MVM_FIRST_AGG_QUEUE) + atomic_inc(&mvm->pending_frames[mvmsta->sta_id]); return 0; @@ -680,16 +679,41 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, /* * If the txq is not an AMPDU queue, there is no chance we freed * several skbs. Check that out... - * If there are no pending frames for this STA, notify mac80211 that - * this station can go to sleep in its STA table. */ - if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && mvmsta && - !WARN_ON(skb_freed > 1) && - mvmsta->vif->type == NL80211_IFTYPE_AP && - atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) { - ieee80211_sta_block_awake(mvm->hw, sta, false); - set_bit(sta_id, mvm->sta_drained); - schedule_work(&mvm->sta_drained_wk); + if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) && + atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) { + if (mvmsta) { + /* + * If there are no pending frames for this STA, notify + * mac80211 that this station can go to sleep in its + * STA table. + */ + if (mvmsta->vif->type == NL80211_IFTYPE_AP) + ieee80211_sta_block_awake(mvm->hw, sta, false); + /* + * We might very well have taken mvmsta pointer while + * the station was being removed. The remove flow might + * have seen a pending_frame (because we didn't take + * the lock) even if now the queues are drained. So make + * really sure now that this the station is not being + * removed. If it is, run the drain worker to remove it. + */ + spin_lock_bh(&mvmsta->lock); + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); + if (IS_ERR_OR_NULL(sta)) { + /* + * Station disappeared in the meantime: + * so we are draining. 
+ */ + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); + } + spin_unlock_bh(&mvmsta->lock); + } else if (!mvmsta) { + /* Tx response without STA, so we are draining */ + set_bit(sta_id, mvm->sta_drained); + schedule_work(&mvm->sta_drained_wk); + } } rcu_read_unlock(); diff --git a/trunk/drivers/net/wireless/mac80211_hwsim.c b/trunk/drivers/net/wireless/mac80211_hwsim.c index b878a32e7a98..cb34c7895f2a 100644 --- a/trunk/drivers/net/wireless/mac80211_hwsim.c +++ b/trunk/drivers/net/wireless/mac80211_hwsim.c @@ -1723,11 +1723,11 @@ static void mac80211_hwsim_free(void) class_destroy(hwsim_class); } - -static struct device_driver mac80211_hwsim_driver = { - .name = "mac80211_hwsim", - .bus = &platform_bus_type, - .owner = THIS_MODULE, +static struct platform_driver mac80211_hwsim_driver = { + .driver = { + .name = "mac80211_hwsim", + .owner = THIS_MODULE, + }, }; static const struct net_device_ops hwsim_netdev_ops = { @@ -2219,7 +2219,7 @@ static int __init init_mac80211_hwsim(void) spin_lock_init(&hwsim_radio_lock); INIT_LIST_HEAD(&hwsim_radios); - err = driver_register(&mac80211_hwsim_driver); + err = platform_driver_register(&mac80211_hwsim_driver); if (err) return err; @@ -2254,7 +2254,7 @@ static int __init init_mac80211_hwsim(void) err = -ENOMEM; goto failed_drvdata; } - data->dev->driver = &mac80211_hwsim_driver; + data->dev->driver = &mac80211_hwsim_driver.driver; err = device_bind_driver(data->dev); if (err != 0) { printk(KERN_DEBUG @@ -2564,7 +2564,7 @@ static int __init init_mac80211_hwsim(void) failed: mac80211_hwsim_free(); failed_unregister_driver: - driver_unregister(&mac80211_hwsim_driver); + platform_driver_unregister(&mac80211_hwsim_driver); return err; } module_init(init_mac80211_hwsim); @@ -2577,6 +2577,6 @@ static void __exit exit_mac80211_hwsim(void) mac80211_hwsim_free(); unregister_netdev(hwsim_mon); - driver_unregister(&mac80211_hwsim_driver); + platform_driver_unregister(&mac80211_hwsim_driver); } module_exit(exit_mac80211_hwsim); diff --git a/trunk/drivers/net/wireless/mwifiex/cfg80211.c b/trunk/drivers/net/wireless/mwifiex/cfg80211.c index d3c8ece980d8..e42b266a023a 100644 --- a/trunk/drivers/net/wireless/mwifiex/cfg80211.c +++ b/trunk/drivers/net/wireless/mwifiex/cfg80211.c @@ -2234,9 +2234,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) if (wdev->netdev->reg_state == NETREG_REGISTERED) unregister_netdevice(wdev->netdev); - if (wdev->netdev->reg_state == NETREG_UNREGISTERED) - free_netdev(wdev->netdev); - /* Clear the priv in adapter */ priv->netdev = NULL; diff --git a/trunk/drivers/net/wireless/mwifiex/cmdevt.c b/trunk/drivers/net/wireless/mwifiex/cmdevt.c index 74db0d24a579..26755d9acb55 100644 --- a/trunk/drivers/net/wireless/mwifiex/cmdevt.c +++ b/trunk/drivers/net/wireless/mwifiex/cmdevt.c @@ -1191,6 +1191,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; adapter->is_hs_configured = false; + adapter->is_suspended = false; mwifiex_hs_activated_event(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), false); diff --git a/trunk/drivers/net/wireless/mwifiex/main.c b/trunk/drivers/net/wireless/mwifiex/main.c index 121443a0f2a1..2eb88ea9acf7 100644 --- a/trunk/drivers/net/wireless/mwifiex/main.c +++ b/trunk/drivers/net/wireless/mwifiex/main.c @@ -655,6 +655,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv, struct net_device *dev) { dev->netdev_ops = &mwifiex_netdev_ops; + dev->destructor = free_netdev; /* Initialize 
private structure */ priv->current_key_index = 0; priv->media_connected = false; diff --git a/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c b/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c index 311d0b26b81c..1a8a19dbd635 100644 --- a/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/trunk/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -96,7 +96,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, } else { /* Multicast */ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; - if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) { + if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { dev_dbg(priv->adapter->dev, "info: Enabling All Multicast!\n"); priv->curr_pkt_filter |= @@ -108,20 +108,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, dev_dbg(priv->adapter->dev, "info: Set multicast list=%d\n", mcast_list->num_multicast_addr); - /* Set multicast addresses to firmware */ - if (old_pkt_filter == priv->curr_pkt_filter) { - /* Send request to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } else { - /* Send request to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } + /* Send multicast addresses to firmware */ + ret = mwifiex_send_cmd_async(priv, + HostCmd_CMD_MAC_MULTICAST_ADR, + HostCmd_ACT_GEN_SET, 0, + mcast_list); } } } diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/trunk/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h index d3a02e73f53a..21ca33a7c770 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h @@ -550,7 +550,7 @@ do { \ rxmcs == DESC92C_RATE11M) struct phy_rx_agc_info_t { - #if __LITTLE_ENDIAN + #ifdef __LITTLE_ENDIAN u8 gain:7, trsw:1; #else u8 trsw:1, gain:7; @@ -574,7 +574,7 @@ struct phy_status_rpt { u8 stream_target_csi[2]; u8 sig_evm; u8 rsvd_3; -#if __LITTLE_ENDIAN +#ifdef __LITTLE_ENDIAN u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ u8 sgi_en:1; u8 rxsc:2; diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c index 23d640a4debd..938b1e670b93 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c @@ -349,6 +349,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/ + {RTL_USB_DEVICE(0x0846, 0xf001, rtl92cu_hal_cfg)}, /*On Netwrks N300MA*/ {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/ {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/ diff --git a/trunk/drivers/ntb/ntb_hw.c b/trunk/drivers/ntb/ntb_hw.c index f802e7c92356..2dacd19e1b8a 100644 --- a/trunk/drivers/ntb/ntb_hw.c +++ b/trunk/drivers/ntb/ntb_hw.c @@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val) */ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) { - if (mw > NTB_NUM_MW) + if (mw >= NTB_NUM_MW) return NULL; return ndev->mw[mw].vbase; @@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) */ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) { - if (mw > NTB_NUM_MW) + 
if (mw >= NTB_NUM_MW) return 0; return ndev->mw[mw].bar_sz; @@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) */ void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) { - if (mw > NTB_NUM_MW) + if (mw >= NTB_NUM_MW) return; dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, @@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) ndev->mw[i].vbase = ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), ndev->mw[i].bar_sz); - dev_info(&pdev->dev, "MW %d size %d\n", i, - (u32) pci_resource_len(pdev, MW_TO_BAR(i))); + dev_info(&pdev->dev, "MW %d size %llu\n", i, + pci_resource_len(pdev, MW_TO_BAR(i))); if (!ndev->mw[i].vbase) { dev_warn(&pdev->dev, "Cannot remap BAR %d\n", MW_TO_BAR(i)); diff --git a/trunk/drivers/ntb/ntb_transport.c b/trunk/drivers/ntb/ntb_transport.c index e0bdfd7f9930..f8d7081ee301 100644 --- a/trunk/drivers/ntb/ntb_transport.c +++ b/trunk/drivers/ntb/ntb_transport.c @@ -58,7 +58,7 @@ #include #include "ntb_hw.h" -#define NTB_TRANSPORT_VERSION 2 +#define NTB_TRANSPORT_VERSION 3 static unsigned int transport_mtu = 0x401E; module_param(transport_mtu, uint, 0644); @@ -173,10 +173,13 @@ struct ntb_payload_header { enum { VERSION = 0, - MW0_SZ, - MW1_SZ, - NUM_QPS, QP_LINKS, + NUM_QPS, + NUM_MWS, + MW0_SZ_HIGH, + MW0_SZ_LOW, + MW1_SZ_HIGH, + MW1_SZ_LOW, MAX_SPAD, }; @@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name) { struct ntb_transport_client_dev *client_dev; struct ntb_transport *nt; - int rc; + int rc, i = 0; if (list_empty(&ntb_transport_list)) return -ENODEV; @@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name) dev = &client_dev->dev; /* setup and register client devices */ - dev_set_name(dev, "%s", device_name); + dev_set_name(dev, "%s%d", device_name, i); dev->bus = &ntb_bus_type; dev->release = ntb_client_release; dev->parent = &ntb_query_pdev(nt->ndev)->dev; @@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name) } list_add_tail(&client_dev->entry, &nt->client_devs); + i++; } return 0; @@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, (qp_num / NTB_NUM_MW * rx_size); rx_size -= sizeof(struct ntb_rx_info); - qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info); - qp->rx_max_frame = min(transport_mtu, rx_size); + qp->rx_buff = qp->remote_rx_info + 1; + /* Due to housekeeping, there must be atleast 2 buffs */ + qp->rx_max_frame = min(transport_mtu, rx_size / 2); qp->rx_max_entry = rx_size / qp->rx_max_frame; qp->rx_index = 0; - qp->remote_rx_info->entry = qp->rx_max_entry; + qp->remote_rx_info->entry = qp->rx_max_entry - 1; /* setup the hdr offsets with 0's */ for (i = 0; i < qp->rx_max_entry; i++) { @@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt, qp->rx_pkts = 0; qp->tx_pkts = 0; + qp->tx_index = 0; +} + +static void ntb_free_mw(struct ntb_transport *nt, int num_mw) +{ + struct ntb_transport_mw *mw = &nt->mw[num_mw]; + struct pci_dev *pdev = ntb_query_pdev(nt->ndev); + + if (!mw->virt_addr) + return; + + dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr); + mw->virt_addr = NULL; } static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) @@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) struct ntb_transport_mw *mw = &nt->mw[num_mw]; struct pci_dev *pdev = ntb_query_pdev(nt->ndev); + /* No need to re-setup */ + if (mw->size == ALIGN(size, 4096)) 
+ return 0; + + if (mw->size != 0) + ntb_free_mw(nt, num_mw); + /* Alloc memory for receiving data. Must be 4k aligned */ mw->size = ALIGN(size, 4096); mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, GFP_KERNEL); if (!mw->virt_addr) { + mw->size = 0; dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", (int) mw->size); return -ENOMEM; @@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work) u32 val; int rc, i; - /* send the local info */ - rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); - if (rc) { - dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - 0, VERSION); - goto out; - } + /* send the local info, in the opposite order of the way we read it */ + for (i = 0; i < NTB_NUM_MW; i++) { + rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), + ntb_get_mw_size(ndev, i) >> 32); + if (rc) { + dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", + (u32)(ntb_get_mw_size(ndev, i) >> 32), + MW0_SZ_HIGH + (i * 2)); + goto out; + } - rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0)); - if (rc) { - dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - (u32) ntb_get_mw_size(ndev, 0), MW0_SZ); - goto out; + rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2), + (u32) ntb_get_mw_size(ndev, i)); + if (rc) { + dev_err(&pdev->dev, "Error writing %u to remote spad %d\n", + (u32) ntb_get_mw_size(ndev, i), + MW0_SZ_LOW + (i * 2)); + goto out; + } } - rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1)); + rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW); if (rc) { dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - (u32) ntb_get_mw_size(ndev, 1), MW1_SZ); + NTB_NUM_MW, NUM_MWS); goto out; } @@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work) goto out; } - rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); - if (rc) { - dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS); - goto out; - } - - rc = ntb_write_remote_spad(ndev, QP_LINKS, val); + rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); if (rc) { dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", - val, QP_LINKS); + NTB_TRANSPORT_VERSION, VERSION); goto out; } @@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work) goto out; dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); - rc = ntb_read_remote_spad(ndev, MW0_SZ, &val); + rc = ntb_read_remote_spad(ndev, NUM_MWS, &val); if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ); + dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS); goto out; } - if (!val) + if (val != NTB_NUM_MW) goto out; - dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val); + dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val); - rc = ntb_set_mw(nt, 0, val); - if (rc) - goto out; + for (i = 0; i < NTB_NUM_MW; i++) { + u64 val64; - rc = ntb_read_remote_spad(ndev, MW1_SZ, &val); - if (rc) { - dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ); - goto out; - } + rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val); + if (rc) { + dev_err(&pdev->dev, "Error reading remote spad %d\n", + MW0_SZ_HIGH + (i * 2)); + goto out1; + } - if (!val) - goto out; - dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val); + val64 = (u64) val << 32; - rc = ntb_set_mw(nt, 1, val); - if (rc) - goto out; + rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val); + if (rc) { + dev_err(&pdev->dev, "Error reading remote spad %d\n", + MW0_SZ_LOW + (i * 2)); + 
goto out1; + } + + val64 |= val; + + dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64); + + rc = ntb_set_mw(nt, i, val64); + if (rc) + goto out1; + } nt->transport_link = NTB_LINK_UP; @@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work) return; +out1: + for (i = 0; i < NTB_NUM_MW; i++) + ntb_free_mw(nt, i); out: if (ntb_hw_link_status(ndev)) schedule_delayed_work(&nt->link_work, @@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt, (qp_num / NTB_NUM_MW * tx_size); tx_size -= sizeof(struct ntb_rx_info); - qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info); - qp->tx_max_frame = min(transport_mtu, tx_size); + qp->tx_mw = qp->rx_info + 1; + /* Due to housekeeping, there must be atleast 2 buffs */ + qp->tx_max_frame = min(transport_mtu, tx_size / 2); qp->tx_max_entry = tx_size / qp->tx_max_frame; - qp->tx_index = 0; if (nt->debugfs_dir) { char debugfs_name[4]; @@ -897,10 +936,7 @@ void ntb_transport_free(void *transport) pdev = ntb_query_pdev(nt->ndev); for (i = 0; i < NTB_NUM_MW; i++) - if (nt->mw[i].virt_addr) - dma_free_coherent(&pdev->dev, nt->mw[i].size, - nt->mw[i].virt_addr, - nt->mw[i].dma_addr); + ntb_free_mw(nt, i); kfree(nt->qps); ntb_unregister_transport(nt->ndev); @@ -999,11 +1035,16 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp) static void ntb_transport_rx(unsigned long data) { struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; - int rc; + int rc, i; - do { + /* Limit the number of packets processed in a single interrupt to + * provide fairness to others + */ + for (i = 0; i < qp->rx_max_entry; i++) { rc = ntb_process_rxc(qp); - } while (!rc); + if (rc) + break; + } } static void ntb_transport_rxc_db(void *data, int db_num) @@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue); */ void ntb_transport_free_queue(struct ntb_transport_qp *qp) { - struct pci_dev *pdev = ntb_query_pdev(qp->ndev); + struct pci_dev *pdev; struct ntb_queue_entry *entry; if (!qp) return; + pdev = ntb_query_pdev(qp->ndev); + cancel_delayed_work_sync(&qp->link_work); ntb_unregister_db_callback(qp->ndev, qp->qp_num); @@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up); */ void ntb_transport_link_down(struct ntb_transport_qp *qp) { - struct pci_dev *pdev = ntb_query_pdev(qp->ndev); + struct pci_dev *pdev; int rc, val; if (!qp) return; + pdev = ntb_query_pdev(qp->ndev); qp->client_ready = NTB_LINK_DOWN; rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); @@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down); */ bool ntb_transport_link_query(struct ntb_transport_qp *qp) { + if (!qp) + return false; + return qp->qp_link == NTB_LINK_UP; } EXPORT_SYMBOL_GPL(ntb_transport_link_query); @@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query); */ unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) { + if (!qp) + return 0; + return qp->qp_num; } EXPORT_SYMBOL_GPL(ntb_transport_qp_num); @@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num); */ unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) { + if (!qp) + return 0; + return qp->tx_max_frame - sizeof(struct ntb_payload_header); } EXPORT_SYMBOL_GPL(ntb_transport_max_size); diff --git a/trunk/drivers/of/base.c b/trunk/drivers/of/base.c index c76d16c972cc..f53b992f060a 100644 --- a/trunk/drivers/of/base.c +++ b/trunk/drivers/of/base.c @@ -1208,11 +1208,11 @@ static int __of_parse_phandle_with_args(const struct device_node *np, out_args->args_count = count; for (i = 0; i < count; i++) 
out_args->args[i] = be32_to_cpup(list++); + } else { + of_node_put(node); } /* Found it! return success */ - if (node) - of_node_put(node); return 0; } diff --git a/trunk/drivers/of/of_mdio.c b/trunk/drivers/of/of_mdio.c index 23049aeca662..d5a57a9e329c 100644 --- a/trunk/drivers/of/of_mdio.c +++ b/trunk/drivers/of/of_mdio.c @@ -84,13 +84,10 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) phy = get_phy_device(mdio, addr, is_c45); if (!phy || IS_ERR(phy)) { - phy = phy_device_create(mdio, addr, 0, false, NULL); - if (!phy || IS_ERR(phy)) { - dev_err(&mdio->dev, - "error creating PHY at address %i\n", - addr); - continue; - } + dev_err(&mdio->dev, + "cannot get PHY at address %i\n", + addr); + continue; } /* Associate the OF node with the device structure so it diff --git a/trunk/drivers/pci/bus.c b/trunk/drivers/pci/bus.c index 748f8f3e9ff5..32e66a6f12d9 100644 --- a/trunk/drivers/pci/bus.c +++ b/trunk/drivers/pci/bus.c @@ -174,6 +174,7 @@ int pci_bus_add_device(struct pci_dev *dev) * Can not put in pci_device_add yet because resources * are not assigned yet for some devices. */ + pci_fixup_device(pci_fixup_final, dev); pci_create_sysfs_dev_files(dev); dev->match_driver = true; diff --git a/trunk/drivers/pci/hotplug/acpiphp_glue.c b/trunk/drivers/pci/hotplug/acpiphp_glue.c index 96fed19c6d90..716aa93fff76 100644 --- a/trunk/drivers/pci/hotplug/acpiphp_glue.c +++ b/trunk/drivers/pci/hotplug/acpiphp_glue.c @@ -950,6 +950,20 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv) return AE_OK ; } +void acpiphp_check_host_bridge(acpi_handle handle) +{ + struct acpiphp_bridge *bridge; + + bridge = acpiphp_handle_to_bridge(handle); + if (bridge) { + acpiphp_check_bridge(bridge); + put_bridge(bridge); + } + + acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, + ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL); +} + static void _handle_hotplug_event_bridge(struct work_struct *work) { struct acpiphp_bridge *bridge; diff --git a/trunk/drivers/pci/msi.c b/trunk/drivers/pci/msi.c index d40bed726769..2c1075213bec 100644 --- a/trunk/drivers/pci/msi.c +++ b/trunk/drivers/pci/msi.c @@ -563,8 +563,10 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ entry->msi_attrib.pos = dev->msi_cap; - entry->mask_pos = dev->msi_cap + (control & PCI_MSI_FLAGS_64BIT) ? 
- PCI_MSI_MASK_64 : PCI_MSI_MASK_32; + if (control & PCI_MSI_FLAGS_64BIT) + entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; + else + entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32; /* All MSIs are unmasked by default, Mask them all */ if (entry->msi_attrib.maskbit) pci_read_config_dword(dev, entry->mask_pos, &entry->masked); diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c index 631aeb7d2d2d..70f10fa3c1b2 100644 --- a/trunk/drivers/pci/probe.c +++ b/trunk/drivers/pci/probe.c @@ -1341,7 +1341,6 @@ void pci_device_add(struct pci_dev *dev, struct pci_bus *bus) list_add_tail(&dev->bus_list, &bus->devices); up_write(&pci_bus_sem); - pci_fixup_device(pci_fixup_final, dev); ret = pcibios_add_device(dev); WARN_ON(ret < 0); diff --git a/trunk/drivers/pcmcia/m8xx_pcmcia.c b/trunk/drivers/pcmcia/m8xx_pcmcia.c index a3a851e49321..18c0d8d1ddf7 100644 --- a/trunk/drivers/pcmcia/m8xx_pcmcia.c +++ b/trunk/drivers/pcmcia/m8xx_pcmcia.c @@ -68,12 +68,6 @@ MODULE_LICENSE("Dual MPL/GPL"); #if !defined(CONFIG_PCMCIA_SLOT_A) && !defined(CONFIG_PCMCIA_SLOT_B) -/* The RPX series use SLOT_B */ -#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) -#define CONFIG_PCMCIA_SLOT_B -#define CONFIG_BD_IS_MHZ -#endif - /* The ADS board use SLOT_A */ #ifdef CONFIG_ADS #define CONFIG_PCMCIA_SLOT_A @@ -253,81 +247,6 @@ static irqreturn_t m8xx_interrupt(int irq, void *dev); #define PCMCIA_BMT_LIMIT (15*4) /* Bus Monitor Timeout value */ -/* ------------------------------------------------------------------------- */ -/* board specific stuff: */ -/* voltage_set(), hardware_enable() and hardware_disable() */ -/* ------------------------------------------------------------------------- */ -/* RPX Boards from Embedded Planet */ - -#if defined(CONFIG_RPXCLASSIC) || defined(CONFIG_RPXLITE) - -/* The RPX boards seems to have it's bus monitor timeout set to 6*8 clocks. - * SYPCR is write once only, therefore must the slowest memory be faster - * than the bus monitor or we will get a machine check due to the bus timeout. 
- */ - -#define PCMCIA_BOARD_MSG "RPX CLASSIC or RPX LITE" - -#undef PCMCIA_BMT_LIMIT -#define PCMCIA_BMT_LIMIT (6*8) - -static int voltage_set(int slot, int vcc, int vpp) -{ - u32 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= BCSR1_PCVCTL4; - break; - case 50: - reg |= BCSR1_PCVCTL5; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= BCSR1_PCVCTL6; - else - return 1; - break; - case 120: - reg |= BCSR1_PCVCTL7; - default: - return 1; - } - - if (!((vcc == 50) || (vcc == 0))) - return 1; - - /* first, turn off all power */ - - out_be32(((u32 *) RPX_CSR_ADDR), - in_be32(((u32 *) RPX_CSR_ADDR)) & ~(BCSR1_PCVCTL4 | - BCSR1_PCVCTL5 | - BCSR1_PCVCTL6 | - BCSR1_PCVCTL7)); - - /* enable new powersettings */ - - out_be32(((u32 *) RPX_CSR_ADDR), in_be32(((u32 *) RPX_CSR_ADDR)) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_RPXCLASSIC */ - /* FADS Boards from Motorola */ #if defined(CONFIG_FADS) @@ -419,65 +338,6 @@ static inline int voltage_set(int slot, int vcc, int vpp) #endif -/* ------------------------------------------------------------------------- */ -/* Motorola MBX860 */ - -#if defined(CONFIG_MBX) - -#define PCMCIA_BOARD_MSG "MBX" - -static int voltage_set(int slot, int vcc, int vpp) -{ - u8 reg = 0; - - switch (vcc) { - case 0: - break; - case 33: - reg |= CSR2_VCC_33; - break; - case 50: - reg |= CSR2_VCC_50; - break; - default: - return 1; - } - - switch (vpp) { - case 0: - break; - case 33: - case 50: - if (vcc == vpp) - reg |= CSR2_VPP_VCC; - else - return 1; - break; - case 120: - if ((vcc == 33) || (vcc == 50)) - reg |= CSR2_VPP_12; - else - return 1; - default: - return 1; - } - - /* first, turn off all power */ - out_8((u8 *) MBX_CSR2_ADDR, - in_8((u8 *) MBX_CSR2_ADDR) & ~(CSR2_VCC_MASK | CSR2_VPP_MASK)); - - /* enable new powersettings */ - out_8((u8 *) MBX_CSR2_ADDR, in_8((u8 *) MBX_CSR2_ADDR) | reg); - - return 0; -} - -#define socket_get(_slot_) PCMCIA_SOCKET_KEY_5V -#define hardware_enable(_slot_) /* No hardware to enable */ -#define hardware_disable(_slot_) /* No hardware to disable */ - -#endif /* CONFIG_MBX */ - #if defined(CONFIG_PRxK) #include extern volatile fpga_pc_regs *fpga_pc; diff --git a/trunk/drivers/pinctrl/pinctrl-abx500.c b/trunk/drivers/pinctrl/pinctrl-abx500.c index aa17f7580f61..6d4532702f80 100644 --- a/trunk/drivers/pinctrl/pinctrl-abx500.c +++ b/trunk/drivers/pinctrl/pinctrl-abx500.c @@ -851,23 +851,12 @@ static int abx500_gpio_probe(struct platform_device *pdev) if (abx500_pdata) pdata = abx500_pdata->gpio; - if (!pdata) { - if (np) { - const struct of_device_id *match; - match = of_match_device(abx500_gpio_match, &pdev->dev); - if (!match) - return -ENODEV; - id = (unsigned long)match->data; - } else { - dev_err(&pdev->dev, "gpio dt and platform data missing\n"); - return -ENODEV; - } + if (!(pdata || np)) { + dev_err(&pdev->dev, "gpio dt and platform data missing\n"); + return -ENODEV; } - if (platid) - id = platid->driver_data; - pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl), GFP_KERNEL); if (pct == NULL) { @@ -882,6 +871,16 @@ static int abx500_gpio_probe(struct platform_device *pdev) pct->chip.dev = &pdev->dev; pct->chip.base = (np) ? 
-1 : pdata->gpio_base; + if (platid) + id = platid->driver_data; + else if (np) { + const struct of_device_id *match; + + match = of_match_device(abx500_gpio_match, &pdev->dev); + if (match) + id = (unsigned long)match->data; + } + /* initialize the lock */ mutex_init(&pct->lock); @@ -900,8 +899,7 @@ static int abx500_gpio_probe(struct platform_device *pdev) abx500_pinctrl_ab8505_init(&pct->soc); break; default: - dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", - (int) platid->driver_data); + dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", id); mutex_destroy(&pct->lock); return -EINVAL; } diff --git a/trunk/drivers/pinctrl/pinctrl-coh901.c b/trunk/drivers/pinctrl/pinctrl-coh901.c index edde3acc4186..a67af419f531 100644 --- a/trunk/drivers/pinctrl/pinctrl-coh901.c +++ b/trunk/drivers/pinctrl/pinctrl-coh901.c @@ -713,11 +713,6 @@ static int __init u300_gpio_probe(struct platform_device *pdev) gpio->dev = &pdev->dev; memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!memres) { - dev_err(gpio->dev, "could not get GPIO memory resource\n"); - return -ENODEV; - } - gpio->base = devm_ioremap_resource(&pdev->dev, memres); if (IS_ERR(gpio->base)) return PTR_ERR(gpio->base); diff --git a/trunk/drivers/pinctrl/pinctrl-exynos5440.c b/trunk/drivers/pinctrl/pinctrl-exynos5440.c index 6038503ed929..32a48f44f574 100644 --- a/trunk/drivers/pinctrl/pinctrl-exynos5440.c +++ b/trunk/drivers/pinctrl/pinctrl-exynos5440.c @@ -1000,11 +1000,6 @@ static int exynos5440_pinctrl_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "cannot find IO resource\n"); - return -ENOENT; - } - priv->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->reg_base)) return PTR_ERR(priv->reg_base); diff --git a/trunk/drivers/pinctrl/pinctrl-lantiq.c b/trunk/drivers/pinctrl/pinctrl-lantiq.c index 615c5002b757..d22ca252b80d 100644 --- a/trunk/drivers/pinctrl/pinctrl-lantiq.c +++ b/trunk/drivers/pinctrl/pinctrl-lantiq.c @@ -52,7 +52,8 @@ static void ltq_pinctrl_dt_free_map(struct pinctrl_dev *pctldev, int i; for (i = 0; i < num_maps; i++) - if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) + if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN || + map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP) kfree(map[i].data.configs.configs); kfree(map); } diff --git a/trunk/drivers/pinctrl/pinctrl-samsung.c b/trunk/drivers/pinctrl/pinctrl-samsung.c index 976366899f68..055d0162098b 100644 --- a/trunk/drivers/pinctrl/pinctrl-samsung.c +++ b/trunk/drivers/pinctrl/pinctrl-samsung.c @@ -932,11 +932,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev) drvdata->dev = dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "cannot find IO resource\n"); - return -ENOENT; - } - drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(drvdata->virt_base)) return PTR_ERR(drvdata->virt_base); diff --git a/trunk/drivers/pinctrl/pinctrl-single.c b/trunk/drivers/pinctrl/pinctrl-single.c index 5f2d2bfd356e..b9fa04618601 100644 --- a/trunk/drivers/pinctrl/pinctrl-single.c +++ b/trunk/drivers/pinctrl/pinctrl-single.c @@ -1166,7 +1166,8 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs, (*map)->data.mux.function = np->name; if (pcs->is_pinconf) { - if (pcs_parse_pinconf(pcs, np, function, map)) + res = pcs_parse_pinconf(pcs, np, function, map); + if (res) goto free_pingroups; *num_maps = 2; } else { diff --git a/trunk/drivers/pinctrl/pinctrl-xway.c b/trunk/drivers/pinctrl/pinctrl-xway.c 
index f2977cff8366..e92132c76a6b 100644 --- a/trunk/drivers/pinctrl/pinctrl-xway.c +++ b/trunk/drivers/pinctrl/pinctrl-xway.c @@ -716,10 +716,6 @@ static int pinmux_xway_probe(struct platform_device *pdev) /* get and remap our register range */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Failed to get resource\n"); - return -ENOENT; - } xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(xway_info.membase[0])) return PTR_ERR(xway_info.membase[0]); diff --git a/trunk/drivers/pinctrl/sh-pfc/Kconfig b/trunk/drivers/pinctrl/sh-pfc/Kconfig index 0e1f99c33d47..f8a2ae413c7f 100644 --- a/trunk/drivers/pinctrl/sh-pfc/Kconfig +++ b/trunk/drivers/pinctrl/sh-pfc/Kconfig @@ -6,7 +6,7 @@ if ARCH_SHMOBILE || SUPERH config PINCTRL_SH_PFC # XXX move off the gpio dependency - depends on GENERIC_GPIO + depends on GPIOLIB select GPIO_SH_PFC if ARCH_REQUIRE_GPIOLIB select PINMUX select PINCONF @@ -40,19 +40,19 @@ config PINCTRL_PFC_R8A7779 config PINCTRL_PFC_SH7203 def_bool y depends on CPU_SUBTYPE_SH7203 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7264 def_bool y depends on CPU_SUBTYPE_SH7264 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7269 def_bool y depends on CPU_SUBTYPE_SH7269 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7372 @@ -68,55 +68,55 @@ config PINCTRL_PFC_SH73A0 config PINCTRL_PFC_SH7720 def_bool y depends on CPU_SUBTYPE_SH7720 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7722 def_bool y depends on CPU_SUBTYPE_SH7722 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7723 def_bool y depends on CPU_SUBTYPE_SH7723 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7724 def_bool y depends on CPU_SUBTYPE_SH7724 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7734 def_bool y depends on CPU_SUBTYPE_SH7734 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7757 def_bool y depends on CPU_SUBTYPE_SH7757 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7785 def_bool y depends on CPU_SUBTYPE_SH7785 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SH7786 def_bool y depends on CPU_SUBTYPE_SH7786 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC config PINCTRL_PFC_SHX3 def_bool y depends on CPU_SUBTYPE_SHX3 - depends on GENERIC_GPIO + depends on GPIOLIB select PINCTRL_SH_PFC endif diff --git a/trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c index b964cc550568..de43262398db 100644 --- a/trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c +++ b/trunk/drivers/pinctrl/vt8500/pinctrl-wm8750.c @@ -53,7 +53,7 @@ static const struct wmt_pinctrl_bank_registers wm8750_banks[] = { #define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6) #define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7) #define WMT_PIN_WAKEUP0 WMT_PIN(0, 16) -#define WMT_PIN_WAKEUP1 WMT_PIN(0, 16) +#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17) #define WMT_PIN_SD0CD WMT_PIN(0, 28) #define WMT_PIN_VDOUT0 WMT_PIN(1, 0) #define WMT_PIN_VDOUT1 WMT_PIN(1, 1) diff --git a/trunk/drivers/platform/x86/Kconfig b/trunk/drivers/platform/x86/Kconfig index 3338437b559b..85772616efbf 100644 --- a/trunk/drivers/platform/x86/Kconfig +++ 
b/trunk/drivers/platform/x86/Kconfig @@ -781,4 +781,12 @@ config APPLE_GMUX graphics as well as the backlight. Currently only backlight control is supported by the driver. +config PVPANIC + tristate "pvpanic device support" + depends on ACPI + ---help--- + This driver provides support for the pvpanic device. pvpanic is + a paravirtualized device provided by QEMU; it lets a virtual machine + (guest) communicate panic events to the host. + endif # X86_PLATFORM_DEVICES diff --git a/trunk/drivers/platform/x86/Makefile b/trunk/drivers/platform/x86/Makefile index ace2b38942fe..ef0ec746f78c 100644 --- a/trunk/drivers/platform/x86/Makefile +++ b/trunk/drivers/platform/x86/Makefile @@ -51,3 +51,5 @@ obj-$(CONFIG_INTEL_OAKTRAIL) += intel_oaktrail.o obj-$(CONFIG_SAMSUNG_Q10) += samsung-q10.o obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o + +obj-$(CONFIG_PVPANIC) += pvpanic.o diff --git a/trunk/drivers/platform/x86/asus-nb-wmi.c b/trunk/drivers/platform/x86/asus-nb-wmi.c index 210b5b872125..8fcb41e18b9c 100644 --- a/trunk/drivers/platform/x86/asus-nb-wmi.c +++ b/trunk/drivers/platform/x86/asus-nb-wmi.c @@ -171,6 +171,15 @@ static struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_x401u, }, + { + .callback = dmi_matched, + .ident = "ASUSTeK COMPUTER INC. X75A", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X75A"), + }, + .driver_data = &quirk_asus_x401u, + }, {}, }; diff --git a/trunk/drivers/platform/x86/dell-laptop.c b/trunk/drivers/platform/x86/dell-laptop.c index fa3ee6209572..1134119521ac 100644 --- a/trunk/drivers/platform/x86/dell-laptop.c +++ b/trunk/drivers/platform/x86/dell-laptop.c @@ -284,6 +284,7 @@ static void __init parse_da_table(const struct dmi_header *dm) { /* Final token is a terminator, so we don't want to copy it */ int tokens = (dm->length-11)/sizeof(struct calling_interface_token)-1; + struct calling_interface_token *new_da_tokens; struct calling_interface_structure *table = container_of(dm, struct calling_interface_structure, header); @@ -296,12 +297,13 @@ static void __init parse_da_table(const struct dmi_header *dm) da_command_address = table->cmdIOAddress; da_command_code = table->cmdIOCode; - da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * - sizeof(struct calling_interface_token), - GFP_KERNEL); + new_da_tokens = krealloc(da_tokens, (da_num_tokens + tokens) * + sizeof(struct calling_interface_token), + GFP_KERNEL); - if (!da_tokens) + if (!new_da_tokens) return; + da_tokens = new_da_tokens; memcpy(da_tokens+da_num_tokens, table->tokens, sizeof(struct calling_interface_token) * tokens); diff --git a/trunk/drivers/platform/x86/dell-wmi-aio.c b/trunk/drivers/platform/x86/dell-wmi-aio.c index 3f945457f71c..bcf8cc6b5537 100644 --- a/trunk/drivers/platform/x86/dell-wmi-aio.c +++ b/trunk/drivers/platform/x86/dell-wmi-aio.c @@ -34,6 +34,14 @@ MODULE_LICENSE("GPL"); #define EVENT_GUID1 "284A0E6B-380E-472A-921F-E52786257FB4" #define EVENT_GUID2 "02314822-307C-4F66-BF0E-48AEAEB26CC8" +struct dell_wmi_event { + u16 length; + /* 0x000: A hot key pressed or an event occurred + * 0x00F: A sequence of hot keys are pressed */ + u16 type; + u16 event[]; +}; + static const char *dell_wmi_aio_guids[] = { EVENT_GUID1, EVENT_GUID2, @@ -46,15 +54,41 @@ MODULE_ALIAS("wmi:"EVENT_GUID2); static const struct key_entry dell_wmi_aio_keymap[] = { { KE_KEY, 0xc0, { KEY_VOLUMEUP } }, { KE_KEY, 0xc1, { KEY_VOLUMEDOWN } }, + { KE_KEY, 0xe030, { KEY_VOLUMEUP } }, + { KE_KEY, 0xe02e, 
{ KEY_VOLUMEDOWN } }, + { KE_KEY, 0xe020, { KEY_MUTE } }, + { KE_KEY, 0xe027, { KEY_DISPLAYTOGGLE } }, + { KE_KEY, 0xe006, { KEY_BRIGHTNESSUP } }, + { KE_KEY, 0xe005, { KEY_BRIGHTNESSDOWN } }, + { KE_KEY, 0xe00b, { KEY_SWITCHVIDEOMODE } }, { KE_END, 0 } }; static struct input_dev *dell_wmi_aio_input_dev; +/* + * The new WMI event data format will follow the dell_wmi_event structure + * So, we will check if the buffer matches the format + */ +static bool dell_wmi_aio_event_check(u8 *buffer, int length) +{ + struct dell_wmi_event *event = (struct dell_wmi_event *)buffer; + + if (event == NULL || length < 6) + return false; + + if ((event->type == 0 || event->type == 0xf) && + event->length >= 2) + return true; + + return false; +} + static void dell_wmi_aio_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; + struct dell_wmi_event *event; acpi_status status; status = wmi_get_event_data(value, &response); @@ -65,7 +99,7 @@ static void dell_wmi_aio_notify(u32 value, void *context) obj = (union acpi_object *)response.pointer; if (obj) { - unsigned int scancode; + unsigned int scancode = 0; switch (obj->type) { case ACPI_TYPE_INTEGER: @@ -75,13 +109,22 @@ static void dell_wmi_aio_notify(u32 value, void *context) scancode, 1, true); break; case ACPI_TYPE_BUFFER: - /* Broken machines return the scancode in a buffer */ - if (obj->buffer.pointer && obj->buffer.length > 0) { - scancode = obj->buffer.pointer[0]; + if (dell_wmi_aio_event_check(obj->buffer.pointer, + obj->buffer.length)) { + event = (struct dell_wmi_event *) + obj->buffer.pointer; + scancode = event->event[0]; + } else { + /* Broken machines return the scancode in a + buffer */ + if (obj->buffer.pointer && + obj->buffer.length > 0) + scancode = obj->buffer.pointer[0]; + } + if (scancode) sparse_keymap_report_event( dell_wmi_aio_input_dev, scancode, 1, true); - } break; } } diff --git a/trunk/drivers/platform/x86/hp-wmi.c b/trunk/drivers/platform/x86/hp-wmi.c index 1a779bbfb87d..8df0c5a21be2 100644 --- a/trunk/drivers/platform/x86/hp-wmi.c +++ b/trunk/drivers/platform/x86/hp-wmi.c @@ -71,6 +71,14 @@ enum hp_wmi_event_ids { HPWMI_WIRELESS = 5, HPWMI_CPU_BATTERY_THROTTLE = 6, HPWMI_LOCK_SWITCH = 7, + HPWMI_LID_SWITCH = 8, + HPWMI_SCREEN_ROTATION = 9, + HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A, + HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B, + HPWMI_PROXIMITY_SENSOR = 0x0C, + HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D, + HPWMI_PEAKSHIFT_PERIOD = 0x0F, + HPWMI_BATTERY_CHARGE_PERIOD = 0x10, }; struct bios_args { @@ -536,6 +544,22 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_LOCK_SWITCH: break; + case HPWMI_LID_SWITCH: + break; + case HPWMI_SCREEN_ROTATION: + break; + case HPWMI_COOLSENSE_SYSTEM_MOBILE: + break; + case HPWMI_COOLSENSE_SYSTEM_HOT: + break; + case HPWMI_PROXIMITY_SENSOR: + break; + case HPWMI_BACKLIT_KB_BRIGHTNESS: + break; + case HPWMI_PEAKSHIFT_PERIOD: + break; + case HPWMI_BATTERY_CHARGE_PERIOD: + break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; diff --git a/trunk/drivers/platform/x86/hp_accel.c b/trunk/drivers/platform/x86/hp_accel.c index e64a7a870d42..a8e43cf70fac 100644 --- a/trunk/drivers/platform/x86/hp_accel.c +++ b/trunk/drivers/platform/x86/hp_accel.c @@ -362,7 +362,8 @@ static int lis3lv02d_suspend(struct device *dev) static int lis3lv02d_resume(struct device *dev) { - return lis3lv02d_poweron(&lis3_dev); + lis3lv02d_poweron(&lis3_dev); + return 0; } static SIMPLE_DEV_PM_OPS(hp_accel_pm, 
lis3lv02d_suspend, lis3lv02d_resume); diff --git a/trunk/drivers/platform/x86/ideapad-laptop.c b/trunk/drivers/platform/x86/ideapad-laptop.c index 17f00b8dc5cb..89c4519d48ac 100644 --- a/trunk/drivers/platform/x86/ideapad-laptop.c +++ b/trunk/drivers/platform/x86/ideapad-laptop.c @@ -640,7 +640,8 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) for (bit = 0; bit < 16; bit++) { if (test_bit(bit, &value)) { switch (bit) { - case 6: + case 0: /* Z580 */ + case 6: /* Z570 */ /* Thermal Management button */ ideapad_input_report(priv, 65); break; @@ -648,6 +649,9 @@ static void ideapad_check_special_buttons(struct ideapad_private *priv) /* OneKey Theater button */ ideapad_input_report(priv, 64); break; + default: + pr_info("Unknown special button: %lu\n", bit); + break; } } } diff --git a/trunk/drivers/platform/x86/pvpanic.c b/trunk/drivers/platform/x86/pvpanic.c new file mode 100644 index 000000000000..47ae0c47d4b5 --- /dev/null +++ b/trunk/drivers/platform/x86/pvpanic.c @@ -0,0 +1,124 @@ +/* + * pvpanic.c - pvpanic Device Support + * + * Copyright (C) 2013 Fujitsu. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include + +MODULE_AUTHOR("Hu Tao "); +MODULE_DESCRIPTION("pvpanic device driver"); +MODULE_LICENSE("GPL"); + +static int pvpanic_add(struct acpi_device *device); +static int pvpanic_remove(struct acpi_device *device); + +static const struct acpi_device_id pvpanic_device_ids[] = { + { "QEMU0001", 0 }, + { "", 0 }, +}; +MODULE_DEVICE_TABLE(acpi, pvpanic_device_ids); + +#define PVPANIC_PANICKED (1 << 0) + +static u16 port; + +static struct acpi_driver pvpanic_driver = { + .name = "pvpanic", + .class = "QEMU", + .ids = pvpanic_device_ids, + .ops = { + .add = pvpanic_add, + .remove = pvpanic_remove, + }, + .owner = THIS_MODULE, +}; + +static void +pvpanic_send_event(unsigned int event) +{ + outb(event, port); +} + +static int +pvpanic_panic_notify(struct notifier_block *nb, unsigned long code, + void *unused) +{ + pvpanic_send_event(PVPANIC_PANICKED); + return NOTIFY_DONE; +} + +static struct notifier_block pvpanic_panic_nb = { + .notifier_call = pvpanic_panic_notify, +}; + + +static acpi_status +pvpanic_walk_resources(struct acpi_resource *res, void *context) +{ + switch (res->type) { + case ACPI_RESOURCE_TYPE_END_TAG: + return AE_OK; + + case ACPI_RESOURCE_TYPE_IO: + port = res->data.io.minimum; + return AE_OK; + + default: + return AE_ERROR; + } +} + +static int pvpanic_add(struct acpi_device *device) +{ + acpi_status status; + u64 ret; + + status = acpi_evaluate_integer(device->handle, "_STA", NULL, + &ret); + + if (ACPI_FAILURE(status) || (ret & 0x0B) != 0x0B) + return -ENODEV; + + acpi_walk_resources(device->handle, METHOD_NAME__CRS, + pvpanic_walk_resources, NULL); + + if (!port) + 
return -ENODEV; + + atomic_notifier_chain_register(&panic_notifier_list, + &pvpanic_panic_nb); + + return 0; +} + +static int pvpanic_remove(struct acpi_device *device) +{ + + atomic_notifier_chain_unregister(&panic_notifier_list, + &pvpanic_panic_nb); + return 0; +} + +module_acpi_driver(pvpanic_driver); diff --git a/trunk/drivers/platform/x86/samsung-q10.c b/trunk/drivers/platform/x86/samsung-q10.c index 5f770059fd4d..1a90b62a71c6 100644 --- a/trunk/drivers/platform/x86/samsung-q10.c +++ b/trunk/drivers/platform/x86/samsung-q10.c @@ -176,10 +176,7 @@ static int __init samsungq10_init(void) samsungq10_probe, NULL, 0, NULL, 0); - if (IS_ERR(samsungq10_device)) - return PTR_ERR(samsungq10_device); - - return 0; + return PTR_RET(samsungq10_device); } static void __exit samsungq10_exit(void) diff --git a/trunk/drivers/platform/x86/sony-laptop.c b/trunk/drivers/platform/x86/sony-laptop.c index d544e3aaf761..2ac045f27f10 100644 --- a/trunk/drivers/platform/x86/sony-laptop.c +++ b/trunk/drivers/platform/x86/sony-laptop.c @@ -1255,6 +1255,11 @@ static void sony_nc_notify(struct acpi_device *device, u32 event) real_ev = __sony_nc_gfx_switch_status_get(); break; + case 0x015B: + /* Hybrid GFX switching SVS151290S */ + ev_type = GFX_SWITCH; + real_ev = __sony_nc_gfx_switch_status_get(); + break; default: dprintk("Unknown event 0x%x for handle 0x%x\n", event, handle); @@ -1353,6 +1358,7 @@ static void sony_nc_function_setup(struct acpi_device *device, break; case 0x0128: case 0x0146: + case 0x015B: result = sony_nc_gfx_switch_setup(pf_device, handle); if (result) pr_err("couldn't set up GFX Switch status (%d)\n", @@ -1375,6 +1381,7 @@ static void sony_nc_function_setup(struct acpi_device *device, case 0x0143: case 0x014b: case 0x014c: + case 0x0163: result = sony_nc_kbd_backlight_setup(pf_device, handle); if (result) pr_err("couldn't set up keyboard backlight function (%d)\n", @@ -1426,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) break; case 0x0128: case 0x0146: + case 0x015B: sony_nc_gfx_switch_cleanup(pd); break; case 0x0131: @@ -1439,6 +1447,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) case 0x0143: case 0x014b: case 0x014c: + case 0x0163: sony_nc_kbd_backlight_cleanup(pd); break; default: @@ -1485,6 +1494,7 @@ static void sony_nc_function_resume(void) case 0x0143: case 0x014b: case 0x014c: + case 0x0163: sony_nc_kbd_backlight_resume(); break; default: @@ -2390,7 +2400,9 @@ static int __sony_nc_gfx_switch_status_get(void) { unsigned int result; - if (sony_call_snc_handle(gfxs_ctl->handle, 0x0100, &result)) + if (sony_call_snc_handle(gfxs_ctl->handle, + gfxs_ctl->handle == 0x015B ? 0x0000 : 0x0100, + &result)) return -EIO; switch (gfxs_ctl->handle) { @@ -2400,6 +2412,12 @@ static int __sony_nc_gfx_switch_status_get(void) */ return result & 0x1 ? SPEED : STAMINA; break; + case 0x015B: + /* 0: discrete GFX (speed) + * 1: integrated GFX (stamina) + */ + return result & 0x1 ? 
STAMINA : SPEED; + break; case 0x0128: /* it's a more elaborated bitmask, for now: * 2: integrated GFX (stamina) diff --git a/trunk/drivers/pwm/pwm-imx.c b/trunk/drivers/pwm/pwm-imx.c index ec287989eafc..c938bae18812 100644 --- a/trunk/drivers/pwm/pwm-imx.c +++ b/trunk/drivers/pwm/pwm-imx.c @@ -265,11 +265,6 @@ static int imx_pwm_probe(struct platform_device *pdev) imx->chip.npwm = 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(imx->mmio_base)) return PTR_ERR(imx->mmio_base); diff --git a/trunk/drivers/pwm/pwm-puv3.c b/trunk/drivers/pwm/pwm-puv3.c index d1eb499fb15d..ed6007b27585 100644 --- a/trunk/drivers/pwm/pwm-puv3.c +++ b/trunk/drivers/pwm/pwm-puv3.c @@ -117,11 +117,6 @@ static int pwm_probe(struct platform_device *pdev) return PTR_ERR(puv3->clk); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - puv3->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(puv3->base)) return PTR_ERR(puv3->base); diff --git a/trunk/drivers/pwm/pwm-pxa.c b/trunk/drivers/pwm/pwm-pxa.c index dee6ab552a0a..dc9717551d39 100644 --- a/trunk/drivers/pwm/pwm-pxa.c +++ b/trunk/drivers/pwm/pwm-pxa.c @@ -147,11 +147,6 @@ static int pwm_probe(struct platform_device *pdev) pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pwm->mmio_base)) return PTR_ERR(pwm->mmio_base); diff --git a/trunk/drivers/pwm/pwm-tegra.c b/trunk/drivers/pwm/pwm-tegra.c index 3d75f4a88f98..a5402933001f 100644 --- a/trunk/drivers/pwm/pwm-tegra.c +++ b/trunk/drivers/pwm/pwm-tegra.c @@ -181,11 +181,6 @@ static int tegra_pwm_probe(struct platform_device *pdev) pwm->dev = &pdev->dev; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no memory resources defined\n"); - return -ENODEV; - } - pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pwm->mmio_base)) return PTR_ERR(pwm->mmio_base); diff --git a/trunk/drivers/pwm/pwm-tiecap.c b/trunk/drivers/pwm/pwm-tiecap.c index 0d65fb2e02c7..72ca42dfa733 100644 --- a/trunk/drivers/pwm/pwm-tiecap.c +++ b/trunk/drivers/pwm/pwm-tiecap.c @@ -240,11 +240,6 @@ static int ecap_pwm_probe(struct platform_device *pdev) pc->chip.npwm = 1; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pc->mmio_base)) return PTR_ERR(pc->mmio_base); diff --git a/trunk/drivers/pwm/pwm-tiehrpwm.c b/trunk/drivers/pwm/pwm-tiehrpwm.c index 6a217596942f..48a485c2e422 100644 --- a/trunk/drivers/pwm/pwm-tiehrpwm.c +++ b/trunk/drivers/pwm/pwm-tiehrpwm.c @@ -471,11 +471,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev) pc->chip.npwm = NUM_PWM_CHANNEL; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(pc->mmio_base)) return PTR_ERR(pc->mmio_base); diff --git a/trunk/drivers/pwm/pwm-tipwmss.c b/trunk/drivers/pwm/pwm-tipwmss.c index c9c3d3a1e0eb..3b119bc2c3c6 100644 --- 
a/trunk/drivers/pwm/pwm-tipwmss.c +++ b/trunk/drivers/pwm/pwm-tipwmss.c @@ -70,11 +70,6 @@ static int pwmss_probe(struct platform_device *pdev) mutex_init(&info->pwmss_lock); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - info->mmio_base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(info->mmio_base)) return PTR_ERR(info->mmio_base); diff --git a/trunk/drivers/pwm/pwm-vt8500.c b/trunk/drivers/pwm/pwm-vt8500.c index 69effd19afc7..323125abf3f4 100644 --- a/trunk/drivers/pwm/pwm-vt8500.c +++ b/trunk/drivers/pwm/pwm-vt8500.c @@ -230,11 +230,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev) } r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (r == NULL) { - dev_err(&pdev->dev, "no memory resource defined\n"); - return -ENODEV; - } - chip->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(chip->base)) return PTR_ERR(chip->base); diff --git a/trunk/drivers/regulator/Kconfig b/trunk/drivers/regulator/Kconfig index a5d97eaee99e..8bb26446037e 100644 --- a/trunk/drivers/regulator/Kconfig +++ b/trunk/drivers/regulator/Kconfig @@ -66,7 +66,7 @@ config REGULATOR_USERSPACE_CONSUMER config REGULATOR_GPIO tristate "GPIO regulator support" - depends on GENERIC_GPIO + depends on GPIOLIB help This driver provides support for regulators that can be controlled via gpios. diff --git a/trunk/drivers/rtc/Kconfig b/trunk/drivers/rtc/Kconfig index 0c81915b1997..b9838130a7b0 100644 --- a/trunk/drivers/rtc/Kconfig +++ b/trunk/drivers/rtc/Kconfig @@ -20,7 +20,6 @@ if RTC_CLASS config RTC_HCTOSYS bool "Set system time from RTC on startup and resume" default y - depends on !ALWAYS_USE_PERSISTENT_CLOCK help If you say yes here, the system time (wall clock) will be set using the value read from a specified RTC device. 
This is useful to avoid @@ -29,7 +28,6 @@ config RTC_HCTOSYS config RTC_SYSTOHC bool "Set the RTC time based on NTP synchronization" default y - depends on !ALWAYS_USE_PERSISTENT_CLOCK help If you say yes here, the system time (wall clock) will be stored in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11 diff --git a/trunk/drivers/rtc/rtc-nuc900.c b/trunk/drivers/rtc/rtc-nuc900.c index f5dfb6e5e7d9..d592e2fe43f7 100644 --- a/trunk/drivers/rtc/rtc-nuc900.c +++ b/trunk/drivers/rtc/rtc-nuc900.c @@ -234,11 +234,6 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev) return -ENOMEM; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "platform_get_resource failed\n"); - return -ENXIO; - } - nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(nuc900_rtc->rtc_reg)) return PTR_ERR(nuc900_rtc->rtc_reg); diff --git a/trunk/drivers/rtc/rtc-omap.c b/trunk/drivers/rtc/rtc-omap.c index 4e1bdb832e37..b0ba3fc991ea 100644 --- a/trunk/drivers/rtc/rtc-omap.c +++ b/trunk/drivers/rtc/rtc-omap.c @@ -347,11 +347,6 @@ static int __init omap_rtc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - pr_debug("%s: RTC resource data missing\n", pdev->name); - return -ENOENT; - } - rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(rtc_base)) return PTR_ERR(rtc_base); diff --git a/trunk/drivers/rtc/rtc-s3c.c b/trunk/drivers/rtc/rtc-s3c.c index 14040b22888d..0b495e8b8e66 100644 --- a/trunk/drivers/rtc/rtc-s3c.c +++ b/trunk/drivers/rtc/rtc-s3c.c @@ -477,11 +477,6 @@ static int s3c_rtc_probe(struct platform_device *pdev) /* get the memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res == NULL) { - dev_err(&pdev->dev, "failed to get memory region resource\n"); - return -ENOENT; - } - s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(s3c_rtc_base)) return PTR_ERR(s3c_rtc_base); diff --git a/trunk/drivers/rtc/rtc-tegra.c b/trunk/drivers/rtc/rtc-tegra.c index a34315d25478..76af92ad5a8a 100644 --- a/trunk/drivers/rtc/rtc-tegra.c +++ b/trunk/drivers/rtc/rtc-tegra.c @@ -322,12 +322,6 @@ static int __init tegra_rtc_probe(struct platform_device *pdev) return -ENOMEM; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, - "Unable to allocate resources for device.\n"); - return -EBUSY; - } - info->rtc_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(info->rtc_base)) return PTR_ERR(info->rtc_base); diff --git a/trunk/drivers/rtc/rtc-tile.c b/trunk/drivers/rtc/rtc-tile.c index 249b6531f119..fc3dee95f166 100644 --- a/trunk/drivers/rtc/rtc-tile.c +++ b/trunk/drivers/rtc/rtc-tile.c @@ -146,6 +146,7 @@ static int __init tile_rtc_driver_init(void) */ static void __exit tile_rtc_driver_exit(void) { + platform_device_unregister(tile_rtc_platform_device); platform_driver_unregister(&tile_rtc_platform_driver); } diff --git a/trunk/drivers/s390/block/xpram.c b/trunk/drivers/s390/block/xpram.c index 690c3338a8ae..464dd29d06c0 100644 --- a/trunk/drivers/s390/block/xpram.c +++ b/trunk/drivers/s390/block/xpram.c @@ -343,6 +343,7 @@ static int __init xpram_setup_blkdev(void) put_disk(xpram_disks[i]); goto out; } + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]); blk_queue_make_request(xpram_queues[i], xpram_make_request); blk_queue_logical_block_size(xpram_queues[i], 4096); } diff --git a/trunk/drivers/s390/cio/chp.c b/trunk/drivers/s390/cio/chp.c index 21fabc6d5a9c..6c440d4349d4 100644 --- 
a/trunk/drivers/s390/cio/chp.c +++ b/trunk/drivers/s390/cio/chp.c @@ -352,12 +352,48 @@ static ssize_t chp_shared_show(struct device *dev, static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); +static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL); + +static ssize_t chp_chid_external_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct channel_path *chp = to_channelpath(dev); + ssize_t rc; + + mutex_lock(&chp->lock); + if (chp->desc_fmt1.flags & 0x10) + rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0); + else + rc = 0; + mutex_unlock(&chp->lock); + + return rc; +} +static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL); + static struct attribute *chp_attrs[] = { &dev_attr_status.attr, &dev_attr_configure.attr, &dev_attr_type.attr, &dev_attr_cmg.attr, &dev_attr_shared.attr, + &dev_attr_chid.attr, + &dev_attr_chid_external.attr, NULL, }; static struct attribute_group chp_attr_group = { diff --git a/trunk/drivers/s390/cio/chsc.h b/trunk/drivers/s390/cio/chsc.h index 349d5fc47196..e7ef2a683b8f 100644 --- a/trunk/drivers/s390/cio/chsc.h +++ b/trunk/drivers/s390/cio/chsc.h @@ -43,7 +43,9 @@ struct channel_path_desc_fmt1 { u8 chpid; u32:24; u8 chpp; - u32 unused[3]; + u32 unused[2]; + u16 chid; + u32:16; u16 mdc; u16:13; u8 r:1; diff --git a/trunk/drivers/scsi/Kconfig b/trunk/drivers/scsi/Kconfig index db95c547c09d..86af29f53bbe 100644 --- a/trunk/drivers/scsi/Kconfig +++ b/trunk/drivers/scsi/Kconfig @@ -1353,6 +1353,8 @@ config SCSI_LPFC tristate "Emulex LightPulse Fibre Channel Support" depends on PCI && SCSI select SCSI_FC_ATTRS + select GENERIC_CSUM + select CRC_T10DIF help This lpfc driver supports the Emulex LightPulse Family of Fibre Channel PCI host adapters. 
diff --git a/trunk/drivers/scsi/aic94xx/aic94xx_dev.c b/trunk/drivers/scsi/aic94xx/aic94xx_dev.c index 64136c56e706..33072388ea16 100644 --- a/trunk/drivers/scsi/aic94xx/aic94xx_dev.c +++ b/trunk/drivers/scsi/aic94xx/aic94xx_dev.c @@ -84,7 +84,7 @@ static void asd_set_ddb_type(struct domain_device *dev) struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha; int ddb = (int) (unsigned long) dev->lldd_dev; - if (dev->dev_type == SATA_PM_PORT) + if (dev->dev_type == SAS_SATA_PM_PORT) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_PM_PORT); else if (dev->tproto) asd_ddbsite_write_byte(asd_ha,ddb, DDB_TYPE, DDB_TYPE_TARGET); @@ -116,7 +116,7 @@ void asd_set_dmamode(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; u32 qdepth = 0; - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM_PORT) { if (ata_id_has_ncq(ata_dev->id)) qdepth = ata_id_queue_depth(ata_dev->id); asd_ddbsite_write_dword(asd_ha, ddb, SATA_TAG_ALLOC_MASK, @@ -140,8 +140,8 @@ static int asd_init_sata(struct domain_device *dev) int ddb = (int) (unsigned long) dev->lldd_dev; asd_ddbsite_write_word(asd_ha, ddb, ATA_CMD_SCBPTR, 0xFFFF); - if (dev->dev_type == SATA_DEV || dev->dev_type == SATA_PM || - dev->dev_type == SATA_PM_PORT) { + if (dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || + dev->dev_type == SAS_SATA_PM_PORT) { struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; asd_ddbsite_write_byte(asd_ha, ddb, SATA_STATUS, fis->status); @@ -174,7 +174,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, CONN_MASK, dev->port->phy_mask); if (dev->port->oob_mode != SATA_OOB_MODE) { flags |= OPEN_REQUIRED; - if ((dev->dev_type == SATA_DEV) || + if ((dev->dev_type == SAS_SATA_DEV) || (dev->tproto & SAS_PROTOCOL_STP)) { struct smp_resp *rps_resp = &dev->sata_dev.rps_resp; if (rps_resp->frame_type == SMP_RESPONSE && @@ -188,8 +188,8 @@ static int asd_init_target_ddb(struct domain_device *dev) } else { flags |= CONCURRENT_CONN_SUPP; if (!dev->parent && - (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV)) + (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE)) asd_ddbsite_write_byte(asd_ha, ddb, MAX_CCONN, 4); else @@ -198,7 +198,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_byte(asd_ha, ddb, NUM_CTX, 1); } } - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) flags |= SATA_MULTIPORT; asd_ddbsite_write_byte(asd_ha, ddb, DDB_TARG_FLAGS, flags); @@ -211,7 +211,7 @@ static int asd_init_target_ddb(struct domain_device *dev) asd_ddbsite_write_word(asd_ha, ddb, SEND_QUEUE_TAIL, 0xFFFF); asd_ddbsite_write_word(asd_ha, ddb, SISTER_DDB, 0xFFFF); - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { i = asd_init_sata(dev); if (i < 0) { asd_free_ddb(asd_ha, ddb); @@ -219,7 +219,7 @@ static int asd_init_target_ddb(struct domain_device *dev) } } - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct sas_end_device *rdev = rphy_to_end_device(dev->rphy); if (rdev->I_T_nexus_loss_timeout > 0) asd_ddbsite_write_word(asd_ha, ddb, ITNL_TIMEOUT, @@ -328,10 +328,10 @@ int asd_dev_found(struct domain_device *dev) spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags); switch (dev->dev_type) { - case SATA_PM: + case SAS_SATA_PM: res = 
asd_init_sata_pm_ddb(dev); break; - case SATA_PM_PORT: + case SAS_SATA_PM_PORT: res = asd_init_sata_pm_port_ddb(dev); break; default: diff --git a/trunk/drivers/scsi/aic94xx/aic94xx_hwi.c b/trunk/drivers/scsi/aic94xx/aic94xx_hwi.c index 81b736c76fff..4df867e07b20 100644 --- a/trunk/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/trunk/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -74,7 +74,7 @@ static void asd_init_phy_identify(struct asd_phy *phy) memset(phy->identify_frame, 0, sizeof(*phy->identify_frame)); - phy->identify_frame->dev_type = SAS_END_DEV; + phy->identify_frame->dev_type = SAS_END_DEVICE; if (phy->sas_phy.role & PHY_ROLE_INITIATOR) phy->identify_frame->initiator_bits = phy->sas_phy.iproto; if (phy->sas_phy.role & PHY_ROLE_TARGET) diff --git a/trunk/drivers/scsi/aic94xx/aic94xx_tmf.c b/trunk/drivers/scsi/aic94xx/aic94xx_tmf.c index cf9040933da6..d4c35df3d4ae 100644 --- a/trunk/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/trunk/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -184,7 +184,7 @@ int asd_I_T_nexus_reset(struct domain_device *dev) struct sas_phy *phy = sas_get_local_phy(dev); /* Standard mandates link reset for ATA (type 0) and * hard reset for SSP (type 1) */ - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1; asd_clear_nexus_I_T(dev, NEXUS_PHASE_PRE); diff --git a/trunk/drivers/scsi/be2iscsi/be.h b/trunk/drivers/scsi/be2iscsi/be.h index f1733dfa3ae2..777e7c0bbb4b 100644 --- a/trunk/drivers/scsi/be2iscsi/be.h +++ b/trunk/drivers/scsi/be2iscsi/be.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/trunk/drivers/scsi/be2iscsi/be_cmds.c b/trunk/drivers/scsi/be2iscsi/be_cmds.c index 5c87768c109c..e66aa7c11a8a 100644 --- a/trunk/drivers/scsi/be2iscsi/be_cmds.c +++ b/trunk/drivers/scsi/be2iscsi/be_cmds.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -155,6 +155,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint16_t status = 0, addl_status = 0, wrb_num = 0; struct be_mcc_wrb *temp_wrb; struct be_cmd_req_hdr *ioctl_hdr; + struct be_cmd_resp_hdr *ioctl_resp_hdr; struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q; if (beiscsi_error(phba)) @@ -204,6 +205,12 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, ioctl_hdr->subsystem, ioctl_hdr->opcode, status, addl_status); + + if (status == MCC_STATUS_INSUFFICIENT_BUFFER) { + ioctl_resp_hdr = (struct be_cmd_resp_hdr *) ioctl_hdr; + if (ioctl_resp_hdr->response_length) + goto release_mcc_tag; + } rc = -EAGAIN; } @@ -267,6 +274,7 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); struct be_cmd_req_hdr *hdr = embedded_payload(wrb); + struct be_cmd_resp_hdr *resp_hdr; be_dws_le_to_cpu(compl, 4); @@ -284,6 +292,11 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl, hdr->subsystem, hdr->opcode, compl_status, extd_status); + if (compl_status == MCC_STATUS_INSUFFICIENT_BUFFER) { + resp_hdr = (struct be_cmd_resp_hdr *) hdr; + if (resp_hdr->response_length) + return 0; + } return -EBUSY; } return 0; @@ -335,30 +348,26 @@ static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session) void beiscsi_async_link_state_process(struct beiscsi_hba *phba, struct be_async_event_link_state *evt) { - switch (evt->port_link_status) { - case ASYNC_EVENT_LINK_DOWN: + if ((evt->port_link_status == ASYNC_EVENT_LINK_DOWN) || + ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && + (evt->port_fault != BEISCSI_PHY_LINK_FAULT_NONE))) { + phba->state = BE_ADAPTER_LINK_DOWN; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link Down on Physical Port %d\n", + "BC_%d : Link Down on Port %d\n", evt->physical_port); - phba->state |= BE_ADAPTER_LINK_DOWN; iscsi_host_for_each_session(phba->shost, be2iscsi_fail_session); - break; - case ASYNC_EVENT_LINK_UP: + } else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) || + ((evt->port_link_status & ASYNC_EVENT_LOGICAL) && + (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) { phba->state = BE_ADAPTER_UP; + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Link UP on Physical Port %d\n", - evt->physical_port); - break; - default: - beiscsi_log(phba, KERN_ERR, - BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT, - "BC_%d : Unexpected Async Notification %d on" - "Physical Port %d\n", - evt->port_link_status, + "BC_%d : Link UP on Port %d\n", evt->physical_port); } } @@ -479,7 +488,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl) { void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); - int wait = 0; + uint32_t wait = 0; u32 ready; do { @@ -527,6 +536,10 @@ int be_mbox_notify(struct be_ctrl_info *ctrl) struct be_mcc_compl *compl = &mbox->compl; struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + status = be_mbox_db_ready_wait(ctrl); + if (status) + return status; + val &= ~MPU_MAILBOX_DB_RDY_MASK; val |= MPU_MAILBOX_DB_HI_MASK; val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; @@ -580,6 +593,10 @@ static int be_mbox_notify_wait(struct beiscsi_hba *phba) struct be_mcc_compl *compl = &mbox->compl; struct be_ctrl_info *ctrl = &phba->ctrl; + status = be_mbox_db_ready_wait(ctrl); + if (status) + return status; + val |= MPU_MAILBOX_DB_HI_MASK; /* at bits 2 - 31 
place mbox dma addr msb bits 34 - 63 */ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; @@ -732,6 +749,16 @@ int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl, return status; } +/** + * be_cmd_fw_initialize()- Initialize FW + * @ctrl: Pointer to function control structure + * + * Send FW initialize pattern for the function. + * + * return + * Success: 0 + * Failure: Non-Zero value + **/ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) { struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); @@ -762,6 +789,47 @@ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl) return status; } +/** + * be_cmd_fw_uninit()- Uinitialize FW + * @ctrl: Pointer to function control structure + * + * Send FW uninitialize pattern for the function + * + * return + * Success: 0 + * Failure: Non-Zero value + **/ +int be_cmd_fw_uninit(struct be_ctrl_info *ctrl) +{ + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); + int status; + u8 *endian_check; + + spin_lock(&ctrl->mbox_lock); + memset(wrb, 0, sizeof(*wrb)); + + endian_check = (u8 *) wrb; + *endian_check++ = 0xFF; + *endian_check++ = 0xAA; + *endian_check++ = 0xBB; + *endian_check++ = 0xFF; + *endian_check++ = 0xFF; + *endian_check++ = 0xCC; + *endian_check++ = 0xDD; + *endian_check = 0xFF; + + be_dws_cpu_to_le(wrb, sizeof(*wrb)); + + status = be_mbox_notify(ctrl); + if (status) + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BC_%d : be_cmd_fw_uninit Failed\n"); + + spin_unlock(&ctrl->mbox_lock); + return status; +} + int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, struct be_queue_info *cq, struct be_queue_info *eq, bool sol_evts, bool no_delay, int coalesce_wm) @@ -783,20 +851,7 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, OPCODE_COMMON_CQ_CREATE, sizeof(*req)); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); - if (chip_skh_r(ctrl->pdev)) { - req->hdr.version = MBX_CMD_VER2; - req->page_size = 1; - AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, - ctxt, coalesce_wm); - AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, - ctxt, no_delay); - AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, - __ilog2_u32(cq->len / 256)); - AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); - AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); - AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay); @@ -809,6 +864,19 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl, AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context, func, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + } else { + req->hdr.version = MBX_CMD_VER2; + req->page_size = 1; + AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, + ctxt, coalesce_wm); + AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, + ctxt, no_delay); + AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, + __ilog2_u32(cq->len / 256)); + AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); + AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); + AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); @@ -949,6 +1017,7 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, struct be_mcc_wrb *wrb = 
wrb_from_mbox(&ctrl->mbox_mem); struct be_defq_create_req *req = embedded_payload(wrb); struct be_dma_mem *q_mem = &dq->dma_mem; + struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev); void *ctxt = &req->context; int status; @@ -961,17 +1030,36 @@ int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl, OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); - AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0); - AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt, - 1); - AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt, - PCI_FUNC(ctrl->pdev->devfn)); - AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt, - be_encoded_q_len(length / sizeof(struct phys_addr))); - AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size, - ctxt, entry_size); - AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt, - cq->id); + + if (is_chip_be2_be3r(phba)) { + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + pci_func_id, ctxt, PCI_FUNC(ctrl->pdev->devfn)); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_be_default_pdu_context, + cq_id_recv, ctxt, cq->id); + } else { + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid, ctxt, 0); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + rx_pdid_valid, ctxt, 1); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + ring_size, ctxt, + be_encoded_q_len(length / + sizeof(struct phys_addr))); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + default_buffer_size, ctxt, entry_size); + AMAP_SET_BITS(struct amap_default_pdu_context_ext, + cq_id_recv, ctxt, cq->id); + } be_dws_cpu_to_le(ctxt, sizeof(req->context)); diff --git a/trunk/drivers/scsi/be2iscsi/be_cmds.h b/trunk/drivers/scsi/be2iscsi/be_cmds.h index 23397d51ac54..99073086dfe0 100644 --- a/trunk/drivers/scsi/be2iscsi/be_cmds.h +++ b/trunk/drivers/scsi/be2iscsi/be_cmds.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -52,6 +52,10 @@ struct be_mcc_wrb { /* Completion Status */ #define MCC_STATUS_SUCCESS 0x0 +#define MCC_STATUS_FAILED 0x1 +#define MCC_STATUS_ILLEGAL_REQUEST 0x2 +#define MCC_STATUS_ILLEGAL_FIELD 0x3 +#define MCC_STATUS_INSUFFICIENT_BUFFER 0x4 #define CQE_STATUS_COMPL_MASK 0xFFFF #define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */ @@ -118,7 +122,8 @@ struct be_async_event_trailer { enum { ASYNC_EVENT_LINK_DOWN = 0x0, - ASYNC_EVENT_LINK_UP = 0x1 + ASYNC_EVENT_LINK_UP = 0x1, + ASYNC_EVENT_LOGICAL = 0x2 }; /** @@ -130,6 +135,9 @@ struct be_async_event_link_state { u8 port_link_status; u8 port_duplex; u8 port_speed; +#define BEISCSI_PHY_LINK_FAULT_NONE 0x00 +#define BEISCSI_PHY_LINK_FAULT_LOCAL 0x01 +#define BEISCSI_PHY_LINK_FAULT_REMOTE 0x02 u8 port_fault; u8 rsvd0[7]; struct be_async_event_trailer trailer; @@ -697,6 +705,7 @@ int beiscsi_mccq_compl(struct beiscsi_hba *phba, uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va); /*ISCSI Functuions */ int be_cmd_fw_initialize(struct be_ctrl_info *ctrl); +int be_cmd_fw_uninit(struct be_ctrl_info *ctrl); struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem); struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba); @@ -751,6 +760,18 @@ struct amap_be_default_pdu_context { u8 rsvd4[32]; /* dword 3 */ } __packed; +struct amap_default_pdu_context_ext { + u8 rsvd0[16]; /* dword 0 */ + u8 ring_size[4]; /* dword 0 */ + u8 rsvd1[12]; /* dword 0 */ + u8 rsvd2[22]; /* dword 1 */ + u8 rx_pdid[9]; /* dword 1 */ + u8 rx_pdid_valid; /* dword 1 */ + u8 default_buffer_size[16]; /* dword 2 */ + u8 cq_id_recv[16]; /* dword 2 */ + u8 rsvd3[32]; /* dword 3 */ +} __packed; + struct be_defq_create_req { struct be_cmd_req_hdr hdr; u16 num_pages; @@ -896,7 +917,7 @@ struct amap_it_dmsg_cqe_v2 { * stack to notify the * controller of a posted Work Request Block */ -#define DB_WRB_POST_CID_MASK 0x3FF /* bits 0 - 9 */ +#define DB_WRB_POST_CID_MASK 0xFFFF /* bits 0 - 16 */ #define DB_DEF_PDU_WRB_INDEX_MASK 0xFF /* bits 0 - 9 */ #define DB_DEF_PDU_WRB_INDEX_SHIFT 16 diff --git a/trunk/drivers/scsi/be2iscsi/be_iscsi.c b/trunk/drivers/scsi/be2iscsi/be_iscsi.c index 9014690fe841..ef36be003f67 100644 --- a/trunk/drivers/scsi/be2iscsi/be_iscsi.c +++ b/trunk/drivers/scsi/be2iscsi/be_iscsi.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -161,7 +161,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, struct beiscsi_conn *beiscsi_conn, unsigned int cid) { - if (phba->conn_table[cid]) { + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : Connection table already occupied. 
Detected clash\n"); @@ -169,9 +171,9 @@ static int beiscsi_bindconn_cid(struct beiscsi_hba *phba, } else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : phba->conn_table[%d]=%p(beiscsi_conn)\n", - cid, beiscsi_conn); + cri_index, beiscsi_conn); - phba->conn_table[cid] = beiscsi_conn; + phba->conn_table[cri_index] = beiscsi_conn; } return 0; } @@ -990,9 +992,27 @@ static void beiscsi_put_cid(struct beiscsi_hba *phba, unsigned short cid) static void beiscsi_free_ep(struct beiscsi_endpoint *beiscsi_ep) { struct beiscsi_hba *phba = beiscsi_ep->phba; + struct beiscsi_conn *beiscsi_conn; beiscsi_put_cid(phba, beiscsi_ep->ep_cid); beiscsi_ep->phba = NULL; + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = NULL; + + /** + * Check if any connection resource allocated by driver + * is to be freed.This case occurs when target redirection + * or connection retry is done. + **/ + if (!beiscsi_ep->conn) + return; + + beiscsi_conn = beiscsi_ep->conn; + if (beiscsi_conn->login_in_progress) { + beiscsi_free_mgmt_task_handles(beiscsi_conn, + beiscsi_conn->task); + beiscsi_conn->login_in_progress = 0; + } } /** @@ -1009,7 +1029,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, { struct beiscsi_endpoint *beiscsi_ep = ep->dd_data; struct beiscsi_hba *phba = beiscsi_ep->phba; - struct be_mcc_wrb *wrb; struct tcp_connect_and_offload_out *ptcpcnct_out; struct be_dma_mem nonemb_cmd; unsigned int tag; @@ -1029,15 +1048,8 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : In beiscsi_open_conn, ep_cid=%d\n", beiscsi_ep->ep_cid); - phba->ep_array[beiscsi_ep->ep_cid - - phba->fw_config.iscsi_cid_start] = ep; - if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start + - phba->params.cxns_per_ctrl * 2)) { - - beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, - "BS_%d : Failed in allocate iscsi cid\n"); - goto free_ep; - } + phba->ep_array[BE_GET_CRI_FROM_CID + (beiscsi_ep->ep_cid)] = ep; beiscsi_ep->cid_vld = 0; nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev, @@ -1049,24 +1061,24 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, "BS_%d : Failed to allocate memory for" " mgmt_open_connection\n"); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); + beiscsi_free_ep(beiscsi_ep); return -ENOMEM; } nonemb_cmd.size = sizeof(struct tcp_connect_and_offload_in); memset(nonemb_cmd.va, 0, nonemb_cmd.size); tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep, &nonemb_cmd); - if (!tag) { + if (tag <= 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG, "BS_%d : mgmt_open_connection Failed for cid=%d\n", beiscsi_ep->ep_cid); - beiscsi_put_cid(phba, beiscsi_ep->ep_cid); pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); + beiscsi_free_ep(beiscsi_ep); return -EAGAIN; } - ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL); + ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va); if (ret) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX, @@ -1074,10 +1086,11 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, nonemb_cmd.dma); - goto free_ep; + beiscsi_free_ep(beiscsi_ep); + return -EBUSY; } - ptcpcnct_out = embedded_payload(wrb); + ptcpcnct_out = (struct tcp_connect_and_offload_out *)nonemb_cmd.va; beiscsi_ep = ep->dd_data; beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle; beiscsi_ep->cid_vld = 1; @@ -1087,10 +1100,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep, pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size, nonemb_cmd.va, 
nonemb_cmd.dma); return 0; - -free_ep: - beiscsi_free_ep(beiscsi_ep); - return -EBUSY; } /** @@ -1119,6 +1128,13 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, return ERR_PTR(ret); } + if (beiscsi_error(phba)) { + ret = -EIO; + beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, + "BS_%d : The FW state Not Stable!!!\n"); + return ERR_PTR(ret); + } + if (phba->state != BE_ADAPTER_UP) { ret = -EBUSY; beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG, @@ -1201,8 +1217,10 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag) static int beiscsi_unbind_conn_to_cid(struct beiscsi_hba *phba, unsigned int cid) { - if (phba->conn_table[cid]) - phba->conn_table[cid] = NULL; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); + + if (phba->conn_table[cri_index]) + phba->conn_table[cri_index] = NULL; else { beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG, "BS_%d : Connection table Not occupied.\n"); diff --git a/trunk/drivers/scsi/be2iscsi/be_iscsi.h b/trunk/drivers/scsi/be2iscsi/be_iscsi.h index 38eab7232159..31ddc8494398 100644 --- a/trunk/drivers/scsi/be2iscsi/be_iscsi.h +++ b/trunk/drivers/scsi/be2iscsi/be_iscsi.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or diff --git a/trunk/drivers/scsi/be2iscsi/be_main.c b/trunk/drivers/scsi/be2iscsi/be_main.c index 4e2733d23003..d24a2867bc21 100644 --- a/trunk/drivers/scsi/be2iscsi/be_main.c +++ b/trunk/drivers/scsi/be2iscsi/be_main.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or @@ -153,10 +153,14 @@ BEISCSI_RW_ATTR(log_enable, 0x00, DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL); DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL); +DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL); +DEVICE_ATTR(beiscsi_active_cid_count, S_IRUGO, beiscsi_active_cid_disp, NULL); struct device_attribute *beiscsi_attrs[] = { &dev_attr_beiscsi_log_enable, &dev_attr_beiscsi_drvr_ver, &dev_attr_beiscsi_adapter_family, + &dev_attr_beiscsi_fw_ver, + &dev_attr_beiscsi_active_cid_count, NULL, }; @@ -702,7 +706,7 @@ static void beiscsi_get_params(struct beiscsi_hba *phba) + BE2_TMFS + BE2_NOPOUT_REQ)); phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count; - phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2; + phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count; phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count; phba->params.num_sge_per_io = BE2_SGE; phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ; @@ -1032,7 +1036,6 @@ static void hwi_ring_cq_db(struct beiscsi_hba *phba, static unsigned int beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn, struct beiscsi_hba *phba, - unsigned short cid, struct pdu_base *ppdu, unsigned long pdu_len, void *pbuffer, unsigned long buf_len) @@ -1144,9 +1147,10 @@ struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; struct wrb_handle *pwrb_handle, *pwrb_handle_tmp; + uint16_t cri_index = BE_GET_CRI_FROM_CID(cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[cid]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (pwrb_context->wrb_handles_available >= 2) { pwrb_handle = 
pwrb_context->pwrb_handle_base[ pwrb_context->alloc_index]; @@ -1322,8 +1326,9 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn, hdr->t2retain = 0; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->dlength[0] = 0; hdr->dlength[1] = 0; @@ -1346,9 +1351,9 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn, hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP; hdr->flags = csol_cqe->i_flags; hdr->response = csol_cqe->i_resp; - hdr->exp_cmdsn = csol_cqe->exp_cmdsn; - hdr->max_cmdsn = (csol_cqe->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->itt = io_task->libiscsi_itt; __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0); @@ -1363,35 +1368,29 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn, struct hwi_controller *phwi_ctrlr; struct iscsi_task *task; struct beiscsi_io_task *io_task; - struct iscsi_conn *conn = beiscsi_conn->conn; - struct iscsi_session *session = conn->session; - uint16_t wrb_index, cid; + uint16_t wrb_index, cid, cri_index; phwi_ctrlr = phba->phwi_ctrlr; - if (chip_skh_r(phba->pcidev)) { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + if (is_chip_be2_be3r(phba)) { + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, cid, psol); } else { - wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, wrb_idx, psol); - cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe, + cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2, cid, psol); } - pwrb_context = &phwi_ctrlr->wrb_context[ - cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index]; task = pwrb_handle->pio_handle; io_task = task->dd_data; - spin_lock_bh(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, io_task->psgl_handle); - spin_unlock_bh(&phba->mgmt_sgl_lock); - spin_lock_bh(&session->lock); - free_wrb_handle(phba, pwrb_context, pwrb_handle); - spin_unlock_bh(&session->lock); + memset(io_task->pwrb_handle->pwrb, 0, sizeof(struct iscsi_wrb)); + iscsi_put_task(task); } static void @@ -1406,8 +1405,8 @@ be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn, hdr = (struct iscsi_nopin *)task->hdr; hdr->flags = csol_cqe->i_flags; hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn); - hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn + - csol_cqe->cmd_wnd - 1); + hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn + + csol_cqe->cmd_wnd - 1); hdr->opcode = ISCSI_OP_NOOP_IN; hdr->itt = io_task->libiscsi_itt; @@ -1418,7 +1417,26 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, struct sol_cqe *psol, struct common_sol_cqe *csol_cqe) { - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, + i_exp_cmd_sn, psol); + csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, + i_res_cnt, psol); + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + i_cmd_wnd, psol); + csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, + wrb_index, psol); + csol_cqe->cid = 
AMAP_GET_BITS(struct amap_sol_cqe, + cid, psol); + csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, + hw_sts, psol); + csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, + i_resp, psol); + csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, + i_sts, psol); + csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, + i_flags, psol); + } else { csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_exp_cmd_sn, psol); csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2, @@ -1429,7 +1447,7 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, cid, psol); csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2, hw_sts, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, + csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2, i_cmd_wnd, psol); if (AMAP_GET_BITS(struct amap_sol_cqe_v2, cmd_cmpl, psol)) @@ -1445,25 +1463,6 @@ static void adapter_get_sol_cqe(struct beiscsi_hba *phba, if (AMAP_GET_BITS(struct amap_sol_cqe_v2, o, psol)) csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW; - } else { - csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe, - i_exp_cmd_sn, psol); - csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe, - i_res_cnt, psol); - csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe, - i_cmd_wnd, psol); - csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe, - wrb_index, psol); - csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe, - cid, psol); - csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe, - hw_sts, psol); - csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe, - i_resp, psol); - csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe, - i_sts, psol); - csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe, - i_flags, psol); } } @@ -1480,14 +1479,15 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn, struct iscsi_conn *conn = beiscsi_conn->conn; struct iscsi_session *session = conn->session; struct common_sol_cqe csol_cqe = {0}; + uint16_t cri_index = 0; phwi_ctrlr = phba->phwi_ctrlr; /* Copy the elements to a common structure */ adapter_get_sol_cqe(phba, psol, &csol_cqe); - pwrb_context = &phwi_ctrlr->wrb_context[ - csol_cqe.cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; pwrb_handle = pwrb_context->pwrb_handle_basestd[ csol_cqe.wrb_index]; @@ -1561,15 +1561,15 @@ hwi_get_async_handle(struct beiscsi_hba *phba, unsigned char is_header = 0; unsigned int index, dpl; - if (chip_skh_r(phba->pcidev)) { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + if (is_chip_be2_be3r(phba)) { + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, index, pdpdu_cqe); } else { - dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, dpl, pdpdu_cqe); - index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, + index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2, index, pdpdu_cqe); } @@ -1613,8 +1613,8 @@ hwi_get_async_handle(struct beiscsi_hba *phba, WARN_ON(!pasync_handle); - pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start; + pasync_handle->cri = + BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid); pasync_handle->is_header = is_header; pasync_handle->buffer_len = dpl; *pcq_index = index; @@ -1856,8 +1856,6 @@ hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn, } status = beiscsi_process_async_pdu(beiscsi_conn, phba, - 
(beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start), phdr, hdr_len, pfirst_buffer, offset); @@ -2011,6 +2009,7 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) unsigned int num_processed = 0; unsigned int tot_nump = 0; unsigned short code = 0, cid = 0; + uint16_t cri_index = 0; struct beiscsi_conn *beiscsi_conn; struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; @@ -2028,7 +2027,9 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) 32] & CQE_CODE_MASK); /* Get the CID */ - if (chip_skh_r(phba->pcidev)) { + if (is_chip_be2_be3r(phba)) { + cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } else { if ((code == DRIVERMSG_NOTIFY) || (code == UNSOL_HDR_NOTIFY) || (code == UNSOL_DATA_NOTIFY)) @@ -2038,10 +2039,10 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq) else cid = AMAP_GET_BITS(struct amap_sol_cqe_v2, cid, sol); - } else - cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol); + } - ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID(cid); + ep = phba->ep_array[cri_index]; beiscsi_ep = ep->dd_data; beiscsi_conn = beiscsi_ep->conn; @@ -2191,7 +2192,7 @@ void beiscsi_process_all_cqs(struct work_struct *work) static int be_iopoll(struct blk_iopoll *iop, int budget) { - static unsigned int ret; + unsigned int ret; struct beiscsi_hba *phba; struct be_eq_obj *pbe_eq; @@ -2416,11 +2417,11 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task) /* Check for the data_count */ dsp_value = (task->data_count) ? 1 : 0; - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, dsp_value); else - AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, dsp_value); /* Map addr only if there is data_count */ @@ -2538,8 +2539,9 @@ static void beiscsi_find_mem_req(struct beiscsi_hba *phba) static int beiscsi_alloc_mem(struct beiscsi_hba *phba) { - struct be_mem_descriptor *mem_descr; dma_addr_t bus_add; + struct hwi_controller *phwi_ctrlr; + struct be_mem_descriptor *mem_descr; struct mem_array *mem_arr, *mem_arr_orig; unsigned int i, j, alloc_size, curr_alloc_size; @@ -2547,9 +2549,18 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) if (!phba->phwi_ctrlr) return -ENOMEM; + /* Allocate memory for wrb_context */ + phwi_ctrlr = phba->phwi_ctrlr; + phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) * + phba->params.cxns_per_ctrl, + GFP_KERNEL); + if (!phwi_ctrlr->wrb_context) + return -ENOMEM; + phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr), GFP_KERNEL); if (!phba->init_mem) { + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2558,6 +2569,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) GFP_KERNEL); if (!mem_arr_orig) { kfree(phba->init_mem); + kfree(phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2628,6 +2640,7 @@ static int beiscsi_alloc_mem(struct beiscsi_hba *phba) } kfree(mem_arr_orig); kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); return -ENOMEM; } @@ -2666,6 +2679,7 @@ static void iscsi_init_global_templates(struct beiscsi_hba *phba) static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) { struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb; + struct hwi_context_memory *phwi_ctxt; struct wrb_handle *pwrb_handle = NULL; struct hwi_controller *phwi_ctrlr; struct 
hwi_wrb_context *pwrb_context; @@ -2680,7 +2694,18 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) mem_descr_wrb += HWI_MEM_WRB; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + /* Allocate memory for WRBQ */ + phwi_ctxt = phwi_ctrlr->phwi_ctxt; + phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) * + phba->fw_config.iscsi_cid_count, + GFP_KERNEL); + if (!phwi_ctxt->be_wrbq) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : WRBQ Mem Alloc Failed\n"); + return -ENOMEM; + } + + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; pwrb_context->pwrb_handle_base = kzalloc(sizeof(struct wrb_handle *) * @@ -2723,7 +2748,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) } } idx = 0; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; if (!num_cxn_wrb) { pwrb = mem_descr_wrb->mem_array[idx].virtual_address; @@ -2752,7 +2777,7 @@ static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba) return -ENOMEM; } -static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) +static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) { struct hwi_controller *phwi_ctrlr; struct hba_parameters *p = &phba->params; @@ -2770,6 +2795,15 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; memset(pasync_ctx, 0, sizeof(*pasync_ctx)); + pasync_ctx->async_entry = kzalloc(sizeof(struct hwi_async_entry) * + phba->fw_config.iscsi_cid_count, + GFP_KERNEL); + if (!pasync_ctx->async_entry) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx Mem Alloc Failed\n"); + return -ENOMEM; + } + pasync_ctx->num_entries = p->asyncpdus_per_ctrl; pasync_ctx->buffer_size = p->defpdu_hdr_sz; @@ -2934,6 +2968,8 @@ static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba) pasync_ctx->async_header.ep_read_ptr = -1; pasync_ctx->async_data.host_write_ptr = 0; pasync_ctx->async_data.ep_read_ptr = -1; + + return 0; } static int @@ -3293,6 +3329,7 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, void *wrb_vaddr; struct be_dma_mem sgl; struct be_mem_descriptor *mem_descr; + struct hwi_wrb_context *pwrb_context; int status; idx = 0; @@ -3351,8 +3388,9 @@ beiscsi_create_wrb_rings(struct beiscsi_hba *phba, kfree(pwrb_arr); return status; } - phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i]. 
- id; + pwrb_context = &phwi_ctrlr->wrb_context[i]; + pwrb_context->cid = phwi_context->be_wrbq[i].id; + BE_SET_CID_TO_CRI(i, pwrb_context->cid); } kfree(pwrb_arr); return 0; @@ -3365,7 +3403,7 @@ static void free_wrb_handles(struct beiscsi_hba *phba) struct hwi_wrb_context *pwrb_context; phwi_ctrlr = phba->phwi_ctrlr; - for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) { + for (index = 0; index < phba->params.cxns_per_ctrl; index++) { pwrb_context = &phwi_ctrlr->wrb_context[index]; kfree(pwrb_context->pwrb_handle_base); kfree(pwrb_context->pwrb_handle_basestd); @@ -3394,6 +3432,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) struct be_ctrl_info *ctrl = &phba->ctrl; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; + struct hwi_async_pdu_context *pasync_ctx; int i, eq_num; phwi_ctrlr = phba->phwi_ctrlr; @@ -3403,6 +3442,7 @@ static void hwi_cleanup(struct beiscsi_hba *phba) if (q->created) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ); } + kfree(phwi_context->be_wrbq); free_wrb_handles(phba); q = &phwi_context->be_def_hdrq; @@ -3430,6 +3470,10 @@ static void hwi_cleanup(struct beiscsi_hba *phba) beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ); } be_mcc_queues_destroy(phba); + + pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx; + kfree(pasync_ctx->async_entry); + be_cmd_fw_uninit(ctrl); } static int be_mcc_queues_create(struct beiscsi_hba *phba, @@ -3607,7 +3651,12 @@ static int hwi_init_controller(struct beiscsi_hba *phba) if (beiscsi_init_wrb_handle(phba)) return -ENOMEM; - hwi_init_async_pdu_ctx(phba); + if (hwi_init_async_pdu_ctx(phba)) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : hwi_init_async_pdu_ctx failed\n"); + return -ENOMEM; + } + if (hwi_init_port(phba) != 0) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : hwi_init_controller failed\n"); @@ -3637,6 +3686,7 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba) mem_descr++; } kfree(phba->init_mem); + kfree(phba->phwi_ctrlr->wrb_context); kfree(phba->phwi_ctrlr); } @@ -3769,7 +3819,7 @@ static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba) static int hba_setup_cid_tbls(struct beiscsi_hba *phba) { - int i, new_cid; + int i; phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl, GFP_KERNEL); @@ -3780,19 +3830,33 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba) return -ENOMEM; } phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) * - phba->params.cxns_per_ctrl * 2, GFP_KERNEL); + phba->params.cxns_per_ctrl, GFP_KERNEL); if (!phba->ep_array) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BM_%d : Failed to allocate memory in " "hba_setup_cid_tbls\n"); kfree(phba->cid_array); + phba->cid_array = NULL; return -ENOMEM; } - new_cid = phba->fw_config.iscsi_cid_start; - for (i = 0; i < phba->params.cxns_per_ctrl; i++) { - phba->cid_array[i] = new_cid; - new_cid += 2; + + phba->conn_table = kzalloc(sizeof(struct beiscsi_conn *) * + phba->params.cxns_per_ctrl, GFP_KERNEL); + if (!phba->conn_table) { + beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, + "BM_%d : Failed to allocate memory in" + "hba_setup_cid_tbls\n"); + + kfree(phba->cid_array); + kfree(phba->ep_array); + phba->cid_array = NULL; + phba->ep_array = NULL; + return -ENOMEM; } + + for (i = 0; i < phba->params.cxns_per_ctrl; i++) + phba->cid_array[i] = phba->phwi_ctrlr->wrb_context[i].cid; + phba->avlbl_cids = phba->params.cxns_per_ctrl; return 0; } @@ -4062,6 +4126,53 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba) kfree(phba->eh_sgl_hndl_base); 
kfree(phba->cid_array); kfree(phba->ep_array); + kfree(phba->conn_table); +} + +/** + * beiscsi_free_mgmt_task_handles()- Free driver CXN resources + * @beiscsi_conn: ptr to the conn to be cleaned up + * @task: ptr to iscsi_task resource to be freed. + * + * Free driver mgmt resources binded to CXN. + **/ +void +beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task) +{ + struct beiscsi_io_task *io_task; + struct beiscsi_hba *phba = beiscsi_conn->phba; + struct hwi_wrb_context *pwrb_context; + struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + + phwi_ctrlr = phba->phwi_ctrlr; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; + + io_task = task->dd_data; + + if (io_task->pwrb_handle) { + memset(io_task->pwrb_handle->pwrb, 0, + sizeof(struct iscsi_wrb)); + free_wrb_handle(phba, pwrb_context, + io_task->pwrb_handle); + io_task->pwrb_handle = NULL; + } + + if (io_task->psgl_handle) { + spin_lock_bh(&phba->mgmt_sgl_lock); + free_mgmt_sgl_handle(phba, + io_task->psgl_handle); + io_task->psgl_handle = NULL; + spin_unlock_bh(&phba->mgmt_sgl_lock); + } + + if (io_task->mtask_addr) + pci_unmap_single(phba->pcidev, + io_task->mtask_addr, + io_task->mtask_data_count, + PCI_DMA_TODEVICE); } /** @@ -4078,10 +4189,11 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess; struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; + uint16_t cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->cmd_bhs) { pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs, @@ -4103,27 +4215,8 @@ static void beiscsi_cleanup_task(struct iscsi_task *task) io_task->psgl_handle = NULL; } } else { - if (!beiscsi_conn->login_in_progress) { - if (io_task->pwrb_handle) { - free_wrb_handle(phba, pwrb_context, - io_task->pwrb_handle); - io_task->pwrb_handle = NULL; - } - if (io_task->psgl_handle) { - spin_lock(&phba->mgmt_sgl_lock); - free_mgmt_sgl_handle(phba, - io_task->psgl_handle); - spin_unlock(&phba->mgmt_sgl_lock); - io_task->psgl_handle = NULL; - } - if (io_task->mtask_addr) { - pci_unmap_single(phba->pcidev, - io_task->mtask_addr, - io_task->mtask_data_count, - PCI_DMA_TODEVICE); - io_task->mtask_addr = 0; - } - } + if (!beiscsi_conn->login_in_progress) + beiscsi_free_mgmt_task_handles(beiscsi_conn, task); } } @@ -4146,15 +4239,14 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, beiscsi_cleanup_task(task); spin_unlock_bh(&session->lock); - pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start)); + pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid); /* Check for the adapter family */ - if (chip_skh_r(phba->pcidev)) - beiscsi_offload_cxn_v2(params, pwrb_handle); - else + if (is_chip_be2_be3r(phba)) beiscsi_offload_cxn_v0(params, pwrb_handle, phba->init_mem); + else + beiscsi_offload_cxn_v2(params, pwrb_handle); be_dws_le_to_cpu(pwrb_handle->pwrb, sizeof(struct iscsi_target_context_update_wrb)); @@ -4194,6 +4286,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) struct hwi_wrb_context *pwrb_context; struct hwi_controller *phwi_ctrlr; itt_t itt; + uint16_t cri_index = 0; struct beiscsi_session *beiscsi_sess = 
beiscsi_conn->beiscsi_sess; dma_addr_t paddr; @@ -4223,8 +4316,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) goto free_hndls; } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4236,6 +4328,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } else { io_task->scsi_cmnd = NULL; if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) { + beiscsi_conn->task = task; if (!beiscsi_conn->login_in_progress) { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = (struct sgl_handle *) @@ -4257,8 +4350,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->psgl_handle; io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | @@ -4278,7 +4370,6 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) io_task->pwrb_handle = beiscsi_conn->plogin_wrb_handle; } - beiscsi_conn->task = task; } else { spin_lock(&phba->mgmt_sgl_lock); io_task->psgl_handle = alloc_mgmt_sgl_handle(phba); @@ -4295,8 +4386,7 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) } io_task->pwrb_handle = alloc_wrb_handle(phba, - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start); + beiscsi_conn->beiscsi_conn_cid); if (!io_task->pwrb_handle) { beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG, @@ -4324,12 +4414,13 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode) free_mgmt_hndls: spin_lock(&phba->mgmt_sgl_lock); free_mgmt_sgl_handle(phba, io_task->psgl_handle); + io_task->psgl_handle = NULL; spin_unlock(&phba->mgmt_sgl_lock); free_hndls: phwi_ctrlr = phba->phwi_ctrlr; - pwrb_context = &phwi_ctrlr->wrb_context[ - beiscsi_conn->beiscsi_conn_cid - - phba->fw_config.iscsi_cid_start]; + cri_index = BE_GET_CRI_FROM_CID( + beiscsi_conn->beiscsi_conn_cid); + pwrb_context = &phwi_ctrlr->wrb_context[cri_index]; if (io_task->pwrb_handle) free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle); io_task->pwrb_handle = NULL; @@ -4351,7 +4442,6 @@ int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg, unsigned int doorbell = 0; pwrb = io_task->pwrb_handle->pwrb; - memset(pwrb, 0, sizeof(*pwrb)); io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0; io_task->bhs_len = sizeof(struct be_cmd_bhs); @@ -4465,19 +4555,7 @@ static int beiscsi_mtask(struct iscsi_task *task) pwrb = io_task->pwrb_handle->pwrb; memset(pwrb, 0, sizeof(*pwrb)); - if (chip_skh_r(phba->pcidev)) { - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, - be32_to_cpu(task->cmdsn)); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, - io_task->pwrb_handle->wrb_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, - io_task->psgl_handle->sgl_index); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, - task->data_count); - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, - io_task->pwrb_handle->nxt_wrb_index); - pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; - } else { + if (is_chip_be2_be3r(phba)) { AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, be32_to_cpu(task->cmdsn)); AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb, @@ -4489,6 +4567,18 @@ static int beiscsi_mtask(struct iscsi_task *task) 
AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb, io_task->pwrb_handle->nxt_wrb_index); pwrb_typeoffset = BE_WRB_TYPE_OFFSET; + } else { + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb, + be32_to_cpu(task->cmdsn)); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb, + io_task->pwrb_handle->wrb_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb, + io_task->psgl_handle->sgl_index); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, + task->data_count); + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb, + io_task->pwrb_handle->nxt_wrb_index); + pwrb_typeoffset = SKH_WRB_TYPE_OFFSET; } @@ -4501,19 +4591,19 @@ static int beiscsi_mtask(struct iscsi_task *task) case ISCSI_OP_NOOP_OUT: if (task->hdr->ttt != ISCSI_RESERVED_TAG) { ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 1); } else { ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset); - if (chip_skh_r(phba->pcidev)) - AMAP_SET_BITS(struct amap_iscsi_wrb_v2, + if (is_chip_be2_be3r(phba)) + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); else - AMAP_SET_BITS(struct amap_iscsi_wrb, + AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dmsg, pwrb, 0); } hwi_write_buffer(pwrb, task); @@ -4540,9 +4630,9 @@ static int beiscsi_mtask(struct iscsi_task *task) } /* Set the task type */ - io_task->wrb_type = (chip_skh_r(phba->pcidev)) ? - AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) : - AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb); + io_task->wrb_type = (is_chip_be2_be3r(phba)) ? + AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) : + AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb); doorbell |= cid & DB_WRB_POST_CID_MASK; doorbell |= (io_task->pwrb_handle->wrb_index & @@ -4834,6 +4924,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, case OC_SKH_ID1: phba->generation = BE_GEN4; phba->iotask_fn = beiscsi_iotask_v2; + break; default: phba->generation = 0; } diff --git a/trunk/drivers/scsi/be2iscsi/be_main.h b/trunk/drivers/scsi/be2iscsi/be_main.h index 5946577d79d6..2c06ef3c02ac 100644 --- a/trunk/drivers/scsi/be2iscsi/be_main.h +++ b/trunk/drivers/scsi/be2iscsi/be_main.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -36,7 +36,7 @@ #include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "10.0.272.0" +#define BUILD_STR "10.0.467.0" #define BE_NAME "Emulex OneConnect" \ "Open-iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -66,8 +66,9 @@ #define MAX_CPUS 64 #define BEISCSI_MAX_NUM_CPUS 7 -#define OC_SKH_MAX_NUM_CPUS 63 +#define OC_SKH_MAX_NUM_CPUS 31 +#define BEISCSI_VER_STRLEN 32 #define BEISCSI_SGLIST_ELEMENTS 30 @@ -265,7 +266,9 @@ struct invalidate_command_table { unsigned short cid; } __packed; -#define chip_skh_r(pdev) (pdev->device == OC_SKH_ID1) +#define chip_be2(phba) (phba->generation == BE_GEN2) +#define chip_be3_r(phba) (phba->generation == BE_GEN3) +#define is_chip_be2_be3r(phba) (chip_be3_r(phba) || (chip_be2(phba))) struct beiscsi_hba { struct hba_parameters params; struct hwi_controller *phwi_ctrlr; @@ -304,10 +307,15 @@ struct beiscsi_hba { unsigned short avlbl_cids; unsigned short cid_alloc; unsigned short cid_free; - struct beiscsi_conn *conn_table[BE2_MAX_SESSIONS * 2]; struct list_head hba_queue; +#define BE_MAX_SESSION 2048 +#define BE_SET_CID_TO_CRI(cri_index, cid) \ + (phba->cid_to_cri_map[cid] = cri_index) +#define BE_GET_CRI_FROM_CID(cid) (phba->cid_to_cri_map[cid]) + unsigned short cid_to_cri_map[BE_MAX_SESSION]; unsigned short *cid_array; struct iscsi_endpoint **ep_array; + struct beiscsi_conn **conn_table; struct iscsi_boot_kset *boot_kset; struct Scsi_Host *shost; struct iscsi_iface *ipv4_iface; @@ -339,6 +347,7 @@ struct beiscsi_hba { struct delayed_work beiscsi_hw_check_task; u8 mac_address[ETH_ALEN]; + char fw_ver_str[BEISCSI_VER_STRLEN]; char wq_name[20]; struct workqueue_struct *wq; /* The actuak work queue */ struct be_ctrl_info ctrl; @@ -563,7 +572,7 @@ struct hwi_async_pdu_context { * This is a varying size list! Do not add anything * after this entry!! */ - struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2]; + struct hwi_async_entry *async_entry; }; #define PDUCQE_CODE_MASK 0x0000003F @@ -749,6 +758,8 @@ void free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle); void beiscsi_process_all_cqs(struct work_struct *work); +void beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn, + struct iscsi_task *task); static inline bool beiscsi_error(struct beiscsi_hba *phba) { @@ -933,7 +944,7 @@ struct hwi_controller { struct sgl_handle *psgl_handle_base; unsigned int wrb_mem_index; - struct hwi_wrb_context wrb_context[BE2_MAX_SESSIONS * 2]; + struct hwi_wrb_context *wrb_context; struct mcc_wrb *pmcc_wrb_base; struct be_ring default_pdu_hdr; struct be_ring default_pdu_data; @@ -970,9 +981,7 @@ struct hwi_context_memory { struct be_queue_info be_def_hdrq; struct be_queue_info be_def_dataq; - struct be_queue_info be_wrbq[BE2_MAX_SESSIONS]; - struct be_mcc_wrb_context *pbe_mcc_context; - + struct be_queue_info *be_wrbq; struct hwi_async_pdu_context *pasync_ctx; }; diff --git a/trunk/drivers/scsi/be2iscsi/be_mgmt.c b/trunk/drivers/scsi/be2iscsi/be_mgmt.c index 55cc9902263d..245a9595a93a 100644 --- a/trunk/drivers/scsi/be2iscsi/be_mgmt.c +++ b/trunk/drivers/scsi/be2iscsi/be_mgmt.c @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -368,6 +368,8 @@ int mgmt_check_supported_fw(struct be_ctrl_info *ctrl, beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, "BM_%d : phba->fw_config.iscsi_features = %d\n", phba->fw_config.iscsi_features); + memcpy(phba->fw_ver_str, resp->params.hba_attribs. + firmware_version_string, BEISCSI_VER_STRLEN); } else beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, "BG_%d : Failed in mgmt_check_supported_fw\n"); @@ -1259,6 +1261,45 @@ beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, BE_NAME "\n"); } +/** + * beiscsi_fw_ver_disp()- Display Firmware Version + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Firmware version + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_fw_ver_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%s\n", phba->fw_ver_str); +} + +/** + * beiscsi_active_cid_disp()- Display Sessions Active + * @dev: ptr to device not used. + * @attr: device attribute, not used. + * @buf: contains formatted text Session Count + * + * return + * size of the formatted string + **/ +ssize_t +beiscsi_active_cid_disp(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct beiscsi_hba *phba = iscsi_host_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", + (phba->params.cxns_per_ctrl - phba->avlbl_cids)); +} + /** * beiscsi_adap_family_disp()- Display adapter family. * @dev: ptr to device to get priv structure diff --git a/trunk/drivers/scsi/be2iscsi/be_mgmt.h b/trunk/drivers/scsi/be2iscsi/be_mgmt.h index 2e4968add799..04af7e74fe48 100644 --- a/trunk/drivers/scsi/be2iscsi/be_mgmt.h +++ b/trunk/drivers/scsi/be2iscsi/be_mgmt.h @@ -1,5 +1,5 @@ /** - * Copyright (C) 2005 - 2012 Emulex + * Copyright (C) 2005 - 2013 Emulex * All rights reserved. 
* * This program is free software; you can redistribute it and/or @@ -156,25 +156,25 @@ union invalidate_commands_params { } __packed; struct mgmt_hba_attributes { - u8 flashrom_version_string[32]; - u8 manufacturer_name[32]; + u8 flashrom_version_string[BEISCSI_VER_STRLEN]; + u8 manufacturer_name[BEISCSI_VER_STRLEN]; u32 supported_modes; u8 seeprom_version_lo; u8 seeprom_version_hi; u8 rsvd0[2]; u32 fw_cmd_data_struct_version; u32 ep_fw_data_struct_version; - u32 future_reserved[12]; + u8 ncsi_version_string[12]; u32 default_extended_timeout; - u8 controller_model_number[32]; + u8 controller_model_number[BEISCSI_VER_STRLEN]; u8 controller_description[64]; - u8 controller_serial_number[32]; - u8 ip_version_string[32]; - u8 firmware_version_string[32]; - u8 bios_version_string[32]; - u8 redboot_version_string[32]; - u8 driver_version_string[32]; - u8 fw_on_flash_version_string[32]; + u8 controller_serial_number[BEISCSI_VER_STRLEN]; + u8 ip_version_string[BEISCSI_VER_STRLEN]; + u8 firmware_version_string[BEISCSI_VER_STRLEN]; + u8 bios_version_string[BEISCSI_VER_STRLEN]; + u8 redboot_version_string[BEISCSI_VER_STRLEN]; + u8 driver_version_string[BEISCSI_VER_STRLEN]; + u8 fw_on_flash_version_string[BEISCSI_VER_STRLEN]; u32 functionalities_supported; u16 max_cdblength; u8 asic_revision; @@ -190,7 +190,8 @@ struct mgmt_hba_attributes { u32 firmware_post_status; u32 hba_mtu[8]; u8 iscsi_features; - u8 future_u8[3]; + u8 asic_generation; + u8 future_u8[2]; u32 future_u32[3]; } __packed; @@ -207,7 +208,7 @@ struct mgmt_controller_attributes { u64 unique_identifier; u8 netfilters; u8 rsvd0[3]; - u8 future_u32[4]; + u32 future_u32[4]; } __packed; struct be_mgmt_controller_attributes { @@ -311,6 +312,12 @@ int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag); ssize_t beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr, char *buf); +ssize_t beiscsi_fw_ver_disp(struct device *dev, + struct device_attribute *attr, char *buf); + +ssize_t beiscsi_active_cid_disp(struct device *dev, + struct device_attribute *attr, char *buf); + ssize_t beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc.h b/trunk/drivers/scsi/bnx2fc/bnx2fc.h index 11596b2c4702..08b22a901c25 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc.h +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc.h @@ -2,7 +2,7 @@ #define _BNX2FC_H_ /* bnx2fc.h: Broadcom NetXtreme II Linux FCoE offload driver. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -64,10 +64,12 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.13" +#define BNX2FC_VERSION "1.0.14" #define PFX "bnx2fc: " +#define BCM_CHIP_LEN 16 + #define BNX2X_DOORBELL_PCI_BAR 2 #define BNX2FC_MAX_BD_LEN 0xffff @@ -241,6 +243,8 @@ struct bnx2fc_hba { int wait_for_link_down; int num_ofld_sess; struct list_head vports; + + char chip_num[BCM_CHIP_LEN]; }; struct bnx2fc_interface { diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_els.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_els.c index bdbbb13b8534..b1c9a4f8caee 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -3,7 +3,7 @@ * This file contains helper routines that handle ELS requests * and responses. 
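[Editor's note] In the mgmt_hba_attributes hunk the literal 32s become BEISCSI_VER_STRLEN and one reserved byte is re-carved into asic_generation; because this structure mirrors a firmware response, its packed size must not change. A small compile-time check of that invariant, using only a representative tail of the structure with an assumed total size:

#include <stdint.h>

#define BEISCSI_VER_STRLEN 32

/* Representative tail of the attributes block; layout is illustrative. */
struct hba_attribs_tail {
    uint8_t  firmware_version_string[BEISCSI_VER_STRLEN];
    uint8_t  iscsi_features;
    uint8_t  asic_generation;          /* carved out of future_u8[3] */
    uint8_t  future_u8[2];
    uint32_t future_u32[3];
} __attribute__((packed));

/* 32 + 1 + 1 + 2 + 12 bytes: splitting the reserved bytes must not
 * move anything that follows them in the wire format. */
_Static_assert(sizeof(struct hba_attribs_tail) == 48,
               "firmware wire format changed size");

int main(void)
{
    return 0;                          /* the check happens at compile time */
}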
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 7dffec1e5715..69ac55495c1d 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -3,7 +3,7 @@ * cnic modules to create FCoE instances, send/receive non-offloaded * FIP/FCoE packets, listen to link events etc. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Dec 21, 2012" +#define DRV_MODULE_RELDATE "Mar 08, 2013" static char version[] = @@ -679,6 +679,7 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) { struct fcoe_port *port = lport_priv(lport); struct bnx2fc_interface *interface = port->priv; + struct bnx2fc_hba *hba = interface->hba; struct Scsi_Host *shost = lport->host; int rc = 0; @@ -699,8 +700,9 @@ static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev) } if (!lport->vport) fc_host_max_npiv_vports(lport->host) = USHRT_MAX; - sprintf(fc_host_symbolic_name(lport->host), "%s v%s over %s", - BNX2FC_NAME, BNX2FC_VERSION, + snprintf(fc_host_symbolic_name(lport->host), 256, + "%s (Broadcom %s) v%s over %s", + BNX2FC_NAME, hba->chip_num, BNX2FC_VERSION, interface->netdev->name); return 0; @@ -1656,23 +1658,60 @@ static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba) static int bnx2fc_bind_pcidev(struct bnx2fc_hba *hba) { struct cnic_dev *cnic; + struct pci_dev *pdev; if (!hba->cnic) { printk(KERN_ERR PFX "cnic is NULL\n"); return -ENODEV; } cnic = hba->cnic; - hba->pcidev = cnic->pcidev; - if (hba->pcidev) - pci_dev_get(hba->pcidev); + pdev = hba->pcidev = cnic->pcidev; + if (!hba->pcidev) + return -ENODEV; + switch (pdev->device) { + case PCI_DEVICE_ID_NX2_57710: + strncpy(hba->chip_num, "BCM57710", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57711: + strncpy(hba->chip_num, "BCM57711", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57712: + case PCI_DEVICE_ID_NX2_57712_MF: + case PCI_DEVICE_ID_NX2_57712_VF: + strncpy(hba->chip_num, "BCM57712", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57800: + case PCI_DEVICE_ID_NX2_57800_MF: + case PCI_DEVICE_ID_NX2_57800_VF: + strncpy(hba->chip_num, "BCM57800", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57810: + case PCI_DEVICE_ID_NX2_57810_MF: + case PCI_DEVICE_ID_NX2_57810_VF: + strncpy(hba->chip_num, "BCM57810", BCM_CHIP_LEN); + break; + case PCI_DEVICE_ID_NX2_57840: + case PCI_DEVICE_ID_NX2_57840_MF: + case PCI_DEVICE_ID_NX2_57840_VF: + case PCI_DEVICE_ID_NX2_57840_2_20: + case PCI_DEVICE_ID_NX2_57840_4_10: + strncpy(hba->chip_num, "BCM57840", BCM_CHIP_LEN); + break; + default: + pr_err(PFX "Unknown device id 0x%x\n", pdev->device); + break; + } + pci_dev_get(hba->pcidev); return 0; } static void bnx2fc_unbind_pcidev(struct bnx2fc_hba *hba) { - if (hba->pcidev) + if (hba->pcidev) { + hba->chip_num[0] = '\0'; pci_dev_put(hba->pcidev); + } hba->pcidev = NULL; } diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c 
b/trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 50510ffe1bf5..c0d035a8f8f9 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -2,7 +2,7 @@ * This file contains the code that low level functions that interact * with 57712 FCoE firmware. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -126,7 +126,11 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba) fcoe_init3.error_bit_map_lo = 0xffffffff; fcoe_init3.error_bit_map_hi = 0xffffffff; - fcoe_init3.perf_config = 1; + /* + * enable both cached connection and cached tasks + * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both + */ + fcoe_init3.perf_config = 3; kwqe_arr[0] = (struct kwqe *) &fcoe_init1; kwqe_arr[1] = (struct kwqe *) &fcoe_init2; diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_io.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_io.c index 723a9a8ba5ee..575142e92d9c 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1,7 +1,7 @@ /* bnx2fc_io.c: Broadcom NetXtreme II Linux FCoE offload driver. * IO manager and SCSI IO processing. * - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -1270,8 +1270,11 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd) spin_lock_bh(&tgt->tgt_lock); io_req->wait_for_comp = 0; - if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, - &io_req->req_flags))) { + if (test_bit(BNX2FC_FLAG_IO_COMPL, &io_req->req_flags)) { + BNX2FC_IO_DBG(io_req, "IO completed in a different context\n"); + rc = SUCCESS; + } else if (!(test_and_set_bit(BNX2FC_FLAG_ABTS_DONE, + &io_req->req_flags))) { /* Let the scsi-ml try to recover this command */ printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", io_req->xid); diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_tgt.c index c57a3bb8a9fb..4d93177dfb53 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -2,7 +2,7 @@ * Handles operations such as session offload/upload etc, and manages * session resources such as connection id and qp resources. 
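[Editor's note] The bnx2fc_eh_abort() change above closes a race: if the I/O already completed in another context (BNX2FC_FLAG_IO_COMPL), the handler reports SUCCESS instead of treating the abort as outstanding. The sketch below shows the ordering concern with C11 atomics rather than the driver's exact state machine; the flag names and return codes are placeholders, and the real driver serializes with tgt_lock instead of relying on atomics alone.

#include <stdatomic.h>
#include <stdio.h>

#define FLAG_IO_COMPL  (1u << 0)       /* completion path finished the I/O */
#define FLAG_ABTS_DONE (1u << 1)       /* abort already issued/handled */

struct io_req {
    _Atomic unsigned int flags;
};

enum eh_result { EH_SUCCESS, EH_FAILED };

static enum eh_result eh_abort(struct io_req *io)
{
    /* I/O already finished elsewhere: nothing left to abort. */
    if (atomic_load(&io->flags) & FLAG_IO_COMPL)
        return EH_SUCCESS;

    /* Claim the abort exactly once (test-and-set semantics). */
    unsigned int old = atomic_fetch_or(&io->flags, FLAG_ABTS_DONE);
    if (old & FLAG_ABTS_DONE)
        return EH_SUCCESS;             /* abort already handled elsewhere */

    /* ...otherwise the abort did not complete; escalate to SCSI EH... */
    return EH_FAILED;
}

int main(void)
{
    struct io_req io = { 0 };
    atomic_fetch_or(&io.flags, FLAG_IO_COMPL);
    printf("%s\n", eh_abort(&io) == EH_SUCCESS ? "SUCCESS" : "FAILED");
    return 0;
}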
* - * Copyright (c) 2008 - 2011 Broadcom Corporation + * Copyright (c) 2008 - 2013 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by diff --git a/trunk/drivers/scsi/csiostor/csio_lnode.h b/trunk/drivers/scsi/csiostor/csio_lnode.h index 0f9c04175b11..372a67d122d3 100644 --- a/trunk/drivers/scsi/csiostor/csio_lnode.h +++ b/trunk/drivers/scsi/csiostor/csio_lnode.h @@ -114,7 +114,7 @@ struct csio_lnode_stats { uint32_t n_rnode_match; /* matched rnode */ uint32_t n_dev_loss_tmo; /* Device loss timeout */ uint32_t n_fdmi_err; /* fdmi err */ - uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */ uint32_t n_rnode_alloc; /* rnode allocated */ uint32_t n_rnode_free; /* rnode freed */ diff --git a/trunk/drivers/scsi/csiostor/csio_rnode.h b/trunk/drivers/scsi/csiostor/csio_rnode.h index 65940096a80d..433434221222 100644 --- a/trunk/drivers/scsi/csiostor/csio_rnode.h +++ b/trunk/drivers/scsi/csiostor/csio_rnode.h @@ -63,7 +63,7 @@ struct csio_rnode_stats { uint32_t n_err_nomem; /* error nomem */ uint32_t n_evt_unexp; /* unexpected event */ uint32_t n_evt_drop; /* unexpected event */ - uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO]; /* fw events */ + uint32_t n_evt_fw[PROTO_ERR_IMPL_LOGO + 1]; /* fw events */ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */ uint32_t n_lun_rst; /* Number of resets of * of LUNs under this diff --git a/trunk/drivers/scsi/fnic/fnic.h b/trunk/drivers/scsi/fnic/fnic.h index 98436c363035..b6d1f92ed33c 100644 --- a/trunk/drivers/scsi/fnic/fnic.h +++ b/trunk/drivers/scsi/fnic/fnic.h @@ -38,7 +38,7 @@ #define DRV_NAME "fnic" #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" -#define DRV_VERSION "1.5.0.2" +#define DRV_VERSION "1.5.0.22" #define PFX DRV_NAME ": " #define DFX DRV_NAME "%d: " @@ -192,6 +192,18 @@ enum fnic_state { struct mempool; +enum fnic_evt { + FNIC_EVT_START_VLAN_DISC = 1, + FNIC_EVT_START_FCF_DISC = 2, + FNIC_EVT_MAX, +}; + +struct fnic_event { + struct list_head list; + struct fnic *fnic; + enum fnic_evt event; +}; + /* Per-instance private data structure */ struct fnic { struct fc_lport *lport; @@ -254,6 +266,18 @@ struct fnic { struct sk_buff_head frame_queue; struct sk_buff_head tx_queue; + /*** FIP related data members -- start ***/ + void (*set_vlan)(struct fnic *, u16 vlan); + struct work_struct fip_frame_work; + struct sk_buff_head fip_frame_queue; + struct timer_list fip_timer; + struct list_head vlans; + spinlock_t vlans_lock; + + struct work_struct event_work; + struct list_head evlist; + /*** FIP related data members -- end ***/ + /* copy work queue cache line section */ ____cacheline_aligned struct vnic_wq_copy wq_copy[FNIC_WQ_COPY_MAX]; /* completion queue cache line section */ @@ -278,6 +302,7 @@ static inline struct fnic *fnic_from_ctlr(struct fcoe_ctlr *fip) } extern struct workqueue_struct *fnic_event_queue; +extern struct workqueue_struct *fnic_fip_queue; extern struct device_attribute *fnic_attrs[]; void fnic_clear_intr_mode(struct fnic *fnic); @@ -289,6 +314,7 @@ int fnic_send(struct fc_lport *, struct fc_frame *); void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf); void fnic_handle_frame(struct work_struct *work); void fnic_handle_link(struct work_struct *work); +void fnic_handle_event(struct work_struct *work); int fnic_rq_cmpl_handler(struct fnic *fnic, int); int 
fnic_alloc_rq_frame(struct vnic_rq *rq); void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf); @@ -321,6 +347,12 @@ void fnic_handle_link_event(struct fnic *fnic); int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *); +void fnic_handle_fip_frame(struct work_struct *work); +void fnic_handle_fip_event(struct fnic *fnic); +void fnic_fcoe_reset_vlans(struct fnic *fnic); +void fnic_fcoe_evlist_free(struct fnic *fnic); +extern void fnic_handle_fip_timer(struct fnic *fnic); + static inline int fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) { diff --git a/trunk/drivers/scsi/fnic/fnic_fcs.c b/trunk/drivers/scsi/fnic/fnic_fcs.c index 483eb9dbe663..006fa92a02df 100644 --- a/trunk/drivers/scsi/fnic/fnic_fcs.c +++ b/trunk/drivers/scsi/fnic/fnic_fcs.c @@ -31,12 +31,20 @@ #include #include "fnic_io.h" #include "fnic.h" +#include "fnic_fip.h" #include "cq_enet_desc.h" #include "cq_exch_desc.h" +static u8 fcoe_all_fcfs[ETH_ALEN]; +struct workqueue_struct *fnic_fip_queue; struct workqueue_struct *fnic_event_queue; static void fnic_set_eth_mode(struct fnic *); +static void fnic_fcoe_send_vlan_req(struct fnic *fnic); +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic); +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *); +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag); +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb); void fnic_handle_link(struct work_struct *work) { @@ -69,6 +77,11 @@ void fnic_handle_link(struct work_struct *work) FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link down\n"); fcoe_ctlr_link_down(&fnic->ctlr); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); @@ -79,6 +92,11 @@ void fnic_handle_link(struct work_struct *work) } else if (fnic->link_status) { /* DOWN -> UP */ spin_unlock_irqrestore(&fnic->fnic_lock, flags); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + return; + } FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "link up\n"); fcoe_ctlr_link_up(&fnic->ctlr); } else { @@ -128,6 +146,441 @@ void fnic_handle_frame(struct work_struct *work) } } +void fnic_fcoe_evlist_free(struct fnic *fnic) +{ + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + list_del(&fevt->list); + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +void fnic_handle_event(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, event_work); + struct fnic_event *fevt = NULL; + struct fnic_event *next = NULL; + unsigned long flags; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (list_empty(&fnic->evlist)) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_for_each_entry_safe(fevt, next, &fnic->evlist, list) { + if (fnic->stop_rx_link_events) { + list_del(&fevt->list); + kfree(fevt); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. 
+ */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + + list_del(&fevt->list); + switch (fevt->event) { + case FNIC_EVT_START_VLAN_DISC: + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + fnic_fcoe_send_vlan_req(fnic); + spin_lock_irqsave(&fnic->fnic_lock, flags); + break; + case FNIC_EVT_START_FCF_DISC: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start FCF Discovery\n"); + fnic_fcoe_start_fcf_disc(fnic); + break; + default: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Unknown event 0x%x\n", fevt->event); + break; + } + kfree(fevt); + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); +} + +/** + * Check if the Received FIP FLOGI frame is rejected + * @fip: The FCoE controller that received the frame + * @skb: The received FIP frame + * + * Returns non-zero if the frame is rejected with unsupported cmd with + * insufficient resource els explanation. + */ +static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip, + struct sk_buff *skb) +{ + struct fc_lport *lport = fip->lp; + struct fip_header *fiph; + struct fc_frame_header *fh = NULL; + struct fip_desc *desc; + struct fip_encaps *els; + enum fip_desc_type els_dtype = 0; + u16 op; + u8 els_op; + u8 sub; + + size_t els_len = 0; + size_t rlen; + size_t dlen = 0; + + if (skb_linearize(skb)) + return 0; + + if (skb->len < sizeof(*fiph)) + return 0; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (op != FIP_OP_LS) + return 0; + + if (sub != FIP_SC_REP) + return 0; + + rlen = ntohs(fiph->fip_dl_len) * 4; + if (rlen + sizeof(*fiph) > skb->len) + return 0; + + desc = (struct fip_desc *)(fiph + 1); + dlen = desc->fip_dlen * FIP_BPW; + + if (desc->fip_dtype == FIP_DT_FLOGI) { + + shost_printk(KERN_DEBUG, lport->host, + " FIP TYPE FLOGI: fab name:%llx " + "vfid:%d map:%x\n", + fip->sel_fcf->fabric_name, fip->sel_fcf->vfid, + fip->sel_fcf->fc_map); + if (dlen < sizeof(*els) + sizeof(*fh) + 1) + return 0; + + els_len = dlen - sizeof(*els); + els = (struct fip_encaps *)desc; + fh = (struct fc_frame_header *)(els + 1); + els_dtype = desc->fip_dtype; + + if (!fh) + return 0; + + /* + * ELS command code, reason and explanation should be = Reject, + * unsupported command and insufficient resource + */ + els_op = *(u8 *)(fh + 1); + if (els_op == ELS_LS_RJT) { + shost_printk(KERN_INFO, lport->host, + "Flogi Request Rejected by Switch\n"); + return 1; + } + shost_printk(KERN_INFO, lport->host, + "Flogi Request Accepted by Switch\n"); + } + return 0; +} + +static void fnic_fcoe_send_vlan_req(struct fnic *fnic) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct sk_buff *skb; + char *eth_fr; + int fr_len; + struct fip_vlan *vlan; + u64 vlan_tov; + + fnic_fcoe_reset_vlans(fnic); + fnic->set_vlan(fnic, 0); + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Sending VLAN request...\n"); + skb = dev_alloc_skb(sizeof(struct fip_vlan)); + if (!skb) + return; + + fr_len = sizeof(*vlan); + eth_fr = (char *)skb->data; + vlan = (struct fip_vlan *)eth_fr; + + memset(vlan, 0, sizeof(*vlan)); + memcpy(vlan->eth.h_source, fip->ctl_src_addr, ETH_ALEN); + memcpy(vlan->eth.h_dest, fcoe_all_fcfs, ETH_ALEN); + vlan->eth.h_proto = htons(ETH_P_FIP); + + vlan->fip.fip_ver = FIP_VER_ENCAPS(FIP_VER); + vlan->fip.fip_op = htons(FIP_OP_VLAN); + vlan->fip.fip_subcode = FIP_SC_VL_REQ; + vlan->fip.fip_dl_len = htons(sizeof(vlan->desc) / FIP_BPW); + + vlan->desc.mac.fd_desc.fip_dtype = FIP_DT_MAC; + 
vlan->desc.mac.fd_desc.fip_dlen = sizeof(vlan->desc.mac) / FIP_BPW; + memcpy(&vlan->desc.mac.fd_mac, fip->ctl_src_addr, ETH_ALEN); + + vlan->desc.wwnn.fd_desc.fip_dtype = FIP_DT_NAME; + vlan->desc.wwnn.fd_desc.fip_dlen = sizeof(vlan->desc.wwnn) / FIP_BPW; + put_unaligned_be64(fip->lp->wwnn, &vlan->desc.wwnn.fd_wwn); + + skb_put(skb, sizeof(*vlan)); + skb->protocol = htons(ETH_P_FIP); + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + fip->send(fip, skb); + + /* set a timer so that we can retry if there no response */ + vlan_tov = jiffies + msecs_to_jiffies(FCOE_CTLR_FIPVLAN_TOV); + mod_timer(&fnic->fip_timer, round_jiffies(vlan_tov)); +} + +static void fnic_fcoe_process_vlan_resp(struct fnic *fnic, struct sk_buff *skb) +{ + struct fcoe_ctlr *fip = &fnic->ctlr; + struct fip_header *fiph; + struct fip_desc *desc; + u16 vid; + size_t rlen; + size_t dlen; + struct fcoe_vlan *vlan; + u64 sol_time; + unsigned long flags; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response...\n"); + + fiph = (struct fip_header *) skb->data; + + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "Received VLAN response... OP 0x%x SUB_OP 0x%x\n", + ntohs(fiph->fip_op), fiph->fip_subcode); + + rlen = ntohs(fiph->fip_dl_len) * 4; + fnic_fcoe_reset_vlans(fnic); + spin_lock_irqsave(&fnic->vlans_lock, flags); + desc = (struct fip_desc *)(fiph + 1); + while (rlen > 0) { + dlen = desc->fip_dlen * FIP_BPW; + switch (desc->fip_dtype) { + case FIP_DT_VLAN: + vid = ntohs(((struct fip_vlan_desc *)desc)->fd_vlan); + shost_printk(KERN_INFO, fnic->lport->host, + "process_vlan_resp: FIP VLAN %d\n", vid); + vlan = kmalloc(sizeof(*vlan), + GFP_ATOMIC); + if (!vlan) { + /* retry from timer */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + goto out; + } + memset(vlan, 0, sizeof(struct fcoe_vlan)); + vlan->vid = vid & 0x0fff; + vlan->state = FIP_VLAN_AVAIL; + list_add_tail(&vlan->list, &fnic->vlans); + break; + } + desc = (struct fip_desc *)((char *)desc + dlen); + rlen -= dlen; + } + + /* any VLAN descriptors present ? 
*/ + if (list_empty(&fnic->vlans)) { + /* retry from timer */ + FNIC_FCS_DBG(KERN_INFO, fnic->lport->host, + "No VLAN descriptors in FIP VLAN response\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + goto out; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count++; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(fip); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +out: + return; +} + +static void fnic_fcoe_start_fcf_disc(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + vlan->sol_count = 1; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + + /* start the solicitation */ + fcoe_ctlr_link_up(&fnic->ctlr); + + sol_time = jiffies + msecs_to_jiffies(FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); +} + +static int fnic_fcoe_vlan_check(struct fnic *fnic, u16 flag) +{ + unsigned long flags; + struct fcoe_vlan *fvlan; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; + } + + fvlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + if (fvlan->state == FIP_VLAN_USED) { + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + + if (fvlan->state == FIP_VLAN_SENT) { + fvlan->state = FIP_VLAN_USED; + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return 0; + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + return -EINVAL; +} + +static void fnic_event_enq(struct fnic *fnic, enum fnic_evt ev) +{ + struct fnic_event *fevt; + unsigned long flags; + + fevt = kmalloc(sizeof(*fevt), GFP_ATOMIC); + if (!fevt) + return; + + fevt->fnic = fnic; + fevt->event = ev; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + list_add_tail(&fevt->list, &fnic->evlist); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + schedule_work(&fnic->event_work); +} + +static int fnic_fcoe_handle_fip_frame(struct fnic *fnic, struct sk_buff *skb) +{ + struct fip_header *fiph; + int ret = 1; + u16 op; + u8 sub; + + if (!skb || !(skb->data)) + return -1; + + if (skb_linearize(skb)) + goto drop; + + fiph = (struct fip_header *)skb->data; + op = ntohs(fiph->fip_op); + sub = fiph->fip_subcode; + + if (FIP_VER_DECAPS(fiph->fip_ver) != FIP_VER) + goto drop; + + if (ntohs(fiph->fip_dl_len) * FIP_BPW + sizeof(*fiph) > skb->len) + goto drop; + + if (op == FIP_OP_DISC && sub == FIP_SC_ADV) { + if (fnic_fcoe_vlan_check(fnic, ntohs(fiph->fip_flags))) + goto drop; + /* pass it on to fcoe */ + ret = 1; + } else if (op == FIP_OP_VLAN && sub == FIP_SC_VL_REP) { + /* set the vlan as used */ + fnic_fcoe_process_vlan_resp(fnic, skb); + ret = 0; + } else if (op == FIP_OP_CTRL && sub == FIP_SC_CLR_VLINK) { + /* received CVL request, restart vlan disc */ + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + /* pass it on to fcoe */ + ret = 1; + } +drop: + return ret; +} + +void fnic_handle_fip_frame(struct work_struct *work) +{ + struct fnic *fnic = container_of(work, struct fnic, fip_frame_work); + unsigned long flags; + struct sk_buff *skb; + struct ethhdr *eh; + + while ((skb = 
skb_dequeue(&fnic->fip_frame_queue))) { + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + dev_kfree_skb(skb); + return; + } + /* + * If we're in a transitional state, just re-queue and return. + * The queue will be serviced when we get to a stable state. + */ + if (fnic->state != FNIC_IN_FC_MODE && + fnic->state != FNIC_IN_ETH_MODE) { + skb_queue_head(&fnic->fip_frame_queue, skb); + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + eh = (struct ethhdr *)skb->data; + if (eh->h_proto == htons(ETH_P_FIP)) { + skb_pull(skb, sizeof(*eh)); + if (fnic_fcoe_handle_fip_frame(fnic, skb) <= 0) { + dev_kfree_skb(skb); + continue; + } + /* + * If there's FLOGI rejects - clear all + * fcf's & restart from scratch + */ + if (is_fnic_fip_flogi_reject(&fnic->ctlr, skb)) { + shost_printk(KERN_INFO, fnic->lport->host, + "Trigger a Link down - VLAN Disc\n"); + fcoe_ctlr_link_down(&fnic->ctlr); + /* start FCoE VLAN discovery */ + fnic_fcoe_send_vlan_req(fnic); + dev_kfree_skb(skb); + continue; + } + fcoe_ctlr_recv(&fnic->ctlr, skb); + continue; + } + } +} + /** * fnic_import_rq_eth_pkt() - handle received FCoE or FIP frame. * @fnic: fnic instance. @@ -150,8 +603,14 @@ static inline int fnic_import_rq_eth_pkt(struct fnic *fnic, struct sk_buff *skb) skb_reset_mac_header(skb); } if (eh->h_proto == htons(ETH_P_FIP)) { - skb_pull(skb, sizeof(*eh)); - fcoe_ctlr_recv(&fnic->ctlr, skb); + if (!(fnic->config.flags & VFCF_FIP_CAPABLE)) { + printk(KERN_ERR "Dropped FIP frame, as firmware " + "uses non-FIP mode, Enable FIP " + "using UCSM\n"); + goto drop; + } + skb_queue_tail(&fnic->fip_frame_queue, skb); + queue_work(fnic_fip_queue, &fnic->fip_frame_work); return 1; /* let caller know packet was used */ } if (eh->h_proto != htons(ETH_P_FCOE)) @@ -720,3 +1179,104 @@ void fnic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf) dev_kfree_skb(fp_skb(fp)); buf->os_buf = NULL; } + +void fnic_fcoe_reset_vlans(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + struct fcoe_vlan *next; + + /* + * indicate a link down to fcoe so that all fcf's are free'd + * might not be required since we did this before sending vlan + * discovery request + */ + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (!list_empty(&fnic->vlans)) { + list_for_each_entry_safe(vlan, next, &fnic->vlans, list) { + list_del(&vlan->list); + kfree(vlan); + } + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); +} + +void fnic_handle_fip_timer(struct fnic *fnic) +{ + unsigned long flags; + struct fcoe_vlan *vlan; + u64 sol_time; + + spin_lock_irqsave(&fnic->fnic_lock, flags); + if (fnic->stop_rx_link_events) { + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + return; + } + spin_unlock_irqrestore(&fnic->fnic_lock, flags); + + if (fnic->ctlr.mode == FIP_ST_NON_FIP) + return; + + spin_lock_irqsave(&fnic->vlans_lock, flags); + if (list_empty(&fnic->vlans)) { + /* no vlans available, try again */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, list); + shost_printk(KERN_DEBUG, fnic->lport->host, + "fip_timer: vlan %d state %d sol_count %d\n", + vlan->vid, vlan->state, vlan->sol_count); + switch (vlan->state) { + case FIP_VLAN_USED: + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "FIP VLAN 
is selected for FC transaction\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + break; + case FIP_VLAN_FAILED: + /* if all vlans are in failed state, restart vlan disc */ + FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, + "Start VLAN Discovery\n"); + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + break; + case FIP_VLAN_SENT: + if (vlan->sol_count >= FCOE_CTLR_MAX_SOL) { + /* + * no response on this vlan, remove from the list. + * Try the next vlan + */ + shost_printk(KERN_INFO, fnic->lport->host, + "Dequeue this VLAN ID %d from list\n", + vlan->vid); + list_del(&vlan->list); + kfree(vlan); + vlan = NULL; + if (list_empty(&fnic->vlans)) { + /* we exhausted all vlans, restart vlan disc */ + spin_unlock_irqrestore(&fnic->vlans_lock, + flags); + shost_printk(KERN_INFO, fnic->lport->host, + "fip_timer: vlan list empty, " + "trigger vlan disc\n"); + fnic_event_enq(fnic, FNIC_EVT_START_VLAN_DISC); + return; + } + /* check the next vlan */ + vlan = list_first_entry(&fnic->vlans, struct fcoe_vlan, + list); + fnic->set_vlan(fnic, vlan->vid); + vlan->state = FIP_VLAN_SENT; /* sent now */ + } + spin_unlock_irqrestore(&fnic->vlans_lock, flags); + vlan->sol_count++; + sol_time = jiffies + msecs_to_jiffies + (FCOE_CTLR_START_DELAY); + mod_timer(&fnic->fip_timer, round_jiffies(sol_time)); + break; + } +} diff --git a/trunk/drivers/scsi/fnic/fnic_fip.h b/trunk/drivers/scsi/fnic/fnic_fip.h new file mode 100644 index 000000000000..87e74c2ab971 --- /dev/null +++ b/trunk/drivers/scsi/fnic/fnic_fip.h @@ -0,0 +1,68 @@ +/* + * Copyright 2008 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * This program is free software; you may redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef _FNIC_FIP_H_ +#define _FNIC_FIP_H_ + + +#define FCOE_CTLR_START_DELAY 2000 /* ms after first adv. to choose FCF */ +#define FCOE_CTLR_FIPVLAN_TOV 2000 /* ms after FIP VLAN disc */ +#define FCOE_CTLR_MAX_SOL 8 + +#define FINC_MAX_FLOGI_REJECTS 8 + +/* + * FIP_DT_VLAN descriptor. + */ +struct fip_vlan_desc { + struct fip_desc fd_desc; + __be16 fd_vlan; +} __attribute__((packed)); + +struct vlan { + __be16 vid; + __be16 type; +}; + +/* + * VLAN entry. + */ +struct fcoe_vlan { + struct list_head list; + u16 vid; /* vlan ID */ + u16 sol_count; /* no. 
of sols sent */ + u16 state; /* state */ +}; + +enum fip_vlan_state { + FIP_VLAN_AVAIL = 0, /* don't do anything */ + FIP_VLAN_SENT = 1, /* sent */ + FIP_VLAN_USED = 2, /* succeed */ + FIP_VLAN_FAILED = 3, /* failed to response */ +}; + +struct fip_vlan { + struct ethhdr eth; + struct fip_header fip; + struct { + struct fip_mac_desc mac; + struct fip_wwn_desc wwnn; + } desc; +}; + +#endif /* __FINC_FIP_H_ */ diff --git a/trunk/drivers/scsi/fnic/fnic_main.c b/trunk/drivers/scsi/fnic/fnic_main.c index d601ac543c52..5f09d1814d26 100644 --- a/trunk/drivers/scsi/fnic/fnic_main.c +++ b/trunk/drivers/scsi/fnic/fnic_main.c @@ -39,6 +39,7 @@ #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" +#include "fnic_fip.h" #include "fnic.h" #define PCI_DEVICE_ID_CISCO_FNIC 0x0045 @@ -292,6 +293,13 @@ static void fnic_notify_timer(unsigned long data) round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD)); } +static void fnic_fip_notify_timer(unsigned long data) +{ + struct fnic *fnic = (struct fnic *)data; + + fnic_handle_fip_timer(fnic); +} + static void fnic_notify_timer_start(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { @@ -403,6 +411,12 @@ static u8 *fnic_get_mac(struct fc_lport *lport) return fnic->data_src_addr; } +static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id) +{ + u16 old_vlan; + old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id); +} + static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct Scsi_Host *host; @@ -620,7 +634,29 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0); vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS); vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr); + fnic->set_vlan = fnic_set_vlan; fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO); + setup_timer(&fnic->fip_timer, fnic_fip_notify_timer, + (unsigned long)fnic); + spin_lock_init(&fnic->vlans_lock); + INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); + INIT_WORK(&fnic->event_work, fnic_handle_event); + skb_queue_head_init(&fnic->fip_frame_queue); + spin_lock_irqsave(&fnic_list_lock, flags); + if (!fnic_fip_queue) { + fnic_fip_queue = + create_singlethread_workqueue("fnic_fip_q"); + if (!fnic_fip_queue) { + spin_unlock_irqrestore(&fnic_list_lock, flags); + printk(KERN_ERR PFX "fnic FIP work queue " + "create failed\n"); + err = -ENOMEM; + goto err_out_free_max_pool; + } + } + spin_unlock_irqrestore(&fnic_list_lock, flags); + INIT_LIST_HEAD(&fnic->evlist); + INIT_LIST_HEAD(&fnic->vlans); } else { shost_printk(KERN_INFO, fnic->lport->host, "firmware uses non-FIP mode\n"); @@ -807,6 +843,13 @@ static void fnic_remove(struct pci_dev *pdev) skb_queue_purge(&fnic->frame_queue); skb_queue_purge(&fnic->tx_queue); + if (fnic->config.flags & VFCF_FIP_CAPABLE) { + del_timer_sync(&fnic->fip_timer); + skb_queue_purge(&fnic->fip_frame_queue); + fnic_fcoe_reset_vlans(fnic); + fnic_fcoe_evlist_free(fnic); + } + /* * Log off the fabric. This stops all remote ports, dns port, * logs off the fabric. 
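[Editor's note] fnic_probe() above creates the module-wide fnic_fip_queue only for the first FIP-capable adapter, re-checking the pointer under fnic_list_lock so concurrent probes cannot create it twice. A sketch of that lazy-singleton pattern with pthreads; the workqueue type here is a stand-in, not the kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct workqueue { const char *name; };      /* stand-in resource */

static struct workqueue *fip_queue;          /* shared, created once */
static pthread_mutex_t   list_lock = PTHREAD_MUTEX_INITIALIZER;

static struct workqueue *create_queue(const char *name)
{
    struct workqueue *wq = malloc(sizeof(*wq));
    if (wq)
        wq->name = name;
    return wq;
}

/* Called from every probe(); only the first caller allocates. */
static int probe_get_fip_queue(void)
{
    int err = 0;

    pthread_mutex_lock(&list_lock);
    if (!fip_queue) {                        /* re-check under the lock */
        fip_queue = create_queue("fnic_fip_q");
        if (!fip_queue)
            err = -1;                        /* -ENOMEM in the driver */
    }
    pthread_mutex_unlock(&list_lock);
    return err;
}

int main(void)
{
    if (probe_get_fip_queue() == 0 && probe_get_fip_queue() == 0)
        printf("queue %s created once\n", fip_queue->name);
    free(fip_queue);
    return 0;
}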
This flushes all rport, disc, lport work @@ -889,8 +932,8 @@ static int __init fnic_init_module(void) len = sizeof(struct fnic_sgl_list); fnic_sgl_cache[FNIC_SGL_CACHE_MAX] = kmem_cache_create ("fnic_sgl_max", len + FNIC_SG_DESC_ALIGN, FNIC_SG_DESC_ALIGN, - SLAB_HWCACHE_ALIGN, - NULL); + SLAB_HWCACHE_ALIGN, + NULL); if (!fnic_sgl_cache[FNIC_SGL_CACHE_MAX]) { printk(KERN_ERR PFX "failed to create fnic max sgl slab\n"); err = -ENOMEM; @@ -951,6 +994,10 @@ static void __exit fnic_cleanup_module(void) { pci_unregister_driver(&fnic_driver); destroy_workqueue(fnic_event_queue); + if (fnic_fip_queue) { + flush_workqueue(fnic_fip_queue); + destroy_workqueue(fnic_fip_queue); + } kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_MAX]); kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]); kmem_cache_destroy(fnic_io_req_cache); diff --git a/trunk/drivers/scsi/fnic/vnic_dev.c b/trunk/drivers/scsi/fnic/vnic_dev.c index b576be734e2e..9795d6f3e197 100644 --- a/trunk/drivers/scsi/fnic/vnic_dev.c +++ b/trunk/drivers/scsi/fnic/vnic_dev.c @@ -584,6 +584,16 @@ int vnic_dev_init(struct vnic_dev *vdev, int arg) return vnic_dev_cmd(vdev, CMD_INIT, &a0, &a1, wait); } +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, u16 new_default_vlan) +{ + u64 a0 = new_default_vlan, a1 = 0; + int wait = 1000; + int old_vlan = 0; + + old_vlan = vnic_dev_cmd(vdev, CMD_SET_DEFAULT_VLAN, &a0, &a1, wait); + return (u16)old_vlan; +} + int vnic_dev_link_status(struct vnic_dev *vdev) { if (vdev->linkstatus) diff --git a/trunk/drivers/scsi/fnic/vnic_dev.h b/trunk/drivers/scsi/fnic/vnic_dev.h index f9935a8a5a09..40d4195f562b 100644 --- a/trunk/drivers/scsi/fnic/vnic_dev.h +++ b/trunk/drivers/scsi/fnic/vnic_dev.h @@ -148,6 +148,8 @@ int vnic_dev_disable(struct vnic_dev *vdev); int vnic_dev_open(struct vnic_dev *vdev, int arg); int vnic_dev_open_done(struct vnic_dev *vdev, int *done); int vnic_dev_init(struct vnic_dev *vdev, int arg); +u16 vnic_dev_set_default_vlan(struct vnic_dev *vdev, + u16 new_default_vlan); int vnic_dev_soft_reset(struct vnic_dev *vdev, int arg); int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done); void vnic_dev_set_intr_mode(struct vnic_dev *vdev, diff --git a/trunk/drivers/scsi/fnic/vnic_devcmd.h b/trunk/drivers/scsi/fnic/vnic_devcmd.h index 7c9ccbd4134b..3e2fcbda6aed 100644 --- a/trunk/drivers/scsi/fnic/vnic_devcmd.h +++ b/trunk/drivers/scsi/fnic/vnic_devcmd.h @@ -196,6 +196,73 @@ enum vnic_devcmd_cmd { /* undo initialize of virtual link */ CMD_DEINIT = _CMDCNW(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 34), + + /* check fw capability of a cmd: + * in: (u32)a0=cmd + * out: (u32)a0=errno, 0:valid cmd, a1=supported VNIC_STF_* bits */ + CMD_CAPABILITY = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 36), + + /* persistent binding info + * in: (u64)a0=paddr of arg + * (u32)a1=CMD_PERBI_XXX */ + CMD_PERBI = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 37), + + /* Interrupt Assert Register functionality + * in: (u16)a0=interrupt number to assert + */ + CMD_IAR = _CMDCNW(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 38), + + /* initiate hangreset, like softreset after hang detected */ + CMD_HANG_RESET = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 39), + + /* hangreset status: + * out: a0=0 reset complete, a0=1 reset in progress */ + CMD_HANG_RESET_STATUS = _CMDC(_CMD_DIR_READ, _CMD_VTYPE_ALL, 40), + + /* + * Set hw ingress packet vlan rewrite mode: + * in: (u32)a0=new vlan rewrite mode + * out: (u32)a0=old vlan rewrite mode */ + CMD_IG_VLAN_REWRITE_MODE = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 41), + + /* + * in: (u16)a0=bdf of target vnic + * (u32)a1=cmd to 
proxy + * a2-a15=args to cmd in a1 + * out: (u32)a0=status of proxied cmd + * a1-a15=out args of proxied cmd */ + CMD_PROXY_BY_BDF = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 42), + + /* + * As for BY_BDF except a0 is index of hvnlink subordinate vnic + * or SR-IOV virtual vnic + */ + CMD_PROXY_BY_INDEX = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 43), + + /* + * For HPP toggle: + * adapter-info-get + * in: (u64)a0=phsical address of buffer passed in from caller. + * (u16)a1=size of buffer specified in a0. + * out: (u64)a0=phsical address of buffer passed in from caller. + * (u16)a1=actual bytes from VIF-CONFIG-INFO TLV, or + * 0 if no VIF-CONFIG-INFO TLV was ever received. */ + CMD_CONFIG_INFO_GET = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 44), + + /* + * INT13 API: (u64)a0=paddr to vnic_int13_params struct + * (u32)a1=INT13_CMD_xxx + */ + CMD_INT13_ALL = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 45), + + /* + * Set default vlan: + * in: (u16)a0=new default vlan + * (u16)a1=zero for overriding vlan with param a0, + * non-zero for resetting vlan to the default + * out: (u16)a0=old default vlan + */ + CMD_SET_DEFAULT_VLAN = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 46) }; /* flags for CMD_OPEN */ diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvfc.c b/trunk/drivers/scsi/ibmvscsi/ibmvfc.c index cc82d0f322b6..4e31caa21ddf 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/trunk/drivers/scsi/ibmvscsi/ibmvfc.c @@ -2179,7 +2179,7 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) return 0; } - if (vhost->state == IBMVFC_ACTIVE) { + if (vhost->logged_in) { evt = ibmvfc_get_event(vhost); ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT); @@ -2190,7 +2190,12 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) tmf->common.length = sizeof(*tmf); tmf->scsi_id = rport->port_id; int_to_scsilun(sdev->lun, &tmf->lun); - tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + if (!(vhost->login_buf->resp.capabilities & IBMVFC_CAN_SUPPRESS_ABTS)) + type &= ~IBMVFC_TMF_SUPPRESS_ABTS; + if (vhost->state == IBMVFC_ACTIVE) + tmf->flags = (type | IBMVFC_TMF_LUA_VALID); + else + tmf->flags = ((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID); tmf->cancel_key = (unsigned long)sdev->hostdata; tmf->my_cancel_key = (unsigned long)starget->hostdata; @@ -2327,7 +2332,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) timeout = wait_for_completion_timeout(&evt->comp, timeout); if (!timeout) { - rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + rc = ibmvfc_cancel_all(sdev, 0); if (!rc) { rc = ibmvfc_wait_for_ops(vhost, sdev->hostdata, ibmvfc_match_key); if (rc == SUCCESS) @@ -2383,24 +2388,30 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) * @cmd: scsi command to abort * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, abort_rc; + int cancel_rc, block_rc; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - abort_rc = ibmvfc_abort_task_set(sdev); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); + ibmvfc_abort_task_set(sdev); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); - if (!cancel_rc && !abort_rc) + if (!cancel_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, 
ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2410,28 +2421,46 @@ static int ibmvfc_eh_abort_handler(struct scsi_cmnd *cmd) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_device_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); - int cancel_rc, reset_rc; + int cancel_rc, block_rc, reset_rc = 0; int rc = FAILED; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + if (block_rc != FAST_IO_FAIL) { + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_LUN_RESET); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_LUN_RESET, "LUN"); + } else + cancel_rc = ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, sdev, ibmvfc_match_lun); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } +/** + * ibmvfc_dev_cancel_all_noreset - Device iterated cancel all function + * @sdev: scsi device struct + * @data: return code + * + **/ +static void ibmvfc_dev_cancel_all_noreset(struct scsi_device *sdev, void *data) +{ + unsigned long *rc = data; + *rc |= ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); +} + /** * ibmvfc_dev_cancel_all_reset - Device iterated cancel all function * @sdev: scsi device struct @@ -2449,26 +2478,33 @@ static void ibmvfc_dev_cancel_all_reset(struct scsi_device *sdev, void *data) * @cmd: scsi command struct * * Returns: - * SUCCESS / FAILED + * SUCCESS / FAST_IO_FAIL / FAILED **/ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) { struct scsi_device *sdev = cmd->device; struct ibmvfc_host *vhost = shost_priv(sdev->host); struct scsi_target *starget = scsi_target(sdev); - int reset_rc; + int block_rc; + int reset_rc = 0; int rc = FAILED; unsigned long cancel_rc = 0; ENTER; - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); ibmvfc_wait_while_resetting(vhost); - starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); - reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + if (block_rc != FAST_IO_FAIL) { + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_reset); + reset_rc = ibmvfc_reset_device(sdev, IBMVFC_TARGET_RESET, "target"); + } else + starget_for_each_device(starget, &cancel_rc, ibmvfc_dev_cancel_all_noreset); if (!cancel_rc && !reset_rc) rc = ibmvfc_wait_for_ops(vhost, starget, ibmvfc_match_target); + if (block_rc == FAST_IO_FAIL && rc != FAILED) + rc = FAST_IO_FAIL; + LEAVE; return rc; } @@ -2480,12 +2516,16 @@ static int ibmvfc_eh_target_reset_handler(struct scsi_cmnd *cmd) **/ static int ibmvfc_eh_host_reset_handler(struct scsi_cmnd *cmd) { - int rc; + int rc, block_rc; struct ibmvfc_host *vhost = shost_priv(cmd->device->host); - fc_block_scsi_eh(cmd); + block_rc = fc_block_scsi_eh(cmd); dev_err(vhost->dev, "Resetting connection due to error recovery\n"); rc = ibmvfc_issue_fc_host_lip(vhost->host); + + if (block_rc == FAST_IO_FAIL) + return FAST_IO_FAIL; + return rc ? 
FAILED : SUCCESS; } @@ -2509,8 +2549,7 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport) dev_rport = starget_to_rport(scsi_target(sdev)); if (dev_rport != rport) continue; - ibmvfc_cancel_all(sdev, IBMVFC_TMF_ABORT_TASK_SET); - ibmvfc_abort_task_set(sdev); + ibmvfc_cancel_all(sdev, IBMVFC_TMF_SUPPRESS_ABTS); } rc = ibmvfc_wait_for_ops(vhost, rport, ibmvfc_match_rport); diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvfc.h b/trunk/drivers/scsi/ibmvscsi/ibmvfc.h index 3be8af624e6f..017a5290e8c1 100644 --- a/trunk/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/trunk/drivers/scsi/ibmvscsi/ibmvfc.h @@ -29,8 +29,8 @@ #include "viosrp.h" #define IBMVFC_NAME "ibmvfc" -#define IBMVFC_DRIVER_VERSION "1.0.10" -#define IBMVFC_DRIVER_DATE "(August 24, 2012)" +#define IBMVFC_DRIVER_VERSION "1.0.11" +#define IBMVFC_DRIVER_DATE "(April 12, 2013)" #define IBMVFC_DEFAULT_TIMEOUT 60 #define IBMVFC_ADISC_CANCEL_TIMEOUT 45 @@ -208,10 +208,10 @@ struct ibmvfc_npiv_login_resp { u16 error; u32 flags; #define IBMVFC_NATIVE_FC 0x01 -#define IBMVFC_CAN_FLUSH_ON_HALT 0x08 u32 reserved; u64 capabilities; #define IBMVFC_CAN_FLUSH_ON_HALT 0x08 +#define IBMVFC_CAN_SUPPRESS_ABTS 0x10 u32 max_cmds; u32 scsi_id_sz; u64 max_dma_len; @@ -351,6 +351,7 @@ struct ibmvfc_tmf { #define IBMVFC_TMF_LUN_RESET 0x10 #define IBMVFC_TMF_TGT_RESET 0x20 #define IBMVFC_TMF_LUA_VALID 0x40 +#define IBMVFC_TMF_SUPPRESS_ABTS 0x80 u32 cancel_key; u32 my_cancel_key; u32 pad; diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c index 2197b57fb225..82a3c1ec8706 100644 --- a/trunk/drivers/scsi/ipr.c +++ b/trunk/drivers/scsi/ipr.c @@ -4777,7 +4777,7 @@ static int ipr_eh_host_reset(struct scsi_cmnd *cmd) ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (!ioa_cfg->in_reset_reload) { + if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) { ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV); dev_err(&ioa_cfg->pdev->dev, "Adapter being reset as a result of error recovery.\n"); @@ -6421,7 +6421,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, { u32 ioadl_flags = 0; struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; - struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64; + struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64; struct ipr_ioadl64_desc *last_ioadl64 = NULL; int len = qc->nbytes; struct scatterlist *sg; @@ -6441,7 +6441,7 @@ static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd, ioarcb->ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg); ioarcb->u.sis64_addr_data.data_ioadl_addr = - cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl)); + cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64)); for_each_sg(qc->sg, sg, qc->n_elem, si) { ioadl64->flags = cpu_to_be32(ioadl_flags); @@ -6739,6 +6739,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; + int i; ENTER; if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { @@ -6750,6 +6751,13 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd) ioa_cfg->in_reset_reload = 0; ioa_cfg->reset_retries = 0; + for (i = 0; i < ioa_cfg->hrrq_num; i++) { + spin_lock(&ioa_cfg->hrrq[i]._lock); + ioa_cfg->hrrq[i].ioa_is_dead = 1; + spin_unlock(&ioa_cfg->hrrq[i]._lock); + } + wmb(); + list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); wake_up_all(&ioa_cfg->reset_wait_q); LEAVE; @@ -8651,7 
+8659,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev) spin_lock_irqsave(ioa_cfg->host->host_lock, flags); if (ioa_cfg->sdt_state == WAIT_FOR_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; - ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES; + ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1; ioa_cfg->in_ioa_bringdown = 1; for (i = 0; i < ioa_cfg->hrrq_num; i++) { spin_lock(&ioa_cfg->hrrq[i]._lock); diff --git a/trunk/drivers/scsi/ipr.h b/trunk/drivers/scsi/ipr.h index 21a6ff1ed5c6..a1fb840596ef 100644 --- a/trunk/drivers/scsi/ipr.h +++ b/trunk/drivers/scsi/ipr.h @@ -552,7 +552,7 @@ struct ipr_ioarcb_ata_regs { /* 22 bytes */ u8 hob_lbam; u8 hob_lbah; u8 ctl; -}__attribute__ ((packed, aligned(4))); +}__attribute__ ((packed, aligned(2))); struct ipr_ioadl_desc { __be32 flags_and_data_len; diff --git a/trunk/drivers/scsi/isci/remote_device.c b/trunk/drivers/scsi/isci/remote_device.c index c3aa6c5457b9..96a26f454673 100644 --- a/trunk/drivers/scsi/isci/remote_device.c +++ b/trunk/drivers/scsi/isci/remote_device.c @@ -1085,7 +1085,7 @@ static void sci_remote_device_ready_state_enter(struct sci_base_state_machine *s struct isci_host *ihost = idev->owning_port->owning_controller; struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { + if (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_SATA)) { sci_change_state(&idev->sm, SCI_STP_DEV_IDLE); } else if (dev_is_expander(dev)) { sci_change_state(&idev->sm, SCI_SMP_DEV_IDLE); @@ -1098,7 +1098,7 @@ static void sci_remote_device_ready_state_exit(struct sci_base_state_machine *sm struct isci_remote_device *idev = container_of(sm, typeof(*idev), sm); struct domain_device *dev = idev->domain_dev; - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { struct isci_host *ihost = idev->owning_port->owning_controller; isci_remote_device_not_ready(ihost, idev, diff --git a/trunk/drivers/scsi/isci/remote_device.h b/trunk/drivers/scsi/isci/remote_device.h index 7674caae1d88..47a013fffae7 100644 --- a/trunk/drivers/scsi/isci/remote_device.h +++ b/trunk/drivers/scsi/isci/remote_device.h @@ -297,7 +297,7 @@ static inline struct isci_remote_device *rnc_to_dev(struct sci_remote_node_conte static inline bool dev_is_expander(struct domain_device *dev) { - return dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV; + return dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE; } static inline void sci_remote_device_decrement_request_count(struct isci_remote_device *idev) diff --git a/trunk/drivers/scsi/isci/request.c b/trunk/drivers/scsi/isci/request.c index 9594ab62702b..e3e3bcbd5a9f 100644 --- a/trunk/drivers/scsi/isci/request.c +++ b/trunk/drivers/scsi/isci/request.c @@ -2978,7 +2978,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) /* all unaccelerated request types (non ssp or ncq) handled with * substates */ - if (!task && dev->dev_type == SAS_END_DEV) { + if (!task && dev->dev_type == SAS_END_DEVICE) { state = SCI_REQ_TASK_WAIT_TC_COMP; } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { state = SCI_REQ_SMP_WAIT_RESP; @@ -3101,7 +3101,7 @@ sci_io_request_construct(struct isci_host *ihost, if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_REMOTE_DEVICE; - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) /* pass */; else if (dev_is_sata(dev)) memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); 
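[Editor's note] The isci and libsas hunks around here are a mechanical rename: the private sas_dev_type enumerators (SAS_END_DEV, EDGE_DEV, FANOUT_DEV, SATA_DEV, ...) give way to the transport-class sas_device_type names, so helpers such as dev_is_expander() now test the shared enumerators. A compact sketch of the resulting helpers; the enumerator values below are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative subset of the unified enum; values are placeholders. */
enum sas_device_type {
    SAS_PHY_UNUSED,
    SAS_END_DEVICE,
    SAS_EDGE_EXPANDER_DEVICE,
    SAS_FANOUT_EXPANDER_DEVICE,
    SAS_SATA_DEV,
    SAS_SATA_PM,
    SAS_SATA_PENDING,
};

static inline bool dev_is_expander(enum sas_device_type t)
{
    return t == SAS_EDGE_EXPANDER_DEVICE ||
           t == SAS_FANOUT_EXPANDER_DEVICE;
}

static const char *dev_type_name(enum sas_device_type t)
{
    switch (t) {
    case SAS_END_DEVICE:              return "end device";
    case SAS_EDGE_EXPANDER_DEVICE:
    case SAS_FANOUT_EXPANDER_DEVICE:  return "expander";
    case SAS_SATA_DEV:
    case SAS_SATA_PM:
    case SAS_SATA_PENDING:            return "sata";
    default:                          return "unused";
    }
}

int main(void)
{
    enum sas_device_type t = SAS_EDGE_EXPANDER_DEVICE;
    printf("%s (expander: %d)\n", dev_type_name(t), dev_is_expander(t));
    return 0;
}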
@@ -3125,7 +3125,7 @@ enum sci_status sci_task_request_construct(struct isci_host *ihost, /* Build the common part of the request */ sci_general_request_construct(ihost, idev, ireq); - if (dev->dev_type == SAS_END_DEV || dev_is_sata(dev)) { + if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) { set_bit(IREQ_TMF, &ireq->flags); memset(ireq->tc, 0, sizeof(struct scu_task_context)); diff --git a/trunk/drivers/scsi/isci/task.c b/trunk/drivers/scsi/isci/task.c index b6f19a1db780..9bb020ac089c 100644 --- a/trunk/drivers/scsi/isci/task.c +++ b/trunk/drivers/scsi/isci/task.c @@ -250,7 +250,7 @@ static struct isci_request *isci_task_request_build(struct isci_host *ihost, } /* XXX convert to get this from task->tproto like other drivers */ - if (dev->dev_type == SAS_END_DEV) { + if (dev->dev_type == SAS_END_DEVICE) { isci_tmf->proto = SAS_PROTOCOL_SSP; status = sci_task_request_construct_ssp(ireq); if (status != SCI_SUCCESS) diff --git a/trunk/drivers/scsi/libsas/sas_ata.c b/trunk/drivers/scsi/libsas/sas_ata.c index bdb81cda8401..161c98efade9 100644 --- a/trunk/drivers/scsi/libsas/sas_ata.c +++ b/trunk/drivers/scsi/libsas/sas_ata.c @@ -285,14 +285,14 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) if (phy->attached_tproto & SAS_PROTOCOL_STP) dev->tproto = phy->attached_tproto; if (phy->attached_sata_dev) - dev->tproto |= SATA_DEV; + dev->tproto |= SAS_SATA_DEV; - if (phy->attached_dev_type == SATA_PENDING) - dev->dev_type = SATA_PENDING; + if (phy->attached_dev_type == SAS_SATA_PENDING) + dev->dev_type = SAS_SATA_PENDING; else { int res; - dev->dev_type = SATA_DEV; + dev->dev_type = SAS_SATA_DEV; res = sas_get_report_phy_sata(dev->parent, phy->phy_id, &dev->sata_dev.rps_resp); if (res) { @@ -314,7 +314,7 @@ static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy) int res; /* we weren't pending, so successfully end the reset sequence now */ - if (dev->dev_type != SATA_PENDING) + if (dev->dev_type != SAS_SATA_PENDING) return 1; /* hmmm, if this succeeds do we need to repost the domain_device to the @@ -348,9 +348,9 @@ static int smp_ata_check_ready(struct ata_link *link) return 0; switch (ex_phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: return 0; - case SAS_END_DEV: + case SAS_END_DEVICE: if (ex_phy->attached_sata_dev) return sas_ata_clear_pending(dev, ex_phy); default: @@ -631,7 +631,7 @@ static void sas_get_ata_command_set(struct domain_device *dev) struct dev_to_host_fis *fis = (struct dev_to_host_fis *) dev->frame_rcvd; - if (dev->dev_type == SATA_PENDING) + if (dev->dev_type == SAS_SATA_PENDING) return; if ((fis->sector_count == 1 && /* ATA */ @@ -797,7 +797,7 @@ int sas_discover_sata(struct domain_device *dev) { int res; - if (dev->dev_type == SATA_PM) + if (dev->dev_type == SAS_SATA_PM) return -ENODEV; sas_get_ata_command_set(dev); diff --git a/trunk/drivers/scsi/libsas/sas_discover.c b/trunk/drivers/scsi/libsas/sas_discover.c index a0c3003e0c7d..62b58d38ce2e 100644 --- a/trunk/drivers/scsi/libsas/sas_discover.c +++ b/trunk/drivers/scsi/libsas/sas_discover.c @@ -39,11 +39,11 @@ void sas_init_dev(struct domain_device *dev) { switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: INIT_LIST_HEAD(&dev->ex_dev.children); mutex_init(&dev->ex_dev.cmd_mutex); break; @@ -93,9 +93,9 @@ static int sas_get_port_device(struct asd_sas_port *port) if 
(fis->interrupt_reason == 1 && fis->lbal == 1 && fis->byte_count_low==0x69 && fis->byte_count_high == 0x96 && (fis->device & ~0x10) == 0) - dev->dev_type = SATA_PM; + dev->dev_type = SAS_SATA_PM; else - dev->dev_type = SATA_DEV; + dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; } else { struct sas_identify_frame *id = @@ -109,21 +109,21 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->port = port; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: rc = sas_ata_init(dev); if (rc) { rphy = NULL; break; } /* fall through */ - case SAS_END_DEV: + case SAS_END_DEVICE: rphy = sas_end_device_alloc(port->port); break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(port->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -156,7 +156,7 @@ static int sas_get_port_device(struct asd_sas_port *port) dev->rphy = rphy; get_device(&dev->rphy->dev); - if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEV) + if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE) list_add_tail(&dev->disco_list_node, &port->disco_list); else { spin_lock_irq(&port->dev_list_lock); @@ -315,7 +315,7 @@ void sas_free_device(struct kref *kref) dev->phy = NULL; /* remove the phys and ports, everything else should be gone */ - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) kfree(dev->ex_dev.ex_phy); if (dev_is_sata(dev) && dev->sata_dev.ap) { @@ -343,7 +343,7 @@ static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_d spin_unlock_irq(&port->dev_list_lock); spin_lock_irq(&ha->lock); - if (dev->dev_type == SAS_END_DEV && + if (dev->dev_type == SAS_END_DEVICE && !list_empty(&dev->ssp_dev.eh_list_node)) { list_del_init(&dev->ssp_dev.eh_list_node); ha->eh_active--; @@ -457,15 +457,15 @@ static void sas_discover_domain(struct work_struct *work) task_pid_nr(current)); switch (dev->dev_type) { - case SAS_END_DEV: + case SAS_END_DEVICE: error = sas_discover_end_dev(dev); break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: error = sas_discover_root_expander(dev); break; - case SATA_DEV: - case SATA_PM: + case SAS_SATA_DEV: + case SAS_SATA_PM: #ifdef CONFIG_SCSI_SAS_ATA error = sas_discover_sata(dev); break; diff --git a/trunk/drivers/scsi/libsas/sas_expander.c b/trunk/drivers/scsi/libsas/sas_expander.c index f42b0e15410f..446b85110a1f 100644 --- a/trunk/drivers/scsi/libsas/sas_expander.c +++ b/trunk/drivers/scsi/libsas/sas_expander.c @@ -183,21 +183,21 @@ static char sas_route_char(struct domain_device *dev, struct ex_phy *phy) } } -static enum sas_dev_type to_dev_type(struct discover_resp *dr) +static enum sas_device_type to_dev_type(struct discover_resp *dr) { /* This is detecting a failure to transmit initial dev to host * FIS as described in section J.5 of sas-2 r16 */ - if (dr->attached_dev_type == NO_DEVICE && dr->attached_sata_dev && + if (dr->attached_dev_type == SAS_PHY_UNUSED && dr->attached_sata_dev && dr->linkrate >= SAS_LINK_RATE_1_5_GBPS) - return SATA_PENDING; + return SAS_SATA_PENDING; else return dr->attached_dev_type; } static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; enum sas_linkrate linkrate; u8 sas_addr[SAS_ADDR_SIZE]; struct smp_resp *resp = rsp; @@ 
-238,7 +238,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* Handle vacant phy - rest of dr data is not valid so skip it */ if (phy->phy_state == PHY_VACANT) { memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); - phy->attached_dev_type = NO_DEVICE; + phy->attached_dev_type = SAS_PHY_UNUSED; if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) { phy->phy_id = phy_id; goto skip; @@ -259,7 +259,7 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) /* help some expanders that fail to zero sas_address in the 'no * device' case */ - if (phy->attached_dev_type == NO_DEVICE || + if (phy->attached_dev_type == SAS_PHY_UNUSED || phy->linkrate < SAS_LINK_RATE_1_5_GBPS) memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); else @@ -292,13 +292,13 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) out: switch (phy->attached_dev_type) { - case SATA_PENDING: + case SAS_SATA_PENDING: type = "stp pending"; break; - case NO_DEVICE: + case SAS_PHY_UNUSED: type = "no device"; break; - case SAS_END_DEV: + case SAS_END_DEVICE: if (phy->attached_iproto) { if (phy->attached_tproto) type = "host+target"; @@ -311,8 +311,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp) type = "ssp"; } break; - case EDGE_DEV: - case FANOUT_DEV: + case SAS_EDGE_EXPANDER_DEVICE: + case SAS_FANOUT_EXPANDER_DEVICE: type = "smp"; break; default: @@ -833,7 +833,7 @@ static struct domain_device *sas_ex_discover_end_dev( } else #endif if (phy->attached_tproto & SAS_PROTOCOL_SSP) { - child->dev_type = SAS_END_DEV; + child->dev_type = SAS_END_DEVICE; rphy = sas_end_device_alloc(phy->port); /* FIXME: error handling */ if (unlikely(!rphy)) @@ -932,11 +932,11 @@ static struct domain_device *sas_ex_discover_expander( switch (phy->attached_dev_type) { - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_EDGE_EXPANDER_DEVICE); break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy = sas_expander_alloc(phy->port, SAS_FANOUT_EXPANDER_DEVICE); break; @@ -1013,7 +1013,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) if (sas_dev_present_in_domain(dev->port, ex_phy->attached_sas_addr)) sas_ex_disable_port(dev, ex_phy->attached_sas_addr); - if (ex_phy->attached_dev_type == NO_DEVICE) { + if (ex_phy->attached_dev_type == SAS_PHY_UNUSED) { if (ex_phy->routing_attr == DIRECT_ROUTING) { memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); sas_configure_routing(dev, ex_phy->attached_sas_addr); @@ -1022,10 +1022,10 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } else if (ex_phy->linkrate == SAS_LINK_RATE_UNKNOWN) return 0; - if (ex_phy->attached_dev_type != SAS_END_DEV && - ex_phy->attached_dev_type != FANOUT_DEV && - ex_phy->attached_dev_type != EDGE_DEV && - ex_phy->attached_dev_type != SATA_PENDING) { + if (ex_phy->attached_dev_type != SAS_END_DEVICE && + ex_phy->attached_dev_type != SAS_FANOUT_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_EDGE_EXPANDER_DEVICE && + ex_phy->attached_dev_type != SAS_SATA_PENDING) { SAS_DPRINTK("unknown device type(0x%x) attached to ex %016llx " "phy 0x%x\n", ex_phy->attached_dev_type, SAS_ADDR(dev->sas_addr), @@ -1049,11 +1049,11 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) } switch (ex_phy->attached_dev_type) { - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: child = sas_ex_discover_end_dev(dev, phy_id); break; - case FANOUT_DEV: + case 
SAS_FANOUT_EXPANDER_DEVICE: if (SAS_ADDR(dev->port->disc.fanout_sas_addr)) { SAS_DPRINTK("second fanout expander %016llx phy 0x%x " "attached to ex %016llx phy 0x%x\n", @@ -1067,7 +1067,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id) memcpy(dev->port->disc.fanout_sas_addr, ex_phy->attached_sas_addr, SAS_ADDR_SIZE); /* fallthrough */ - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: child = sas_ex_discover_expander(dev, phy_id); break; default: @@ -1111,8 +1111,8 @@ static int sas_find_sub_addr(struct domain_device *dev, u8 *sub_addr) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == EDGE_DEV || - phy->attached_dev_type == FANOUT_DEV) && + if ((phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { memcpy(sub_addr, phy->attached_sas_addr,SAS_ADDR_SIZE); @@ -1130,8 +1130,8 @@ static int sas_check_level_subtractive_boundary(struct domain_device *dev) u8 sub_addr[8] = {0, }; list_for_each_entry(child, &ex->children, siblings) { - if (child->dev_type != EDGE_DEV && - child->dev_type != FANOUT_DEV) + if (child->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->dev_type != SAS_FANOUT_EXPANDER_DEVICE) continue; if (sub_addr[0] == 0) { sas_find_sub_addr(child, sub_addr); @@ -1208,7 +1208,7 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) int i; u8 *sub_sas_addr = NULL; - if (dev->dev_type != EDGE_DEV) + if (dev->dev_type != SAS_EDGE_EXPANDER_DEVICE) return 0; for (i = 0; i < ex->num_phys; i++) { @@ -1218,8 +1218,8 @@ static int sas_check_ex_subtractive_boundary(struct domain_device *dev) phy->phy_state == PHY_NOT_PRESENT) continue; - if ((phy->attached_dev_type == FANOUT_DEV || - phy->attached_dev_type == EDGE_DEV) && + if ((phy->attached_dev_type == SAS_FANOUT_EXPANDER_DEVICE || + phy->attached_dev_type == SAS_EDGE_EXPANDER_DEVICE) && phy->routing_attr == SUBTRACTIVE_ROUTING) { if (!sub_sas_addr) @@ -1245,8 +1245,8 @@ static void sas_print_parent_topology_bug(struct domain_device *child, struct ex_phy *child_phy) { static const char *ex_type[] = { - [EDGE_DEV] = "edge", - [FANOUT_DEV] = "fanout", + [SAS_EDGE_EXPANDER_DEVICE] = "edge", + [SAS_FANOUT_EXPANDER_DEVICE] = "fanout", }; struct domain_device *parent = child->parent; @@ -1321,8 +1321,8 @@ static int sas_check_parent_topology(struct domain_device *child) if (!child->parent) return 0; - if (child->parent->dev_type != EDGE_DEV && - child->parent->dev_type != FANOUT_DEV) + if (child->parent->dev_type != SAS_EDGE_EXPANDER_DEVICE && + child->parent->dev_type != SAS_FANOUT_EXPANDER_DEVICE) return 0; parent_ex = &child->parent->ex_dev; @@ -1341,8 +1341,8 @@ static int sas_check_parent_topology(struct domain_device *child) child_phy = &child_ex->ex_phy[parent_phy->attached_phy_id]; switch (child->parent->dev_type) { - case EDGE_DEV: - if (child->dev_type == FANOUT_DEV) { + case SAS_EDGE_EXPANDER_DEVICE: + if (child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { if (parent_phy->routing_attr != SUBTRACTIVE_ROUTING || child_phy->routing_attr != TABLE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1366,7 +1366,7 @@ static int sas_check_parent_topology(struct domain_device *child) } } break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: if (parent_phy->routing_attr != TABLE_ROUTING || child_phy->routing_attr != SUBTRACTIVE_ROUTING) { sas_print_parent_topology_bug(child, parent_phy, child_phy); @@ -1619,8 +1619,8 @@ static int 
sas_ex_level_discovery(struct asd_sas_port *port, const int level) struct domain_device *dev; list_for_each_entry(dev, &port->dev_list, dev_list_node) { - if (dev->dev_type == EDGE_DEV || - dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(dev->rphy); @@ -1720,7 +1720,7 @@ static int sas_get_phy_change_count(struct domain_device *dev, } static int sas_get_phy_attached_dev(struct domain_device *dev, int phy_id, - u8 *sas_addr, enum sas_dev_type *type) + u8 *sas_addr, enum sas_device_type *type) { int res; struct smp_resp *disc_resp; @@ -1849,7 +1849,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, SAS_DPRINTK("Expander phys DID NOT change\n"); } list_for_each_entry(ch, &ex->children, siblings) { - if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { + if (ch->dev_type == SAS_EDGE_EXPANDER_DEVICE || ch->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { res = sas_find_bcast_dev(ch, src_dev); if (*src_dev) return res; @@ -1866,8 +1866,8 @@ static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_devi list_for_each_entry_safe(child, n, &ex->children, siblings) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(port, child); else sas_unregister_dev(port, child); @@ -1887,8 +1887,8 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, if (SAS_ADDR(child->sas_addr) == SAS_ADDR(phy->attached_sas_addr)) { set_bit(SAS_DEV_GONE, &child->state); - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) sas_unregister_ex_tree(parent->port, child); else sas_unregister_dev(parent->port, child); @@ -1916,8 +1916,8 @@ static int sas_discover_bfs_by_root_level(struct domain_device *root, int res = 0; list_for_each_entry(child, &ex_root->children, siblings) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) { + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { struct sas_expander_device *ex = rphy_to_expander_device(child->rphy); @@ -1970,8 +1970,8 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) list_for_each_entry(child, &dev->ex_dev.children, siblings) { if (SAS_ADDR(child->sas_addr) == SAS_ADDR(ex_phy->attached_sas_addr)) { - if (child->dev_type == EDGE_DEV || - child->dev_type == FANOUT_DEV) + if (child->dev_type == SAS_EDGE_EXPANDER_DEVICE || + child->dev_type == SAS_FANOUT_EXPANDER_DEVICE) res = sas_discover_bfs_by_root(child); break; } @@ -1979,16 +1979,16 @@ static int sas_discover_new(struct domain_device *dev, int phy_id) return res; } -static bool dev_type_flutter(enum sas_dev_type new, enum sas_dev_type old) +static bool dev_type_flutter(enum sas_device_type new, enum sas_device_type old) { if (old == new) return true; /* treat device directed resets as flutter, if we went - * SAS_END_DEV to SATA_PENDING the link needs recovery + * SAS_END_DEVICE to SAS_SATA_PENDING the link needs recovery */ - if ((old == SATA_PENDING && new == SAS_END_DEV) || - (old == SAS_END_DEV && new == SATA_PENDING)) + if ((old == SAS_SATA_PENDING && new == SAS_END_DEVICE) || + (old == SAS_END_DEVICE && new == SAS_SATA_PENDING)) return true; return 
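Several hunks above and below repeat the pair test for the two expander flavours of the renamed enum. A hypothetical predicate, not introduced by this patch, shows how that repeated condition could be expressed once:

/* Hypothetical helper, not part of this patch: true for either expander
 * flavour of the unified enum sas_device_type. */
static inline bool sas_dev_type_is_expander(enum sas_device_type type)
{
	return type == SAS_EDGE_EXPANDER_DEVICE ||
	       type == SAS_FANOUT_EXPANDER_DEVICE;
}

/* A call site such as sas_find_bcast_dev() would then read:
 *	if (sas_dev_type_is_expander(ch->dev_type)) ...
 */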
false; @@ -1998,7 +1998,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) { struct expander_device *ex = &dev->ex_dev; struct ex_phy *phy = &ex->ex_phy[phy_id]; - enum sas_dev_type type = NO_DEVICE; + enum sas_device_type type = SAS_PHY_UNUSED; u8 sas_addr[8]; int res; @@ -2032,7 +2032,7 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last) sas_ex_phy_discover(dev, phy_id); - if (ata_dev && phy->attached_dev_type == SATA_PENDING) + if (ata_dev && phy->attached_dev_type == SAS_SATA_PENDING) action = ", needs recovery"; SAS_DPRINTK("ex %016llx phy 0x%x broadcast flutter%s\n", SAS_ADDR(dev->sas_addr), phy_id, action); diff --git a/trunk/drivers/scsi/libsas/sas_internal.h b/trunk/drivers/scsi/libsas/sas_internal.h index 1de67964e5a1..7e7ba83f0a21 100644 --- a/trunk/drivers/scsi/libsas/sas_internal.h +++ b/trunk/drivers/scsi/libsas/sas_internal.h @@ -131,16 +131,16 @@ static inline void sas_fill_in_rphy(struct domain_device *dev, rphy->identify.initiator_port_protocols = dev->iproto; rphy->identify.target_port_protocols = dev->tproto; switch (dev->dev_type) { - case SATA_DEV: + case SAS_SATA_DEV: /* FIXME: need sata device type */ - case SAS_END_DEV: - case SATA_PENDING: + case SAS_END_DEVICE: + case SAS_SATA_PENDING: rphy->identify.device_type = SAS_END_DEVICE; break; - case EDGE_DEV: + case SAS_EDGE_EXPANDER_DEVICE: rphy->identify.device_type = SAS_EDGE_EXPANDER_DEVICE; break; - case FANOUT_DEV: + case SAS_FANOUT_EXPANDER_DEVICE: rphy->identify.device_type = SAS_FANOUT_EXPANDER_DEVICE; break; default: diff --git a/trunk/drivers/scsi/libsas/sas_port.c b/trunk/drivers/scsi/libsas/sas_port.c index 1398b714c018..d3c5297c6c89 100644 --- a/trunk/drivers/scsi/libsas/sas_port.c +++ b/trunk/drivers/scsi/libsas/sas_port.c @@ -69,7 +69,7 @@ static void sas_resume_port(struct asd_sas_phy *phy) continue; } - if (dev->dev_type == EDGE_DEV || dev->dev_type == FANOUT_DEV) { + if (dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) { dev->ex_dev.ex_change_count = -1; for (i = 0; i < dev->ex_dev.num_phys; i++) { struct ex_phy *phy = &dev->ex_dev.ex_phy[i]; diff --git a/trunk/drivers/scsi/lpfc/lpfc.h b/trunk/drivers/scsi/lpfc/lpfc.h index 7706c99ec8bb..bcc56cac4fd8 100644 --- a/trunk/drivers/scsi/lpfc/lpfc.h +++ b/trunk/drivers/scsi/lpfc/lpfc.h @@ -46,10 +46,15 @@ struct lpfc_sli2_slim; #define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128 /* sg element count per scsi cmnd for menlo needs nearly twice as for firmware downloads using bsg */ -#define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */ + +#define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ +#define LPFC_MAX_SG_SLI4_SEG_CNT_DIF 128 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ #define LPFC_MAX_SG_SEG_CNT 4096 /* sg element count per scsi cmnd */ +#define LPFC_MAX_SGL_SEG_CNT 512 /* SGL element count per scsi cmnd */ +#define LPFC_MAX_BPL_SEG_CNT 4096 /* BPL element count per scsi cmnd */ + #define LPFC_MAX_SGE_SIZE 0x80000000 /* Maximum data allowed in a SGE */ -#define LPFC_MAX_PROT_SG_SEG_CNT 4096 /* prot sg element count per scsi cmd*/ #define LPFC_IOCB_LIST_CNT 2250 /* list of IOCBs for fast-path usage. 
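A theme running through the lpfc changes that follow is replacing open-coded "n * HZ" timer arithmetic with msecs_to_jiffies(), so the intended wall-clock interval no longer depends on CONFIG_HZ. A minimal before/after sketch of the pattern; the timer field name is taken from one of the hunks and the wrapper function is purely illustrative:

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Before: the call site assumes HZ ticks per second.
 *	mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60);
 * After: the interval is written in milliseconds and converted once. */
static void sketch_rearm_fdmi_timer(struct timer_list *tmo)
{
	mod_timer(tmo, jiffies + msecs_to_jiffies(1000 * 60));
}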
*/ #define LPFC_Q_RAMP_UP_INTERVAL 120 /* lun q_depth ramp up interval */ #define LPFC_VNAME_LEN 100 /* vport symbolic name length */ @@ -66,8 +71,10 @@ struct lpfc_sli2_slim; * queue depths when there are driver resource error or Firmware * resource error. */ -#define QUEUE_RAMP_DOWN_INTERVAL (1 * HZ) /* 1 Second */ -#define QUEUE_RAMP_UP_INTERVAL (300 * HZ) /* 5 minutes */ +/* 1 Second */ +#define QUEUE_RAMP_DOWN_INTERVAL (msecs_to_jiffies(1000 * 1)) +/* 5 minutes */ +#define QUEUE_RAMP_UP_INTERVAL (msecs_to_jiffies(1000 * 300)) /* Number of exchanges reserved for discovery to complete */ #define LPFC_DISC_IOCB_BUFF_COUNT 20 @@ -671,6 +678,7 @@ struct lpfc_hba { uint32_t lmt; uint32_t fc_topology; /* link topology, from LINK INIT */ + uint32_t fc_topology_changed; /* link topology, from LINK INIT */ struct lpfc_stats fc_stat; @@ -701,9 +709,11 @@ struct lpfc_hba { uint32_t cfg_poll_tmo; uint32_t cfg_use_msi; uint32_t cfg_fcp_imax; + uint32_t cfg_fcp_cpu_map; uint32_t cfg_fcp_wq_count; uint32_t cfg_fcp_eq_count; uint32_t cfg_fcp_io_channel; + uint32_t cfg_total_seg_cnt; uint32_t cfg_sg_seg_cnt; uint32_t cfg_prot_sg_seg_cnt; uint32_t cfg_sg_dma_buf_size; @@ -804,8 +814,10 @@ struct lpfc_hba { uint64_t bg_reftag_err_cnt; /* fastpath list. */ - spinlock_t scsi_buf_list_lock; - struct list_head lpfc_scsi_buf_list; + spinlock_t scsi_buf_list_get_lock; /* SCSI buf alloc list lock */ + spinlock_t scsi_buf_list_put_lock; /* SCSI buf free list lock */ + struct list_head lpfc_scsi_buf_list_get; + struct list_head lpfc_scsi_buf_list_put; uint32_t total_scsi_bufs; struct list_head lpfc_iocb_list; uint32_t total_iocbq_bufs; diff --git a/trunk/drivers/scsi/lpfc/lpfc_attr.c b/trunk/drivers/scsi/lpfc/lpfc_attr.c index 9290713af253..3c5625b8b1f4 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_attr.c +++ b/trunk/drivers/scsi/lpfc/lpfc_attr.c @@ -674,6 +674,9 @@ lpfc_do_offline(struct lpfc_hba *phba, uint32_t type) int i; int rc; + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + return 0; + init_completion(&online_compl); rc = lpfc_workq_post_event(phba, &status, &online_compl, LPFC_EVT_OFFLINE_PREP); @@ -741,7 +744,8 @@ lpfc_selective_reset(struct lpfc_hba *phba) int status = 0; int rc; - if (!phba->cfg_enable_hba_reset) + if ((!phba->cfg_enable_hba_reset) || + (phba->pport->fc_flag & FC_OFFLINE_MODE)) return -EACCES; status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); @@ -895,6 +899,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode) pci_disable_sriov(pdev); phba->cfg_sriov_nr_virtfn = 0; } + status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE); if (status != 0) @@ -2801,6 +2806,8 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3054 lpfc_topology changed from %d to %d\n", prev_val, val); + if (prev_val != val && phba->sli_rev == LPFC_SLI_REV4) + phba->fc_topology_changed = 1; err = lpfc_issue_lip(lpfc_shost_from_vport(phba->pport)); if (err) { phba->cfg_topology = prev_val; @@ -3792,6 +3799,141 @@ lpfc_fcp_imax_init(struct lpfc_hba *phba, int val) static DEVICE_ATTR(lpfc_fcp_imax, S_IRUGO | S_IWUSR, lpfc_fcp_imax_show, lpfc_fcp_imax_store); +/** + * lpfc_state_show - Display current driver CPU affinity + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains text describing the state of the link. + * + * Returns: size of formatted string. 
+ **/ +static ssize_t +lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + struct lpfc_vector_map_info *cpup; + int idx, len = 0; + + if ((phba->sli_rev != LPFC_SLI_REV4) || + (phba->intr_type != MSIX)) + return len; + + switch (phba->cfg_fcp_cpu_map) { + case 0: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: No mapping (%d)\n", + phba->cfg_fcp_cpu_map); + return len; + case 1: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: HBA centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + case 2: + len += snprintf(buf + len, PAGE_SIZE-len, + "fcp_cpu_map: Driver centric mapping (%d): " + "%d online CPUs\n", + phba->cfg_fcp_cpu_map, + phba->sli4_hba.num_online_cpu); + break; + } + + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d\n", + idx, cpup->channel_id, cpup->phys_id, + cpup->core_id); + else + len += snprintf(buf + len, PAGE_SIZE-len, + "CPU %02d io_chan %02d " + "physid %d coreid %d IRQ %d\n", + idx, cpup->channel_id, cpup->phys_id, + cpup->core_id, cpup->irq); + + cpup++; + } + return len; +} + +/** + * lpfc_fcp_cpu_map_store - Change CPU affinity of driver vectors + * @dev: class device that is converted into a Scsi_host. + * @attr: device attribute, not used. + * @buf: one or more lpfc_polling_flags values. + * @count: not used. + * + * Returns: + * -EINVAL - Not implemented yet. + **/ +static ssize_t +lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int status = -EINVAL; + return status; +} + +/* +# lpfc_fcp_cpu_map: Defines how to map CPUs to IRQ vectors +# for the HBA. +# +# Value range is [0 to 2]. Default value is LPFC_DRIVER_CPU_MAP (2). +# 0 - Do not affinitze IRQ vectors +# 1 - Affintize HBA vectors with respect to each HBA +# (start with CPU0 for each HBA) +# 2 - Affintize HBA vectors with respect to the entire driver +# (round robin thru all CPUs across all HBAs) +*/ +static int lpfc_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; +module_param(lpfc_fcp_cpu_map, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(lpfc_fcp_cpu_map, + "Defines how to map CPUs to IRQ vectors per HBA"); + +/** + * lpfc_fcp_cpu_map_init - Set the initial sr-iov virtual function enable + * @phba: lpfc_hba pointer. + * @val: link speed value. + * + * Description: + * If val is in a valid range [0-2], then affinitze the adapter's + * MSIX vectors. + * + * Returns: + * zero if val saved. + * -EINVAL val out of range + **/ +static int +lpfc_fcp_cpu_map_init(struct lpfc_hba *phba, int val) +{ + if (phba->sli_rev != LPFC_SLI_REV4) { + phba->cfg_fcp_cpu_map = 0; + return 0; + } + + if (val >= LPFC_MIN_CPU_MAP && val <= LPFC_MAX_CPU_MAP) { + phba->cfg_fcp_cpu_map = val; + return 0; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3326 fcp_cpu_map: %d out of range, using default\n", + val); + phba->cfg_fcp_cpu_map = LPFC_DRIVER_CPU_MAP; + + return 0; +} + +static DEVICE_ATTR(lpfc_fcp_cpu_map, S_IRUGO | S_IWUSR, + lpfc_fcp_cpu_map_show, lpfc_fcp_cpu_map_store); + /* # lpfc_fcp_class: Determines FC class to use for the FCP protocol. # Value range is [2,3]. Default value is 3. 
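lpfc_fcp_cpu_map_init() above applies the driver's usual validate-or-default treatment to the new module parameter: keep a user value only when it is in range, otherwise log and fall back to the documented default rather than failing probe. A generic restatement of that idiom, with an invented wrapper name purely for illustration:

/* Sketch of the validate-or-default idiom used by lpfc_fcp_cpu_map_init(). */
static unsigned int sketch_param_or_default(int val, int min, int max,
					    unsigned int def)
{
	if (val >= min && val <= max)
		return val;
	/* the driver logs message 3326 here before falling back */
	return def;
}

/* e.g.	phba->cfg_fcp_cpu_map =
 *		sketch_param_or_default(lpfc_fcp_cpu_map, LPFC_MIN_CPU_MAP,
 *					LPFC_MAX_CPU_MAP, LPFC_DRIVER_CPU_MAP);
 */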
@@ -4009,12 +4151,11 @@ LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support"); # 0 = disabled (default) # 1 = enabled # Value range is [0,1]. Default value is 0. +# +# This feature in under investigation and may be supported in the future. */ unsigned int lpfc_fcp_look_ahead = LPFC_LOOK_AHEAD_OFF; -module_param(lpfc_fcp_look_ahead, uint, S_IRUGO); -MODULE_PARM_DESC(lpfc_fcp_look_ahead, "Look ahead for completions"); - /* # lpfc_prot_mask: i # - Bit mask of host protection capabilities used to register with the @@ -4071,16 +4212,23 @@ MODULE_PARM_DESC(lpfc_delay_discovery, /* * lpfc_sg_seg_cnt - Initial Maximum DMA Segment Count - * This value can be set to values between 64 and 256. The default value is + * This value can be set to values between 64 and 4096. The default value is * 64, but may be increased to allow for larger Max I/O sizes. The scsi layer * will be allowed to request I/Os of sizes up to (MAX_SEG_COUNT * SEG_SIZE). + * Because of the additional overhead involved in setting up T10-DIF, + * this parameter will be limited to 128 if BlockGuard is enabled under SLI4 + * and will be limited to 512 if BlockGuard is enabled under SLI3. */ LPFC_ATTR_R(sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, "Max Scatter Gather Segment Count"); -LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_PROT_SG_SEG_CNT, - LPFC_DEFAULT_PROT_SG_SEG_CNT, LPFC_MAX_PROT_SG_SEG_CNT, - "Max Protection Scatter Gather Segment Count"); +/* + * This parameter will be depricated, the driver cannot limit the + * protection data s/g list. + */ +LPFC_ATTR_R(prot_sg_seg_cnt, LPFC_DEFAULT_SG_SEG_CNT, + LPFC_DEFAULT_SG_SEG_CNT, LPFC_MAX_SG_SEG_CNT, + "Max Protection Scatter Gather Segment Count"); struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_bg_info, @@ -4141,6 +4289,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_poll_tmo, &dev_attr_lpfc_use_msi, &dev_attr_lpfc_fcp_imax, + &dev_attr_lpfc_fcp_cpu_map, &dev_attr_lpfc_fcp_wq_count, &dev_attr_lpfc_fcp_eq_count, &dev_attr_lpfc_fcp_io_channel, @@ -5123,6 +5272,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_enable_rrq_init(phba, lpfc_enable_rrq); lpfc_use_msi_init(phba, lpfc_use_msi); lpfc_fcp_imax_init(phba, lpfc_fcp_imax); + lpfc_fcp_cpu_map_init(phba, lpfc_fcp_cpu_map); lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count); lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count); lpfc_fcp_io_channel_init(phba, lpfc_fcp_io_channel); diff --git a/trunk/drivers/scsi/lpfc/lpfc_bsg.c b/trunk/drivers/scsi/lpfc/lpfc_bsg.c index 888666892004..094be2cad65b 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_bsg.c +++ b/trunk/drivers/scsi/lpfc/lpfc_bsg.c @@ -219,26 +219,35 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, unsigned int transfer_bytes, bytes_copied = 0; unsigned int sg_offset, dma_offset; unsigned char *dma_address, *sg_address; - struct scatterlist *sgel; LIST_HEAD(temp_list); - + struct sg_mapping_iter miter; + unsigned long flags; + unsigned int sg_flags = SG_MITER_ATOMIC; + bool sg_valid; list_splice_init(&dma_buffers->list, &temp_list); list_add(&dma_buffers->list, &temp_list); sg_offset = 0; - sgel = bsg_buffers->sg_list; + if (to_buffers) + sg_flags |= SG_MITER_FROM_SG; + else + sg_flags |= SG_MITER_TO_SG; + sg_miter_start(&miter, bsg_buffers->sg_list, bsg_buffers->sg_cnt, + sg_flags); + local_irq_save(flags); + sg_valid = sg_miter_next(&miter); list_for_each_entry(mp, &temp_list, list) { dma_offset = 0; - while (bytes_to_transfer && sgel && + while (bytes_to_transfer && sg_valid && (dma_offset < LPFC_BPL_SIZE)) 
{ dma_address = mp->virt + dma_offset; if (sg_offset) { /* Continue previous partial transfer of sg */ - sg_address = sg_virt(sgel) + sg_offset; - transfer_bytes = sgel->length - sg_offset; + sg_address = miter.addr + sg_offset; + transfer_bytes = miter.length - sg_offset; } else { - sg_address = sg_virt(sgel); - transfer_bytes = sgel->length; + sg_address = miter.addr; + transfer_bytes = miter.length; } if (bytes_to_transfer < transfer_bytes) transfer_bytes = bytes_to_transfer; @@ -252,12 +261,14 @@ lpfc_bsg_copy_data(struct lpfc_dmabuf *dma_buffers, sg_offset += transfer_bytes; bytes_to_transfer -= transfer_bytes; bytes_copied += transfer_bytes; - if (sg_offset >= sgel->length) { + if (sg_offset >= miter.length) { sg_offset = 0; - sgel = sg_next(sgel); + sg_valid = sg_miter_next(&miter); } } } + sg_miter_stop(&miter); + local_irq_restore(flags); list_del_init(&dma_buffers->list); list_splice(&temp_list, &dma_buffers->list); return bytes_copied; @@ -471,6 +482,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) cmdiocbq->context1 = dd_data; cmdiocbq->context2 = cmp; cmdiocbq->context3 = bmp; + cmdiocbq->context_un.ndlp = ndlp; dd_data->type = TYPE_IOCB; dd_data->set_job = job; dd_data->context_un.iocb.cmdiocbq = cmdiocbq; @@ -1508,6 +1520,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, ctiocb->context1 = dd_data; ctiocb->context2 = cmp; ctiocb->context3 = bmp; + ctiocb->context_un.ndlp = ndlp; ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp; dd_data->type = TYPE_IOCB; @@ -2576,7 +2589,8 @@ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, evt->wait_time_stamp = jiffies; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); if (list_empty(&evt->events_to_see)) ret_val = (time_left) ? -EINTR : -ETIMEDOUT; else { @@ -3151,7 +3165,8 @@ lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job) evt->waiting = 1; time_left = wait_event_interruptible_timeout( evt->wq, !list_empty(&evt->events_to_see), - ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ); + msecs_to_jiffies(1000 * + ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT))); evt->waiting = 0; if (list_empty(&evt->events_to_see)) { rc = (time_left) ? 
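The lpfc_bsg_copy_data() rework above stops walking the bsg scatterlist with sg_virt()/sg_next() and drives it through the sg mapping iterator instead, which copes with highmem and chained entries via kmap; the patch wraps the walk in local_irq_save() because SG_MITER_ATOMIC forbids sleeping. A minimal sketch of the canonical iterator loop; the function name and the linear destination buffer are assumptions for illustration:

#include <linux/scatterlist.h>
#include <linux/string.h>

/* Copy up to buf_len bytes out of a scatterlist into a linear buffer
 * with the sg mapping iterator.  SG_MITER_FROM_SG means data flows out
 * of the sg list; SG_MITER_ATOMIC means the mapping may not sleep. */
static size_t sketch_sg_copy_out(struct scatterlist *sgl, unsigned int nents,
				 u8 *buf, size_t buf_len)
{
	struct sg_mapping_iter miter;
	size_t copied = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (copied < buf_len && sg_miter_next(&miter)) {
		size_t len = min(miter.length, buf_len - copied);

		memcpy(buf + copied, miter.addr, len);
		copied += len;
	}
	sg_miter_stop(&miter);
	return copied;
}

For a plain one-shot copy the kernel also provides sg_copy_to_buffer(); lpfc keeps the explicit iterator because it interleaves the scatterlist walk with its own chain of DMA buffers.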
-EINTR : -ETIMEDOUT; diff --git a/trunk/drivers/scsi/lpfc/lpfc_crtn.h b/trunk/drivers/scsi/lpfc/lpfc_crtn.h index 7631893ae005..d41456e5f814 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_crtn.h +++ b/trunk/drivers/scsi/lpfc/lpfc_crtn.h @@ -470,3 +470,4 @@ int lpfc_sli4_xri_sgl_update(struct lpfc_hba *); void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *); uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *); int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t); +void lpfc_sli4_offline_eratt(struct lpfc_hba *); diff --git a/trunk/drivers/scsi/lpfc/lpfc_ct.c b/trunk/drivers/scsi/lpfc/lpfc_ct.c index 7bff3a19af56..ae1a07c57cae 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_ct.c +++ b/trunk/drivers/scsi/lpfc/lpfc_ct.c @@ -1811,7 +1811,8 @@ lpfc_fdmi_timeout_handler(struct lpfc_vport *vport) if (init_utsname()->nodename[0] != '\0') lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, jiffies + + msecs_to_jiffies(1000 * 60)); } return; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_els.c b/trunk/drivers/scsi/lpfc/lpfc_els.c index bbed8471bf0b..3cae0a92e8bd 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_els.c +++ b/trunk/drivers/scsi/lpfc/lpfc_els.c @@ -29,6 +29,7 @@ #include #include + #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" @@ -238,7 +239,10 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, icmd->un.elsreq64.remoteID = did; /* DID */ icmd->ulpCommand = CMD_ELS_REQUEST64_CR; - icmd->ulpTimeout = phba->fc_ratov * 2; + if (elscmd == ELS_CMD_FLOGI) + icmd->ulpTimeout = FF_DEF_RATOV * 2; + else + icmd->ulpTimeout = phba->fc_ratov * 2; } else { icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(pbuflist->phys); icmd->un.xseq64.bdl.addrLow = putPaddrLow(pbuflist->phys); @@ -308,16 +312,20 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp, /* Xmit ELS command to remote NPORT */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0116 Xmit ELS command x%x to remote " - "NPORT x%x I/O tag: x%x, port state: x%x\n", + "NPORT x%x I/O tag: x%x, port state:x%x" + " fc_flag:x%x\n", elscmd, did, elsiocb->iotag, - vport->port_state); + vport->port_state, + vport->fc_flag); } else { /* Xmit ELS response to remote NPORT */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0117 Xmit ELS response x%x to remote " - "NPORT x%x I/O tag: x%x, size: x%x\n", + "NPORT x%x I/O tag: x%x, size: x%x " + "port_state x%x fc_flag x%x\n", elscmd, ndlp->nlp_DID, elsiocb->iotag, - cmdSize); + cmdSize, vport->port_state, + vport->fc_flag); } return elsiocb; @@ -909,6 +917,23 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_PT2PT; spin_unlock_irq(shost->host_lock); + /* If physical FC port changed, unreg VFI and ALL VPIs / RPIs */ + if ((phba->sli_rev == LPFC_SLI_REV4) && phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + + /* The FC_VFI_REGISTERED flag will get clear in the cmpl + * handler for unreg_vfi, but if we don't force the + * FC_VFI_REGISTERED flag then the reg_vfi mailbox could be + * built with the update bit set instead of just the vp bit to + * change the Nport ID. We need to have the vp set and the + * Upd cleared on topology changes. 
+ */ + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } /* Start discovery - this should just do CLEAR_LA */ lpfc_disc_start(vport); @@ -1030,9 +1055,19 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, vport->cfg_discovery_threads = LPFC_MAX_DISC_THREADS; if ((phba->sli_rev == LPFC_SLI_REV4) && (!(vport->fc_flag & FC_VFI_REGISTERED) || - (vport->fc_prevDID != vport->fc_myDID))) { - if (vport->fc_flag & FC_VFI_REGISTERED) - lpfc_sli4_unreg_all_rpis(vport); + (vport->fc_prevDID != vport->fc_myDID) || + phba->fc_topology_changed)) { + if (vport->fc_flag & FC_VFI_REGISTERED) { + if (phba->fc_topology_changed) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + } else { + lpfc_sli4_unreg_all_rpis(vport); + } + } lpfc_issue_reg_vfi(vport); lpfc_nlp_put(ndlp); goto out; @@ -1054,10 +1089,11 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* FLOGI completes successfully */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0101 FLOGI completes successfully " - "Data: x%x x%x x%x x%x\n", + "0101 FLOGI completes successfully, I/O tag:x%x, " + "Data: x%x x%x x%x x%x x%x x%x\n", cmdiocb->iotag, irsp->un.ulpWord[4], sp->cmn.e_d_tov, - sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution); + sp->cmn.w2.r_a_tov, sp->cmn.edtovResolution, + vport->port_state, vport->fc_flag); if (vport->port_state == LPFC_FLOGI) { /* @@ -5047,6 +5083,8 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, struct ls_rjt stat; uint32_t cmd, did; int rc; + uint32_t fc_flag = 0; + uint32_t port_state = 0; cmd = *lp++; sp = (struct serv_parm *) lp; @@ -5113,16 +5151,25 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * will be. */ vport->fc_myDID = PT2PT_LocalID; - } + } else + vport->fc_myDID = PT2PT_RemoteID; /* * The vport state should go to LPFC_FLOGI only * AFTER we issue a FLOGI, not receive one. 
*/ spin_lock_irq(shost->host_lock); + fc_flag = vport->fc_flag; + port_state = vport->port_state; vport->fc_flag |= FC_PT2PT; vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); + vport->port_state = LPFC_FLOGI; spin_unlock_irq(shost->host_lock); + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3311 Rcv Flogi PS x%x new PS x%x " + "fc_flag x%x new fc_flag x%x\n", + port_state, vport->port_state, + fc_flag, vport->fc_flag); /* * We temporarily set fc_myDID to make it look like we are @@ -6241,7 +6288,8 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport) } if (!list_empty(&phba->sli.ring[LPFC_ELS_RING].txcmplq)) - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); } /** @@ -6612,7 +6660,9 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, /* ELS command received from NPORT */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0112 ELS command x%x received from NPORT x%x " - "Data: x%x\n", cmd, did, vport->port_state); + "Data: x%x x%x x%x x%x\n", + cmd, did, vport->port_state, vport->fc_flag, + vport->fc_myDID, vport->fc_prevDID); switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, @@ -6621,6 +6671,19 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, phba->fc_stat.elsRcvPLOGI++; ndlp = lpfc_plogi_confirm_nport(phba, payload, ndlp); + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->pport->fc_flag & FC_PT2PT)) { + vport->fc_prevDID = vport->fc_myDID; + /* Our DID needs to be updated before registering + * the vfi. This is done in lpfc_rcv_plogi but + * that is called after the reg_vfi. + */ + vport->fc_myDID = elsiocb->iocb.un.rcvels.parmRo; + lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, + "3312 Remote port assigned DID x%x " + "%x\n", vport->fc_myDID, + vport->fc_prevDID); + } lpfc_send_els_event(vport, ndlp, payload); @@ -6630,6 +6693,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, rjt_exp = LSEXP_NOTHING_MORE; break; } + shost = lpfc_shost_from_vport(vport); if (vport->port_state < LPFC_DISC_AUTH) { if (!(phba->pport->fc_flag & FC_PT2PT) || (phba->pport->fc_flag & FC_PT2PT_PLOGI)) { @@ -6641,9 +6705,18 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * another NPort and the other side has initiated * the PLOGI before responding to our FLOGI. 
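The FLOGI paths above repeat the same SLI4 recovery sequence when the topology has changed in point-to-point mode: unregister the FCF, clear FC_VFI_REGISTERED under the host lock, reset fc_topology_changed and issue a fresh REG_VFI. A hypothetical consolidation of that sequence, not part of the patch, makes the intent easier to follow:

/* Hypothetical helper, not in this patch: the re-register-VFI sequence
 * repeated in lpfc_cmpl_els_flogi_nport(), lpfc_cmpl_els_flogi() and
 * lpfc_els_unsol_buffer() when the SLI4 topology has changed. */
static void sketch_reregister_vfi(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	lpfc_unregister_fcf_prep(phba);

	/* Force the next REG_VFI to be built with the VP bit (new NPort ID)
	 * rather than the Upd bit, as the comment in the hunk explains. */
	spin_lock_irq(shost->host_lock);
	vport->fc_flag &= ~FC_VFI_REGISTERED;
	spin_unlock_irq(shost->host_lock);

	phba->fc_topology_changed = 0;
	lpfc_issue_reg_vfi(vport);
}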
*/ + if (phba->sli_rev == LPFC_SLI_REV4 && + (phba->fc_topology_changed || + vport->fc_myDID != vport->fc_prevDID)) { + lpfc_unregister_fcf_prep(phba); + spin_lock_irq(shost->host_lock); + vport->fc_flag &= ~FC_VFI_REGISTERED; + spin_unlock_irq(shost->host_lock); + phba->fc_topology_changed = 0; + lpfc_issue_reg_vfi(vport); + } } - shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; spin_unlock_irq(shost->host_lock); @@ -7002,8 +7075,11 @@ lpfc_do_scr_ns_plogi(struct lpfc_hba *phba, struct lpfc_vport *vport) spin_lock_irq(shost->host_lock); if (vport->fc_flag & FC_DISC_DELAYED) { spin_unlock_irq(shost->host_lock); + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "3334 Delay fc port discovery for %d seconds\n", + phba->fc_ratov); mod_timer(&vport->delayed_disc_tmo, - jiffies + HZ * phba->fc_ratov); + jiffies + msecs_to_jiffies(1000 * phba->fc_ratov)); return; } spin_unlock_irq(shost->host_lock); @@ -7287,7 +7363,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; shost = lpfc_shost_from_vport(phba->pport); - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -7791,7 +7867,8 @@ lpfc_block_fabric_iocbs(struct lpfc_hba *phba) blocked = test_and_set_bit(FABRIC_COMANDS_BLOCKED, &phba->bit_flags); /* Start a timer to unblock fabric iocbs after 100ms */ if (!blocked) - mod_timer(&phba->fabric_block_timer, jiffies + HZ/10 ); + mod_timer(&phba->fabric_block_timer, + jiffies + msecs_to_jiffies(100)); return; } diff --git a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c index 326e05a65a73..0f6e2548f35d 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -160,11 +160,12 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) if (!list_empty(&evtp->evt_listp)) return; + evtp->evt_arg1 = lpfc_nlp_get(ndlp); + spin_lock_irq(&phba->hbalock); /* We need to hold the node by incrementing the reference * count until this queued work is done */ - evtp->evt_arg1 = lpfc_nlp_get(ndlp); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); @@ -1008,9 +1009,6 @@ lpfc_linkup(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) lpfc_linkup_port(vports[i]); lpfc_destroy_vport_work_array(phba, vports); - if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && - (phba->sli_rev < LPFC_SLI_REV4)) - lpfc_issue_clear_la(phba, phba->pport); return 0; } @@ -1436,7 +1434,8 @@ lpfc_register_fcf(struct lpfc_hba *phba) if (phba->fcf.fcf_flag & FCF_REGISTERED) { phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE); phba->hba_flag &= ~FCF_TS_INPROG; - if (phba->pport->port_state != LPFC_FLOGI) { + if (phba->pport->port_state != LPFC_FLOGI && + phba->pport->fc_flag & FC_FABRIC) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); lpfc_initial_flogi(phba->pport); @@ -2270,8 +2269,11 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_INFO, LOG_FIP, "2836 New FCF matches in-use " - "FCF (x%x)\n", - phba->fcf.current_rec.fcf_indx); + "FCF (x%x), port_state:x%x, " + "fc_flag:x%x\n", + phba->fcf.current_rec.fcf_indx, + phba->pport->port_state, + phba->pport->fc_flag); goto out; } else lpfc_printf_log(phba, KERN_ERR, LOG_FIP, @@ -2796,7 +2798,19 @@ 
void lpfc_issue_init_vpi(struct lpfc_vport *vport) { LPFC_MBOXQ_t *mboxq; - int rc; + int rc, vpi; + + if ((vport->port_type != LPFC_PHYSICAL_PORT) && (!vport->vpi)) { + vpi = lpfc_alloc_vpi(vport->phba); + if (!vpi) { + lpfc_printf_vlog(vport, KERN_ERR, + LOG_MBOX, + "3303 Failed to obtain vport vpi\n"); + lpfc_vport_set_state(vport, FC_VPORT_FAILED); + return; + } + vport->vpi = vpi; + } mboxq = mempool_alloc(vport->phba->mbox_mem_pool, GFP_KERNEL); if (!mboxq) { @@ -2894,9 +2908,14 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } - /* If the VFI is already registered, there is nothing else to do */ + /* If the VFI is already registered, there is nothing else to do + * Unless this was a VFI update and we are in PT2PT mode, then + * we should drop through to set the port state to ready. + */ if (vport->fc_flag & FC_VFI_REGISTERED) - goto out_free_mem; + if (!(phba->sli_rev == LPFC_SLI_REV4 && + vport->fc_flag & FC_PT2PT)) + goto out_free_mem; /* The VPI is implicitly registered when the VFI is registered */ spin_lock_irq(shost->host_lock); @@ -2913,6 +2932,13 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) goto out_free_mem; } + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3313 cmpl reg vfi port_state:%x fc_flag:%x myDid:%x " + "alpacnt:%d LinkState:%x topology:%x\n", + vport->port_state, vport->fc_flag, vport->fc_myDID, + vport->phba->alpa_map[0], + phba->link_state, phba->fc_topology); + if (vport->port_state == LPFC_FABRIC_CFG_LINK) { /* * For private loop or for NPort pt2pt, @@ -2925,7 +2951,10 @@ lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) /* Use loop map to make discovery list */ lpfc_disc_list_loopmap(vport); /* Start discovery */ - lpfc_disc_start(vport); + if (vport->fc_flag & FC_PT2PT) + vport->port_state = LPFC_VPORT_READY; + else + lpfc_disc_start(vport); } else { lpfc_start_fdiscs(phba); lpfc_do_scr_ns_plogi(phba, vport); @@ -3007,6 +3036,15 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) break; } + if (phba->fc_topology && + phba->fc_topology != bf_get(lpfc_mbx_read_top_topology, la)) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3314 Toplogy changed was 0x%x is 0x%x\n", + phba->fc_topology, + bf_get(lpfc_mbx_read_top_topology, la)); + phba->fc_topology_changed = 1; + } + phba->fc_topology = bf_get(lpfc_mbx_read_top_topology, la); phba->link_flag &= ~LS_NPIV_FAB_SUPPORTED; @@ -4235,7 +4273,7 @@ lpfc_set_disctmo(struct lpfc_vport *vport) tmo, vport->port_state, vport->fc_flag); } - mod_timer(&vport->fc_disctmo, jiffies + HZ * tmo); + mod_timer(&vport->fc_disctmo, jiffies + msecs_to_jiffies(1000 * tmo)); spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_DISC_TMO; spin_unlock_irq(shost->host_lock); @@ -4949,8 +4987,12 @@ lpfc_disc_start(struct lpfc_vport *vport) uint32_t clear_la_pending; int did_changed; - if (!lpfc_is_link_up(phba)) + if (!lpfc_is_link_up(phba)) { + lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + "3315 Link is not up %x\n", + phba->link_state); return; + } if (phba->link_state == LPFC_CLEAR_LA) clear_la_pending = 1; @@ -4983,11 +5025,13 @@ lpfc_disc_start(struct lpfc_vport *vport) if (num_sent) return; - /* Register the VPI for SLI3, NON-NPIV only. */ + /* Register the VPI for SLI3, NPIV only. 
*/ if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) && !(vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_RSCN_MODE) && (phba->sli_rev < LPFC_SLI_REV4)) { + if (vport->port_type == LPFC_PHYSICAL_PORT) + lpfc_issue_clear_la(phba, vport); lpfc_issue_reg_vpi(phba, vport); return; } @@ -5410,7 +5454,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (vport->cfg_fdmi_on == 1) lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA); else - mod_timer(&vport->fc_fdmitmo, jiffies + HZ * 60); + mod_timer(&vport->fc_fdmitmo, + jiffies + msecs_to_jiffies(1000 * 60)); /* decrement the node reference count held for this callback * function. @@ -5855,7 +5900,7 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) struct lpfc_vport **vports; struct lpfc_nodelist *ndlp; struct Scsi_Host *shost; - int i, rc; + int i = 0, rc; /* Unregister RPIs */ if (lpfc_fcf_inuse(phba)) @@ -5883,6 +5928,20 @@ lpfc_unregister_fcf_prep(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + if (i == 0 && (!(phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))) { + ndlp = lpfc_findnode_did(phba->pport, Fabric_DID); + if (ndlp) + lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); + lpfc_cleanup_pending_mbox(phba->pport); + if (phba->sli_rev == LPFC_SLI_REV4) + lpfc_sli4_unreg_all_rpis(phba->pport); + lpfc_mbx_unreg_vpi(phba->pport); + shost = lpfc_shost_from_vport(phba->pport); + spin_lock_irq(shost->host_lock); + phba->pport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + phba->pport->vpi_state &= ~LPFC_VPI_REGISTERED; + spin_unlock_irq(shost->host_lock); + } /* Cleanup any outstanding ELS commands */ lpfc_els_flush_all_cmd(phba); diff --git a/trunk/drivers/scsi/lpfc/lpfc_hw.h b/trunk/drivers/scsi/lpfc/lpfc_hw.h index e8c476031703..83700c18f468 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hw.h +++ b/trunk/drivers/scsi/lpfc/lpfc_hw.h @@ -1667,6 +1667,7 @@ enum lpfc_protgrp_type { #define BG_OP_IN_CSUM_OUT_CSUM 0x5 #define BG_OP_IN_CRC_OUT_CSUM 0x6 #define BG_OP_IN_CSUM_OUT_CRC 0x7 +#define BG_OP_RAW_MODE 0x8 struct lpfc_pde5 { uint32_t word0; diff --git a/trunk/drivers/scsi/lpfc/lpfc_hw4.h b/trunk/drivers/scsi/lpfc/lpfc_hw4.h index 1dd2f6f0a127..713a4613ec3a 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_hw4.h +++ b/trunk/drivers/scsi/lpfc/lpfc_hw4.h @@ -200,6 +200,11 @@ struct lpfc_sli_intf { #define LPFC_MAX_IMAX 5000000 #define LPFC_DEF_IMAX 50000 +#define LPFC_MIN_CPU_MAP 0 +#define LPFC_MAX_CPU_MAP 2 +#define LPFC_HBA_CPU_MAP 1 +#define LPFC_DRIVER_CPU_MAP 2 /* Default */ + /* PORT_CAPABILITIES constants. 
*/ #define LPFC_MAX_SUPPORTED_PAGES 8 @@ -621,7 +626,7 @@ struct lpfc_register { #define lpfc_sliport_status_rdy_SHIFT 23 #define lpfc_sliport_status_rdy_MASK 0x1 #define lpfc_sliport_status_rdy_WORD word0 -#define MAX_IF_TYPE_2_RESETS 1000 +#define MAX_IF_TYPE_2_RESETS 6 #define LPFC_CTL_PORT_CTL_OFFSET 0x408 #define lpfc_sliport_ctrl_end_SHIFT 30 diff --git a/trunk/drivers/scsi/lpfc/lpfc_init.c b/trunk/drivers/scsi/lpfc/lpfc_init.c index 90b8b0515e23..cb465b253910 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_init.c +++ b/trunk/drivers/scsi/lpfc/lpfc_init.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -58,6 +59,9 @@ char *_dump_buf_dif; unsigned long _dump_buf_dif_order; spinlock_t _dump_buf_lock; +/* Used when mapping IRQ vectors in a driver centric manner */ +uint16_t lpfc_used_cpu[LPFC_MAX_CPU]; + static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); @@ -541,13 +545,16 @@ lpfc_config_port_post(struct lpfc_hba *phba) /* Set up ring-0 (ELS) timer */ timeout = phba->fc_ratov * 2; - mod_timer(&vport->els_tmofunc, jiffies + HZ * timeout); + mod_timer(&vport->els_tmofunc, + jiffies + msecs_to_jiffies(1000 * timeout)); /* Set up heart beat (HB) timer */ - mod_timer(&phba->hb_tmofunc, jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + mod_timer(&phba->hb_tmofunc, + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Set up error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); if (phba->hba_flag & LINK_DISABLED) { lpfc_printf_log(phba, @@ -908,9 +915,9 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) psb->pCmd = NULL; psb->status = IOSTAT_SUCCESS; } - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_splice(&aborts, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_splice(&aborts, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); return 0; } @@ -1021,7 +1028,8 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq) !(phba->link_state == LPFC_HBA_ERROR) && !(phba->pport->load_flag & FC_UNLOADING)) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); return; } @@ -1064,15 +1072,18 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) spin_lock_irq(&phba->pport->work_port_lock); - if (time_after(phba->last_completion_time + LPFC_HB_MBOX_INTERVAL * HZ, - jiffies)) { + if (time_after(phba->last_completion_time + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL), + jiffies)) { spin_unlock_irq(&phba->pport->work_port_lock); if (!phba->hb_outstanding) mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); else mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } spin_unlock_irq(&phba->pport->work_port_lock); @@ -1104,7 +1115,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (!pmboxq) { mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } @@ -1120,7 
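The heartbeat rework below expresses its intervals in milliseconds and keeps the wrap-safe time_after() comparison against jiffies. The test condenses to a small helper; the name is invented for illustration:

#include <linux/jiffies.h>

/* True if fewer than interval_ms milliseconds have elapsed since 'last';
 * time_after() handles jiffies wrap-around, which a plain '<' would not. */
static bool sketch_within_interval(unsigned long last, unsigned int interval_ms)
{
	return time_after(last + msecs_to_jiffies(interval_ms), jiffies);
}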
+1132,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->mbox_mem_pool); mod_timer(&phba->hb_tmofunc, jiffies + - HZ * LPFC_HB_MBOX_INTERVAL); + msecs_to_jiffies(1000 * + LPFC_HB_MBOX_INTERVAL)); return; } phba->skipped_hb = 0; @@ -1136,7 +1149,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) phba->skipped_hb = jiffies; mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); return; } else { /* @@ -1150,7 +1164,8 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) jiffies_to_msecs(jiffies - phba->last_completion_time)); mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_TIMEOUT); + jiffies + + msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT)); } } } @@ -1191,7 +1206,7 @@ lpfc_offline_eratt(struct lpfc_hba *phba) * This routine is called to bring a SLI4 HBA offline when HBA hardware error * other than Port Error 6 has been detected. **/ -static void +void lpfc_sli4_offline_eratt(struct lpfc_hba *phba) { lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); @@ -2633,6 +2648,7 @@ lpfc_online(struct lpfc_hba *phba) struct lpfc_vport *vport; struct lpfc_vport **vports; int i; + bool vpis_cleared = false; if (!phba) return 0; @@ -2656,6 +2672,10 @@ lpfc_online(struct lpfc_hba *phba) lpfc_unblock_mgmt_io(phba); return 1; } + spin_lock_irq(&phba->hbalock); + if (!phba->sli4_hba.max_cfg_param.vpi_used) + vpis_cleared = true; + spin_unlock_irq(&phba->hbalock); } else { if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */ lpfc_unblock_mgmt_io(phba); @@ -2672,8 +2692,13 @@ lpfc_online(struct lpfc_hba *phba) vports[i]->fc_flag &= ~FC_OFFLINE_MODE; if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + if ((vpis_cleared) && + (vports[i]->port_type != + LPFC_PHYSICAL_PORT)) + vports[i]->vpi = 0; + } spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); @@ -2833,16 +2858,30 @@ lpfc_scsi_free(struct lpfc_hba *phba) struct lpfc_iocbq *io, *io_next; spin_lock_irq(&phba->hbalock); + /* Release all the lpfc_scsi_bufs maintained by this host. */ - spin_lock(&phba->scsi_buf_list_lock); - list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list, list) { + + spin_lock(&phba->scsi_buf_list_put_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put, + list) { list_del(&sb->list); pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, sb->dma_handle); kfree(sb); phba->total_scsi_bufs--; } - spin_unlock(&phba->scsi_buf_list_lock); + spin_unlock(&phba->scsi_buf_list_put_lock); + + spin_lock(&phba->scsi_buf_list_get_lock); + list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get, + list) { + list_del(&sb->list); + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data, + sb->dma_handle); + kfree(sb); + phba->total_scsi_bufs--; + } + spin_unlock(&phba->scsi_buf_list_get_lock); /* Release all the lpfc_iocbq entries maintained by this host. 
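The hunks above and below convert the single lpfc_scsi_buf_list, guarded by one lock, into separate get and put lists with their own spinlocks; lpfc_scsi_free() and lpfc_sli4_xri_sgl_update() now drain both. The usual benefit of this split is that the allocation and free paths stop contending on one lock: frees land on the put list, and the get path takes both locks only when it has to splice the put list over. A sketch of the allocation side under that assumption; this is not the driver's actual lpfc_get_scsi_buf, but the field names follow the patch:

/* Sketch of the two-list allocation pattern enabled by the new
 * scsi_buf_list_get/_put split. */
static struct lpfc_scsi_buf *sketch_get_scsi_buf(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb;
	unsigned long flags;

	spin_lock_irqsave(&phba->scsi_buf_list_get_lock, flags);
	if (list_empty(&phba->lpfc_scsi_buf_list_get)) {
		/* Refill from the put list; this is the only point where
		 * both locks are held together. */
		spin_lock(&phba->scsi_buf_list_put_lock);
		list_splice_init(&phba->lpfc_scsi_buf_list_put,
				 &phba->lpfc_scsi_buf_list_get);
		spin_unlock(&phba->scsi_buf_list_put_lock);
	}
	psb = list_first_entry_or_null(&phba->lpfc_scsi_buf_list_get,
				       struct lpfc_scsi_buf, list);
	if (psb)
		list_del(&psb->list);
	spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, flags);
	return psb;
}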
*/ list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) { @@ -2978,9 +3017,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) phba->sli4_hba.scsi_xri_cnt, phba->sli4_hba.scsi_xri_max); - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &scsi_sgl_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); + list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { /* max scsi xri shrinked below the allocated scsi buffers */ @@ -2994,9 +3036,9 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->dma_handle); kfree(psb); } - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } /* update xris associated to remaining allocated scsi buffers */ @@ -3014,9 +3056,12 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) psb->cur_iocbq.sli4_lxritag = lxri; psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; } - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); return 0; @@ -3197,14 +3242,15 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) stat = 1; goto finished; } - if (time >= 30 * HZ) { + if (time >= msecs_to_jiffies(30 * 1000)) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0461 Scanning longer than 30 " "seconds. Continuing initialization\n"); stat = 1; goto finished; } - if (time >= 15 * HZ && phba->link_state <= LPFC_LINK_DOWN) { + if (time >= msecs_to_jiffies(15 * 1000) && + phba->link_state <= LPFC_LINK_DOWN) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0465 Link down longer than 15 " "seconds. Continuing initialization\n"); @@ -3216,7 +3262,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time) goto finished; if (vport->num_disc_nodes || vport->fc_prli_sent) goto finished; - if (vport->fc_map_cnt == 0 && time < 2 * HZ) + if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000)) goto finished; if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0) goto finished; @@ -4215,7 +4261,8 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); shost = lpfc_shost_from_vport(vport); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -4707,23 +4754,52 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) return -ENOMEM; /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. 
- * 2 segments are added since the IOCB needs a command and response bde. */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + /* Initialize the host templates the configured values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + /* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */ if (phba->cfg_enable_bg) { - phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT; - phba->cfg_sg_dma_buf_size += - phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64); + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a BDE for each. Sice we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough BDEs to accomidate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + (LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64)); + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF; + + /* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a BDE for each, and a BDE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64)); + + /* Total BDEs in BPL for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; } - /* Also reinitialize the host templates with new values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); phba->max_vpi = LPFC_MAX_VPI; /* This will be set to correct value after config_port mbox */ @@ -4789,13 +4865,13 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba) static int lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) { + struct lpfc_vector_map_info *cpup; struct lpfc_sli *psli; LPFC_MBOXQ_t *mboxq; - int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size; + int rc, i, hbq_count, max_buf_size; uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0}; struct lpfc_mqe *mqe; - int longs, sli_family; - int sges_per_segment; + int longs; /* Before proceed, wait for POST done and device ready */ rc = lpfc_sli4_post_status_check(phba); @@ -4863,11 +4939,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) phba->fc_map[1] = LPFC_FCOE_FCF_MAP1; phba->fc_map[2] = LPFC_FCOE_FCF_MAP2; - /* With BlockGuard we can have multiple SGEs per Data Segemnt */ - sges_per_segment = 1; - if (phba->cfg_enable_bg) - sges_per_segment = 2; - /* * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple. 
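For the SLI-3 path above, the per-command DMA buffer is sized to hold the FCP command, the FCP response and one BDE per data segment plus the two reserved BDEs, while the BlockGuard case pre-allocates BDEs for the full LPFC_MAX_SG_SEG_CNT. The non-BlockGuard arithmetic, restated as a tiny helper; the function name is invented and the structures are the ones named in the hunk:

/* Sketch: size of the SLI-3 per-command DMA buffer for a regular
 * (non-T10-DIF) I/O -- FCP cmnd + FCP rsp + one BDE per data segment
 * plus the two reserved BDEs for the command and response. */
static size_t sketch_sli3_sg_dma_buf_size(unsigned int sg_seg_cnt)
{
	return sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) +
	       (sg_seg_cnt + 2) * sizeof(struct ulp_bde64);
}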
@@ -4878,43 +4949,71 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) sizeof(struct lpfc_sli_ring), GFP_KERNEL); if (!phba->sli.ring) return -ENOMEM; + /* - * Since the sg_tablesize is module parameter, the sg_dma_buf_size + * It doesn't matter what family our adapter is in, we are + * limited to 2 Pages, 512 SGEs, for our SGL. + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp + */ + max_buf_size = (2 * SLI4_PAGE_SIZE); + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2) + phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2; + + /* + * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size * used to create the sg_dma_buf_pool must be dynamically calculated. - * 2 segments are added since the IOCB needs a command and response bde. - * To insure that the scsi sgl does not cross a 4k page boundary only - * sgl sizes of must be a power of 2. */ - buf_size = (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp) + - (((phba->cfg_sg_seg_cnt * sges_per_segment) + 2) * - sizeof(struct sli4_sge))); - - sli_family = bf_get(lpfc_sli_intf_sli_family, &phba->sli4_hba.sli_intf); - max_buf_size = LPFC_SLI4_MAX_BUF_SIZE; - switch (sli_family) { - case LPFC_SLI_INTF_FAMILY_BE2: - case LPFC_SLI_INTF_FAMILY_BE3: - /* There is a single hint for BE - 2 pages per BPL. */ - if (bf_get(lpfc_sli_intf_sli_hint1, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_SLI_HINT1_1) - max_buf_size = LPFC_SLI4_FL1_MAX_BUF_SIZE; - break; - case LPFC_SLI_INTF_FAMILY_LNCR_A0: - case LPFC_SLI_INTF_FAMILY_LNCR_B0: - default: - break; + + if (phba->cfg_enable_bg) { + /* + * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd, + * the FCP rsp, and a SGE for each. Since we have no control + * over how many protection data segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), we just allocate enough SGEs to accommodate + * our max amount and we need to limit lpfc_sg_seg_cnt to + * minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + max_buf_size; + + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + + if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF) + phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF; + } else { + /* + * The scsi_buf for a regular I/O will hold the FCP cmnd, + * the FCP rsp, a SGE for each, and a SGE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge)); + + /* Total SGEs for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2; + /* + * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need + * to post 1 page for the SGL. + */ } - for (dma_buf_size = LPFC_SLI4_MIN_BUF_SIZE; - dma_buf_size < max_buf_size && buf_size > dma_buf_size; - dma_buf_size = dma_buf_size << 1) - ; - if (dma_buf_size == max_buf_size) - phba->cfg_sg_seg_cnt = (dma_buf_size - - sizeof(struct fcp_cmnd) - sizeof(struct fcp_rsp) - - (2 * sizeof(struct sli4_sge))) / - sizeof(struct sli4_sge); - phba->cfg_sg_dma_buf_size = dma_buf_size; + /* Initialize the host templates with the updated values. 
*/ + lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt; + + if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; + else + phba->cfg_sg_dma_buf_size = + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt); /* Initialize buffer queue management fields */ hbq_count = lpfc_sli_hbq_count(); @@ -5104,6 +5203,26 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_free_fcp_eq_hdl; } + phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu), + GFP_KERNEL); + if (!phba->sli4_hba.cpu_map) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3327 Failed allocate memory for msi-x " + "interrupt vector mapping\n"); + rc = -ENOMEM; + goto out_free_msix; + } + /* Initialize io channels for round robin */ + cpup = phba->sli4_hba.cpu_map; + rc = 0; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + cpup->channel_id = rc; + rc++; + if (rc >= phba->cfg_fcp_io_channel) + rc = 0; + } + /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. @@ -5123,6 +5242,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) return 0; +out_free_msix: + kfree(phba->sli4_hba.msix_entries); out_free_fcp_eq_hdl: kfree(phba->sli4_hba.fcp_eq_hdl); out_free_fcf_rr_bmask: @@ -5152,6 +5273,11 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) { struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry; + /* Free memory allocated for msi-x interrupt vector to CPU mapping */ + kfree(phba->sli4_hba.cpu_map); + phba->sli4_hba.num_present_cpu = 0; + phba->sli4_hba.num_online_cpu = 0; + /* Free memory allocated for msi-x interrupt vector entries */ kfree(phba->sli4_hba.msix_entries); @@ -5260,8 +5386,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) init_waitqueue_head(&phba->work_waitq); /* Initialize the scsi buffer list used by driver for scsi IO */ - spin_lock_init(&phba->scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list); + spin_lock_init(&phba->scsi_buf_list_get_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get); + spin_lock_init(&phba->scsi_buf_list_put_lock); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); /* Initialize the fabric iocb list */ INIT_LIST_HEAD(&phba->fabric_iocb_list); @@ -6696,6 +6824,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) int cfg_fcp_io_channel; uint32_t cpu; uint32_t i = 0; + uint32_t j = 0; /* @@ -6706,15 +6835,21 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) /* Sanity check on HBA EQ parameters */ cfg_fcp_io_channel = phba->cfg_fcp_io_channel; - /* It doesn't make sense to have more io channels then CPUs */ - for_each_online_cpu(cpu) { - i++; + /* It doesn't make sense to have more io channels then online CPUs */ + for_each_present_cpu(cpu) { + if (cpu_online(cpu)) + i++; + j++; } + phba->sli4_hba.num_online_cpu = i; + phba->sli4_hba.num_present_cpu = j; + if (i < cfg_fcp_io_channel) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3188 Reducing IO channels to match number of " - "CPUs: from %d to %d\n", cfg_fcp_io_channel, i); + "online CPUs: from %d to %d\n", + cfg_fcp_io_channel, i); cfg_fcp_io_channel = i; } @@ -7743,8 +7878,13 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) out: /* Catch the not-ready port failure after a port reset. 
*/ - if (num_resets >= MAX_IF_TYPE_2_RESETS) + if (num_resets >= MAX_IF_TYPE_2_RESETS) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3317 HBA not functional: IP Reset Failed " + "after (%d) retries, try: " + "echo fw_reset > board_mode\n", num_resets); rc = -ENODEV; + } return rc; } @@ -8208,6 +8348,269 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba) return; } +/** + * lpfc_find_next_cpu - Find next available CPU that matches the phys_id + * @phba: pointer to lpfc hba data structure. + * + * Find next available CPU to use for IRQ to CPU affinity. + */ +static int +lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id) +{ + struct lpfc_vector_map_info *cpup; + int cpu; + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + + /* + * If we get here, we have used ALL CPUs for the specific + * phys_id. Now we need to clear out lpfc_used_cpu and start + * reusing CPUs. + */ + + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + if (lpfc_used_cpu[cpu] == phys_id) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + } + + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { + /* CPU must be online */ + if (cpu_online(cpu)) { + if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) && + (cpup->phys_id == phys_id)) { + return cpu; + } + } + cpup++; + } + return LPFC_VECTOR_MAP_EMPTY; +} + +/** + * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors + * @phba: pointer to lpfc hba data structure. + * @vectors: number of HBA vectors + * + * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector + * affinization across multple physical CPUs (numa nodes). + * In addition, this routine will assign an IO channel for each CPU + * to use when issuing I/Os. 
+ */ +static int +lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) +{ + int i, idx, saved_chann, used_chann, cpu, phys_id; + int max_phys_id, num_io_channel, first_cpu; + struct lpfc_vector_map_info *cpup; +#ifdef CONFIG_X86 + struct cpuinfo_x86 *cpuinfo; +#endif + struct cpumask *mask; + uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1]; + + /* If there is no mapping, just return */ + if (!phba->cfg_fcp_cpu_map) + return 1; + + /* Init cpu_map array */ + memset(phba->sli4_hba.cpu_map, 0xff, + (sizeof(struct lpfc_vector_map_info) * + phba->sli4_hba.num_present_cpu)); + + max_phys_id = 0; + phys_id = 0; + num_io_channel = 0; + first_cpu = LPFC_VECTOR_MAP_EMPTY; + + /* Update CPU map with physical id and core id of each CPU */ + cpup = phba->sli4_hba.cpu_map; + for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) { +#ifdef CONFIG_X86 + cpuinfo = &cpu_data(cpu); + cpup->phys_id = cpuinfo->phys_proc_id; + cpup->core_id = cpuinfo->cpu_core_id; +#else + /* No distinction between CPUs for other platforms */ + cpup->phys_id = 0; + cpup->core_id = 0; +#endif + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3328 CPU physid %d coreid %d\n", + cpup->phys_id, cpup->core_id); + + if (cpup->phys_id > max_phys_id) + max_phys_id = cpup->phys_id; + cpup++; + } + + /* Now associate the HBA vectors with specific CPUs */ + for (idx = 0; idx < vectors; idx++) { + cpup = phba->sli4_hba.cpu_map; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) { + + /* Try for all phys_id's */ + for (i = 1; i < max_phys_id; i++) { + phys_id++; + if (phys_id > max_phys_id) + phys_id = 0; + cpu = lpfc_find_next_cpu(phba, phys_id); + if (cpu == LPFC_VECTOR_MAP_EMPTY) + continue; + goto found; + } + + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3329 Cannot set affinity:" + "Error mapping vector %d (%d)\n", + idx, vectors); + return 0; + } +found: + cpup += cpu; + if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP) + lpfc_used_cpu[cpu] = phys_id; + + /* Associate vector with selected CPU */ + cpup->irq = phba->sli4_hba.msix_entries[idx].vector; + + /* Associate IO channel with selected CPU */ + cpup->channel_id = idx; + num_io_channel++; + + if (first_cpu == LPFC_VECTOR_MAP_EMPTY) + first_cpu = cpu; + + /* Now affinitize to the selected CPU */ + mask = &cpup->maskbits; + cpumask_clear(mask); + cpumask_set_cpu(cpu, mask); + i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx]. + vector, mask); + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3330 Set Affinity: CPU %d channel %d " + "irq %d (%x)\n", + cpu, cpup->channel_id, + phba->sli4_hba.msix_entries[idx].vector, i); + + /* Spread vector mapping across multple physical CPU nodes */ + phys_id++; + if (phys_id > max_phys_id) + phys_id = 0; + } + + /* + * Finally fill in the IO channel for any remaining CPUs. + * At this point, all IO channels have been assigned to a specific + * MSIx vector, mapped to a specific CPU. + * Base the remaining IO channel assigned, to IO channels already + * assigned to other CPUs on the same phys_id. + */ + for (i = 0; i <= max_phys_id; i++) { + /* + * If there are no io channels already mapped to + * this phys_id, just round robin thru the io_channels. + * Setup chann[] for round robin. + */ + for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) + chann[idx] = idx; + + saved_chann = 0; + used_chann = 0; + + /* + * First build a list of IO channels already assigned + * to this phys_id before reassigning the same IO + * channels to the remaining CPUs. 
+ */ + cpup = phba->sli4_hba.cpu_map; + cpu = first_cpu; + cpup += cpu; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; + idx++) { + if (cpup->phys_id == i) { + /* + * Save any IO channels that are + * already mapped to this phys_id. + */ + if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) { + chann[saved_chann] = + cpup->channel_id; + saved_chann++; + goto out; + } + + /* See if we are using round-robin */ + if (saved_chann == 0) + saved_chann = + phba->cfg_fcp_io_channel; + + /* Associate next IO channel with CPU */ + cpup->channel_id = chann[used_chann]; + num_io_channel++; + used_chann++; + if (used_chann == saved_chann) + used_chann = 0; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3331 Set IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } +out: + cpu++; + if (cpu >= phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpu = 0; + } else { + cpup++; + } + } + } + + if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) { + if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) { + cpup->channel_id = 0; + num_io_channel++; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3332 Assign IO_CHANN " + "CPU %d channel %d\n", + idx, cpup->channel_id); + } + cpup++; + } + } + + /* Sanity check */ + if (num_io_channel != phba->sli4_hba.num_present_cpu) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3333 Set affinity mismatch:" + "%d chann != %d cpus: %d vactors\n", + num_io_channel, phba->sli4_hba.num_present_cpu, + vectors); + + phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; + return 1; +} + + /** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. @@ -8259,9 +8662,7 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) phba->sli4_hba.msix_entries[index].vector, phba->sli4_hba.msix_entries[index].entry); - /* - * Assign MSI-X vectors to interrupt handlers - */ + /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { memset(&phba->sli4_hba.handler_name[index], 0, 16); sprintf((char *)&phba->sli4_hba.handler_name[index], @@ -8289,6 +8690,8 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) phba->cfg_fcp_io_channel, vectors); phba->cfg_fcp_io_channel = vectors; } + + lpfc_sli4_set_affinity(phba, vectors); return rc; cfg_fail_out: @@ -9213,15 +9616,15 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); /* Disable interrupt and pci device */ lpfc_sli_disable_intr(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -9966,6 +10369,9 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); + /* Flush all driver's outstanding SCSI I/Os as we are to reset */ + lpfc_sli_flush_fcp_rings(phba); + /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -9973,9 +10379,6 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) lpfc_sli4_disable_intr(phba); lpfc_sli4_queue_destroy(phba); pci_disable_device(phba->pcidev); - - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); } /** @@ -10535,6 +10938,7 @@ static struct miscdevice lpfc_mgmt_dev = { 
static int __init lpfc_init(void) { + int cpu; int error = 0; printk(LPFC_MODULE_DESC "\n"); @@ -10561,6 +10965,11 @@ lpfc_init(void) return -ENOMEM; } } + + /* Initialize in case vector mapping is needed */ + for (cpu = 0; cpu < LPFC_MAX_CPU; cpu++) + lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY; + error = pci_register_driver(&lpfc_driver); if (error) { fc_release_transport(lpfc_transport_template); diff --git a/trunk/drivers/scsi/lpfc/lpfc_logmsg.h b/trunk/drivers/scsi/lpfc/lpfc_logmsg.h index baf53e6c2bd1..2a4e5d21eab2 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/trunk/drivers/scsi/lpfc/lpfc_logmsg.h @@ -37,6 +37,7 @@ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ #define LOG_FIP 0x00020000 /* FIP events */ #define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ +#define LOG_SCSI_CMD 0x00080000 /* ALL SCSI commands */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/trunk/drivers/scsi/lpfc/lpfc_mbox.c b/trunk/drivers/scsi/lpfc/lpfc_mbox.c index a7a9fa468308..41363db7d426 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_mbox.c +++ b/trunk/drivers/scsi/lpfc/lpfc_mbox.c @@ -2149,18 +2149,21 @@ lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys) /* Only FC supports upd bit */ if ((phba->sli4_hba.lnk_info.lnk_tp == LPFC_LNK_TYPE_FC) && - (vport->fc_flag & FC_VFI_REGISTERED)) { + (vport->fc_flag & FC_VFI_REGISTERED) && + (!phba->fc_topology_changed)) { bf_set(lpfc_reg_vfi_vp, reg_vfi, 0); bf_set(lpfc_reg_vfi_upd, reg_vfi, 1); } lpfc_printf_vlog(vport, KERN_INFO, LOG_MBOX, "3134 Register VFI, mydid:x%x, fcfi:%d, " - " vfi:%d, vpi:%d, fc_pname:%x%x\n", + " vfi:%d, vpi:%d, fc_pname:%x%x fc_flag:x%x" + " port_state:x%x topology chg:%d\n", vport->fc_myDID, phba->fcf.fcfi, phba->sli4_hba.vfi_ids[vport->vfi], phba->vpi_ids[vport->vpi], - reg_vfi->wwn[0], reg_vfi->wwn[1]); + reg_vfi->wwn[0], reg_vfi->wwn[1], vport->fc_flag, + vport->port_state, phba->fc_topology_changed); } /** diff --git a/trunk/drivers/scsi/lpfc/lpfc_mem.c b/trunk/drivers/scsi/lpfc/lpfc_mem.c index cd86069a0ba8..812d0cd7c86d 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_mem.c +++ b/trunk/drivers/scsi/lpfc/lpfc_mem.c @@ -64,18 +64,26 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { + /* Calculate alignment */ + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) + i = phba->cfg_sg_dma_buf_size; + else + i = SLI4_PAGE_SIZE; + phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, - phba->cfg_sg_dma_buf_size, + i, 0); - else + } else { phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool", phba->pcidev, phba->cfg_sg_dma_buf_size, align, 0); + } + if (!phba->lpfc_scsi_dma_buf_pool) goto fail; diff --git a/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c b/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c index 82f4d3542289..31e9b92f5a9b 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/trunk/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -332,9 +332,11 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "x%x x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, - ndlp->nlp_rpi); + ndlp->nlp_rpi, vport->port_state, + 
vport->fc_flag); if (vport->cfg_fcp_class == 2 && sp->cls2.classValid) ndlp->nlp_fcp_info |= CLASS2; @@ -574,7 +576,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; @@ -631,7 +633,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * If there are other active VLinks present, * re-instantiate the Vlink using FDISC. */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -648,7 +651,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, !(ndlp->nlp_type & NLP_FCP_INITIATOR))) || (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) { /* Only try to re-login if this is NOT a Fabric Node */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -969,7 +973,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1303,7 +1307,8 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, if ((irsp->ulpStatus) || (!lpfc_check_adisc(vport, ndlp, &ap->nodeName, &ap->portName))) { /* 1 sec timeout */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -1509,7 +1514,8 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, } /* Put ndlp in npr state set plogi timer for 1 sec */ - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; spin_unlock_irq(shost->host_lock); @@ -2145,7 +2151,8 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { - mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1); + mod_timer(&ndlp->nlp_delayfunc, + jiffies + msecs_to_jiffies(1000 * 1)); spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_DELAY_TMO; ndlp->nlp_flag &= ~NLP_NPR_ADISC; diff --git a/trunk/drivers/scsi/lpfc/lpfc_scsi.c b/trunk/drivers/scsi/lpfc/lpfc_scsi.c index 74b8710e1e90..8523b278ec9d 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_scsi.c +++ b/trunk/drivers/scsi/lpfc/lpfc_scsi.c @@ -24,6 +24,8 @@ #include #include #include +#include +#include #include #include @@ -48,7 +50,7 @@ #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 -int _dump_buf_done; +int _dump_buf_done = 1; static char *dif_op_str[] = { "PROT_NORMAL", @@ -66,6 +68,10 @@ struct scsi_dif_tuple { __be32 ref_tag; /* Target LBA or indirect LBA */ }; +#if !defined(SCSI_PROT_GUARD_CHECK) || !defined(SCSI_PROT_REF_CHECK) +#define 
scsi_prot_flagged(sc, flg) sc +#endif + static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void @@ -534,7 +540,16 @@ lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc) dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_bpl; uint16_t iotag; - int bcnt; + int bcnt, bpl_size; + + bpl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9067 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp), bpl_size); for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); @@ -759,7 +774,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, struct list_head *post_sblist, int sb_count) { struct lpfc_scsi_buf *psb, *psb_next; - int status; + int status, sgl_size; int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0; dma_addr_t pdma_phys_bpl1; int last_xritag = NO_XRI; @@ -771,6 +786,9 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, if (sb_count <= 0) return -EINVAL; + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + list_for_each_entry_safe(psb, psb_next, post_sblist, list) { list_del_init(&psb->list); block_cnt++; @@ -803,7 +821,7 @@ lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba, post_cnt = block_cnt; } else if (block_cnt == 1) { /* last single sgl with non-contiguous xri */ - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) + if (sgl_size > SGL_PAGE_SIZE) pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE; else @@ -885,9 +903,12 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) int num_posted, rc = 0; /* get all SCSI buffers need to repost to a local list */ - spin_lock_irq(&phba->scsi_buf_list_lock); - list_splice_init(&phba->lpfc_scsi_buf_list, &post_sblist); - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); + spin_lock_irq(&phba->scsi_buf_list_put_lock); + list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); + list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); + spin_unlock_irq(&phba->scsi_buf_list_put_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); /* post the list of scsi buffer sgls to port if available */ if (!list_empty(&post_sblist)) { @@ -923,13 +944,22 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) IOCB_t *iocb; dma_addr_t pdma_phys_fcp_cmd; dma_addr_t pdma_phys_fcp_rsp; - dma_addr_t pdma_phys_bpl, pdma_phys_bpl1; + dma_addr_t pdma_phys_bpl; uint16_t iotag, lxri = 0; - int bcnt, num_posted; + int bcnt, num_posted, sgl_size; LIST_HEAD(prep_sblist); LIST_HEAD(post_sblist); LIST_HEAD(scsi_sblist); + sgl_size = phba->cfg_sg_dma_buf_size - + (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + "9068 ALLOC %d scsi_bufs: %d (%d + %d + %d)\n", + num_to_alloc, phba->cfg_sg_dma_buf_size, sgl_size, + (int)sizeof(struct fcp_cmnd), + (int)sizeof(struct fcp_rsp)); + for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL); if (!psb) @@ -948,6 +978,15 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) } memset(psb->data, 0, phba->cfg_sg_dma_buf_size); + /* Page alignment is CRITICAL, double check to be sure */ + if (((unsigned long)(psb->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { + pci_pool_free(phba->lpfc_scsi_dma_buf_pool, + psb->data, 
psb->dma_handle); + kfree(psb); + break; + } + /* Allocate iotag for psb->cur_iocbq. */ iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq); if (iotag == 0) { @@ -968,17 +1007,14 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP; psb->fcp_bpl = psb->data; - psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + psb->fcp_cmnd = (psb->data + sgl_size); psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd + sizeof(struct fcp_cmnd)); /* Initialize local short-hand pointers. */ sgl = (struct sli4_sge *)psb->fcp_bpl; pdma_phys_bpl = psb->dma_handle; - pdma_phys_fcp_cmd = - (psb->dma_handle + phba->cfg_sg_dma_buf_size) - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); + pdma_phys_fcp_cmd = (psb->dma_handle + sgl_size); pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd); /* @@ -1020,17 +1056,13 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) iocb->ulpLe = 1; iocb->ulpClass = CLASS3; psb->cur_iocbq.context1 = psb; - if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE) - pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE; - else - pdma_phys_bpl1 = 0; psb->dma_phys_bpl = pdma_phys_bpl; /* add the scsi buffer to a post list */ list_add_tail(&psb->list, &post_sblist); - spin_lock_irq(&phba->scsi_buf_list_lock); + spin_lock_irq(&phba->scsi_buf_list_get_lock); phba->sli4_hba.scsi_xri_cnt++; - spin_unlock_irq(&phba->scsi_buf_list_lock); + spin_unlock_irq(&phba->scsi_buf_list_get_lock); } lpfc_printf_log(phba, KERN_INFO, LOG_BG, "3021 Allocate %d out of %d requested new SCSI " @@ -1079,17 +1111,23 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf * lpfc_cmd = NULL; - struct list_head *scsi_buf_list = &phba->lpfc_scsi_buf_list; - unsigned long iflag = 0; - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_remove_head(scsi_buf_list, lpfc_cmd, struct lpfc_scsi_buf, list); - if (lpfc_cmd) { - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; + struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; + unsigned long gflag = 0; + unsigned long pflag = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, + list); + if (!lpfc_cmd) { + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + list_remove_head(scsi_buf_list_get, lpfc_cmd, + struct lpfc_scsi_buf, list); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); return lpfc_cmd; } /** @@ -1107,28 +1145,39 @@ static struct lpfc_scsi_buf* lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) { struct lpfc_scsi_buf *lpfc_cmd ; - unsigned long iflag = 0; + unsigned long gflag = 0; + unsigned long pflag = 0; int found = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); - list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list, - list) { + spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, list) { if (lpfc_test_rrq_active(phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) continue; 
list_del(&lpfc_cmd->list); found = 1; - lpfc_cmd->seg_cnt = 0; - lpfc_cmd->nonsg_phys = 0; - lpfc_cmd->prot_seg_cnt = 0; break; } - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, - iflag); + if (!found) { + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); + list_splice(&phba->lpfc_scsi_buf_list_put, + &phba->lpfc_scsi_buf_list_get); + INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); + list_for_each_entry(lpfc_cmd, &phba->lpfc_scsi_buf_list_get, + list) { + if (lpfc_test_rrq_active( + phba, ndlp, lpfc_cmd->cur_iocbq.sli4_lxritag)) + continue; + list_del(&lpfc_cmd->list); + found = 1; + break; + } + } + spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); if (!found) return NULL; - else - return lpfc_cmd; + return lpfc_cmd; } /** * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA @@ -1160,10 +1209,15 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } /** @@ -1181,6 +1235,10 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) { unsigned long iflag = 0; + psb->seg_cnt = 0; + psb->nonsg_phys = 0; + psb->prot_seg_cnt = 0; + if (psb->exch_busy) { spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); @@ -1190,11 +1248,11 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb) spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag); } else { - - spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list); - spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag); + psb->cur_iocbq.iocb_flag = LPFC_IO_FCP; + spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag); + list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list_put); + spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag); } } @@ -1268,6 +1326,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -2013,9 +2072,21 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); + + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ if (datadir == DMA_FROM_DEVICE) { - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); } bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); @@ -2145,6 +2216,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_bde >= (phba->cfg_total_seg_cnt - 2)) + return num_bde + 3; + /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); @@ -2164,8 +2239,17 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, bf_set(pde6_type, pde6, LPFC_PDE6_DESCRIPTOR); bf_set(pde6_optx, pde6, txop); bf_set(pde6_oprx, pde6, rxop); - bf_set(pde6_ce, pde6, checking); - bf_set(pde6_re, pde6, checking); + + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(pde6_ce, pde6, checking); + else + bf_set(pde6_ce, pde6, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(pde6_re, pde6, checking); + else + bf_set(pde6_re, pde6, 0); + bf_set(pde6_ai, pde6, 1); bf_set(pde6_ae, pde6, 0); bf_set(pde6_apptagval, pde6, 0); @@ -2213,6 +2297,10 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_bde >= phba->cfg_total_seg_cnt) + return num_bde + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9065 BLKGRD:%s Invalid data segment\n", @@ -2324,7 +2412,6 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, struct sli4_sge_diseed *diseed = NULL; dma_addr_t physaddr; int i = 0, num_sge = 0, status; - int datadir = sc->sc_data_direction; uint32_t reftag; unsigned blksize; uint8_t txop, rxop; @@ -2362,13 +2449,26 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + /* + * We only need to check the data on READs, for WRITEs + * protection data is automatically generated, not checked. 
+ */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + } + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - if (datadir == DMA_FROM_DEVICE) { - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); - } + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2497,6 +2597,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { + /* Check to see if we ran out of space */ + if (num_sge >= (phba->cfg_total_seg_cnt - 2)) + return num_sge + 3; + /* setup DISEED with what we have */ diseed = (struct sli4_sge_diseed *) sgl; memset(diseed, 0, sizeof(struct sli4_sge_diseed)); @@ -2506,11 +2610,34 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, diseed->ref_tag = cpu_to_le32(reftag); diseed->ref_tag_tran = diseed->ref_tag; + if (scsi_prot_flagged(sc, SCSI_PROT_GUARD_CHECK)) { + bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); + + } else { + bf_set(lpfc_sli4_sge_dif_ce, diseed, 0); + /* + * When in this mode, the hardware will replace + * the guard tag from the host with a + * newly generated good CRC for the wire. + * Switch to raw mode here to avoid this + * behavior. What the host sends gets put on the wire. + */ + if (txop == BG_OP_IN_CRC_OUT_CRC) { + txop = BG_OP_RAW_MODE; + rxop = BG_OP_RAW_MODE; + } + } + + + if (scsi_prot_flagged(sc, SCSI_PROT_REF_CHECK)) + bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + else + bf_set(lpfc_sli4_sge_dif_re, diseed, 0); + /* setup DISEED with the rest of the info */ bf_set(lpfc_sli4_sge_dif_optx, diseed, txop); bf_set(lpfc_sli4_sge_dif_oprx, diseed, rxop); - bf_set(lpfc_sli4_sge_dif_ce, diseed, checking); - bf_set(lpfc_sli4_sge_dif_re, diseed, checking); + bf_set(lpfc_sli4_sge_dif_ai, diseed, 1); bf_set(lpfc_sli4_sge_dif_me, diseed, 0); @@ -2556,6 +2683,10 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ while (!pgdone) { + /* Check to see if we ran out of space */ + if (num_sge >= phba->cfg_total_seg_cnt) + return num_sge + 1; + if (!sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9086 BLKGRD:%s Invalid data segment\n", @@ -2669,6 +2800,47 @@ lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc) return ret; } +/** + * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard + * @phba: The Hba for which this call is being executed. + * @lpfc_cmd: The scsi buffer which is going to be adjusted. + * + * Adjust the data length to account for how much data + * is actually on the wire. 
+ * + * returns the adjusted data length + **/ +static int +lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, + struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scsi_cmnd *sc = lpfc_cmd->pCmd; + int fcpdl; + + fcpdl = scsi_bufflen(sc); + + /* Check if there is protection data on the wire */ + if (sc->sc_data_direction == DMA_FROM_DEVICE) { + /* Read */ + if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) + return fcpdl; + + } else { + /* Write */ + if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) + return fcpdl; + } + + /* + * If we are in DIF Type 1 mode every data block has an 8 byte + * DIF (trailer) attached to it. Must adjust FCP data length. + */ + if (scsi_prot_flagged(sc, SCSI_PROT_TRANSFER_PI)) + fcpdl += (fcpdl / lpfc_cmd_blksize(sc)) * 8; + + return fcpdl; +} + /** * lpfc_bg_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec * @phba: The Hba for which this call is being executed. @@ -2689,8 +2861,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, uint32_t num_bde = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; - int diflen, fcpdl; - unsigned blksize; + int fcpdl; /* * Start the lpfc command prep by bumping the bpl beyond fcp_cmnd @@ -2711,28 +2882,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9067 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: + + /* Here we need to add a PDE5 and PDE6 to the count */ + if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) + goto err; + num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); /* we should have 2 or more entries in buffer list */ if (num_bde < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -2747,31 +2918,28 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9068 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* + * There is a minimum of 4 BPLs used for every + * protection data segment. 
+ */ + if ((lpfc_cmd->prot_seg_cnt * 4) > + (phba->cfg_total_seg_cnt - 2)) + goto err; num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_bde < 3) || + (num_bde > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9022 Unexpected protection group %i\n", prot_group_type); @@ -2790,18 +2958,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, iocb_cmd->ulpBdeCount = 1; iocb_cmd->ulpLe = 1; - fcpdl = scsi_bufflen(scsi_cmnd); - - if (scsi_get_prot_type(scsi_cmnd) == SCSI_PROT_DIF_TYPE1) { - /* - * We are in DIF Type 1 mode - * Every data block has a 8 byte DIF (trailer) - * attached to it. Must ajust FCP data length - */ - blksize = lpfc_cmd_blksize(scsi_cmnd); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - } + fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -2812,13 +2969,233 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9023 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", + "9023 Cannot setup S/G List for HBA" + "IO segs %d/%d BPL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, prot_group_type, num_bde); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } +/* + * This function calculates the T10 DIF guard tag + * on the specified data using a CRC algorithm + * using crc_t10dif. + */ +uint16_t +lpfc_bg_crc(uint8_t *data, int count) +{ + uint16_t crc = 0; + uint16_t x; + + crc = crc_t10dif(data, count); + x = cpu_to_be16(crc); + return x; +} + +/* + * This function calculates the T10 DIF guard tag + * on the specified data using a CSUM algorithm + * using ip_compute_csum. + */ +uint16_t +lpfc_bg_csum(uint8_t *data, int count) +{ + uint16_t ret; + + ret = ip_compute_csum(data, count); + return ret; +} + +/* + * This function examines the protection data to try to determine + * what type of T10-DIF error occurred. 
+ */ +void +lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) +{ + struct scatterlist *sgpe; /* s/g prot entry */ + struct scatterlist *sgde; /* s/g data entry */ + struct scsi_cmnd *cmd = lpfc_cmd->pCmd; + struct scsi_dif_tuple *src = NULL; + uint8_t *data_src = NULL; + uint16_t guard_tag, guard_type; + uint16_t start_app_tag, app_tag; + uint32_t start_ref_tag, ref_tag; + int prot, protsegcnt; + int err_type, len, data_len; + int chk_ref, chk_app, chk_guard; + uint16_t sum; + unsigned blksize; + + err_type = BGS_GUARD_ERR_MASK; + sum = 0; + guard_tag = 0; + + /* First check to see if there is protection data to examine */ + prot = scsi_get_prot_op(cmd); + if ((prot == SCSI_PROT_READ_STRIP) || + (prot == SCSI_PROT_WRITE_INSERT) || + (prot == SCSI_PROT_NORMAL)) + goto out; + + /* Currently the driver just supports ref_tag and guard_tag checking */ + chk_ref = 1; + chk_app = 0; + chk_guard = 0; + + /* Setup a ptr to the protection data provided by the SCSI host */ + sgpe = scsi_prot_sglist(cmd); + protsegcnt = lpfc_cmd->prot_seg_cnt; + + if (sgpe && protsegcnt) { + + /* + * We will only try to verify guard tag if the segment + * data length is a multiple of the blksize. + */ + sgde = scsi_sglist(cmd); + blksize = lpfc_cmd_blksize(cmd); + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + guard_type = scsi_host_get_guard(cmd->device->host); + + start_ref_tag = (uint32_t)scsi_get_lba(cmd); /* Truncate LBA */ + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + start_app_tag = src->app_tag; + len = sgpe->length; + while (src && protsegcnt) { + while (len) { + + /* + * First check to see if a protection data + * check is valid + */ + if ((src->ref_tag == 0xffffffff) || + (src->app_tag == 0xffff)) { + start_ref_tag++; + goto skipit; + } + + /* App Tag checking */ + app_tag = src->app_tag; + if (chk_app && (app_tag != start_app_tag)) { + err_type = BGS_APPTAG_ERR_MASK; + goto out; + } + + /* Reference Tag checking */ + ref_tag = be32_to_cpu(src->ref_tag); + if (chk_ref && (ref_tag != start_ref_tag)) { + err_type = BGS_REFTAG_ERR_MASK; + goto out; + } + start_ref_tag++; + + /* Guard Tag checking */ + if (chk_guard) { + guard_tag = src->guard_tag; + if (guard_type == SHOST_DIX_GUARD_IP) + sum = lpfc_bg_csum(data_src, + blksize); + else + sum = lpfc_bg_crc(data_src, + blksize); + if ((guard_tag != sum)) { + err_type = BGS_GUARD_ERR_MASK; + goto out; + } + } +skipit: + len -= sizeof(struct scsi_dif_tuple); + if (len < 0) + len = 0; + src++; + + data_src += blksize; + data_len -= blksize; + + /* + * Are we at the end of the Data segment? + * The data segment is only used for Guard + * tag checking. 
+ */ + if (chk_guard && (data_len == 0)) { + chk_guard = 0; + sgde = sg_next(sgde); + if (!sgde) + goto out; + + data_src = (uint8_t *)sg_virt(sgde); + data_len = sgde->length; + if ((data_len & (blksize - 1)) == 0) + chk_guard = 1; + } + } + + /* Goto the next Protection data segment */ + sgpe = sg_next(sgpe); + if (sgpe) { + src = (struct scsi_dif_tuple *)sg_virt(sgpe); + len = sgpe->length; + } else { + src = NULL; + } + protsegcnt--; + } + } +out: + if (err_type == BGS_GUARD_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x1); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + phba->bg_guard_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9069 BLKGRD: LBA %lx grd_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + sum, guard_tag); + + } else if (err_type == BGS_REFTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x3); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_reftag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9066 BLKGRD: LBA %lx ref_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + ref_tag, start_ref_tag); + + } else if (err_type == BGS_APPTAG_ERR_MASK) { + scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST, + 0x10, 0x2); + cmd->result = DRIVER_SENSE << 24 + | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); + + phba->bg_apptag_err_cnt++; + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9041 BLKGRD: LBA %lx app_tag error %x != %x\n", + (unsigned long)scsi_get_lba(cmd), + app_tag, start_app_tag); + } +} + + /* * This function checks for BlockGuard errors detected by * the HBA. In case of errors, the ASC/ASCQ fields in the @@ -2842,12 +3219,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, uint32_t bgstat = bgf->bgstat; uint64_t failing_sector = 0; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9069 BLKGRD: BG ERROR in cmd" - " 0x%x lba 0x%llx blk cnt 0x%x " - "bgstat=0x%x bghm=0x%x\n", - cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), - blk_rq_sectors(cmd->request), bgstat, bghm); - spin_lock(&_dump_buf_lock); if (!_dump_buf_done) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" @@ -2870,18 +3241,24 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9072 BLKGRD: Invalid" - " BlockGuard profile. bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9072 BLKGRD: Invalid BG Profile in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } if (lpfc_bgs_get_uninit_dif_block(bgstat)) { cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9073 BLKGRD: " - "Invalid BlockGuard DIF Block. 
bgstat:0x%x\n", - bgstat); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9073 BLKGRD: Invalid BG PDIF Block in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); ret = (-1); goto out; } @@ -2894,8 +3271,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, cmd->result = DRIVER_SENSE << 24 | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_guard_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9055 BLKGRD: guard_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9055 BLKGRD: Guard Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_reftag_err(bgstat)) { @@ -2907,8 +3288,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_reftag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9056 BLKGRD: ref_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9056 BLKGRD: Ref Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_apptag_err(bgstat)) { @@ -2920,8 +3305,12 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, | ScsiResult(DID_ABORT, SAM_STAT_CHECK_CONDITION); phba->bg_apptag_err_cnt++; - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9061 BLKGRD: app_tag error\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9061 BLKGRD: App Tag error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); } if (lpfc_bgs_get_hi_water_mark_present(bgstat)) { @@ -2960,11 +3349,16 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd, if (!ret) { /* No error was reported - problem in FW? */ - cmd->result = ScsiResult(DID_ERROR, 0); - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9057 BLKGRD: Unknown error reported!\n"); + lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, + "9057 BLKGRD: Unknown error in cmd" + " 0x%x lba 0x%llx blk cnt 0x%x " + "bgstat=x%x bghm=x%x\n", cmd->cmnd[0], + (unsigned long long)scsi_get_lba(cmd), + blk_rq_sectors(cmd->request), bgstat, bghm); + + /* Calcuate what type of error it was */ + lpfc_calc_bg_err(phba, lpfc_cmd); } - out: return ret; } @@ -3028,6 +3422,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); return 1; } @@ -3093,45 +3488,6 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) return 0; } -/** - * lpfc_bg_scsi_adjust_dl - Adjust SCSI data length for BlockGuard - * @phba: The Hba for which this call is being executed. - * @lpfc_cmd: The scsi buffer which is going to be adjusted. - * - * Adjust the data length to account for how much data - * is actually on the wire. 
- * - * returns the adjusted data length - **/ -static int -lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, - struct lpfc_scsi_buf *lpfc_cmd) -{ - struct scsi_cmnd *sc = lpfc_cmd->pCmd; - int diflen, fcpdl; - unsigned blksize; - - fcpdl = scsi_bufflen(sc); - - /* Check if there is protection data on the wire */ - if (sc->sc_data_direction == DMA_FROM_DEVICE) { - /* Read */ - if (scsi_get_prot_op(sc) == SCSI_PROT_READ_INSERT) - return fcpdl; - - } else { - /* Write */ - if (scsi_get_prot_op(sc) == SCSI_PROT_WRITE_STRIP) - return fcpdl; - } - - /* If protection data on the wire, adjust the count accordingly */ - blksize = lpfc_cmd_blksize(sc); - diflen = (fcpdl / blksize) * 8; - fcpdl += diflen; - return fcpdl; -} - /** * lpfc_bg_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec * @phba: The Hba for which this call is being executed. @@ -3149,14 +3505,14 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd; struct sli4_sge *sgl = (struct sli4_sge *)(lpfc_cmd->fcp_bpl); IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb; - uint32_t num_bde = 0; + uint32_t num_sge = 0; int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; /* * Start the lpfc command prep by bumping the sgl beyond fcp_cmnd - * fcp_rsp regions to the first data bde entry + * fcp_rsp regions to the first data sge entry */ if (scsi_sg_count(scsi_cmnd)) { /* @@ -3179,28 +3535,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, sgl += 1; lpfc_cmd->seg_cnt = datasegcnt; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9087 BLKGRD: %s: Too many sg segments" - " from dma_map_sg. Config %d, seg_cnt" - " %d\n", - __func__, phba->cfg_sg_seg_cnt, - lpfc_cmd->seg_cnt); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + + /* First check if data segment count from SCSI Layer is good */ + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + goto err; prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: - num_bde = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, + /* Here we need to add a DISEED to the count */ + if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) + goto err; + + num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, datasegcnt); + /* we should have 2 or more entries in buffer list */ - if (num_bde < 2) + if (num_sge < 2) goto err; break; - case LPFC_PG_TYPE_DIF_BUF:{ + + case LPFC_PG_TYPE_DIF_BUF: /* * This type indicates that protection buffers are * passed to the driver, so that needs to be prepared @@ -3215,31 +3571,28 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } lpfc_cmd->prot_seg_cnt = protsegcnt; - if (lpfc_cmd->prot_seg_cnt - > phba->cfg_prot_sg_seg_cnt) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9088 BLKGRD: %s: Too many prot sg " - "segments from dma_map_sg. Config %d," - "prot_seg_cnt %d\n", __func__, - phba->cfg_prot_sg_seg_cnt, - lpfc_cmd->prot_seg_cnt); - dma_unmap_sg(&phba->pcidev->dev, - scsi_prot_sglist(scsi_cmnd), - scsi_prot_sg_count(scsi_cmnd), - datadir); - scsi_dma_unmap(scsi_cmnd); - return 1; - } + /* + * There is a minimun of 3 SGEs used for every + * protection data segment. 
+ */ + if ((lpfc_cmd->prot_seg_cnt * 3) > + (phba->cfg_total_seg_cnt - 2)) + goto err; - num_bde = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, + num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, datasegcnt, protsegcnt); + /* we should have 3 or more entries in buffer list */ - if (num_bde < 3) + if ((num_sge < 3) || + (num_sge > phba->cfg_total_seg_cnt)) goto err; break; - } + case LPFC_PG_TYPE_INVALID: default: + scsi_dma_unmap(scsi_cmnd); + lpfc_cmd->seg_cnt = 0; + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9083 Unexpected protection group %i\n", prot_group_type); @@ -3263,7 +3616,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, } fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd); - fcp_cmnd->fcpDl = be32_to_cpu(fcpdl); /* @@ -3274,10 +3626,22 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, return 0; err: + if (lpfc_cmd->seg_cnt) + scsi_dma_unmap(scsi_cmnd); + if (lpfc_cmd->prot_seg_cnt) + dma_unmap_sg(&phba->pcidev->dev, scsi_prot_sglist(scsi_cmnd), + scsi_prot_sg_count(scsi_cmnd), + scsi_cmnd->sc_data_direction); + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9084 Could not setup all needed BDE's" - "prot_group_type=%d, num_bde=%d\n", - prot_group_type, num_bde); + "9084 Cannot setup S/G List for HBA" + "IO segs %d/%d SGL %d SCSI %d: %d %d\n", + lpfc_cmd->seg_cnt, lpfc_cmd->prot_seg_cnt, + phba->cfg_total_seg_cnt, phba->cfg_sg_seg_cnt, + prot_group_type, num_sge); + + lpfc_cmd->seg_cnt = 0; + lpfc_cmd->prot_seg_cnt = 0; return 1; } @@ -4357,7 +4721,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) if (scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9033 BLKGRD: rcvd %s cmd:x%x " "sector x%llx cnt %u pt %x\n", dif_op_str[scsi_get_prot_op(cmnd)], @@ -4369,7 +4734,8 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); } else { if (vport->phba->cfg_enable_bg) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_BG, + lpfc_printf_vlog(vport, + KERN_INFO, LOG_SCSI_CMD, "9038 BLKGRD: rcvd PROT_NORMAL cmd: " "x%x sector x%llx cnt %u pt %x\n", cmnd->cmnd[0], @@ -4542,7 +4908,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) /* Wait for abort to complete */ wait_event_timeout(waitq, (lpfc_cmd->pCmd != cmnd), - (2*vport->cfg_devloss_tmo*HZ)); + msecs_to_jiffies(2*vport->cfg_devloss_tmo*1000)); lpfc_cmd->waitq = NULL; if (lpfc_cmd->pCmd == cmnd) { @@ -5012,16 +5378,24 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) struct lpfc_hba *phba = vport->phba; int rc, ret = SUCCESS; + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3172 SCSI layer issued Host Reset Data:\n"); + lpfc_offline_prep(phba, LPFC_MBX_WAIT); lpfc_offline(phba); rc = lpfc_sli_brdrestart(phba); if (rc) ret = FAILED; - lpfc_online(phba); + rc = lpfc_online(phba); + if (rc) + ret = FAILED; lpfc_unblock_mgmt_io(phba); - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3172 SCSI layer issued Host Reset Data: x%x\n", ret); + if (ret == FAILED) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3323 Failed host reset, bring it offline\n"); + lpfc_sli4_offline_eratt(phba); + } return ret; } @@ -5088,11 +5462,11 @@ lpfc_slave_alloc(struct scsi_device *sdev) } num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc); if (num_to_alloc != num_allocated) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "0708 Allocation request of %d " - "command buffers did not succeed. 
" - "Allocated %d buffers.\n", - num_to_alloc, num_allocated); + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "0708 Allocation request of %d " + "command buffers did not succeed. " + "Allocated %d buffers.\n", + num_to_alloc, num_allocated); } if (num_allocated > 0) phba->total_scsi_bufs += num_allocated; diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.c b/trunk/drivers/scsi/lpfc/lpfc_sli.c index 35dd17eb0f27..572579f87de4 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_sli.c +++ b/trunk/drivers/scsi/lpfc/lpfc_sli.c @@ -667,7 +667,7 @@ lpfc_handle_rrq_active(struct lpfc_hba *phba) spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov + 1); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) { if (time_after(jiffies, rrq->rrq_stop_time)) @@ -782,7 +782,7 @@ lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba) return; spin_lock_irqsave(&phba->hbalock, iflags); phba->hba_flag &= ~HBA_RRQ_ACTIVE; - next_time = jiffies + HZ * (phba->fc_ratov * 2); + next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)); list_splice_init(&phba->active_rrq_list, &rrq_list); spin_unlock_irqrestore(&phba->hbalock, iflags); @@ -878,7 +878,8 @@ lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, else rrq->send_rrq = 0; rrq->xritag = xritag; - rrq->rrq_stop_time = jiffies + HZ * (phba->fc_ratov + 1); + rrq->rrq_stop_time = jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov + 1)); rrq->ndlp = ndlp; rrq->nlp_DID = ndlp->nlp_DID; rrq->vport = ndlp->vport; @@ -926,8 +927,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq) } else if ((piocbq->iocb.ulpCommand == CMD_GEN_REQUEST64_CR) && !(piocbq->iocb_flag & LPFC_IO_LIBDFC)) ndlp = piocbq->context_un.ndlp; - else if ((piocbq->iocb.ulpCommand == CMD_ELS_REQUEST64_CR) && - (piocbq->iocb_flag & LPFC_IO_LIBDFC)) + else if (piocbq->iocb_flag & LPFC_IO_LIBDFC) ndlp = piocbq->context_un.ndlp; else ndlp = piocbq->context1; @@ -1339,7 +1339,8 @@ lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, BUG(); else mod_timer(&piocb->vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov << 1)); + jiffies + + msecs_to_jiffies(1000 * (phba->fc_ratov << 1))); } @@ -2340,7 +2341,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Mailbox cmd Cmpl */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " + "x%x x%x x%x\n", pmb->vport ? 
pmb->vport->vpi : 0, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2354,7 +2356,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) pmbox->un.varWords[4], pmbox->un.varWords[5], pmbox->un.varWords[6], - pmbox->un.varWords[7]); + pmbox->un.varWords[7], + pmbox->un.varWords[8], + pmbox->un.varWords[9], + pmbox->un.varWords[10]); if (pmb->mbox_cmpl) pmb->mbox_cmpl(phba,pmb); @@ -2908,8 +2913,9 @@ void lpfc_poll_eratt(unsigned long ptr) lpfc_worker_wake_up(phba); else /* Restart the timer for next eratt poll */ - mod_timer(&phba->eratt_poll, jiffies + - HZ * LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); return; } @@ -5511,6 +5517,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type) list_del_init(&rsrc_blk->list); kfree(rsrc_blk); } + phba->sli4_hba.max_cfg_param.vpi_used = 0; break; case LPFC_RSC_TYPE_FCOE_XRI: kfree(phba->sli4_hba.xri_bmask); @@ -5811,6 +5818,7 @@ lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba) lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI); } else { kfree(phba->vpi_bmask); + phba->sli4_hba.max_cfg_param.vpi_used = 0; kfree(phba->vpi_ids); bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0); kfree(phba->sli4_hba.xri_bmask); @@ -5992,7 +6000,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) struct lpfc_sglq *sglq_entry = NULL; struct lpfc_sglq *sglq_entry_next = NULL; struct lpfc_sglq *sglq_entry_first = NULL; - int status, post_cnt = 0, num_posted = 0, block_cnt = 0; + int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0; int last_xritag = NO_XRI; LIST_HEAD(prep_sgl_list); LIST_HEAD(blck_sgl_list); @@ -6004,6 +6012,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list); spin_unlock_irq(&phba->hbalock); + total_cnt = phba->sli4_hba.els_xri_cnt; list_for_each_entry_safe(sglq_entry, sglq_entry_next, &allc_sgl_list, list) { list_del_init(&sglq_entry->list); @@ -6055,9 +6064,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) sglq_entry->sli4_xritag); list_add_tail(&sglq_entry->list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt--; - spin_unlock_irq(&phba->hbalock); + total_cnt--; } } } @@ -6085,9 +6092,7 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) (sglq_entry_first->sli4_xritag + post_cnt - 1)); list_splice_init(&blck_sgl_list, &free_sgl_list); - spin_lock_irq(&phba->hbalock); - phba->sli4_hba.els_xri_cnt -= post_cnt; - spin_unlock_irq(&phba->hbalock); + total_cnt -= post_cnt; } /* don't reset xirtag due to hole in xri block */ @@ -6097,6 +6102,8 @@ lpfc_sli4_repost_els_sgl_list(struct lpfc_hba *phba) /* reset els sgl post count for next round of posting */ post_cnt = 0; } + /* update the number of XRIs posted for ELS */ + phba->sli4_hba.els_xri_cnt = total_cnt; /* free the els sgls failed to post */ lpfc_free_sgl_list(phba, &free_sgl_list); @@ -6446,16 +6453,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) /* Start the ELS watchdog timer */ mod_timer(&vport->els_tmofunc, - jiffies + HZ * (phba->fc_ratov * 2)); + jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2))); /* Start heart beat timer */ mod_timer(&phba->hb_tmofunc, - jiffies + HZ * LPFC_HB_MBOX_INTERVAL); + jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL)); phba->hb_outstanding = 0; phba->last_completion_time = jiffies; /* Start error attention (ERATT) polling timer */ - mod_timer(&phba->eratt_poll, jiffies + HZ * 
LPFC_ERATT_POLL_INTERVAL); + mod_timer(&phba->eratt_poll, + jiffies + msecs_to_jiffies(1000 * LPFC_ERATT_POLL_INTERVAL)); /* Enable PCIe device Advanced Error Reporting (AER) if configured */ if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) { @@ -6822,8 +6830,9 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, goto out_not_finished; } /* timeout active mbox command */ - mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000); + mod_timer(&psli->mbox_tmo, jiffies + timeout); } /* Mailbox cmd issue */ @@ -7496,7 +7505,7 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) /* Start timer for the mbox_tmo and log some mailbox post messages */ mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mboxq)))); + msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq)))); lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: " @@ -7914,15 +7923,21 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, static inline uint32_t lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba) { - int i; - - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) - i = smp_processor_id(); - else - i = atomic_add_return(1, &phba->fcp_qidx); + struct lpfc_vector_map_info *cpup; + int chann, cpu; - i = (i % phba->cfg_fcp_io_channel); - return i; + if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_CPU) { + cpu = smp_processor_id(); + if (cpu < phba->sli4_hba.num_present_cpu) { + cpup = phba->sli4_hba.cpu_map; + cpup += cpu; + return cpup->channel_id; + } + chann = cpu; + } + chann = atomic_add_return(1, &phba->fcp_qidx); + chann = (chann % phba->cfg_fcp_io_channel); + return chann; } /** @@ -8444,10 +8459,14 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, if ((piocb->iocb_flag & LPFC_IO_FCP) || (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { + if (unlikely(!phba->sli4_hba.fcp_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[piocb->fcp_wqidx], &wqe)) return IOCB_ERROR; } else { + if (unlikely(!phba->sli4_hba.els_wq)) + return IOCB_ERROR; if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe)) return IOCB_ERROR; } @@ -10003,7 +10022,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba, retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, SLI_IOCB_RET_IOCB); if (retval == IOCB_SUCCESS) { - timeout_req = timeout * HZ; + timeout_req = msecs_to_jiffies(timeout * 1000); timeleft = wait_event_timeout(done_q, lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE), timeout_req); @@ -10108,7 +10127,7 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, if (retval == MBX_BUSY || retval == MBX_SUCCESS) { wait_event_interruptible_timeout(done_q, pmboxq->mbox_flag & LPFC_MBX_WAKE, - timeout * HZ); + msecs_to_jiffies(timeout * 1000)); spin_lock_irqsave(&phba->hbalock, flag); pmboxq->context1 = NULL; @@ -12899,8 +12918,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq, } wq->db_regaddr = bar_memmap_p + db_offset; lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3264 WQ[%d]: barset:x%x, offset:x%x\n", - wq->queue_id, pci_barset, db_offset); + "3264 WQ[%d]: barset:x%x, offset:x%x, " + "format:x%x\n", wq->queue_id, pci_barset, + db_offset, wq->db_format); } else { wq->db_format = LPFC_DB_LIST_FORMAT; wq->db_regaddr = phba->sli4_hba.WQDBregaddr; @@ -13120,8 +13140,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq, } hrq->db_regaddr = bar_memmap_p + db_offset; 
lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n", - hrq->queue_id, pci_barset, db_offset); + "3266 RQ[qid:%d]: barset:x%x, offset:x%x, " + "format:x%x\n", hrq->queue_id, pci_barset, + db_offset, hrq->db_format); } else { hrq->db_format = LPFC_DB_RING_FORMAT; hrq->db_regaddr = phba->sli4_hba.RQDBregaddr; @@ -13971,13 +13992,14 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "2538 Received frame rctl:%s type:%s " - "Frame Data:%08x %08x %08x %08x %08x %08x\n", - rctl_names[fc_hdr->fh_r_ctl], - type_names[fc_hdr->fh_type], + "2538 Received frame rctl:%s (x%x), type:%s (x%x), " + "frame Data:%08x %08x %08x %08x %08x %08x %08x\n", + rctl_names[fc_hdr->fh_r_ctl], fc_hdr->fh_r_ctl, + type_names[fc_hdr->fh_type], fc_hdr->fh_type, be32_to_cpu(header[0]), be32_to_cpu(header[1]), be32_to_cpu(header[2]), be32_to_cpu(header[3]), - be32_to_cpu(header[4]), be32_to_cpu(header[5])); + be32_to_cpu(header[4]), be32_to_cpu(header[5]), + be32_to_cpu(header[6])); return 0; drop: lpfc_printf_log(phba, KERN_WARNING, LOG_ELS, diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli4.h b/trunk/drivers/scsi/lpfc/lpfc_sli4.h index be02b59ea279..67af460184ba 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_sli4.h +++ b/trunk/drivers/scsi/lpfc/lpfc_sli4.h @@ -346,11 +346,6 @@ struct lpfc_bmbx { #define SLI4_CT_VFI 2 #define SLI4_CT_FCFI 3 -#define LPFC_SLI4_FL1_MAX_SEGMENT_SIZE 0x10000 -#define LPFC_SLI4_FL1_MAX_BUF_SIZE 0X2000 -#define LPFC_SLI4_MIN_BUF_SIZE 0x400 -#define LPFC_SLI4_MAX_BUF_SIZE 0x20000 - /* * SLI4 specific data structures */ @@ -440,6 +435,17 @@ struct lpfc_sli4_lnk_info { #define LPFC_SLI4_HANDLER_NAME_SZ 16 +/* Used for IRQ vector to CPU mapping */ +struct lpfc_vector_map_info { + uint16_t phys_id; + uint16_t core_id; + uint16_t irq; + uint16_t channel_id; + struct cpumask maskbits; +}; +#define LPFC_VECTOR_MAP_EMPTY 0xffff +#define LPFC_MAX_CPU 256 + /* SLI4 HBA data structure entries */ struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for @@ -573,6 +579,11 @@ struct lpfc_sli4_hba { struct lpfc_iov iov; spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ + + /* CPU to vector mapping information */ + struct lpfc_vector_map_info *cpu_map; + uint16_t num_online_cpu; + uint16_t num_present_cpu; }; enum lpfc_sge_type { diff --git a/trunk/drivers/scsi/lpfc/lpfc_version.h b/trunk/drivers/scsi/lpfc/lpfc_version.h index 664cd04f7cd8..a38dc3b16969 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_version.h +++ b/trunk/drivers/scsi/lpfc/lpfc_version.h @@ -18,7 +18,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "8.3.38" +#define LPFC_DRIVER_VERSION "8.3.39" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/trunk/drivers/scsi/lpfc/lpfc_vport.c b/trunk/drivers/scsi/lpfc/lpfc_vport.c index 0fe188e66000..e28e431564b0 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_vport.c +++ b/trunk/drivers/scsi/lpfc/lpfc_vport.c @@ -80,7 +80,7 @@ inline void lpfc_vport_set_state(struct lpfc_vport *vport, } } -static int +int lpfc_alloc_vpi(struct lpfc_hba *phba) { unsigned long vpi; @@ -568,6 +568,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) struct lpfc_vport *vport = *(struct lpfc_vport **)fc_vport->dd_data; struct lpfc_hba *phba = vport->phba; long timeout; + bool ns_ndlp_referenced = false; if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, @@ -628,6 +629,18 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_debugfs_terminate(vport); + /* + * The call to fc_remove_host might release the NameServer ndlp. Since + * we might need to use the ndlp to send the DA_ID CT command, + * increment the reference for the NameServer ndlp to prevent it from + * being released. + */ + ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + lpfc_nlp_get(ndlp); + ns_ndlp_referenced = true; + } + /* Remove FC host and then SCSI host with the vport */ fc_remove_host(lpfc_shost_from_vport(vport)); scsi_remove_host(lpfc_shost_from_vport(vport)); @@ -734,6 +747,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_discovery_wait(vport); skip_logo: + + /* + * If the NameServer ndlp has been incremented to allow the DA_ID CT + * command to be sent, decrement the ndlp now. + */ + if (ns_ndlp_referenced) { + ndlp = lpfc_findnode_did(vport, NameServer_DID); + lpfc_nlp_put(ndlp); + } + lpfc_cleanup(vport); lpfc_sli_host_down(vport); diff --git a/trunk/drivers/scsi/lpfc/lpfc_vport.h b/trunk/drivers/scsi/lpfc/lpfc_vport.h index 90828340acea..6b2c94eb8134 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_vport.h +++ b/trunk/drivers/scsi/lpfc/lpfc_vport.h @@ -90,6 +90,7 @@ int lpfc_vport_getinfo(struct Scsi_Host *, struct vport_info *); int lpfc_vport_tgt_remove(struct Scsi_Host *, uint, uint); struct lpfc_vport **lpfc_create_vport_work_array(struct lpfc_hba *); void lpfc_destroy_vport_work_array(struct lpfc_hba *, struct lpfc_vport **); +int lpfc_alloc_vpi(struct lpfc_hba *phba); /* * queuecommand VPORT-specific return codes. Specified in the host byte code. 
diff --git a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c index 7c90d57b867e..3a9ddae86f1f 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4931,11 +4931,12 @@ static int megasas_mgmt_ioctl_fw(struct file *file, unsigned long arg) printk(KERN_ERR "megaraid_sas: timed out while" "waiting for HBA to recover\n"); error = -ENODEV; - goto out_kfree_ioc; + goto out_up; } spin_unlock_irqrestore(&instance->hba_lock, flags); error = megasas_mgmt_fw_ioctl(instance, user_ioc, ioc); + out_up: up(&instance->ioctl_sem); out_kfree_ioc: diff --git a/trunk/drivers/scsi/mvsas/mv_init.c b/trunk/drivers/scsi/mvsas/mv_init.c index 74550922ad55..7b7381d7671f 100644 --- a/trunk/drivers/scsi/mvsas/mv_init.c +++ b/trunk/drivers/scsi/mvsas/mv_init.c @@ -254,7 +254,7 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) } for (i = 0; i < MVS_MAX_DEVICES; i++) { mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED; - mvi->devices[i].dev_type = NO_DEVICE; + mvi->devices[i].dev_type = SAS_PHY_UNUSED; mvi->devices[i].device_id = i; mvi->devices[i].dev_status = MVS_DEV_NORMAL; init_timer(&mvi->devices[i].timer); diff --git a/trunk/drivers/scsi/mvsas/mv_sas.c b/trunk/drivers/scsi/mvsas/mv_sas.c index 532110f4562a..c9e244984e30 100644 --- a/trunk/drivers/scsi/mvsas/mv_sas.c +++ b/trunk/drivers/scsi/mvsas/mv_sas.c @@ -706,7 +706,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, return 0; } -#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE))) +#define DEV_IS_GONE(mvi_dev) ((!mvi_dev || (mvi_dev->dev_type == SAS_PHY_UNUSED))) static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf, struct mvs_tmf_task *tmf, int *pass) { @@ -726,7 +726,7 @@ static int mvs_task_prep(struct sas_task *task, struct mvs_info *mvi, int is_tmf * libsas will use dev->port, should * not call task_done for sata */ - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) task->task_done(task); return rc; } @@ -1159,10 +1159,10 @@ void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st) phy->identify.device_type = phy->att_dev_info & PORT_DEV_TYPE_MASK; - if (phy->identify.device_type == SAS_END_DEV) + if (phy->identify.device_type == SAS_END_DEVICE) phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; - else if (phy->identify.device_type != NO_DEVICE) + else if (phy->identify.device_type != SAS_PHY_UNUSED) phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; if (oob_done) @@ -1260,7 +1260,7 @@ struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi) { u32 dev; for (dev = 0; dev < MVS_MAX_DEVICES; dev++) { - if (mvi->devices[dev].dev_type == NO_DEVICE) { + if (mvi->devices[dev].dev_type == SAS_PHY_UNUSED) { mvi->devices[dev].device_id = dev; return &mvi->devices[dev]; } @@ -1278,7 +1278,7 @@ void mvs_free_dev(struct mvs_device *mvi_dev) u32 id = mvi_dev->device_id; memset(mvi_dev, 0, sizeof(*mvi_dev)); mvi_dev->device_id = id; - mvi_dev->dev_type = NO_DEVICE; + mvi_dev->dev_type = SAS_PHY_UNUSED; mvi_dev->dev_status = MVS_DEV_NORMAL; mvi_dev->taskfileset = MVS_ID_NOT_MAPPED; } @@ -1480,7 +1480,7 @@ static int mvs_debug_I_T_nexus_reset(struct domain_device *dev) { int rc; struct sas_phy *phy = sas_get_local_phy(dev); - int reset_type = (dev->dev_type == SATA_DEV || + int reset_type = (dev->dev_type == SAS_SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) ? 
0 : 1; rc = sas_phy_reset(phy, reset_type); sas_put_local_phy(phy); @@ -1629,7 +1629,7 @@ int mvs_abort_task(struct sas_task *task) } else if (task->task_proto & SAS_PROTOCOL_SATA || task->task_proto & SAS_PROTOCOL_STP) { - if (SATA_DEV == dev->dev_type) { + if (SAS_SATA_DEV == dev->dev_type) { struct mvs_slot_info *slot = task->lldd_task; u32 slot_idx = (u32)(slot - mvi->slot_info); mv_dprintk("mvs_abort_task() mvi=%p task=%p " diff --git a/trunk/drivers/scsi/mvsas/mv_sas.h b/trunk/drivers/scsi/mvsas/mv_sas.h index 9f3cc13a5ce7..60e2fb7f2dca 100644 --- a/trunk/drivers/scsi/mvsas/mv_sas.h +++ b/trunk/drivers/scsi/mvsas/mv_sas.h @@ -67,7 +67,7 @@ extern const struct mvs_dispatch mvs_94xx_dispatch; extern struct kmem_cache *mvs_task_list_cache; #define DEV_IS_EXPANDER(type) \ - ((type == EDGE_DEV) || (type == FANOUT_DEV)) + ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define bit(n) ((u64)1 << n) @@ -241,7 +241,7 @@ struct mvs_phy { struct mvs_device { struct list_head dev_entry; - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct mvs_info *mvi_info; struct domain_device *sas_device; struct timer_list timer; diff --git a/trunk/drivers/scsi/pm8001/Makefile b/trunk/drivers/scsi/pm8001/Makefile index 52f04296171c..ce4cd87c7c66 100644 --- a/trunk/drivers/scsi/pm8001/Makefile +++ b/trunk/drivers/scsi/pm8001/Makefile @@ -4,9 +4,10 @@ # Copyright (C) 2008-2009 USI Co., Ltd. -obj-$(CONFIG_SCSI_PM8001) += pm8001.o -pm8001-y += pm8001_init.o \ +obj-$(CONFIG_SCSI_PM8001) += pm80xx.o +pm80xx-y += pm8001_init.o \ pm8001_sas.o \ pm8001_ctl.o \ - pm8001_hwi.o + pm8001_hwi.o \ + pm80xx_hwi.o diff --git a/trunk/drivers/scsi/pm8001/pm8001_ctl.c b/trunk/drivers/scsi/pm8001/pm8001_ctl.c index 45bc197bc22f..d99f41c2ca13 100644 --- a/trunk/drivers/scsi/pm8001/pm8001_ctl.c +++ b/trunk/drivers/scsi/pm8001/pm8001_ctl.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. 
@@ -58,8 +58,13 @@ static ssize_t pm8001_ctl_mpi_interface_rev_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.interface_rev); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev); + } } static DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); @@ -78,11 +83,19 @@ static ssize_t pm8001_ctl_fw_version_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 24), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 16), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev >> 8), - (u8)(pm8001_ha->main_cfg_tbl.firmware_rev)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev)); + } else { + return snprintf(buf, PAGE_SIZE, "%02x.%02x.%02x.%02x\n", + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 24), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 16), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev >> 8), + (u8)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev)); + } } static DEVICE_ATTR(fw_version, S_IRUGO, pm8001_ctl_fw_version_show, NULL); /** @@ -99,8 +112,13 @@ static ssize_t pm8001_ctl_max_out_io_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%d\n", - pm8001_ha->main_cfg_tbl.max_out_io); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io); + } else { + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io); + } } static DEVICE_ATTR(max_out_io, S_IRUGO, pm8001_ctl_max_out_io_show, NULL); /** @@ -117,8 +135,15 @@ static ssize_t pm8001_ctl_max_devices_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - (u16)(pm8001_ha->main_cfg_tbl.max_sgl >> 16)); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl >> 16) + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + (u16)(pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl >> 16) + ); + } } static DEVICE_ATTR(max_devices, S_IRUGO, pm8001_ctl_max_devices_show, NULL); /** @@ -136,8 +161,15 @@ static ssize_t pm8001_ctl_max_sg_list_show(struct device *cdev, struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - return snprintf(buf, PAGE_SIZE, "%04d\n", - pm8001_ha->main_cfg_tbl.max_sgl & 0x0000FFFF); + if (pm8001_ha->chip_id == chip_8001) { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl & 0x0000FFFF + ); + } else { + return snprintf(buf, PAGE_SIZE, "%04d\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl & 
0x0000FFFF + ); + } } static DEVICE_ATTR(max_sg_list, S_IRUGO, pm8001_ctl_max_sg_list_show, NULL); @@ -173,7 +205,14 @@ static ssize_t pm8001_ctl_sas_spec_support_show(struct device *cdev, struct Scsi_Host *shost = class_to_shost(cdev); struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; - mode = (pm8001_ha->main_cfg_tbl.ctrl_cap_flag & 0xfe000000)>>25; + /* fe000000 means supports SAS2.1 */ + if (pm8001_ha->chip_id == chip_8001) + mode = (pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag & + 0xfe000000)>>25; + else + /* fe000000 means supports SAS2.1 */ + mode = (pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag & + 0xfe000000)>>25; return show_sas_spec_support_status(mode, buf); } static DEVICE_ATTR(sas_spec_support, S_IRUGO, @@ -361,10 +400,11 @@ static int pm8001_set_nvmd(struct pm8001_hba_info *pm8001_ha) goto out; } payload = (struct pm8001_ioctl_payload *)ioctlbuffer; - memcpy((u8 *)payload->func_specific, (u8 *)pm8001_ha->fw_image->data, + memcpy((u8 *)&payload->func_specific, (u8 *)pm8001_ha->fw_image->data, pm8001_ha->fw_image->size); payload->length = pm8001_ha->fw_image->size; payload->id = 0; + payload->minor_function = 0x1; pm8001_ha->nvmd_completion = &completion; ret = PM8001_CHIP_DISP->set_nvmd_req(pm8001_ha, payload); wait_for_completion(&completion); @@ -411,7 +451,7 @@ static int pm8001_update_flash(struct pm8001_hba_info *pm8001_ha) payload->length = 1024*16; payload->id = 0; fwControl = - (struct fw_control_info *)payload->func_specific; + (struct fw_control_info *)&payload->func_specific; fwControl->len = IOCTL_BUF_SIZE; /* IN */ fwControl->size = partitionSize + HEADER_LEN;/* IN */ fwControl->retcode = 0;/* OUT */ diff --git a/trunk/drivers/scsi/pm8001/pm8001_defs.h b/trunk/drivers/scsi/pm8001/pm8001_defs.h index c3d20c8d4abe..479c5a7a863a 100644 --- a/trunk/drivers/scsi/pm8001/pm8001_defs.h +++ b/trunk/drivers/scsi/pm8001/pm8001_defs.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra 8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -43,9 +43,12 @@ enum chip_flavors { chip_8001, + chip_8008, + chip_8009, + chip_8018, + chip_8019 }; -#define USI_MAX_MEMCNT 9 -#define PM8001_MAX_DMA_SG SG_ALL + enum phy_speed { PHY_SPEED_15 = 0x01, PHY_SPEED_30 = 0x02, @@ -69,23 +72,34 @@ enum port_type { #define PM8001_MPI_QUEUE 1024 /* maximum mpi queue entries */ #define PM8001_MAX_INB_NUM 1 #define PM8001_MAX_OUTB_NUM 1 +#define PM8001_MAX_SPCV_INB_NUM 1 +#define PM8001_MAX_SPCV_OUTB_NUM 4 #define PM8001_CAN_QUEUE 508 /* SCSI Queue depth */ +/* Inbound/Outbound queue size */ +#define IOMB_SIZE_SPC 64 +#define IOMB_SIZE_SPCV 128 + /* unchangeable hardware details */ -#define PM8001_MAX_PHYS 8 /* max. possible phys */ -#define PM8001_MAX_PORTS 8 /* max. possible ports */ -#define PM8001_MAX_DEVICES 1024 /* max supported device */ +#define PM8001_MAX_PHYS 16 /* max. possible phys */ +#define PM8001_MAX_PORTS 16 /* max. 
possible ports */ +#define PM8001_MAX_DEVICES 2048 /* max supported device */ +#define PM8001_MAX_MSIX_VEC 64 /* max msi-x int for spcv/ve */ +#define USI_MAX_MEMCNT_BASE 5 +#define IB (USI_MAX_MEMCNT_BASE + 1) +#define CI (IB + PM8001_MAX_SPCV_INB_NUM) +#define OB (CI + PM8001_MAX_SPCV_INB_NUM) +#define PI (OB + PM8001_MAX_SPCV_OUTB_NUM) +#define USI_MAX_MEMCNT (PI + PM8001_MAX_SPCV_OUTB_NUM) +#define PM8001_MAX_DMA_SG SG_ALL enum memory_region_num { AAP1 = 0x0, /* application acceleration processor */ IOP, /* IO processor */ - CI, /* consumer index */ - PI, /* producer index */ - IB, /* inbound queue */ - OB, /* outbound queue */ NVMD, /* NVM device */ DEV_MEM, /* memory for devices */ CCB_MEM, /* memory for command control block */ + FW_FLASH /* memory for fw flash update */ }; #define PM8001_EVENT_LOG_SIZE (128 * 1024) diff --git a/trunk/drivers/scsi/pm8001/pm8001_hwi.c b/trunk/drivers/scsi/pm8001/pm8001_hwi.c index b8dd05074abb..69dd49c05f1e 100644 --- a/trunk/drivers/scsi/pm8001/pm8001_hwi.c +++ b/trunk/drivers/scsi/pm8001/pm8001_hwi.c @@ -50,32 +50,39 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; - pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00); - pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04); - pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08); - pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C); - pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10); - pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14); - pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18); - pm8001_ha->main_cfg_tbl.inbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.signature = + pm8001_mr32(address, 0x00); + pm8001_ha->main_cfg_tbl.pm8001_tbl.interface_rev = + pm8001_mr32(address, 0x04); + pm8001_ha->main_cfg_tbl.pm8001_tbl.firmware_rev = + pm8001_mr32(address, 0x08); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_out_io = + pm8001_mr32(address, 0x0C); + pm8001_ha->main_cfg_tbl.pm8001_tbl.max_sgl = + pm8001_mr32(address, 0x10); + pm8001_ha->main_cfg_tbl.pm8001_tbl.ctrl_cap_flag = + pm8001_mr32(address, 0x14); + pm8001_ha->main_cfg_tbl.pm8001_tbl.gst_offset = + pm8001_mr32(address, 0x18); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_queue_offset = pm8001_mr32(address, MAIN_IBQ_OFFSET); - pm8001_ha->main_cfg_tbl.outbound_queue_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_queue_offset = pm8001_mr32(address, MAIN_OBQ_OFFSET); - pm8001_ha->main_cfg_tbl.hda_mode_flag = + pm8001_ha->main_cfg_tbl.pm8001_tbl.hda_mode_flag = pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET); /* read analog Setting offset from the configuration table */ - pm8001_ha->main_cfg_tbl.anolog_setup_table_offset = + pm8001_ha->main_cfg_tbl.pm8001_tbl.anolog_setup_table_offset = pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); /* read Error Dump Offset and Length */ - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length0 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); - pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 = + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_offset1 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); - pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 = + 
pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_dump_length1 = pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); } @@ -86,31 +93,56 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->general_stat_tbl_addr; - pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); - pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); - pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); - pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); - pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); - pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); - pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); - pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); - pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); - pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); - pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); - pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); - pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); - pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); - pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); - pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); - pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); - pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); - pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); - pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); - pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); - pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); - pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); - pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); - pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); + pm8001_ha->gs_tbl.pm8001_tbl.gst_len_mpistate = + pm8001_mr32(address, 0x00); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state0 = + pm8001_mr32(address, 0x04); + pm8001_ha->gs_tbl.pm8001_tbl.iq_freeze_state1 = + pm8001_mr32(address, 0x08); + pm8001_ha->gs_tbl.pm8001_tbl.msgu_tcnt = + pm8001_mr32(address, 0x0C); + pm8001_ha->gs_tbl.pm8001_tbl.iop_tcnt = + pm8001_mr32(address, 0x10); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd = + pm8001_mr32(address, 0x14); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[0] = + pm8001_mr32(address, 0x18); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[1] = + pm8001_mr32(address, 0x1C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[2] = + pm8001_mr32(address, 0x20); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[3] = + pm8001_mr32(address, 0x24); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[4] = + pm8001_mr32(address, 0x28); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[5] = + pm8001_mr32(address, 0x2C); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[6] = + pm8001_mr32(address, 0x30); + pm8001_ha->gs_tbl.pm8001_tbl.phy_state[7] = + pm8001_mr32(address, 0x34); + pm8001_ha->gs_tbl.pm8001_tbl.gpio_input_val = + pm8001_mr32(address, 0x38); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[0] = + pm8001_mr32(address, 0x3C); + pm8001_ha->gs_tbl.pm8001_tbl.rsvd1[1] = + pm8001_mr32(address, 0x40); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[0] = + pm8001_mr32(address, 0x44); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[1] = + pm8001_mr32(address, 0x48); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[2] = + pm8001_mr32(address, 0x4C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[3] = + 
pm8001_mr32(address, 0x50); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[4] = + pm8001_mr32(address, 0x54); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[5] = + pm8001_mr32(address, 0x58); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[6] = + pm8001_mr32(address, 0x5C); + pm8001_ha->gs_tbl.pm8001_tbl.recover_err_info[7] = + pm8001_mr32(address, 0x60); } /** @@ -119,10 +151,9 @@ static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) */ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int inbQ_num = 1; int i; void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; - for (i = 0; i < inbQ_num; i++) { + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { u32 offset = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -137,10 +168,9 @@ static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { - int outbQ_num = 1; int i; void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; - for (i = 0; i < outbQ_num; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { u32 offset = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); @@ -155,54 +185,57 @@ static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) */ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) { - int qn = 1; int i; u32 offsetib, offsetob; void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0; - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0; - - pm8001_ha->main_cfg_tbl.upper_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid0_3 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ITNexus_event_pid4_7 = + 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_ssp_event_pid4_7 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid0_3 = 0; + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_tgt_smp_event_pid4_7 = 0; + + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr = pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; - pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr = + 
pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_hi; - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr = + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr = pm8001_ha->memoryMap.region[IOP].phys_addr_lo; - pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE; - pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01; - pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01; - for (i = 0; i < qn; i++) { + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option = 0x01; + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt = 0x01; + for (i = 0; i < PM8001_MAX_INB_NUM; i++) { pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); pm8001_ha->inbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_hi; + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[IB].phys_addr_lo; + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; pm8001_ha->inbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[IB].total_len; + pm8001_ha->memoryMap.region[IB + i].total_len; pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_hi; + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = - pm8001_ha->memoryMap.region[CI].phys_addr_lo; + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; pm8001_ha->inbnd_q_tbl[i].ci_virt = - pm8001_ha->memoryMap.region[CI].virt_ptr; + pm8001_ha->memoryMap.region[CI + i].virt_ptr; offsetib = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(addressib, @@ -212,25 +245,25 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; } - for (i = 0; i < qn; i++) { + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); pm8001_ha->outbnd_q_tbl[i].upper_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_hi; + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].lower_base_addr = - pm8001_ha->memoryMap.region[OB].phys_addr_lo; + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].base_virt = - (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr; + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; pm8001_ha->outbnd_q_tbl[i].total_length = - pm8001_ha->memoryMap.region[OB].total_len; + pm8001_ha->memoryMap.region[OB + i].total_len; pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_hi; + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = - pm8001_ha->memoryMap.region[PI].phys_addr_lo; + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = - 0 | (10 << 16) | (0 << 24); + 0 | (10 << 16) | (i << 24); pm8001_ha->outbnd_q_tbl[i].pi_virt = - pm8001_ha->memoryMap.region[PI].virt_ptr; + pm8001_ha->memoryMap.region[PI + i].virt_ptr; offsetob = i * 0x24; 
pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(addressob, @@ -250,42 +283,51 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->main_cfg_tbl_addr; pm8001_mw32(address, 0x24, - pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd); + pm8001_ha->main_cfg_tbl.pm8001_tbl.inbound_q_nppd_hppd); pm8001_mw32(address, 0x28, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid0_3); pm8001_mw32(address, 0x2C, - pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_hw_event_pid4_7); pm8001_mw32(address, 0x30, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid0_3); pm8001_mw32(address, 0x34, - pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl.outbound_ncq_event_pid4_7); pm8001_mw32(address, 0x38, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid0_3); pm8001_mw32(address, 0x3C, - pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ITNexus_event_pid4_7); pm8001_mw32(address, 0x40, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid0_3); pm8001_mw32(address, 0x44, - pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_ssp_event_pid4_7); pm8001_mw32(address, 0x48, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3); + pm8001_ha->main_cfg_tbl.pm8001_tbl. + outbound_tgt_smp_event_pid0_3); pm8001_mw32(address, 0x4C, - pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7); + pm8001_ha->main_cfg_tbl.pm8001_tbl. 
+ outbound_tgt_smp_event_pid4_7); pm8001_mw32(address, 0x50, - pm8001_ha->main_cfg_tbl.upper_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_event_log_addr); pm8001_mw32(address, 0x54, - pm8001_ha->main_cfg_tbl.lower_event_log_addr); - pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size); - pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_event_log_addr); + pm8001_mw32(address, 0x58, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_size); + pm8001_mw32(address, 0x5C, + pm8001_ha->main_cfg_tbl.pm8001_tbl.event_log_option); pm8001_mw32(address, 0x60, - pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr); + pm8001_ha->main_cfg_tbl.pm8001_tbl.upper_iop_event_log_addr); pm8001_mw32(address, 0x64, - pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr); - pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size); + pm8001_ha->main_cfg_tbl.pm8001_tbl.lower_iop_event_log_addr); + pm8001_mw32(address, 0x68, + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_size); pm8001_mw32(address, 0x6C, - pm8001_ha->main_cfg_tbl.iop_event_log_option); + pm8001_ha->main_cfg_tbl.pm8001_tbl.iop_event_log_option); pm8001_mw32(address, 0x70, - pm8001_ha->main_cfg_tbl.fatal_err_interrupt); + pm8001_ha->main_cfg_tbl.pm8001_tbl.fatal_err_interrupt); } /** @@ -597,6 +639,19 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) */ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) { + u8 i = 0; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + /* 8081 controllers need BAR shift to access MPI space + * as this is shared with BIOS data */ + if (deviceid == 0x8081) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } /* check the firmware status */ if (-1 == check_fw_ready(pm8001_ha)) { PM8001_FAIL_DBG(pm8001_ha, @@ -613,11 +668,16 @@ static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha) read_outbnd_queue_table(pm8001_ha); /* update main config table ,inbound table and outbound table */ update_main_config_table(pm8001_ha); - update_inbnd_queue_table(pm8001_ha, 0); - update_outbnd_queue_table(pm8001_ha, 0); - mpi_set_phys_g3_with_ssc(pm8001_ha, 0); - /* 7->130ms, 34->500ms, 119->1.5s */ - mpi_set_open_retry_interval_reg(pm8001_ha, 119); + for (i = 0; i < PM8001_MAX_INB_NUM; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < PM8001_MAX_OUTB_NUM; i++) + update_outbnd_queue_table(pm8001_ha, i); + /* 8081 controller donot require these operations */ + if (deviceid != 0x8081) { + mpi_set_phys_g3_with_ssc(pm8001_ha, 0); + /* 7->130ms, 34->500ms, 119->1.5s */ + mpi_set_open_retry_interval_reg(pm8001_ha, 119); + } /* notify firmware update finished and check initialization status */ if (0 == mpi_init_check(pm8001_ha)) { PM8001_INIT_DBG(pm8001_ha, @@ -639,6 +699,16 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) u32 max_wait_count; u32 value; u32 gst_len_mpistate; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); + if (deviceid == 0x8081) { + if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_SM_BASE)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Shift Bar4 to 0x%x failed\n", + GSM_SM_BASE)); + return -1; + } + } init_pci_device_addresses(pm8001_ha); /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the table is stop */ @@ -740,14 +810,14 @@ static u32 
soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all * the FW register status to the originated status. * @pm8001_ha: our hba card information - * @signature: signature in host scratch pad0 register. */ static int -pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature) +pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) { u32 regVal, toggleVal; u32 max_wait_count; u32 regVal1, regVal2, regVal3; + u32 signature = 0x252acbcd; /* for host scratch pad0 */ unsigned long flags; /* step1: Check FW is ready for soft reset */ @@ -1113,7 +1183,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) * pm8001_chip_iounmap - which maped when initialized. * @pm8001_ha: our hba card information */ -static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) { s8 bar, logical = 0; for (bar = 0; bar < 6; bar++) { @@ -1192,7 +1262,7 @@ pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_enable(pm8001_ha, 0); @@ -1207,7 +1277,7 @@ pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha) * @pm8001_ha: our hba card information */ static void -pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) { #ifdef PM8001_USE_MSIX pm8001_chip_msix_interrupt_disable(pm8001_ha, 0); @@ -1218,12 +1288,13 @@ pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha) } /** - * mpi_msg_free_get- get the free message buffer for transfer inbound queue. + * pm8001_mpi_msg_free_get - get the free message buffer for transfer + * inbound queue. * @circularQ: the inbound queue we want to transfer to HBA. * @messageSize: the message size of this transfer, normally it is 64 bytes * @messagePtr: the pointer to message. */ -static int mpi_msg_free_get(struct inbound_queue_table *circularQ, +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, u16 messageSize, void **messagePtr) { u32 offset, consumer_index; @@ -1231,7 +1302,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, u8 bcCount = 1; /* only support single buffer */ /* Checks is the requested message size can be allocated in this queue*/ - if (messageSize > 64) { + if (messageSize > IOMB_SIZE_SPCV) { *messagePtr = NULL; return -1; } @@ -1245,7 +1316,7 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, return -1; } /* get memory IOMB buffer address */ - offset = circularQ->producer_idx * 64; + offset = circularQ->producer_idx * messageSize; /* increment to next bcCount element */ circularQ->producer_idx = (circularQ->producer_idx + bcCount) % PM8001_MPI_QUEUE; @@ -1257,29 +1328,30 @@ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, } /** - * mpi_build_cmd- build the message queue for transfer, update the PI to FW - * to tell the fw to get this message from IOMB. + * pm8001_mpi_build_cmd- build the message queue for transfer, update the PI to + * FW to tell the fw to get this message from IOMB. * @pm8001_ha: our hba card information * @circularQ: the inbound queue we want to transfer to HBA. * @opCode: the operation code represents commands which LLDD and fw recognized. 
* @payload: the command payload of each operation command. */ -static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, struct inbound_queue_table *circularQ, - u32 opCode, void *payload) + u32 opCode, void *payload, u32 responseQueue) { u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; - u32 responseQueue = 0; void *pMessage; - if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { + if (pm8001_mpi_msg_free_get(circularQ, pm8001_ha->iomb_size, + &pMessage) < 0) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("No free mpi buffer\n")); return -1; } BUG_ON(!payload); /*Copy to the payload*/ - memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr))); + memcpy(pMessage, payload, (pm8001_ha->iomb_size - + sizeof(struct mpi_msg_hdr))); /*Build the header*/ Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) @@ -1291,12 +1363,13 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, circularQ->pi_offset, circularQ->producer_idx); PM8001_IO_DBG(pm8001_ha, - pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx, - circularQ->consumer_index)); + pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n", + responseQueue, opCode, circularQ->producer_idx, + circularQ->consumer_index)); return 0; } -static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, struct outbound_queue_table *circularQ, u8 bc) { u32 producer_index; @@ -1305,7 +1378,7 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr)); pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); if (pOutBoundMsgHeader != msgHeader) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("consumer_idx = %d msgHeader = %p\n", @@ -1336,13 +1409,14 @@ static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, } /** - * mpi_msg_consume- get the MPI message from outbound queue message table. + * pm8001_mpi_msg_consume- get the MPI message from outbound queue + * message table. * @pm8001_ha: our hba card information * @circularQ: the outbound queue table. * @messagePtr1: the message contents of this outbound message. * @pBC: the message size. 
*/ -static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, struct outbound_queue_table *circularQ, void **messagePtr1, u8 *pBC) { @@ -1356,7 +1430,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, /*Get the pointer to the circular queue buffer element*/ msgHeader = (struct mpi_msg_hdr *) (circularQ->base_virt + - circularQ->consumer_idx * 64); + circularQ->consumer_idx * pm8001_ha->iomb_size); /* read header */ header_tmp = pm8001_read_32(msgHeader); msgHeader_tmp = cpu_to_le32(header_tmp); @@ -1416,7 +1490,7 @@ static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, return MPI_IO_STATUS_BUSY; } -static void pm8001_work_fn(struct work_struct *work) +void pm8001_work_fn(struct work_struct *work) { struct pm8001_work *pw = container_of(work, struct pm8001_work, work); struct pm8001_device *pm8001_dev; @@ -1431,7 +1505,7 @@ static void pm8001_work_fn(struct work_struct *work) pm8001_dev = pw->data; /* Most stash device structure */ if ((pm8001_dev == NULL) || ((pw->handler != IO_XFER_ERROR_BREAK) - && (pm8001_dev->dev_type == NO_DEVICE))) { + && (pm8001_dev->dev_type == SAS_PHY_UNUSED))) { kfree(pw); return; } @@ -1596,7 +1670,7 @@ static void pm8001_work_fn(struct work_struct *work) } break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: dev = pm8001_dev->sas_device; - pm8001_I_T_nexus_reset(dev); + pm8001_I_T_nexus_event_handler(dev); break; case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: dev = pm8001_dev->sas_device; @@ -1614,7 +1688,7 @@ static void pm8001_work_fn(struct work_struct *work) kfree(pw); } -static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, int handler) { struct pm8001_work *pw; @@ -1633,6 +1707,123 @@ static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data, return ret; } +static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + 
return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + /** * mpi_ssp_completion- process the event that FW response to the SSP request. * @pm8001_ha: our hba card information @@ -1867,7 +2058,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) break; } PM8001_IO_DBG(pm8001_ha, - pm8001_printk("scsi_status = %x \n ", + pm8001_printk("scsi_status = %x\n ", psspPayload->ssp_resp_iu.status)); spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; @@ -2096,16 +2287,44 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) status = le32_to_cpu(psataPayload->status); tag = le32_to_cpu(psataPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } ccb = &pm8001_ha->ccb_info[tag]; param = le32_to_cpu(psataPayload->param); - t = ccb->task; + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + ts = &t->task_status; - pm8001_dev = ccb->device; - if (status) + if (!ts) { PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("sata IO status 0x%x\n", status)); - if (unlikely(!t || !t->lldd_task || !t->dev)) + pm8001_printk("ts null\n")); return; + } switch (status) { case IO_SUCCESS: @@ -2113,6 +2332,19 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm8001_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } } else { u8 len; ts->resp = 
SAS_TASK_COMPLETE; @@ -2423,6 +2655,29 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) u32 dev_id = le32_to_cpu(psataPayload->device_id); unsigned long flags; + ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!. returning\n")); + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm8001_send_read_log(pm8001_ha, pm8001_dev); + return; + } + ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; pm8001_dev = ccb->device; @@ -2432,9 +2687,9 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk("port_id = %x,device_id = %x\n", - port_id, dev_id)); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, dev_id, tag, event)); switch (event) { case IO_OVERFLOW: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); @@ -2822,8 +3077,8 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static void -mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { struct set_dev_state_resp *pPayload = (struct set_dev_state_resp *)(piomb + 4); @@ -2843,8 +3098,7 @@ mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct get_nvm_data_resp *pPayload = (struct get_nvm_data_resp *)(piomb + 4); @@ -2863,8 +3117,8 @@ mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static void -mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +void +pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct fw_control_ex *fw_control_context; struct get_nvm_data_resp *pPayload = @@ -2925,7 +3179,7 @@ mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_ccb_free(pm8001_ha, tag); } -static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct local_phy_ctl_resp *pPayload = (struct local_phy_ctl_resp *)(piomb + 4); @@ -2954,7 +3208,7 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) * while receive a broadcast(change) primitive just tell the sas * layer to discover the changed domain rather than the whole domain. 
*/ -static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) { struct pm8001_phy *phy = &pm8001_ha->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; @@ -2988,7 +3242,7 @@ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) } /* Get the link rate speed */ -static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) { struct sas_phy *sas_phy = phy->sas_phy.phy; @@ -3025,7 +3279,7 @@ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame * buffer. */ -static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr) { if (phy->sas_phy.frame_rcvd[0] == 0x34 @@ -3067,7 +3321,7 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, ((phyId & 0x0F) << 4) | (port_id & 0x0F)); payload.param0 = cpu_to_le32(param0); payload.param1 = cpu_to_le32(param1); - mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); } static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, @@ -3112,19 +3366,19 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_chip_phy_ctl_req(pm8001_ha, phy_id, PHY_NOTIFY_ENABLE_SPINUP); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_EDGE_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; case SAS_FANOUT_EXPANDER_DEVICE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("fanout expander device.\n")); port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); break; default: PM8001_MSG_DBG(pm8001_ha, @@ -3179,7 +3433,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) " phy id = %d\n", port_id, phy_id)); port->port_state = portstate; port->port_attached = 1; - get_lrate_mode(phy, link_rate); + pm8001_get_lrate_mode(phy, link_rate); phy->phy_type |= PORT_TYPE_SATA; phy->phy_attached = 1; phy->sas_phy.oob_mode = SATA_OOB_MODE; @@ -3189,7 +3443,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) sizeof(struct dev_to_host_fis)); phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; - phy->identify.device_type = SATA_DEV; + phy->identify.device_type = SAS_SATA_DEV; pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); pm8001_bytes_dmaed(pm8001_ha, phy_id); @@ -3260,7 +3514,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_reg_resp -process register device ID response. + * pm8001_mpi_reg_resp -process register device ID response. * @pm8001_ha: our hba card information * @piomb: IO message buffer * @@ -3269,7 +3523,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) * has assigned, from now,inter-communication with FW is no longer using the * SAS address, use device ID which FW assigned. 
*/ -static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3331,7 +3585,7 @@ static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; @@ -3347,8 +3601,13 @@ static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +/** + * fw_flash_update_resp - Response from FW for flash update command. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) { u32 status; struct fw_control_ex fw_control_context; @@ -3403,10 +3662,6 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) break; } ccb->fw_control_context->fw_control->retcode = status; - pci_free_consistent(pm8001_ha->pdev, - fw_control_context.len, - fw_control_context.virtAddr, - fw_control_context.phys_addr); complete(pm8001_ha->nvmd_completion); ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; @@ -3414,8 +3669,7 @@ mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) return 0; } -static int -mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { u32 status; int i; @@ -3431,8 +3685,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) return 0; } -static int -mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct sas_task *t; struct pm8001_ccb_info *ccb; @@ -3440,19 +3693,29 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 status ; u32 tag, scp; struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; struct task_abort_resp *pPayload = (struct task_abort_resp *)(piomb + 4); status = le32_to_cpu(pPayload->status); tag = le32_to_cpu(pPayload->tag); + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TAG NULL. RETURNING !!!")); + return -1; + } + scp = le32_to_cpu(pPayload->scp); ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; - PM8001_IO_DBG(pm8001_ha, - pm8001_printk(" status = 0x%x\n", status)); - if (t == NULL) + pm8001_dev = ccb->device; /* retrieve device */ + + if (!t) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TASK NULL. 
RETURNING !!!")); return -1; + } ts = &t->task_status; if (status != 0) PM8001_FAIL_DBG(pm8001_ha, @@ -3476,7 +3739,15 @@ mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb(); - t->task_done(t); + + if ((pm8001_dev->id & NCQ_ABORT_ALL_FLAG) && t) { + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + /* clear the flag */ + pm8001_dev->id &= 0xBFFFFFFF; + } else + t->task_done(t); + return 0; } @@ -3727,17 +3998,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_LOCAL_PHY_CNTRL: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); - mpi_local_phy_ctl(pm8001_ha, piomb); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); break; case OPC_OUB_DEV_REGIST: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEV_REGIST\n")); - mpi_reg_resp(pm8001_ha, piomb); + pm8001_mpi_reg_resp(pm8001_ha, piomb); break; case OPC_OUB_DEREG_DEV: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister the device\n")); - mpi_dereg_resp(pm8001_ha, piomb); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEV_HANDLE: PM8001_MSG_DBG(pm8001_ha, @@ -3775,7 +4046,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_FW_FLASH_UPDATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); - mpi_fw_flash_update_resp(pm8001_ha, piomb); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); break; case OPC_OUB_GPIO_RESPONSE: PM8001_MSG_DBG(pm8001_ha, @@ -3788,17 +4059,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_GENERAL_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); - mpi_general_event(pm8001_ha, piomb); + pm8001_mpi_general_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SATA_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SAS_DIAG_MODE_START_END: PM8001_MSG_DBG(pm8001_ha, @@ -3823,17 +4094,17 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SMP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); - mpi_task_abort_resp(pm8001_ha, piomb); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); - mpi_get_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_SET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); - mpi_set_nvmd_resp(pm8001_ha, piomb); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_DEVICE_HANDLE_REMOVAL: PM8001_MSG_DBG(pm8001_ha, @@ -3842,7 +4113,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); - mpi_set_dev_state_resp(pm8001_ha, piomb); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, @@ -3864,7 +4135,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) } } -static int process_oq(struct 
pm8001_hba_info *pm8001_ha) +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) { struct outbound_queue_table *circularQ; void *pMsg1 = NULL; @@ -3873,14 +4144,15 @@ static int process_oq(struct pm8001_hba_info *pm8001_ha) unsigned long flags; spin_lock_irqsave(&pm8001_ha->lock, flags); - circularQ = &pm8001_ha->outbnd_q_tbl[0]; + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; do { - ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); if (MPI_IO_STATUS_SUCCESS == ret) { /* process the outbound message */ process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); /* free the message from the outbound circular buffer */ - mpi_msg_free_set(pm8001_ha, pMsg1, circularQ, bc); + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); } if (MPI_IO_STATUS_BUSY == ret) { /* Update the producer index from SPC */ @@ -3903,7 +4175,7 @@ static const u8 data_dir_flags[] = { [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ }; -static void +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd) { int i; @@ -3978,7 +4250,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, smp_cmd.long_smp_req.long_resp_size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); - mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); return 0; err_out_2: @@ -4042,7 +4314,7 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0); return ret; } @@ -4060,6 +4332,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, u32 ATAP = 0x0; u32 dir; struct inbound_queue_table *circularQ; + unsigned long flags; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; @@ -4080,8 +4353,10 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } } - if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); ncg_tag = hdr_tag; + } dir = data_dir_flags[task->data_dir] << 8; sata_cmd.tag = cpu_to_le32(tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); @@ -4112,7 +4387,55 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + 
SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + } else if (task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } else if (!task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); return ret; } @@ -4142,12 +4465,12 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); - payload.sas_identify.dev_type = SAS_END_DEV; + payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } @@ -4157,7 +4480,7 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) * @num: the inbound queue number * @phy_id: the phy id which we wanted to start up. */ -static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_stop_req payload; @@ -4169,12 +4492,12 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); return ret; } /** - * see comments on mpi_reg_resp. + * see comments on pm8001_mpi_reg_resp. */ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 flag) @@ -4204,11 +4527,11 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, if (flag == 1) stp_sspsmp_sata = 0x02; /*direct attached sata */ else { - if (pm8001_dev->dev_type == SATA_DEV) + if (pm8001_dev->dev_type == SAS_SATA_DEV) stp_sspsmp_sata = 0x00; /* stp*/ - else if (pm8001_dev->dev_type == SAS_END_DEV || - pm8001_dev->dev_type == EDGE_DEV || - pm8001_dev->dev_type == FANOUT_DEV) + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) stp_sspsmp_sata = 0x01; /*ssp or smp*/ } if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) @@ -4228,14 +4551,14 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } /** - * see comments on mpi_reg_resp. 
+ * see comments on pm8001_mpi_reg_resp. */ -static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id) { struct dereg_dev_req payload; @@ -4249,7 +4572,7 @@ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, payload.device_id = cpu_to_le32(device_id); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister device device_id = %d\n", device_id)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4272,7 +4595,7 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(1); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } @@ -4296,11 +4619,11 @@ static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) * @stat: stat. */ static irqreturn_t -pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) +pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) { - pm8001_chip_interrupt_disable(pm8001_ha); - process_oq(pm8001_ha); - pm8001_chip_interrupt_enable(pm8001_ha); + pm8001_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm8001_chip_interrupt_enable(pm8001_ha, vec); return IRQ_HANDLED; } @@ -4322,7 +4645,7 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag = cpu_to_le32(cmd_tag); } - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); return ret; } @@ -4331,16 +4654,17 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, * @task: the task we wanted to aborted. * @flag: the abort flag. */ -static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) { u32 opc, device_id; int rc = TMF_RESP_FUNC_FAILED; - PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" - " = %x", cmd_tag, task_tag)); - if (pm8001_dev->dev_type == SAS_END_DEV) + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("cmd_tag = %x, abort task tag = 0x%x", + cmd_tag, task_tag)); + if (pm8001_dev->dev_type == SAS_END_DEVICE) opc = OPC_INB_SSP_ABORT; - else if (pm8001_dev->dev_type == SATA_DEV) + else if (pm8001_dev->dev_type == SAS_SATA_DEV) opc = OPC_INB_SATA_ABORT; else opc = OPC_INB_SMP_ABORT;/* SMP */ @@ -4358,7 +4682,7 @@ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, * @ccb: the ccb information. * @tmf: task management function. 
*/ -static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) { struct sas_task *task = ccb->task; @@ -4376,11 +4700,11 @@ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); circularQ = &pm8001_ha->inbnd_q_tbl[0]; - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); return ret; } -static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_GET_NVMD_DATA; @@ -4397,7 +4721,7 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; + fw_control_context->usrAddr = (u8 *)ioctl_payload->func_specific; fw_control_context->len = ioctl_payload->length; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&nvmd_req, 0, sizeof(nvmd_req)); @@ -4456,11 +4780,11 @@ static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } -static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_SET_NVMD_DATA; @@ -4479,7 +4803,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, return -ENOMEM; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, - ioctl_payload->func_specific, + &ioctl_payload->func_specific, ioctl_payload->length); memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); @@ -4536,7 +4860,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); return rc; } @@ -4545,7 +4869,7 @@ static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, * @pm8001_ha: our hba card information. 
* @fw_flash_updata_info: firmware flash update param */ -static int +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, void *fw_flash_updata_info, u32 tag) { @@ -4567,11 +4891,11 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); payload.sgl_addr_hi = cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); - ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return ret; } -static int +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, void *payload) { @@ -4581,29 +4905,14 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, int rc; u32 tag; struct pm8001_ccb_info *ccb; - void *buffer = NULL; - dma_addr_t phys_addr; - u32 phys_addr_hi; - u32 phys_addr_lo; + void *buffer = pm8001_ha->memoryMap.region[FW_FLASH].virt_ptr; + dma_addr_t phys_addr = pm8001_ha->memoryMap.region[FW_FLASH].phys_addr; struct pm8001_ioctl_payload *ioctl_payload = payload; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; - fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; - if (fw_control->len != 0) { - if (pm8001_mem_alloc(pm8001_ha->pdev, - (void **)&buffer, - &phys_addr, - &phys_addr_hi, - &phys_addr_lo, - fw_control->len, 0) != 0) { - PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("Mem alloc failure\n")); - kfree(fw_control_context); - return -ENOMEM; - } - } + fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; memcpy(buffer, fw_control->buffer, fw_control->len); flash_update_info.sgl.addr = cpu_to_le64(phys_addr); flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); @@ -4613,6 +4922,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, flash_update_info.total_image_len = fw_control->size; fw_control_context->fw_control = fw_control; fw_control_context->virtAddr = buffer; + fw_control_context->phys_addr = phys_addr; fw_control_context->len = fw_control->len; rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { @@ -4627,7 +4937,7 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, return rc; } -static int +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 state) { @@ -4648,7 +4958,7 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.device_id = cpu_to_le32(pm8001_dev->device_id); payload.nds = cpu_to_le32(state); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4673,7 +4983,7 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) payload.SSAHOLT = cpu_to_le32(0xd << 25); payload.sata_hol_tmo = cpu_to_le32(80); payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); - rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); return rc; } @@ -4706,4 +5016,3 @@ const struct pm8001_dispatch pm8001_8001_dispatch = { .set_dev_state_req = pm8001_chip_set_dev_state_req, .sas_re_init_req = pm8001_chip_sas_re_initialization, }; - diff --git a/trunk/drivers/scsi/pm8001/pm8001_hwi.h b/trunk/drivers/scsi/pm8001/pm8001_hwi.h index d437309cb1e1..d7c1e2034226 100644 --- a/trunk/drivers/scsi/pm8001/pm8001_hwi.h +++ b/trunk/drivers/scsi/pm8001/pm8001_hwi.h @@ -131,6 +131,8 @@ 
#define LINKRATE_30 (0x02 << 8) #define LINKRATE_60 (0x04 << 8) +/* for new SPC controllers MEMBASE III is shared between BIOS and DATA */ +#define GSM_SM_BASE 0x4F0000 struct mpi_msg_hdr{ __le32 header; /* Bits [11:0] - Message operation code */ /* Bits [15:12] - Message Category */ @@ -298,7 +300,7 @@ struct local_phy_ctl_resp { #define OP_BITS 0x0000FF00 -#define ID_BITS 0x0000000F +#define ID_BITS 0x000000FF /* * brief the data structure of PORT Control Command diff --git a/trunk/drivers/scsi/pm8001/pm8001_init.c b/trunk/drivers/scsi/pm8001/pm8001_init.c index 3d5e522e00fc..e4b9bc7f5410 100644 --- a/trunk/drivers/scsi/pm8001/pm8001_init.c +++ b/trunk/drivers/scsi/pm8001/pm8001_init.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -44,8 +44,16 @@ static struct scsi_transport_template *pm8001_stt; +/** + * chip info structure to identify chip key functionality as + * encryption available/not, no of ports, hw specific function ref + */ static const struct pm8001_chip_info pm8001_chips[] = { - [chip_8001] = { 8, &pm8001_8001_dispatch,}, + [chip_8001] = {0, 8, &pm8001_8001_dispatch,}, + [chip_8008] = {0, 8, &pm8001_80xx_dispatch,}, + [chip_8009] = {1, 8, &pm8001_80xx_dispatch,}, + [chip_8018] = {0, 16, &pm8001_80xx_dispatch,}, + [chip_8019] = {1, 16, &pm8001_80xx_dispatch,}, }; static int pm8001_id; @@ -155,37 +163,75 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha) } #ifdef PM8001_USE_TASKLET + +/** + * tasklet for 64 msi-x interrupt handler + * @opaque: the passed general host adapter struct + * Note: pm8001_tasklet is common for pm8001 & pm80xx + */ static void pm8001_tasklet(unsigned long opaque) { struct pm8001_hba_info *pm8001_ha; + u32 vec; pm8001_ha = (struct pm8001_hba_info *)opaque; if (unlikely(!pm8001_ha)) BUG_ON(1); - PM8001_CHIP_DISP->isr(pm8001_ha); + vec = pm8001_ha->int_vector; + PM8001_CHIP_DISP->isr(pm8001_ha, vec); +} +#endif + +static struct pm8001_hba_info *outq_to_hba(u8 *outq) +{ + return container_of((outq - *outq), struct pm8001_hba_info, outq[0]); } + +/** + * pm8001_interrupt_handler_msix - main MSIX interrupt handler. + * It obtains the vector number and calls the equivalent bottom + * half or services directly. + * @opaque: the passed outbound queue/vector. Host structure is + * retrieved from the same. + */ +static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) +{ + struct pm8001_hba_info *pm8001_ha = outq_to_hba(opaque); + u8 outq = *(u8 *)opaque; + irqreturn_t ret = IRQ_HANDLED; + if (unlikely(!pm8001_ha)) + return IRQ_NONE; + if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) + return IRQ_NONE; + pm8001_ha->int_vector = outq; +#ifdef PM8001_USE_TASKLET + tasklet_schedule(&pm8001_ha->tasklet); +#else + ret = PM8001_CHIP_DISP->isr(pm8001_ha, outq); #endif + return ret; +} +/** + * pm8001_interrupt_handler_intx - main INTx interrupt handler. + * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure. + */ - /** - * pm8001_interrupt - when HBA originate a interrupt,we should invoke this - * dispatcher to handle each case. - * @irq: irq number. 
- * @opaque: the passed general host adapter struct - */ -static irqreturn_t pm8001_interrupt(int irq, void *opaque) +static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) { struct pm8001_hba_info *pm8001_ha; irqreturn_t ret = IRQ_HANDLED; - struct sas_ha_struct *sha = opaque; + struct sas_ha_struct *sha = dev_id; pm8001_ha = sha->lldd_ha; if (unlikely(!pm8001_ha)) return IRQ_NONE; if (!PM8001_CHIP_DISP->is_our_interupt(pm8001_ha)) return IRQ_NONE; + + pm8001_ha->int_vector = 0; #ifdef PM8001_USE_TASKLET tasklet_schedule(&pm8001_ha->tasklet); #else - ret = PM8001_CHIP_DISP->isr(pm8001_ha); + ret = PM8001_CHIP_DISP->isr(pm8001_ha, 0); #endif return ret; } @@ -195,10 +241,14 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque) * @pm8001_ha:our hba structure. * */ -static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) +static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha, + const struct pci_device_id *ent) { int i; spin_lock_init(&pm8001_ha->lock); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("pm8001_alloc: PHY:%x\n", + pm8001_ha->chip->n_phy)); for (i = 0; i < pm8001_ha->chip->n_phy; i++) { pm8001_phy_init(pm8001_ha, i); pm8001_ha->port[i].wide_port_phymap = 0; @@ -222,30 +272,57 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[IOP].total_len = PM8001_EVENT_LOG_SIZE; pm8001_ha->memoryMap.region[IOP].alignment = 32; - /* MPI Memory region 3 for consumer Index of inbound queues */ - pm8001_ha->memoryMap.region[CI].num_elements = 1; - pm8001_ha->memoryMap.region[CI].element_size = 4; - pm8001_ha->memoryMap.region[CI].total_len = 4; - pm8001_ha->memoryMap.region[CI].alignment = 4; - - /* MPI Memory region 4 for producer Index of outbound queues */ - pm8001_ha->memoryMap.region[PI].num_elements = 1; - pm8001_ha->memoryMap.region[PI].element_size = 4; - pm8001_ha->memoryMap.region[PI].total_len = 4; - pm8001_ha->memoryMap.region[PI].alignment = 4; - - /* MPI Memory region 5 inbound queues */ - pm8001_ha->memoryMap.region[IB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[IB].element_size = 64; - pm8001_ha->memoryMap.region[IB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[IB].alignment = 64; - - /* MPI Memory region 6 outbound queues */ - pm8001_ha->memoryMap.region[OB].num_elements = PM8001_MPI_QUEUE; - pm8001_ha->memoryMap.region[OB].element_size = 64; - pm8001_ha->memoryMap.region[OB].total_len = PM8001_MPI_QUEUE * 64; - pm8001_ha->memoryMap.region[OB].alignment = 64; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + /* MPI Memory region 3 for consumer Index of inbound queues */ + pm8001_ha->memoryMap.region[CI+i].num_elements = 1; + pm8001_ha->memoryMap.region[CI+i].element_size = 4; + pm8001_ha->memoryMap.region[CI+i].total_len = 4; + pm8001_ha->memoryMap.region[CI+i].alignment = 4; + + if ((ent->driver_data) != chip_8001) { + /* MPI Memory region 5 inbound queues */ + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 128; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[IB+i].alignment = 128; + } else { + pm8001_ha->memoryMap.region[IB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[IB+i].element_size = 64; + pm8001_ha->memoryMap.region[IB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[IB+i].alignment = 64; + } + } + + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + /* MPI Memory region 4 for producer Index of 
outbound queues */ + pm8001_ha->memoryMap.region[PI+i].num_elements = 1; + pm8001_ha->memoryMap.region[PI+i].element_size = 4; + pm8001_ha->memoryMap.region[PI+i].total_len = 4; + pm8001_ha->memoryMap.region[PI+i].alignment = 4; + + if (ent->driver_data != chip_8001) { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 128; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 128; + pm8001_ha->memoryMap.region[OB+i].alignment = 128; + } else { + /* MPI Memory region 6 Outbound queues */ + pm8001_ha->memoryMap.region[OB+i].num_elements = + PM8001_MPI_QUEUE; + pm8001_ha->memoryMap.region[OB+i].element_size = 64; + pm8001_ha->memoryMap.region[OB+i].total_len = + PM8001_MPI_QUEUE * 64; + pm8001_ha->memoryMap.region[OB+i].alignment = 64; + } + } /* Memory region write DMA*/ pm8001_ha->memoryMap.region[NVMD].num_elements = 1; pm8001_ha->memoryMap.region[NVMD].element_size = 4096; @@ -264,6 +341,9 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->memoryMap.region[CCB_MEM].total_len = PM8001_MAX_CCB * sizeof(struct pm8001_ccb_info); + /* Memory region for fw flash */ + pm8001_ha->memoryMap.region[FW_FLASH].total_len = 4096; + for (i = 0; i < USI_MAX_MEMCNT; i++) { if (pm8001_mem_alloc(pm8001_ha->pdev, &pm8001_ha->memoryMap.region[i].virt_ptr, @@ -281,7 +361,7 @@ static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha) pm8001_ha->devices = pm8001_ha->memoryMap.region[DEV_MEM].virt_ptr; for (i = 0; i < PM8001_MAX_DEVICES; i++) { - pm8001_ha->devices[i].dev_type = NO_DEVICE; + pm8001_ha->devices[i].dev_type = SAS_PHY_UNUSED; pm8001_ha->devices[i].id = i; pm8001_ha->devices[i].device_id = PM8001_MAX_DEVICES; pm8001_ha->devices[i].running_req = 0; @@ -339,10 +419,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) ioremap(pm8001_ha->io_mem[logicalBar].membase, pm8001_ha->io_mem[logicalBar].memsize); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("PCI: bar %d, logicalBar %d " - "virt_addr=%lx,len=%d\n", bar, logicalBar, - (unsigned long) - pm8001_ha->io_mem[logicalBar].memvirtaddr, + pm8001_printk("PCI: bar %d, logicalBar %d ", + bar, logicalBar)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "base addr %llx virt_addr=%llx len=%d\n", + (u64)pm8001_ha->io_mem[logicalBar].membase, + (u64)pm8001_ha->io_mem[logicalBar].memvirtaddr, pm8001_ha->io_mem[logicalBar].memsize)); } else { pm8001_ha->io_mem[logicalBar].membase = 0; @@ -361,8 +443,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) * @shost: scsi host struct which has been initialized before. 
*/ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, - u32 chip_id, - struct Scsi_Host *shost) + const struct pci_device_id *ent, + struct Scsi_Host *shost) + { struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); @@ -374,7 +457,7 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->pdev = pdev; pm8001_ha->dev = &pdev->dev; - pm8001_ha->chip_id = chip_id; + pm8001_ha->chip_id = ent->driver_data; pm8001_ha->chip = &pm8001_chips[pm8001_ha->chip_id]; pm8001_ha->irq = pdev->irq; pm8001_ha->sas = sha; @@ -382,12 +465,22 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->id = pm8001_id++; pm8001_ha->logging_level = 0x01; sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); + /* IOMB size is 128 for 8088/89 controllers */ + if (pm8001_ha->chip_id != chip_8001) + pm8001_ha->iomb_size = IOMB_SIZE_SPCV; + else + pm8001_ha->iomb_size = IOMB_SIZE_SPC; + #ifdef PM8001_USE_TASKLET + /** + * default tasklet for non msi-x interrupt handler/first msi-x + * interrupt handler + **/ tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); + (unsigned long)pm8001_ha); #endif pm8001_ioremap(pm8001_ha); - if (!pm8001_alloc(pm8001_ha)) + if (!pm8001_alloc(pm8001_ha, ent)) return pm8001_ha; pm8001_free(pm8001_ha); return NULL; @@ -512,21 +605,50 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) { - u8 i; + u8 i, j; #ifdef PM8001_READ_VPD + /* For new SPC controllers WWN is stored in flash vpd + * For SPC/SPCve controllers WWN is stored in EEPROM + * For Older SPC WWN is stored in NVMD + */ DECLARE_COMPLETION_ONSTACK(completion); struct pm8001_ioctl_payload payload; + u16 deviceid; + pci_read_config_word(pm8001_ha->pdev, PCI_DEVICE_ID, &deviceid); pm8001_ha->nvmd_completion = &completion; - payload.minor_function = 0; - payload.length = 128; - payload.func_specific = kzalloc(128, GFP_KERNEL); + + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) { + payload.minor_function = 4; + payload.length = 4096; + } else { + payload.minor_function = 0; + payload.length = 128; + } + } else { + payload.minor_function = 1; + payload.length = 4096; + } + payload.offset = 0; + payload.func_specific = kzalloc(payload.length, GFP_KERNEL); PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); wait_for_completion(&completion); + + for (i = 0, j = 0; i <= 7; i++, j++) { + if (pm8001_ha->chip_id == chip_8001) { + if (deviceid == 0x8081) + pm8001_ha->sas_addr[j] = + payload.func_specific[0x704 + i]; + } else + pm8001_ha->sas_addr[j] = + payload.func_specific[0x804 + i]; + } + for (i = 0; i < pm8001_ha->chip->n_phy; i++) { - memcpy(&pm8001_ha->phy[i].dev_sas_addr, pm8001_ha->sas_addr, - SAS_ADDR_SIZE); + memcpy(&pm8001_ha->phy[i].dev_sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("phy %d sas_addr = %016llx \n", i, + pm8001_printk("phy %d sas_addr = %016llx\n", i, pm8001_ha->phy[i].dev_sas_addr)); } #else @@ -547,31 +669,50 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) * @chip_info: our ha struct. 
* @irq_handler: irq_handler */ -static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, - irq_handler_t irq_handler) +static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) { u32 i = 0, j = 0; - u32 number_of_intr = 1; + u32 number_of_intr; int flag = 0; u32 max_entry; int rc; + static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3]; + + /* SPCv controllers supports 64 msi-x */ + if (pm8001_ha->chip_id == chip_8001) { + number_of_intr = 1; + flag |= IRQF_DISABLED; + } else { + number_of_intr = PM8001_MAX_MSIX_VEC; + flag &= ~IRQF_SHARED; + flag |= IRQF_DISABLED; + } + max_entry = sizeof(pm8001_ha->msix_entries) / sizeof(pm8001_ha->msix_entries[0]); - flag |= IRQF_DISABLED; for (i = 0; i < max_entry ; i++) pm8001_ha->msix_entries[i].entry = i; rc = pci_enable_msix(pm8001_ha->pdev, pm8001_ha->msix_entries, number_of_intr); pm8001_ha->number_of_intr = number_of_intr; if (!rc) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "pci_enable_msix request ret:%d no of intr %d\n", + rc, pm8001_ha->number_of_intr)); + + for (i = 0; i < number_of_intr; i++) + pm8001_ha->outq[i] = i; + for (i = 0; i < number_of_intr; i++) { + snprintf(intr_drvname[i], sizeof(intr_drvname[0]), + DRV_NAME"%d", i); if (request_irq(pm8001_ha->msix_entries[i].vector, - irq_handler, flag, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost))) { + pm8001_interrupt_handler_msix, flag, + intr_drvname[i], &pm8001_ha->outq[i])) { for (j = 0; j < i; j++) free_irq( pm8001_ha->msix_entries[j].vector, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + &pm8001_ha->outq[j]); pci_disable_msix(pm8001_ha->pdev); break; } @@ -588,22 +729,24 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha, static u32 pm8001_request_irq(struct pm8001_hba_info *pm8001_ha) { struct pci_dev *pdev; - irq_handler_t irq_handler = pm8001_interrupt; int rc; pdev = pm8001_ha->pdev; #ifdef PM8001_USE_MSIX if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) - return pm8001_setup_msix(pm8001_ha, irq_handler); - else + return pm8001_setup_msix(pm8001_ha); + else { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MSIX not supported!!!\n")); goto intx; + } #endif intx: /* initialize the INT-X interrupt */ - rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, - SHOST_TO_SAS_HA(pm8001_ha->shost)); + rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED, + DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost)); return rc; } @@ -621,12 +764,13 @@ static int pm8001_pci_probe(struct pci_dev *pdev, { unsigned int rc; u32 pci_reg; + u8 i = 0; struct pm8001_hba_info *pm8001_ha; struct Scsi_Host *shost = NULL; const struct pm8001_chip_info *chip; dev_printk(KERN_INFO, &pdev->dev, - "pm8001: driver version %s\n", DRV_VERSION); + "pm80xx: driver version %s\n", DRV_VERSION); rc = pci_enable_device(pdev); if (rc) goto err_out_enable; @@ -665,25 +809,39 @@ static int pm8001_pci_probe(struct pci_dev *pdev, goto err_out_free; } pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost)); - pm8001_ha = pm8001_pci_alloc(pdev, chip_8001, shost); + /* ent->driver variable is used to differentiate between controllers */ + pm8001_ha = pm8001_pci_alloc(pdev, ent, shost); if (!pm8001_ha) { rc = -ENOMEM; goto err_out_free; } list_add_tail(&pm8001_ha->list, &hba_list); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "chip_init failed [ret: %d]\n", rc)); goto err_out_ha_free; + } rc = scsi_add_host(shost, 
&pdev->dev); if (rc) goto err_out_ha_free; rc = pm8001_request_irq(pm8001_ha); - if (rc) + if (rc) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "pm8001_request_irq failed [ret: %d]\n", rc)); goto err_out_shost; + } + + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + /* setup thermal configuration. */ + pm80xx_set_thermal_config(pm8001_ha); + } - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); pm8001_init_sas_add(pm8001_ha); pm8001_post_sas_ha_init(shost, chip); rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); @@ -719,14 +877,15 @@ static void pm8001_pci_remove(struct pci_dev *pdev) sas_remove_host(pm8001_ha->shost); list_del(&pm8001_ha->list); scsi_remove_host(pm8001_ha->shost); - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &pm8001_ha->outq[i]); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); @@ -763,13 +922,14 @@ static int pm8001_pci_suspend(struct pci_dev *pdev, pm_message_t state) printk(KERN_ERR " PCI PM not supported\n"); return -ENODEV; } - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); #ifdef PM8001_USE_MSIX for (i = 0; i < pm8001_ha->number_of_intr; i++) synchronize_irq(pm8001_ha->msix_entries[i].vector); for (i = 0; i < pm8001_ha->number_of_intr; i++) - free_irq(pm8001_ha->msix_entries[i].vector, sha); + free_irq(pm8001_ha->msix_entries[i].vector, + &pm8001_ha->outq[i]); pci_disable_msix(pdev); #else free_irq(pm8001_ha->irq, sha); @@ -798,6 +958,7 @@ static int pm8001_pci_resume(struct pci_dev *pdev) struct sas_ha_struct *sha = pci_get_drvdata(pdev); struct pm8001_hba_info *pm8001_ha; int rc; + u8 i = 0; u32 device_state; pm8001_ha = sha->lldd_ha; device_state = pdev->current_state; @@ -820,19 +981,33 @@ static int pm8001_pci_resume(struct pci_dev *pdev) if (rc) goto err_out_disable; - PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha, 0x252acbcd); + /* chip soft rst only for spc */ + if (pm8001_ha->chip_id == chip_8001) { + PM8001_CHIP_DISP->chip_soft_rst(pm8001_ha); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip soft reset successful\n")); + } rc = PM8001_CHIP_DISP->chip_init(pm8001_ha); if (rc) goto err_out_disable; - PM8001_CHIP_DISP->interrupt_disable(pm8001_ha); + + /* disable all the interrupt bits */ + PM8001_CHIP_DISP->interrupt_disable(pm8001_ha, 0xFF); + rc = pm8001_request_irq(pm8001_ha); if (rc) goto err_out_disable; - #ifdef PM8001_USE_TASKLET +#ifdef PM8001_USE_TASKLET + /* default tasklet for non msi-x interrupt handler/first msi-x + * interrupt handler */ tasklet_init(&pm8001_ha->tasklet, pm8001_tasklet, - (unsigned long)pm8001_ha); - #endif - PM8001_CHIP_DISP->interrupt_enable(pm8001_ha); + (unsigned long)pm8001_ha); +#endif + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0); + if (pm8001_ha->chip_id != chip_8001) { + for (i = 1; i < pm8001_ha->number_of_intr; i++) + PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i); + } 
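/*
 * Illustrative sketch (editor's addition, not part of the patch): the
 * interrupt_enable/interrupt_disable dispatch callbacks now take a vector
 * number.  The probe and resume paths above always enable vector 0 and, on
 * the SPCv/SPCve parts (any chip other than chip_8001), enable the remaining
 * MSI-X vectors as well; 0xFF is passed to interrupt_disable() to mean "all
 * vectors".  A hypothetical helper capturing that pattern:
 */
static void pm8001_enable_all_vectors(struct pm8001_hba_info *pm8001_ha)
{
	u8 i;

	/* vector 0 serves both INT-X and the first MSI-X interrupt */
	PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, 0);
	if (pm8001_ha->chip_id != chip_8001)
		for (i = 1; i < pm8001_ha->number_of_intr; i++)
			PM8001_CHIP_DISP->interrupt_enable(pm8001_ha, i);
}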
scsi_unblock_requests(pm8001_ha->shost); return 0; @@ -843,14 +1018,45 @@ static int pm8001_pci_resume(struct pci_dev *pdev) return rc; } +/* update of pci device, vendor id and driver data with + * unique value for each of the controller + */ static struct pci_device_id pm8001_pci_table[] = { - { - PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 - }, + { PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001 }, { PCI_DEVICE(0x117c, 0x0042), .driver_data = chip_8001 }, + /* Support for SPC/SPCv/SPCve controllers */ + { PCI_VDEVICE(ADAPTEC2, 0x8001), chip_8001 }, + { PCI_VDEVICE(PMC_Sierra, 0x8008), chip_8008 }, + { PCI_VDEVICE(ADAPTEC2, 0x8008), chip_8008 }, + { PCI_VDEVICE(PMC_Sierra, 0x8018), chip_8018 }, + { PCI_VDEVICE(ADAPTEC2, 0x8018), chip_8018 }, + { PCI_VDEVICE(PMC_Sierra, 0x8009), chip_8009 }, + { PCI_VDEVICE(ADAPTEC2, 0x8009), chip_8009 }, + { PCI_VDEVICE(PMC_Sierra, 0x8019), chip_8019 }, + { PCI_VDEVICE(ADAPTEC2, 0x8019), chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0400, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8081, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8001 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8008 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0008, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0800, 0, 0, chip_8009 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8088, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8018 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x0016, 0, 0, chip_8019 }, + { PCI_VENDOR_ID_ADAPTEC2, 0x8089, + PCI_VENDOR_ID_ADAPTEC2, 0x1600, 0, 0, chip_8019 }, {} /* terminate list */ }; @@ -870,7 +1076,7 @@ static int __init pm8001_init(void) { int rc = -ENOMEM; - pm8001_wq = alloc_workqueue("pm8001", 0, 0); + pm8001_wq = alloc_workqueue("pm80xx", 0, 0); if (!pm8001_wq) goto err; @@ -902,7 +1108,8 @@ module_init(pm8001_init); module_exit(pm8001_exit); MODULE_AUTHOR("Jack Wang "); -MODULE_DESCRIPTION("PMC-Sierra PM8001 SAS/SATA controller driver"); +MODULE_DESCRIPTION( + "PMC-Sierra PM8001/8081/8088/8089 SAS/SATA controller driver"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, pm8001_pci_table); diff --git a/trunk/drivers/scsi/pm8001/pm8001_sas.c b/trunk/drivers/scsi/pm8001/pm8001_sas.c index b961112395d5..a85d73de7c80 100644 --- a/trunk/drivers/scsi/pm8001/pm8001_sas.c +++ b/trunk/drivers/scsi/pm8001/pm8001_sas.c @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -68,7 +68,7 @@ static void pm8001_tag_clear(struct pm8001_hba_info *pm8001_ha, u32 tag) clear_bit(tag, bitmap); } -static void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag) { pm8001_tag_clear(pm8001_ha, tag); } @@ -212,10 +212,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_GET_EVENTS: spin_lock_irqsave(&pm8001_ha->lock, flags); - if (-1 == pm8001_bar4_shift(pm8001_ha, + if (pm8001_ha->chip_id == chip_8001) { + if (-1 == pm8001_bar4_shift(pm8001_ha, (phy_id < 4) ? 
0x30000 : 0x40000)) { - spin_unlock_irqrestore(&pm8001_ha->lock, flags); - return -EINVAL; + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return -EINVAL; + } } { struct sas_phy *phy = sas_phy->phy; @@ -228,7 +230,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, phy->loss_of_dword_sync_count = qp[3]; phy->phy_reset_problem_count = qp[4]; } - pm8001_bar4_shift(pm8001_ha, 0); + if (pm8001_ha->chip_id == chip_8001) + pm8001_bar4_shift(pm8001_ha, 0); spin_unlock_irqrestore(&pm8001_ha->lock, flags); return 0; default: @@ -249,7 +252,9 @@ void pm8001_scan_start(struct Scsi_Host *shost) struct pm8001_hba_info *pm8001_ha; struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); pm8001_ha = sha->lldd_ha; - PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); + /* SAS_RE_INITIALIZATION not available in SPCv/ve */ + if (pm8001_ha->chip_id == chip_8001) + PM8001_CHIP_DISP->sas_re_init_req(pm8001_ha); for (i = 0; i < pm8001_ha->chip->n_phy; ++i) PM8001_CHIP_DISP->phy_start_req(pm8001_ha, i); } @@ -352,7 +357,7 @@ static int sas_find_local_port_id(struct domain_device *dev) * @tmf: the task management IU */ #define DEV_IS_GONE(pm8001_dev) \ - ((!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE))) + ((!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED))) static int pm8001_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags, int is_tmf, struct pm8001_tmf_task *tmf) { @@ -370,7 +375,7 @@ static int pm8001_task_exec(struct sas_task *task, const int num, struct task_status_struct *tsm = &t->task_status; tsm->resp = SAS_TASK_UNDELIVERED; tsm->stat = SAS_PHY_DOWN; - if (dev->dev_type != SATA_DEV) + if (dev->dev_type != SAS_SATA_DEV) t->task_done(t); return 0; } @@ -548,7 +553,7 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { - if (pm8001_ha->devices[dev].dev_type == NO_DEVICE) { + if (pm8001_ha->devices[dev].dev_type == SAS_PHY_UNUSED) { pm8001_ha->devices[dev].id = dev; return &pm8001_ha->devices[dev]; } @@ -560,13 +565,31 @@ struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) } return NULL; } +/** + * pm8001_find_dev - find a matching pm8001_device + * @pm8001_ha: our hba card information + */ +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id) +{ + u32 dev; + for (dev = 0; dev < PM8001_MAX_DEVICES; dev++) { + if (pm8001_ha->devices[dev].device_id == device_id) + return &pm8001_ha->devices[dev]; + } + if (dev == PM8001_MAX_DEVICES) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("NO MATCHING " + "DEVICE FOUND !!!\n")); + } + return NULL; +} static void pm8001_free_dev(struct pm8001_device *pm8001_dev) { u32 id = pm8001_dev->id; memset(pm8001_dev, 0, sizeof(*pm8001_dev)); pm8001_dev->id = id; - pm8001_dev->dev_type = NO_DEVICE; + pm8001_dev->dev_type = SAS_PHY_UNUSED; pm8001_dev->device_id = PM8001_MAX_DEVICES; pm8001_dev->sas_device = NULL; } @@ -624,7 +647,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) res = -1; } } else { - if (dev->dev_type == SATA_DEV) { + if (dev->dev_type == SAS_SATA_DEV) { pm8001_device->attached_phy = dev->rphy->identify.phy_identifier; flag = 1; /* directly sata*/ @@ -634,7 +657,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); spin_unlock_irqrestore(&pm8001_ha->lock, flags); wait_for_completion(&completion); - if (dev->dev_type == SAS_END_DEV) + if (dev->dev_type == SAS_END_DEVICE) msleep(50); 
pm8001_ha->flags = PM8001F_RUN_TIME; return 0; @@ -648,7 +671,7 @@ int pm8001_dev_found(struct domain_device *dev) return pm8001_dev_found_notify(dev); } -static void pm8001_task_done(struct sas_task *task) +void pm8001_task_done(struct sas_task *task) { if (!del_timer(&task->slow_task->timer)) return; @@ -904,7 +927,7 @@ void pm8001_open_reject_retry( struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[i]; pm8001_dev = ccb->device; - if (!pm8001_dev || (pm8001_dev->dev_type == NO_DEVICE)) + if (!pm8001_dev || (pm8001_dev->dev_type == SAS_PHY_UNUSED)) continue; if (!device_to_close) { uintptr_t d = (uintptr_t)pm8001_dev @@ -995,6 +1018,72 @@ int pm8001_I_T_nexus_reset(struct domain_device *dev) return rc; } +/* +* This function handle the IT_NEXUS_XXX event or completion +* status code for SSP/SATA/SMP I/O request. +*/ +int pm8001_I_T_nexus_event_handler(struct domain_device *dev) +{ + int rc = TMF_RESP_FUNC_FAILED; + struct pm8001_device *pm8001_dev; + struct pm8001_hba_info *pm8001_ha; + struct sas_phy *phy; + u32 device_id = 0; + + if (!dev || !dev->lldd_dev) + return -1; + + pm8001_dev = dev->lldd_dev; + device_id = pm8001_dev->device_id; + pm8001_ha = pm8001_find_ha_by_dev(dev); + + PM8001_EH_DBG(pm8001_ha, + pm8001_printk("I_T_Nexus handler invoked !!")); + + phy = sas_get_local_phy(dev); + + if (dev_is_sata(dev)) { + DECLARE_COMPLETION_ONSTACK(completion_setstate); + if (scsi_is_sas_phy_local(phy)) { + rc = 0; + goto out; + } + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + pm8001_dev->setds_completion = &completion_setstate; + + wait_for_completion(&completion_setstate); + } else { + /* send internal ssp/sata/smp abort command to FW */ + rc = pm8001_exec_internal_task_abort(pm8001_ha, pm8001_dev , + dev, 1, 0); + msleep(100); + + /* deregister the target device */ + pm8001_dev_gone_notify(dev); + msleep(200); + + /*send phy reset to hard reset target */ + rc = sas_phy_reset(phy, 1); + msleep(2000); + } + PM8001_EH_DBG(pm8001_ha, pm8001_printk(" for device[%x]:rc=%d\n", + pm8001_dev->device_id, rc)); +out: + sas_put_local_phy(phy); + + return rc; +} /* mandatory SAM-3, the task reset the specified LUN*/ int pm8001_lu_reset(struct domain_device *dev, u8 *lun) { diff --git a/trunk/drivers/scsi/pm8001/pm8001_sas.h b/trunk/drivers/scsi/pm8001/pm8001_sas.h index 11008205aeb3..570819464d90 100644 --- a/trunk/drivers/scsi/pm8001/pm8001_sas.h +++ b/trunk/drivers/scsi/pm8001/pm8001_sas.h @@ -1,5 +1,5 @@ /* - * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver + * PMC-Sierra PM8001/8081/8088/8089 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. @@ -57,8 +57,8 @@ #include #include "pm8001_defs.h" -#define DRV_NAME "pm8001" -#define DRV_VERSION "0.1.36" +#define DRV_NAME "pm80xx" +#define DRV_VERSION "0.1.37" #define PM8001_FAIL_LOGGING 0x01 /* Error message logging */ #define PM8001_INIT_LOGGING 0x02 /* driver init logging */ #define PM8001_DISC_LOGGING 0x04 /* discovery layer logging */ @@ -66,8 +66,8 @@ #define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ #define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ #define PM8001_MSG_LOGGING 0x40 /* misc message logging */ -#define pm8001_printk(format, arg...) 
printk(KERN_INFO "%s %d:" format,\ - __func__, __LINE__, ## arg) +#define pm8001_printk(format, arg...) printk(KERN_INFO "pm80xx %s %d:" \ + format, __func__, __LINE__, ## arg) #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ do { \ if (unlikely(HBA->logging_level & LEVEL)) \ @@ -103,11 +103,12 @@ do { \ #define PM8001_READ_VPD -#define DEV_IS_EXPANDER(type) ((type == EDGE_DEV) || (type == FANOUT_DEV)) +#define DEV_IS_EXPANDER(type) ((type == SAS_EDGE_EXPANDER_DEVICE) || (type == SAS_FANOUT_EXPANDER_DEVICE)) #define PM8001_NAME_LENGTH 32/* generic length of strings */ extern struct list_head hba_list; extern const struct pm8001_dispatch pm8001_8001_dispatch; +extern const struct pm8001_dispatch pm8001_80xx_dispatch; struct pm8001_hba_info; struct pm8001_ccb_info; @@ -131,15 +132,15 @@ struct pm8001_ioctl_payload { struct pm8001_dispatch { char *name; int (*chip_init)(struct pm8001_hba_info *pm8001_ha); - int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha, u32 signature); + int (*chip_soft_rst)(struct pm8001_hba_info *pm8001_ha); void (*chip_rst)(struct pm8001_hba_info *pm8001_ha); int (*chip_ioremap)(struct pm8001_hba_info *pm8001_ha); void (*chip_iounmap)(struct pm8001_hba_info *pm8001_ha); - irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha); + irqreturn_t (*isr)(struct pm8001_hba_info *pm8001_ha, u8 vec); u32 (*is_our_interupt)(struct pm8001_hba_info *pm8001_ha); - int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha); - void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha); + int (*isr_process_oq)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_enable)(struct pm8001_hba_info *pm8001_ha, u8 vec); + void (*interrupt_disable)(struct pm8001_hba_info *pm8001_ha, u8 vec); void (*make_prd)(struct scatterlist *scatter, int nr, void *prd); int (*smp_req)(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb); @@ -173,6 +174,7 @@ struct pm8001_dispatch { }; struct pm8001_chip_info { + u32 encrypt; u32 n_phy; const struct pm8001_dispatch *dispatch; }; @@ -204,7 +206,7 @@ struct pm8001_phy { }; struct pm8001_device { - enum sas_dev_type dev_type; + enum sas_device_type dev_type; struct domain_device *sas_device; u32 attached_phy; u32 id; @@ -256,7 +258,20 @@ struct mpi_mem_req { struct mpi_mem region[USI_MAX_MEMCNT]; }; -struct main_cfg_table { +struct encrypt { + u32 cipher_mode; + u32 sec_mode; + u32 status; + u32 flag; +}; + +struct sas_phy_attribute_table { + u32 phystart1_16[16]; + u32 outbound_hw_event_pid1_16[16]; +}; + +union main_cfg_table { + struct { u32 signature; u32 interface_rev; u32 firmware_rev; @@ -292,19 +307,69 @@ struct main_cfg_table { u32 fatal_err_dump_length1; u32 hda_mode_flag; u32 anolog_setup_table_offset; + u32 rsvd[4]; + } pm8001_tbl; + + struct { + u32 signature; + u32 interface_rev; + u32 firmware_rev; + u32 max_out_io; + u32 max_sgl; + u32 ctrl_cap_flag; + u32 gst_offset; + u32 inbound_queue_offset; + u32 outbound_queue_offset; + u32 inbound_q_nppd_hppd; + u32 rsvd[8]; + u32 crc_core_dump; + u32 rsvd1; + u32 upper_event_log_addr; + u32 lower_event_log_addr; + u32 event_log_size; + u32 event_log_severity; + u32 upper_pcs_event_log_addr; + u32 lower_pcs_event_log_addr; + u32 pcs_event_log_size; + u32 pcs_event_log_severity; + u32 fatal_err_interrupt; + u32 fatal_err_dump_offset0; + u32 fatal_err_dump_length0; + u32 fatal_err_dump_offset1; + u32 fatal_err_dump_length1; + u32 gpio_led_mapping; + u32 analog_setup_table_offset; + u32 int_vec_table_offset; + u32 
phy_attr_table_offset; + u32 port_recovery_timer; + u32 interrupt_reassertion_delay; + } pm80xx_tbl; }; -struct general_status_table { + +union general_status_table { + struct { u32 gst_len_mpistate; u32 iq_freeze_state0; u32 iq_freeze_state1; u32 msgu_tcnt; u32 iop_tcnt; - u32 reserved; + u32 rsvd; u32 phy_state[8]; - u32 reserved1; - u32 reserved2; - u32 reserved3; + u32 gpio_input_val; + u32 rsvd1[2]; + u32 recover_err_info[8]; + } pm8001_tbl; + struct { + u32 gst_len_mpistate; + u32 iq_freeze_state0; + u32 iq_freeze_state1; + u32 msgu_tcnt; + u32 iop_tcnt; + u32 rsvd[9]; + u32 gpio_input_val; + u32 rsvd1[2]; u32 recover_err_info[8]; + } pm80xx_tbl; }; struct inbound_queue_table { u32 element_pri_size_cnt; @@ -351,15 +416,21 @@ struct pm8001_hba_info { struct device *dev; struct pm8001_hba_memspace io_mem[6]; struct mpi_mem_req memoryMap; + struct encrypt encrypt_info; /* support encryption */ void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ void __iomem *main_cfg_tbl_addr;/*Main Config Table Addr*/ void __iomem *general_stat_tbl_addr;/*General Status Table Addr*/ void __iomem *inbnd_q_tbl_addr;/*Inbound Queue Config Table Addr*/ void __iomem *outbnd_q_tbl_addr;/*Outbound Queue Config Table Addr*/ - struct main_cfg_table main_cfg_tbl; - struct general_status_table gs_tbl; - struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_INB_NUM]; - struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_OUTB_NUM]; + void __iomem *pspa_q_tbl_addr; + /*MPI SAS PHY attributes Queue Config Table Addr*/ + void __iomem *ivt_tbl_addr; /*MPI IVT Table Addr */ + union main_cfg_table main_cfg_tbl; + union general_status_table gs_tbl; + struct inbound_queue_table inbnd_q_tbl[PM8001_MAX_SPCV_INB_NUM]; + struct outbound_queue_table outbnd_q_tbl[PM8001_MAX_SPCV_OUTB_NUM]; + struct sas_phy_attribute_table phy_attr_table; + /* MPI SAS PHY attributes */ u8 sas_addr[SAS_ADDR_SIZE]; struct sas_ha_struct *sas;/* SCSI/SAS glue */ struct Scsi_Host *shost; @@ -372,10 +443,12 @@ struct pm8001_hba_info { struct pm8001_port port[PM8001_MAX_PHYS]; u32 id; u32 irq; + u32 iomb_size; /* SPC and SPCV IOMB size */ struct pm8001_device *devices; struct pm8001_ccb_info *ccb_info; #ifdef PM8001_USE_MSIX - struct msix_entry msix_entries[16];/*for msi-x interrupt*/ + struct msix_entry msix_entries[PM8001_MAX_MSIX_VEC]; + /*for msi-x interrupt*/ int number_of_intr;/*will be used in remove()*/ #endif #ifdef PM8001_USE_TASKLET @@ -383,7 +456,10 @@ struct pm8001_hba_info { #endif u32 logging_level; u32 fw_status; + u32 smp_exp_mode; + u32 int_vector; const struct firmware *fw_image; + u8 outq[PM8001_MAX_MSIX_VEC]; }; struct pm8001_work { @@ -419,6 +495,9 @@ struct pm8001_fw_image_header { #define FLASH_UPDATE_DNLD_NOT_SUPPORTED 0x10 #define FLASH_UPDATE_DISABLED 0x11 +#define NCQ_READ_LOG_FLAG 0x80000000 +#define NCQ_ABORT_ALL_FLAG 0x40000000 +#define NCQ_2ND_RLE_FLAG 0x20000000 /** * brief param structure for firmware flash update. 
*/ @@ -484,6 +563,7 @@ int pm8001_dev_found(struct domain_device *dev); void pm8001_dev_gone(struct domain_device *dev); int pm8001_lu_reset(struct domain_device *dev, u8 *lun); int pm8001_I_T_nexus_reset(struct domain_device *dev); +int pm8001_I_T_nexus_event_handler(struct domain_device *dev); int pm8001_query_task(struct sas_task *task); void pm8001_open_reject_retry( struct pm8001_hba_info *pm8001_ha, @@ -493,6 +573,61 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align); +void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha); +int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, + struct inbound_queue_table *circularQ, + u32 opCode, void *payload, u32 responseQueue); +int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, + u16 messageSize, void **messagePtr); +u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, + struct outbound_queue_table *circularQ, u8 bc); +u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, + struct outbound_queue_table *circularQ, + void **messagePtr1, u8 *pBC); +int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 state); +int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, + void *payload); +int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, + void *fw_flash_updata_info, u32 tag); +int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload); +int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb, + struct pm8001_tmf_task *tmf); +int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, + u8 flag, u32 task_tag, u32 cmd_tag); +int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id); +void pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd); +void pm8001_work_fn(struct work_struct *work); +int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, + void *data, int handler); +void pm8001_mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, + void *piomb); +void pm8001_get_lrate_mode(struct pm8001_phy *phy, u8 link_rate); +void pm8001_get_attached_sas_addr(struct pm8001_phy *phy, u8 *sas_addr); +void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i); +int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb); +int pm8001_mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb); +int pm8001_mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb); +struct sas_task *pm8001_alloc_task(void); +void pm8001_task_done(struct sas_task *task); +void pm8001_free_task(struct sas_task *task); +void pm8001_tag_free(struct pm8001_hba_info *pm8001_ha, u32 tag); +struct pm8001_device *pm8001_find_dev(struct pm8001_hba_info *pm8001_ha, + u32 device_id); +int pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha); + int pm8001_bar4_shift(struct pm8001_hba_info 
*pm8001_ha, u32 shiftValue); /* ctl shared API */ diff --git a/trunk/drivers/scsi/pm8001/pm80xx_hwi.c b/trunk/drivers/scsi/pm8001/pm80xx_hwi.c new file mode 100644 index 000000000000..302514d8157b --- /dev/null +++ b/trunk/drivers/scsi/pm8001/pm80xx_hwi.c @@ -0,0 +1,4130 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 PMC-Sierra, Inc., + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. + * + */ + #include + #include "pm8001_sas.h" + #include "pm80xx_hwi.h" + #include "pm8001_chips.h" + #include "pm8001_ctl.h" + +#define SMP_DIRECT 1 +#define SMP_INDIRECT 2 +/** + * read_main_config_table - read the configure table and save it. 
+ * @pm8001_ha: our hba card information + */ +static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature = + pm8001_mr32(address, MAIN_SIGNATURE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev = + pm8001_mr32(address, MAIN_INTERFACE_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev = + pm8001_mr32(address, MAIN_FW_REVISION); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_out_io = + pm8001_mr32(address, MAIN_MAX_OUTSTANDING_IO_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.max_sgl = + pm8001_mr32(address, MAIN_MAX_SGL_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ctrl_cap_flag = + pm8001_mr32(address, MAIN_CNTRL_CAP_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset = + pm8001_mr32(address, MAIN_GST_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset = + pm8001_mr32(address, MAIN_IBQ_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset = + pm8001_mr32(address, MAIN_OBQ_OFFSET); + + /* read Error Dump Offset and Length */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length0 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_offset1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_dump_length1 = + pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH); + + /* read GPIO LED settings from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping = + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET); + + /* read analog Setting offset from the configuration table */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.analog_setup_table_offset = + pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET); + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset = + pm8001_mr32(address, MAIN_INT_VECTOR_TABLE_OFFSET); + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset = + pm8001_mr32(address, MAIN_SAS_PHY_ATTR_TABLE_OFFSET); +} + +/** + * read_general_status_table - read the general status table and save it. 
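
[Illustrative sketch, not part of the patch] read_main_config_table() above mirrors the BAR-mapped SPCv main configuration table into the host-side pm80xx_tbl struct, one 32-bit word per named offset; pm8001_mr32() is effectively a 32-bit MMIO read at base + offset. The userspace model below reproduces only that access pattern. The three MAIN_* offsets are placeholders (the real values live in pm80xx_hwi.h), and the memcpy read stands in for readl() on ioremapped memory.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Placeholder offsets; the real MAIN_* offsets are defined in pm80xx_hwi.h. */
    #define MAIN_SIGNATURE_OFFSET   0x00
    #define MAIN_INTERFACE_REVISION 0x04
    #define MAIN_FW_REVISION        0x08

    struct main_cfg {
        uint32_t signature;
        uint32_t interface_rev;
        uint32_t firmware_rev;
    };

    /* Userspace stand-in for pm8001_mr32(): a 32-bit read at base + offset. */
    static uint32_t mr32(const uint8_t *base, uint32_t off)
    {
        uint32_t v;
        memcpy(&v, base + off, sizeof(v)); /* the driver uses readl() on iomapped memory */
        return v;
    }

    static void read_main_cfg(const uint8_t *table, struct main_cfg *cfg)
    {
        cfg->signature     = mr32(table, MAIN_SIGNATURE_OFFSET);
        cfg->interface_rev = mr32(table, MAIN_INTERFACE_REVISION);
        cfg->firmware_rev  = mr32(table, MAIN_FW_REVISION);
    }

    int main(void)
    {
        uint8_t table[64] = {0};
        struct main_cfg cfg;
        table[0] = 0x53; /* fake signature byte */
        read_main_cfg(table, &cfg);
        printf("signature low byte: 0x%02x\n", cfg.signature & 0xff);
        return 0;
    }
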
+ * @pm8001_ha: our hba card information + */ +static void read_general_status_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->general_stat_tbl_addr; + pm8001_ha->gs_tbl.pm80xx_tbl.gst_len_mpistate = + pm8001_mr32(address, GST_GSTLEN_MPIS_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state0 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE0_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iq_freeze_state1 = + pm8001_mr32(address, GST_IQ_FREEZE_STATE1_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.msgu_tcnt = + pm8001_mr32(address, GST_MSGUTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.iop_tcnt = + pm8001_mr32(address, GST_IOPTCNT_OFFSET); + pm8001_ha->gs_tbl.pm80xx_tbl.gpio_input_val = + pm8001_mr32(address, GST_GPIO_INPUT_VAL); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[0] = + pm8001_mr32(address, GST_RERRINFO_OFFSET0); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[1] = + pm8001_mr32(address, GST_RERRINFO_OFFSET1); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[2] = + pm8001_mr32(address, GST_RERRINFO_OFFSET2); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[3] = + pm8001_mr32(address, GST_RERRINFO_OFFSET3); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[4] = + pm8001_mr32(address, GST_RERRINFO_OFFSET4); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[5] = + pm8001_mr32(address, GST_RERRINFO_OFFSET5); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[6] = + pm8001_mr32(address, GST_RERRINFO_OFFSET6); + pm8001_ha->gs_tbl.pm80xx_tbl.recover_err_info[7] = + pm8001_mr32(address, GST_RERRINFO_OFFSET7); +} +/** + * read_phy_attr_table - read the phy attribute table and save it. + * @pm8001_ha: our hba card information + */ +static void read_phy_attr_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->pspa_q_tbl_addr; + pm8001_ha->phy_attr_table.phystart1_16[0] = + pm8001_mr32(address, PSPA_PHYSTATE0_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[1] = + pm8001_mr32(address, PSPA_PHYSTATE1_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[2] = + pm8001_mr32(address, PSPA_PHYSTATE2_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[3] = + pm8001_mr32(address, PSPA_PHYSTATE3_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[4] = + pm8001_mr32(address, PSPA_PHYSTATE4_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[5] = + pm8001_mr32(address, PSPA_PHYSTATE5_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[6] = + pm8001_mr32(address, PSPA_PHYSTATE6_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[7] = + pm8001_mr32(address, PSPA_PHYSTATE7_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[8] = + pm8001_mr32(address, PSPA_PHYSTATE8_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[9] = + pm8001_mr32(address, PSPA_PHYSTATE9_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[10] = + pm8001_mr32(address, PSPA_PHYSTATE10_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[11] = + pm8001_mr32(address, PSPA_PHYSTATE11_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[12] = + pm8001_mr32(address, PSPA_PHYSTATE12_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[13] = + pm8001_mr32(address, PSPA_PHYSTATE13_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[14] = + pm8001_mr32(address, PSPA_PHYSTATE14_OFFSET); + pm8001_ha->phy_attr_table.phystart1_16[15] = + pm8001_mr32(address, PSPA_PHYSTATE15_OFFSET); + + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[0] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID0_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[1] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID1_OFFSET); + 
pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[2] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID2_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[3] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID3_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[4] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID4_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[5] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID5_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[6] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID6_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[7] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID7_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[8] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID8_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[9] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID9_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[10] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID10_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[11] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID11_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[12] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID12_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[13] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID13_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[14] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID14_OFFSET); + pm8001_ha->phy_attr_table.outbound_hw_event_pid1_16[15] = + pm8001_mr32(address, PSPA_OB_HW_EVENT_PID15_OFFSET); + +} + +/** + * read_inbnd_queue_table - read the inbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + u32 offset = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + IB_PIPCI_BAR))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(address, (offset + IB_PIPCI_BAR_OFFSET)); + } +} + +/** + * read_outbnd_queue_table - read the outbound queue table and save it. + * @pm8001_ha: our hba card information + */ +static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) +{ + int i; + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + u32 offset = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(address, + (offset + OB_CIPCI_BAR))); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(address, (offset + OB_CIPCI_BAR_OFFSET)); + } +} + +/** + * init_default_table_values - init the default table. 
+ * @pm8001_ha: our hba card information + */ +static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) +{ + int i; + u32 offsetib, offsetob; + void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr; + void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr; + + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr = + pm8001_ha->memoryMap.region[AAP1].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_hi; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr = + pm8001_ha->memoryMap.region[IOP].phys_addr_lo; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size = + PM8001_EVENT_LOG_SIZE; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity = 0x01; + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt = 0x01; + + /* Disable end to end CRC checking */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump = (0x1 << 16); + + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) { + pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt = + PM8001_MPI_QUEUE | (64 << 16) | (0x00<<30); + pm8001_ha->inbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[IB + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[IB + i].virt_ptr; + pm8001_ha->inbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[IB + i].total_len; + pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_hi; + pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr = + pm8001_ha->memoryMap.region[CI + i].phys_addr_lo; + pm8001_ha->inbnd_q_tbl[i].ci_virt = + pm8001_ha->memoryMap.region[CI + i].virt_ptr; + offsetib = i * 0x20; + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = + get_pci_bar_index(pm8001_mr32(addressib, + (offsetib + 0x14))); + pm8001_ha->inbnd_q_tbl[i].pi_offset = + pm8001_mr32(addressib, (offsetib + 0x18)); + pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; + pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + } + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { + pm8001_ha->outbnd_q_tbl[i].element_size_cnt = + PM8001_MPI_QUEUE | (64 << 16) | (0x01<<30); + pm8001_ha->outbnd_q_tbl[i].upper_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].lower_base_addr = + pm8001_ha->memoryMap.region[OB + i].phys_addr_lo; + pm8001_ha->outbnd_q_tbl[i].base_virt = + (u8 *)pm8001_ha->memoryMap.region[OB + i].virt_ptr; + pm8001_ha->outbnd_q_tbl[i].total_length = + pm8001_ha->memoryMap.region[OB + i].total_len; + pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_hi; + pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr = + pm8001_ha->memoryMap.region[PI + i].phys_addr_lo; + /* interrupt vector based on oq */ + pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay = (i << 24); + pm8001_ha->outbnd_q_tbl[i].pi_virt = + pm8001_ha->memoryMap.region[PI + i].virt_ptr; + offsetob = i * 0x24; + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = + get_pci_bar_index(pm8001_mr32(addressob, + offsetob + 0x14)); + pm8001_ha->outbnd_q_tbl[i].ci_offset = + pm8001_mr32(addressob, (offsetob + 0x18)); + pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; + pm8001_ha->outbnd_q_tbl[i].producer_index = 0; 
+ } +} + +/** + * update_main_config_table - update the main default table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *address = pm8001_ha->main_cfg_tbl_addr; + pm8001_mw32(address, MAIN_IQNPPD_HPPD_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_q_nppd_hppd); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_event_log_addr); + pm8001_mw32(address, MAIN_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_size); + pm8001_mw32(address, MAIN_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.event_log_severity); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_HI, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.upper_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_ADDR_LO, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.lower_pcs_event_log_addr); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_BUFF_SIZE, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_size); + pm8001_mw32(address, MAIN_PCS_EVENT_LOG_OPTION, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.pcs_event_log_severity); + pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); + pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump); + + /* SPCv specific */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping &= 0xCFFFFFFF; + /* Set GPIOLED to 0x2 for LED indicator */ + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000; + pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping); + + pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); + pm8001_mw32(address, MAIN_INT_REASSERTION_DELAY, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interrupt_reassertion_delay); +} + +/** + * update_inbnd_queue_table - update the inbound queue table to the HBA. + * @pm8001_ha: our hba card information + */ +static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; + u16 offset = number * 0x20; + pm8001_mw32(address, offset + IB_PROPERITY_OFFSET, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt); + pm8001_mw32(address, offset + IB_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + IB_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_HI_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); + pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); +} + +/** + * update_outbnd_queue_table - update the outbound queue table to the HBA. 
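
[Illustrative sketch, not part of the patch] The queue-table code above addresses per-queue config entries by a fixed stride (i * 0x20 for inbound, i * 0x24 for outbound), packs the element count, element size and priority bits into one property word, and stores "interrupt vector = queue index" in the upper byte of the outbound coalescing field. The sketch below shows only that arithmetic; QUEUE_DEPTH is a placeholder for PM8001_MPI_QUEUE (its value is not shown in these hunks), and the bit-field interpretation is inferred from the expressions above.

    #include <stdint.h>
    #include <stdio.h>

    #define IB_ENTRY_STRIDE 0x20 /* from the i * 0x20 used for inbound entries */
    #define OB_ENTRY_STRIDE 0x24 /* from the i * 0x24 used for outbound entries */
    #define QUEUE_DEPTH     1024 /* placeholder for PM8001_MPI_QUEUE */

    /* Offset of queue n's config entry inside the inbound/outbound queue table. */
    static uint32_t ib_entry_offset(unsigned n) { return n * IB_ENTRY_STRIDE; }
    static uint32_t ob_entry_offset(unsigned n) { return n * OB_ENTRY_STRIDE; }

    /*
     * Property word as packed in init_default_table_values(): element count in
     * the low bits, element size (64 bytes) in bits 16.., a flag in bit 30.
     */
    static uint32_t queue_property(uint32_t depth, uint32_t elem_size, uint32_t flag)
    {
        return depth | (elem_size << 16) | (flag << 30);
    }

    /* Outbound queues also carry "interrupt vector = queue index" in bits 24.. */
    static uint32_t ob_int_vec(unsigned n) { return (uint32_t)n << 24; }

    int main(void)
    {
        printf("IB[2] config at 0x%x, OB[2] config at 0x%x\n",
               ib_entry_offset(2), ob_entry_offset(2));
        printf("property word 0x%08x, OB[2] vector field 0x%08x\n",
               queue_property(QUEUE_DEPTH, 64, 0), ob_int_vec(2));
        return 0;
    }
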
+ * @pm8001_ha: our hba card information + */ +static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, + int number) +{ + void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; + u16 offset = number * 0x24; + pm8001_mw32(address, offset + OB_PROPERITY_OFFSET, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt); + pm8001_mw32(address, offset + OB_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].upper_base_addr); + pm8001_mw32(address, offset + OB_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_HI_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr); + pm8001_mw32(address, offset + OB_PI_BASE_ADDR_LO_OFFSET, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); + pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET, + pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); +} + +/** + * mpi_init_check - check firmware initialization status. + * @pm8001_ha: our hba card information + */ +static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + + /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the + table is updated */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000;/* 2 sec for spcv/ve */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_UPDATE; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) + return -1; + /* check the MPI-State for initialization upto 100ms*/ + max_wait_count = 100 * 1000;/* 100 msec */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + } while ((GST_MPI_STATE_INIT != + (gst_len_mpistate & GST_MPI_STATE_MASK)) && (--max_wait_count)); + if (!max_wait_count) + return -1; + + /* check MPI Initialization error */ + gst_len_mpistate = gst_len_mpistate >> 16; + if (0x0000 != gst_len_mpistate) + return -1; + + return 0; +} + +/** + * check_fw_ready - The LLDD check if the FW is ready, if not, return error. 
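
[Illustrative sketch, not part of the patch] mpi_init_check() above follows the usual doorbell handshake: write the table-update bit to the inbound doorbell, then poll about once per microsecond until the firmware clears it or the iteration budget (2 * 1000 * 1000, roughly 2 seconds on SPCv/ve) runs out, then poll the GST MPI state the same way. The sketch below captures only that bounded-poll shape, with the register read abstracted so it runs in userspace.

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Shape of the wait loops in mpi_init_check(): poll (1 us per iteration in
     * the driver) until the firmware clears the bit or the budget runs out.
     */
    static int poll_until_clear(uint32_t (*read_reg)(void), uint32_t bit,
                                uint32_t max_iter)
    {
        while (max_iter--) {
            /* udelay(1) in the driver */
            if ((read_reg() & bit) == 0)
                return 0;   /* firmware acknowledged */
        }
        return -1;          /* timed out; the caller fails the init */
    }

    static uint32_t countdown = 5;
    static uint32_t fake_doorbell(void)
    {
        return countdown ? (countdown--, 0x1u) : 0x0u; /* bit clears after a few reads */
    }

    int main(void)
    {
        printf("doorbell cleared: %s\n",
               poll_until_clear(fake_doorbell, 0x1, 2 * 1000 * 1000) == 0 ? "yes" : "no");
        return 0;
    }
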
+ * @pm8001_ha: our hba card information + */ +static int check_fw_ready(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; + u32 max_wait_count; + u32 max_wait_time; + int ret = 0; + + /* reset / PCIe ready */ + max_wait_time = max_wait_count = 100 * 1000; /* 100 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while ((value == 0xFFFFFFFF) && (--max_wait_count)); + + /* check ila status */ + max_wait_time = max_wait_count = 1000 * 1000; /* 1000 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_ILA_READY) != + SCRATCH_PAD_ILA_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" ila ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check RAAE status */ + max_wait_time = max_wait_count = 1800 * 1000; /* 1800 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_RAAE_READY) != + SCRATCH_PAD_RAAE_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" raae ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop0 status */ + max_wait_time = max_wait_count = 600 * 1000; /* 600 milli sec */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) && + (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" iop0 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + + /* check iop1 status only for 16 port controllers */ + if ((pm8001_ha->chip_id != chip_8008) && + (pm8001_ha->chip_id != chip_8009)) { + /* 200 milli sec */ + max_wait_time = max_wait_count = 200 * 1000; + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1); + } while (((value & SCRATCH_PAD_IOP1_READY) != + SCRATCH_PAD_IOP1_READY) && (--max_wait_count)); + if (!max_wait_count) + ret = -1; + else { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "iop1 ready status in %d millisec\n", + (max_wait_time - max_wait_count))); + } + } + + return ret; +} + +static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) +{ + void __iomem *base_addr; + u32 value; + u32 offset; + u32 pcibar; + u32 pcilogic; + + value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); + offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */ + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n", + offset, value)); + pcilogic = (value & 0xFC000000) >> 26; + pcibar = get_pci_bar_index(pcilogic); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar)); + pm8001_ha->main_cfg_tbl_addr = base_addr = + pm8001_ha->io_mem[pcibar].memvirtaddr + offset; + pm8001_ha->general_stat_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) & + 0xFFFFFF); + pm8001_ha->inbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C) & + 0xFFFFFF); + pm8001_ha->outbnd_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x20) & + 0xFFFFFF); + pm8001_ha->ivt_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C) & + 0xFFFFFF); + pm8001_ha->pspa_q_tbl_addr = + base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x90) & + 0xFFFFFF); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("GST 
OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("INBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("OBND OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("IVT OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x8C))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PSPA OFFSET 0x%x\n", + pm8001_cr32(pm8001_ha, pcibar, offset + 0x90))); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - main cfg %p general status %p\n", + pm8001_ha->main_cfg_tbl_addr, + pm8001_ha->general_stat_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - inbnd %p obnd %p\n", + pm8001_ha->inbnd_q_tbl_addr, + pm8001_ha->outbnd_q_tbl_addr)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("addr - pspa %p ivt %p\n", + pm8001_ha->pspa_q_tbl_addr, + pm8001_ha->ivt_tbl_addr)); +} + +/** + * pm80xx_set_thermal_config - support the thermal configuration + * @pm8001_ha: our hba card information. + */ +int +pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + payload.cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | + (THERMAL_ENABLE << 8) | THERMAL_OP_CODE; + payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return rc; + +} + +/** +* pm80xx_set_sas_protocol_timer_config - support the SAS Protocol +* Timer configuration page +* @pm8001_ha: our hba card information. 
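
[Illustrative sketch, not part of the patch] init_pci_device_addresses() above decodes MSGU_SCRATCH_PAD_0: the low 26 bits give the main config table offset inside a BAR, the top 6 bits give the firmware's logical BAR number (translated by get_pci_bar_index()), and the subordinate tables (GST, inbound/outbound queue, IVT, PSPA) are then located through 24-bit offsets stored inside the main table. The masks below are taken from the code above; the example register value is invented.

    #include <stdint.h>
    #include <stdio.h>

    struct sp0 {
        uint32_t table_offset;  /* value & 0x03FFFFFF */
        uint32_t pci_bar_logic; /* (value & 0xFC000000) >> 26, fed to get_pci_bar_index() */
    };

    static struct sp0 decode_scratch_pad0(uint32_t value)
    {
        struct sp0 d;
        d.table_offset  = value & 0x03FFFFFF;
        d.pci_bar_logic = (value & 0xFC000000) >> 26;
        return d;
    }

    /* 24-bit offsets read from the main table (+0x18 GST, +0x1C IBQ, +0x20 OBQ, ...). */
    static uint32_t sub_table_offset(uint32_t raw)
    {
        return raw & 0xFFFFFF;
    }

    int main(void)
    {
        struct sp0 d = decode_scratch_pad0(0x08000400); /* example value only */
        printf("BAR logic %u, main table offset 0x%x, GST offset 0x%x\n",
               d.pci_bar_logic, d.table_offset, sub_table_offset(0x00123456));
        return 0;
    }
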
+*/ +static int +pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) +{ + struct set_ctrl_cfg_req payload; + struct inbound_queue_table *circularQ; + SASProtocolTimerConfig_t SASConfigPage; + int rc; + u32 tag; + u32 opc = OPC_INB_SET_CONTROLLER_CONFIG; + + memset(&payload, 0, sizeof(struct set_ctrl_cfg_req)); + memset(&SASConfigPage, 0, sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_tag_alloc(pm8001_ha, &tag); + + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + + SASConfigPage.pageCode = SAS_PROTOCOL_TIMER_CONFIG_PAGE; + SASConfigPage.MST_MSI = 3 << 15; + SASConfigPage.STP_SSP_MCT_TMO = (STP_MCT_TMO << 16) | SSP_MCT_TMO; + SASConfigPage.STP_FRM_TMO = (SAS_MAX_OPEN_TIME << 24) | + (SMP_MAX_CONN_TIMER << 16) | STP_FRM_TIMER; + SASConfigPage.STP_IDLE_TMO = STP_IDLE_TIME; + + if (SASConfigPage.STP_IDLE_TMO > 0x3FFFFFF) + SASConfigPage.STP_IDLE_TMO = 0x3FFFFFF; + + + SASConfigPage.OPNRJT_RTRY_INTVL = (SAS_MFD << 16) | + SAS_OPNRJT_RTRY_INTVL; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO = (SAS_DOPNRJT_RTRY_TMO << 16) + | SAS_COPNRJT_RTRY_TMO; + SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR = (SAS_DOPNRJT_RTRY_THR << 16) + | SAS_COPNRJT_RTRY_THR; + SASConfigPage.MAX_AIP = SAS_MAX_AIP; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.pageCode " + "0x%08x\n", SASConfigPage.pageCode)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.MST_MSI " + " 0x%08x\n", SASConfigPage.MST_MSI)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_SSP_MCT_TMO " + " 0x%08x\n", SASConfigPage.STP_SSP_MCT_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_FRM_TMO " + " 0x%08x\n", SASConfigPage.STP_FRM_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.STP_IDLE_TMO " + " 0x%08x\n", SASConfigPage.STP_IDLE_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.OPNRJT_RTRY_INTVL " + " 0x%08x\n", SASConfigPage.OPNRJT_RTRY_INTVL)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_TMO)); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR " + " 0x%08x\n", SASConfigPage.Data_Cmd_OPNRJT_RTRY_THR)); + PM8001_INIT_DBG(pm8001_ha, pm8001_printk("SASConfigPage.MAX_AIP " + " 0x%08x\n", SASConfigPage.MAX_AIP)); + + memcpy(&payload.cfg_pg, &SASConfigPage, + sizeof(SASProtocolTimerConfig_t)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_get_encrypt_info - Check for encryption + * @pm8001_ha: our hba card information. 
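
[Illustrative sketch, not part of the patch] pm80xx_set_thermal_config() and pm80xx_set_sas_protocol_timer_config() above share one pattern: allocate a tag, pack a controller configuration page into the cfg_pg words of a set_ctrl_cfg_req, and post it on inbound queue 0 with OPC_INB_SET_CONTROLLER_CONFIG via pm8001_mpi_build_cmd(). The sketch below shows the thermal-page packing only; the THERMAL_OP_CODE, LTEMPHIL and RTEMPHIL values are hypothetical (the real constants live in pm80xx_hwi.h), the struct is trimmed down, and the real payload fields are little-endian (cpu_to_le32).

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical values; the real THERMAL_, LTEMPHIL and RTEMPHIL constants
     * are defined in pm80xx_hwi.h. */
    #define THERMAL_OP_CODE    0x6
    #define THERMAL_ENABLE     1
    #define THERMAL_LOG_ENABLE 1
    #define LTEMPHIL           70
    #define RTEMPHIL           100

    /* Trimmed-down view of set_ctrl_cfg_req: a tag plus raw config page words. */
    struct ctrl_cfg_req {
        uint32_t tag;
        uint32_t cfg_pg[2];
    };

    /* Packing used by pm80xx_set_thermal_config(): word 0 carries the page code
     * and enable bits, word 1 carries the temperature thresholds. */
    static void build_thermal_page(struct ctrl_cfg_req *req, uint32_t tag)
    {
        req->tag = tag;
        req->cfg_pg[0] = (THERMAL_LOG_ENABLE << 9) | (THERMAL_ENABLE << 8) |
                         THERMAL_OP_CODE;
        req->cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8);
    }

    int main(void)
    {
        struct ctrl_cfg_req req;
        build_thermal_page(&req, 1);
        /* The driver then posts this on inbound queue 0 with
         * OPC_INB_SET_CONTROLLER_CONFIG via pm8001_mpi_build_cmd(). */
        printf("cfg_pg[0]=0x%08x cfg_pg[1]=0x%08x\n", req.cfg_pg[0], req.cfg_pg[1]);
        return 0;
    }
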
+ */ +static int +pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) +{ + u32 scratch3_value; + int ret; + + /* Read encryption status from SCRATCH PAD 3 */ + scratch3_value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3); + + if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_READY) { + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + pm8001_ha->encrypt_info.status = 0; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_READY 0x%08X." + "Cipher mode 0x%x Sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_READY) == + SCRATCH_PAD3_ENC_DISABLED) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENC_DISABLED 0x%08X\n", + scratch3_value)); + pm8001_ha->encrypt_info.status = 0xFFFFFFFF; + pm8001_ha->encrypt_info.cipher_mode = 0; + pm8001_ha->encrypt_info.sec_mode = 0; + return 0; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_DIS_ERR) { + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_DIS_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } else if ((scratch3_value & SCRATCH_PAD3_ENC_MASK) == + SCRATCH_PAD3_ENC_ENA_ERR) { + + pm8001_ha->encrypt_info.status = + (scratch3_value & SCRATCH_PAD3_ERR_CODE) >> 16; + if (scratch3_value & SCRATCH_PAD3_XTS_ENABLED) + pm8001_ha->encrypt_info.cipher_mode = CIPHER_MODE_XTS; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMF_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMF; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMA_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMA; + if ((scratch3_value & SCRATCH_PAD3_SM_MASK) == + SCRATCH_PAD3_SMB_ENABLED) + pm8001_ha->encrypt_info.sec_mode = SEC_MODE_SMB; + + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption: SCRATCH_PAD3_ENA_ERR 0x%08X." + "Cipher mode 0x%x sec mode 0x%x status 0x%x\n", + scratch3_value, pm8001_ha->encrypt_info.cipher_mode, + pm8001_ha->encrypt_info.sec_mode, + pm8001_ha->encrypt_info.status)); + ret = -1; + } + return ret; +} + +/** + * pm80xx_encrypt_update - update flash with encryption informtion + * @pm8001_ha: our hba card information. 
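
[Illustrative sketch, not part of the patch] pm80xx_get_encrypt_info() above classifies the encryption state reported in MSGU_SCRATCH_PAD_3 (ready, disabled, or enable/disable error), then pulls the cipher mode (XTS bit), the security mode field (SMF/SMA/SMB) and, for the error states, a 16-bit error code out of the same register value. The simplified decoder below follows that spirit; every mask value here is hypothetical, since the real SCRATCH_PAD3_* definitions are in pm80xx_hwi.h and not shown in this hunk.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical bit layout, standing in for the SCRATCH_PAD3_* masks. */
    #define ENC_STATE_MASK     0x00000003u
    #define ENC_STATE_READY    0x00000003u
    #define ENC_STATE_DISABLED 0x00000000u
    #define XTS_ENABLED        0x00000010u
    #define SEC_MODE_MASK      0x000000C0u
    #define ERR_CODE_MASK      0xFFFF0000u

    struct encrypt_info {
        uint32_t cipher_mode;
        uint32_t sec_mode;
        uint32_t status;
    };

    static int decode_encrypt(uint32_t sp3, struct encrypt_info *info)
    {
        if ((sp3 & ENC_STATE_MASK) == ENC_STATE_DISABLED) {
            info->status = 0xFFFFFFFF; /* "not available", as the driver records */
            info->cipher_mode = 0;
            info->sec_mode = 0;
            return 0;
        }
        info->cipher_mode = (sp3 & XTS_ENABLED) ? 1 : 0; /* 1 ~ CIPHER_MODE_XTS */
        info->sec_mode = (sp3 & SEC_MODE_MASK) >> 6;
        if ((sp3 & ENC_STATE_MASK) == ENC_STATE_READY) {
            info->status = 0;
            return 0;
        }
        info->status = (sp3 & ERR_CODE_MASK) >> 16; /* enable/disable error code */
        return -1;
    }

    int main(void)
    {
        struct encrypt_info info;
        printf("ready path rc=%d\n", decode_encrypt(0x00000013, &info));
        printf("error path rc=%d status=0x%x\n",
               decode_encrypt(0x00810002, &info), info.status);
        return 0;
    }
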
+ */ +static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) +{ + struct kek_mgmt_req payload; + struct inbound_queue_table *circularQ; + int rc; + u32 tag; + u32 opc = OPC_INB_KEK_MANAGEMENT; + + memset(&payload, 0, sizeof(struct kek_mgmt_req)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return -1; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(tag); + /* Currently only one key is used. New KEK index is 1. + * Current KEK index is 1. Store KEK to NVRAM is 1. + */ + payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) | + KEK_MGMT_SUBOP_KEYCARDUPDATE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm8001_chip_init - the main init function that initialize whole PM8001 chip. + * @pm8001_ha: our hba card information + */ +static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) +{ + int ret; + u8 i = 0; + + /* check the firmware status */ + if (-1 == check_fw_ready(pm8001_ha)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Firmware is not ready!\n")); + return -EBUSY; + } + + /* Initialize pci space address eg: mpi offset */ + init_pci_device_addresses(pm8001_ha); + init_default_table_values(pm8001_ha); + read_main_config_table(pm8001_ha); + read_general_status_table(pm8001_ha); + read_inbnd_queue_table(pm8001_ha); + read_outbnd_queue_table(pm8001_ha); + read_phy_attr_table(pm8001_ha); + + /* update main config table ,inbound table and outbound table */ + update_main_config_table(pm8001_ha); + for (i = 0; i < PM8001_MAX_SPCV_INB_NUM; i++) + update_inbnd_queue_table(pm8001_ha, i); + for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) + update_outbnd_queue_table(pm8001_ha, i); + + /* notify firmware update finished and check initialization status */ + if (0 == mpi_init_check(pm8001_ha)) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("MPI initialize successful!\n")); + } else + return -EBUSY; + + /* send SAS protocol timer configuration page to FW */ + ret = pm80xx_set_sas_protocol_timer_config(pm8001_ha); + + /* Check for encryption */ + if (pm8001_ha->chip->encrypt) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Checking for encryption\n")); + ret = pm80xx_get_encrypt_info(pm8001_ha); + if (ret == -1) { + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("Encryption error !!\n")); + if (pm8001_ha->encrypt_info.status == 0x81) { + PM8001_INIT_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled with error." 
+ "Saving encryption key to flash\n")); + pm80xx_encrypt_update(pm8001_ha); + } + } + } + return 0; +} + +static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) +{ + u32 max_wait_count; + u32 value; + u32 gst_len_mpistate; + init_pci_device_addresses(pm8001_ha); + /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the + table is stop */ + pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET); + + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET); + value &= SPCv_MSGU_CFG_TABLE_RESET; + } while ((value != 0) && (--max_wait_count)); + + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("TIMEOUT:IBDB value/=%x\n", value)); + return -1; + } + + /* check the MPI-State for termination in progress */ + /* wait until Inbound DoorBell Clear Register toggled */ + max_wait_count = 2 * 1000 * 1000; /* 2 sec for spcv/ve */ + do { + udelay(1); + gst_len_mpistate = + pm8001_mr32(pm8001_ha->general_stat_tbl_addr, + GST_GSTLEN_MPIS_OFFSET); + if (GST_MPI_STATE_UNINIT == + (gst_len_mpistate & GST_MPI_STATE_MASK)) + break; + } while (--max_wait_count); + if (!max_wait_count) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk(" TIME OUT MPI State = 0x%x\n", + gst_len_mpistate & GST_MPI_STATE_MASK)); + return -1; + } + + return 0; +} + +/** + * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all + * the FW register status to the originated status. + * @pm8001_ha: our hba card information + */ + +static int +pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 regval; + u32 bootloader_state; + + /* Check if MPI is in ready state to reset */ + if (mpi_uninit_check(pm8001_ha) != 0) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("MPI state is not ready\n")); + return -1; + } + + /* checked for reset register normal state; 0x0 */ + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register before write : 0x%x\n", regval)); + + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); + mdelay(500); + + regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("reset register after write 0x%x\n", regval)); + + if ((regval & SPCv_SOFT_RESET_READ_MASK) == + SPCv_SOFT_RESET_NORMAL_RESET_OCCURED) { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset successful [regval: 0x%x]\n", + regval)); + } else { + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" soft reset failed [regval: 0x%x]\n", + regval)); + + /* check bootloader is successfully executed or in HDA mode */ + bootloader_state = + pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) & + SCRATCH_PAD1_BOOTSTATE_MASK; + + if (bootloader_state == SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode SEEPROM\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode Bootstrap Pin\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state - HDA mode soft reset\n")); + } else if (bootloader_state == + SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR) { + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Bootloader state-HDA mode critical error\n")); + } + return -EBUSY; + } + + /* check the firmware status after reset */ + if (-1 == 
check_fw_ready(pm8001_ha)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Firmware is not ready!\n")); + return -EBUSY; + } + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPCv soft reset Complete\n")); + return 0; +} + +static void pm80xx_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) +{ + u32 i; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset start\n")); + + /* do SPCv chip reset. */ + pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, 0x11); + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("SPC soft reset Complete\n")); + + /* Check this ..whether delay is required or no */ + /* delay 10 usec */ + udelay(10); + + /* wait for 20 msec until the firmware gets reloaded */ + i = 20; + do { + mdelay(1); + } while ((--i) != 0); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("chip reset finished\n")); +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL); + pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); +} + +/** + * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) +{ + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, ODMR_MASK_ALL); +} + +/** + * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + u32 mask; + mask = (u32)(1 << vec); + + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR_CLR, (u32)(mask & 0xFFFFFFFF)); + return; +#endif + pm80xx_chip_intx_interrupt_enable(pm8001_ha); + +} + +/** + * pm8001_chip_interrupt_disable- disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ +static void +pm80xx_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ +#ifdef PM8001_USE_MSIX + u32 mask; + if (vec == 0xFF) + mask = 0xFFFFFFFF; + else + mask = (u32)(1 << vec); + pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, (u32)(mask & 0xFFFFFFFF)); + return; +#endif + pm80xx_chip_intx_interrupt_disable(pm8001_ha); +} + +static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + int res; + u32 ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct task_abort_req task_abort; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_ABORT; + int ret; + + if (!pm8001_ha_dev) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("dev is null\n")); + return; + } + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("cannot " + "allocate task\n")); + return; + } + + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) + return; + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&task_abort, 0, sizeof(task_abort)); + task_abort.abort_all = cpu_to_le32(1); + task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + task_abort.tag = cpu_to_le32(ccb_tag); + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + +} + +static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_ha_dev) +{ + struct sata_start_req sata_cmd; + int res; + u32 
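
[Illustrative sketch, not part of the patch] The SPCv interrupt routines above are per MSI-X vector: enabling writes bit "vec" to the outbound doorbell mask-clear register, disabling writes it to the mask-set register, and the disable path treats vec == 0xFF as "all vectors". A plain model of the mask computation:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vec_mask(uint8_t vec)
    {
        /* 0xFF is the "all vectors" convention used on the disable path */
        return (vec == 0xFF) ? 0xFFFFFFFFu : (uint32_t)1 << vec;
    }

    int main(void)
    {
        printf("vector 3 mask:   0x%08x\n", vec_mask(3));
        printf("all-vector mask: 0x%08x\n", vec_mask(0xFF));
        return 0;
    }
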
ccb_tag; + struct pm8001_ccb_info *ccb; + struct sas_task *task = NULL; + struct host_to_dev_fis fis; + struct domain_device *dev; + struct inbound_queue_table *circularQ; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + + task = sas_alloc_slow_task(GFP_ATOMIC); + + if (!task) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate task !!!\n")); + return; + } + task->task_done = pm8001_task_done; + + res = pm8001_tag_alloc(pm8001_ha, &ccb_tag); + if (res) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("cannot allocate tag !!!\n")); + return; + } + + /* allocate domain device by ourselves as libsas + * is not going to provide any + */ + dev = kzalloc(sizeof(struct domain_device), GFP_ATOMIC); + if (!dev) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Domain device cannot be allocated\n")); + sas_free_task(task); + return; + } else { + task->dev = dev; + task->dev->lldd_dev = pm8001_ha_dev; + } + + ccb = &pm8001_ha->ccb_info[ccb_tag]; + ccb->device = pm8001_ha_dev; + ccb->ccb_tag = ccb_tag; + ccb->task = task; + pm8001_ha_dev->id |= NCQ_READ_LOG_FLAG; + pm8001_ha_dev->id |= NCQ_2ND_RLE_FLAG; + + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* construct read log FIS */ + memset(&fis, 0, sizeof(struct host_to_dev_fis)); + fis.fis_type = 0x27; + fis.flags = 0x80; + fis.command = ATA_CMD_READ_LOG_EXT; + fis.lbal = 0x10; + fis.sector_count = 0x1; + + sata_cmd.tag = cpu_to_le32(ccb_tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9)); + memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); + + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + +} + +/** + * mpi_ssp_completion- process the event that FW response to the SSP request. + * @pm8001_ha: our hba card information + * @piomb: the message contents of this outbound message. + * + * When FW has completed a ssp request for example a IO request, after it has + * filled the SG data with the data, it will trigger this event represent + * that he has finished the job,please check the coresponding buffer. + * So we will tell the caller who maybe waiting the result to tell upper layer + * that the task has been finished. 
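
[Illustrative sketch, not part of the patch] pm80xx_send_read_log() above builds a register host-to-device FIS for READ LOG EXT against log page 0x10 (the NCQ command error log) and tags the device by OR-ing NCQ_READ_LOG_FLAG / NCQ_2ND_RLE_FLAG into the top bits of its id, as defined in the header hunk earlier. The sketch below reproduces just the FIS fields the driver fills in; the struct is a cut-down stand-in for host_to_dev_fis, and 0x2F is the standard ATA opcode behind ATA_CMD_READ_LOG_EXT.

    #include <stdint.h>
    #include <stdio.h>

    #define ATA_CMD_READ_LOG_EXT 0x2F /* standard ATA opcode for READ LOG EXT */

    /* Only the fields set in pm80xx_send_read_log(); the real FIS is larger. */
    struct h2d_fis {
        uint8_t fis_type;     /* 0x27: register host-to-device FIS */
        uint8_t flags;        /* 0x80: C bit set, i.e. a command update */
        uint8_t command;
        uint8_t lbal;         /* log address; 0x10 is the NCQ command error log */
        uint8_t sector_count; /* number of 512-byte log pages to read */
    };

    static struct h2d_fis build_read_log_fis(void)
    {
        struct h2d_fis fis = {
            .fis_type = 0x27,
            .flags = 0x80,
            .command = ATA_CMD_READ_LOG_EXT,
            .lbal = 0x10,
            .sector_count = 1,
        };
        return fis;
    }

    int main(void)
    {
        struct h2d_fis fis = build_read_log_fis();
        printf("cmd 0x%02x, log page 0x%02x\n", fis.command, fis.lbal);
        return 0;
    }
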
+ */ +static void +mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 param; + u32 tag; + struct ssp_completion_resp *psspPayload; + struct task_status_struct *ts; + struct ssp_response_iu *iu; + struct pm8001_device *pm8001_dev; + psspPayload = (struct ssp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psspPayload->status); + tag = le32_to_cpu(psspPayload->tag); + ccb = &pm8001_ha->ccb_info[tag]; + if ((status == IO_ABORTED) && ccb->open_retry) { + /* Being completed by another */ + ccb->open_retry = 0; + return; + } + pm8001_dev = ccb->device; + param = le32_to_cpu(psspPayload->param); + t = ccb->task; + + if (status && status != IO_UNDERFLOW) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SUCCESS ,param = 0x%x\n", + param)); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + } else { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + iu = &psspPayload->ssp_resp_iu; + sas_ssp_task_response(pm8001_ha->dev, t, iu); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_UNDERFLOW: + /* SSP Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW ,param = 0x%x\n", + param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + /* Force the midlayer to retry */ + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + 
pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_DS_NON_OPERATIONAL); + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_TM_TAG_NOT_FOUND: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_TM_TAG_NOT_FOUND\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_SSP_EXT_IU_ZERO_LEN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + break; + } + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("scsi_status = 0x%x\n ", + psspPayload->ssp_resp_iu.status)); + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with io_status 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + unsigned long flags; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct ssp_event_resp *psspPayload = + (struct ssp_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psspPayload->event); + u32 tag = le32_to_cpu(psspPayload->tag); + u32 port_id = le32_to_cpu(psspPayload->port_id); + + ccb = &pm8001_ha->ccb_info[tag]; + t = ccb->task; + pm8001_dev = ccb->device; + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("sas IO status 0x%x\n", event)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");) + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); + return; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + 
ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + if (!t->uldd_task) + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); + return; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + return; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with event 0x%x resp 0x%x " + "stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct sas_task *t; + struct pm8001_ccb_info *ccb; + u32 param; + u32 status; + u32 tag; + struct sata_completion_resp *psataPayload; + struct task_status_struct *ts; + struct ata_task_resp *resp ; + u32 *sata_resp; + struct pm8001_device *pm8001_dev; + unsigned long flags; + + psataPayload = (struct sata_completion_resp *)(piomb + 4); + status = le32_to_cpu(psataPayload->status); + tag = le32_to_cpu(psataPayload->tag); + + if (!tag) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("tag null\n")); + return; + } + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psataPayload->param); + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ccb null\n")); + return; + } + + if (t) { + if (t->dev && (t->dev->lldd_dev)) + pm8001_dev = t->dev->lldd_dev; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task null\n")); + return; + } + + if ((pm8001_dev && !(pm8001_dev->id & NCQ_READ_LOG_FLAG)) + && unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + if (!ts) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("ts null\n")); + return; + } + + switch (status) { + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + if (param == 0) { + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + /* check if response is for SEND READ LOG */ + if (pm8001_dev && + (pm8001_dev->id & NCQ_READ_LOG_FLAG)) { + /* set new bit for abort_all */ + pm8001_dev->id |= NCQ_ABORT_ALL_FLAG; + /* clear bit for read log */ + pm8001_dev->id = pm8001_dev->id & 0x7FFFFFFF; + pm80xx_send_abort_all(pm8001_ha, pm8001_dev); + /* Free the tag */ + pm8001_tag_free(pm8001_ha, tag); + sas_free_task(t); + return; + } + } else { + u8 len; + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PROTO_RESPONSE; + ts->residual = param; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SAS_PROTO_RESPONSE len = %d\n", + param)); + sata_resp = &psataPayload->sata_resp[0]; + resp = (struct ata_task_resp *)ts->buf; + if (t->ata_task.dma_xfer == 0 && + t->data_dir == PCI_DMA_FROMDEVICE) { + len = sizeof(struct pio_setup_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("PIO read len = %d\n", len)); + } else if (t->ata_task.use_ncq) { + len = sizeof(struct set_dev_bits_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("FPDMA len = %d\n", len)); + } else { + len = sizeof(struct dev_to_host_fis); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("other len = %d\n", len)); + } + if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { + 
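/* copy the FIS returned by the firmware into the libsas ata_task_resp buffer */ +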
resp->frame_len = len; + memcpy(&resp->ending_fis[0], sata_resp, len); + ts->buf_valid_size = sizeof(*resp); + } else + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("response to large\n")); + } + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB Tag\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + /* following cases are to do cases */ + case IO_UNDERFLOW: + /* SATA Completion with error */ + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_UNDERFLOW param = %d\n", param)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + ts->residual = param; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_PHY_DOWN; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*in order to force CPU ordering*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + 
"IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_ACK_NAK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_DMA: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_DMA\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + break; + case IO_XFER_ERROR_SATA_LINK_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_NON_OPERATIONAL); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, pm8001_dev, + IO_DS_IN_ERROR); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + /* not allowed case. Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else if (t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } else if (!t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } +} + +/*See the comments for mpi_ssp_completion */ +static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) +{ + struct sas_task *t; + struct task_status_struct *ts; + struct pm8001_ccb_info *ccb; + struct pm8001_device *pm8001_dev; + struct sata_event_resp *psataPayload = + (struct sata_event_resp *)(piomb + 4); + u32 event = le32_to_cpu(psataPayload->event); + u32 tag = le32_to_cpu(psataPayload->tag); + u32 port_id = le32_to_cpu(psataPayload->port_id); + u32 dev_id = le32_to_cpu(psataPayload->device_id); + unsigned long flags; + + ccb = &pm8001_ha->ccb_info[tag]; + + if (ccb) { + t = ccb->task; + pm8001_dev = ccb->device; + } else { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("No CCB !!!. 
returning\n")); + return; + } + if (event) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("SATA EVENT 0x%x\n", event)); + + /* Check if this is NCQ error */ + if (event == IO_XFER_ERROR_ABORTED_NCQ_MODE) { + /* find device using device id */ + pm8001_dev = pm8001_find_dev(pm8001_ha, dev_id); + /* send read log extension */ + if (pm8001_dev) + pm80xx_send_read_log(pm8001_ha, pm8001_dev); + return; + } + + if (unlikely(!t || !t->lldd_task || !t->dev)) { + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task or dev null\n")); + return; + } + + ts = &t->task_status; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", + port_id, tag, event)); + switch (event) { + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_INTERRUPTED; + break; + case IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_EPROTO; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_DEV_NO_RESPONSE; + if (!t->uldd_task) { + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + return; + } + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_UNDELIVERED; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = 
SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_NAK_RECEIVED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_PEER_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_NAK_R_ERR; + break; + case IO_XFER_ERROR_REJECTED_NCQ_MODE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_UNDERRUN; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_UNEXPECTED_PHASE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_OVERRUN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_OFFSET_MISMATCH: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_CMD_FRAME_ISSUED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); + break; + case IO_XFER_PIO_SETUP_ERROR: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_ERROR_INTERNAL_CRC_ERROR: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + case IO_XFER_DMA_ACTIVATE_TIMEOUT: + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("IO_XFR_DMA_ACTIVATE_TIMEOUT\n")); + /* TBC: used default set values */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", event)); + /* not allowed case. 
Therefore, return failed status */ + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_TO; + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p done with io_status 0x%x" + " resp 0x%x stat 0x%x but aborted by upper layer!\n", + t, event, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else if (t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } else if (!t->uldd_task) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + t->task_done(t); + spin_lock_irq(&pm8001_ha->lock); + } +} + +/*See the comments for mpi_ssp_completion */ +static void +mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + u32 param, i; + struct sas_task *t; + struct pm8001_ccb_info *ccb; + unsigned long flags; + u32 status; + u32 tag; + struct smp_completion_resp *psmpPayload; + struct task_status_struct *ts; + struct pm8001_device *pm8001_dev; + char *pdma_respaddr = NULL; + + psmpPayload = (struct smp_completion_resp *)(piomb + 4); + status = le32_to_cpu(psmpPayload->status); + tag = le32_to_cpu(psmpPayload->tag); + + ccb = &pm8001_ha->ccb_info[tag]; + param = le32_to_cpu(psmpPayload->param); + t = ccb->task; + ts = &t->task_status; + pm8001_dev = ccb->device; + if (status) + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("smp IO status 0x%x\n", status)); + if (unlikely(!t || !t->lldd_task || !t->dev)) + return; + + switch (status) { + + case IO_SUCCESS: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + if (pm8001_dev) + pm8001_dev->running_req--; + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("DIRECT RESPONSE Length:%d\n", + param)); + pdma_respaddr = (char *)(phys_to_virt(cpu_to_le64 + ((u64)sg_dma_address + (&t->smp_task.smp_resp)))); + for (i = 0; i < param; i++) { + *(pdma_respaddr+i) = psmpPayload->_r_a[i]; + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "SMP Byte%d DMA data 0x%x psmp 0x%x\n", + i, *(pdma_respaddr+i), + psmpPayload->_r_a[i])); + } + } + break; + case IO_ABORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ABORTED IOMB\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_ABORTED_TASK; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_OVERFLOW: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DATA_OVERRUN; + ts->residual = 0; + if (pm8001_dev) + pm8001_dev->running_req--; + break; + case IO_NO_DEVICE: + PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_PHY_DOWN; + break; + case IO_ERROR_HW_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_HW_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case IO_XFER_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case 
IO_XFER_ERROR_PHY_NOT_READY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_BUSY; + break; + case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + break; + case IO_OPEN_CNX_ERROR_BREAK: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; + break; + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: + case IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE: + case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_UNKNOWN; + pm8001_handle_event(pm8001_ha, + pm8001_dev, + IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); + break; + case IO_OPEN_CNX_ERROR_BAD_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_BAD_DEST; + break; + case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: + PM8001_IO_DBG(pm8001_ha, pm8001_printk(\ + "IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_CONN_RATE; + break; + case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_WRONG_DEST; + break; + case IO_XFER_ERROR_RX_FRAME: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_ERROR_RX_FRAME\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_XFER_OPEN_RETRY_TIMEOUT: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_ERROR_INTERNAL_SMP_RESOURCE: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_QUEUE_FULL; + break; + case IO_PORT_IN_RESET: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_PORT_IN_RESET\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_DS_NON_OPERATIONAL: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_NON_OPERATIONAL\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + break; + case IO_DS_IN_RECOVERY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_DS_IN_RECOVERY\n")); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); + ts->resp = 
SAS_TASK_COMPLETE; + ts->stat = SAS_OPEN_REJECT; + ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; + default: + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("Unknown status 0x%x\n", status)); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAS_DEV_NO_RESPONSE; + /* not allowed case. Therefore, return failed status */ + break; + } + spin_lock_irqsave(&t->task_state_lock, flags); + t->task_state_flags &= ~SAS_TASK_STATE_PENDING; + t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + t->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&t->task_state_lock, flags); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "task 0x%p done with io_status 0x%x resp 0x%x" + "stat 0x%x but aborted by upper layer!\n", + t, status, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + } else { + spin_unlock_irqrestore(&t->task_state_lock, flags); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); + mb();/* in order to force CPU ordering */ + t->task_done(t); + } +} + +/** + * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW. + * @pm8001_ha: our hba card information + * @Qnum: the outbound queue message number. + * @SEA: source of event to ack + * @port_id: port id. + * @phyId: phy id. + * @param0: parameter 0. + * @param1: parameter 1. + */ +static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, + u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1) +{ + struct hw_event_ack_req payload; + u32 opc = OPC_INB_SAS_HW_EVENT_ACK; + + struct inbound_queue_table *circularQ; + + memset((u8 *)&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[Qnum]; + payload.tag = cpu_to_le32(1); + payload.phyid_sea_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) | + ((phyId & 0xFF) << 24) | (port_id & 0xFF)); + payload.param0 = cpu_to_le32(param0); + payload.param1 = cpu_to_le32(param1); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); +} + +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op); + +/** + * hw_event_sas_phy_up -FW tells me a SAS phy up event. 
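+ * Decodes the link rate, port id, phy id and attached device type from the
+ * event payload, updates the local phy and port state and notifies libsas
+ * that OOB is done.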
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + u8 deviceType = pPayload->sas_identify.dev_type; + port->port_state = portstate; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "portid:%d; phyid:%d; linkrate:%d; " + "portstate:%x; devicetype:%x\n", + port_id, phy_id, link_rate, portstate, deviceType)); + + switch (deviceType) { + case SAS_PHY_UNUSED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("device type no device.\n")); + break; + case SAS_END_DEVICE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n")); + pm80xx_chip_phy_ctl_req(pm8001_ha, phy_id, + PHY_NOTIFY_ENABLE_SPINUP); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_EDGE_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("expander device.\n")); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + case SAS_FANOUT_EXPANDER_DEVICE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("fanout expander device.\n")); + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unknown device type(%x)\n", deviceType)); + break; + } + phy->phy_type |= PORT_TYPE_SAS; + phy->identify.device_type = deviceType; + phy->phy_attached = 1; + if (phy->identify.device_type == SAS_END_DEVICE) + phy->identify.target_port_protocols = SAS_PROTOCOL_SSP; + else if (phy->identify.device_type != SAS_PHY_UNUSED) + phy->identify.target_port_protocols = SAS_PROTOCOL_SMP; + phy->sas_phy.oob_mode = SAS_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, &pPayload->sas_identify, + sizeof(struct sas_identify_frame)-4); + phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + if (pm8001_ha->flags == PM8001F_RUN_TIME) + mdelay(200);/*delay a moment to wait disk to spinup*/ + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_sata_phy_up -FW tells me a SATA phy up event. 
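+ * Records the negotiated link rate and port state, copies the received D2H
+ * FIS into the phy frame buffer and notifies libsas that OOB is done.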
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 link_rate = + (u8)((lr_status_evt_portid & 0xF0000000) >> 28); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + unsigned long flags; + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "port id %d, phy id %d link_rate %d portstate 0x%x\n", + port_id, phy_id, link_rate, portstate)); + + port->port_state = portstate; + port->port_attached = 1; + pm8001_get_lrate_mode(phy, link_rate); + phy->phy_type |= PORT_TYPE_SATA; + phy->phy_attached = 1; + phy->sas_phy.oob_mode = SATA_OOB_MODE; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE); + spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags); + memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4), + sizeof(struct dev_to_host_fis)); + phy->frame_rcvd_size = sizeof(struct dev_to_host_fis); + phy->identify.target_port_protocols = SAS_PROTOCOL_SATA; + phy->identify.device_type = SAS_SATA_DEV; + pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); + spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); + pm8001_bytes_dmaed(pm8001_ha, phy_id); +} + +/** + * hw_event_phy_down -we should notify the libsas the phy is down. 
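+ * Clears the local phy state and, depending on the reported port state,
+ * marks the port as detached and acks HW_EVENT_PHY_DOWN to the firmware.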
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void +hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F); + + struct pm8001_port *port = &pm8001_ha->port[port_id]; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + port->port_state = portstate; + phy->phy_type = 0; + phy->identify.device_type = 0; + phy->phy_attached = 0; + memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE); + switch (portstate) { + case PORT_VALID: + break; + case PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" PortInvalid portID %d\n", port_id)); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + case PORT_IN_RESET: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Port In Reset portID %d\n", port_id)); + break; + case PORT_NOT_ESTABLISHED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n")); + port->port_attached = 0; + break; + case PORT_LOSTCOMM: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and PORT_LOSTCOMM\n")); + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" Last phy Down and port invalid\n")); + port->port_attached = 0; + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN, + port_id, phy_id, 0, 0); + break; + default: + port->port_attached = 0; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" phy Down and(default) = 0x%x\n", + portstate)); + break; + + } +} + +static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_start_resp *pPayload = + (struct phy_start_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phy_id = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("phy start resp status:0x%x, phyid:0x%x\n", + status, phy_id)); + if (status == 0) { + phy->phy_state = 1; + if (pm8001_ha->flags == PM8001F_RUN_TIME) + complete(phy->enable_completion); + } + return 0; + +} + +/** + * mpi_thermal_hw_event -The hw event has come. + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct thermal_hw_event *pPayload = + (struct thermal_hw_event *)(piomb + 4); + + u32 thermal_event = le32_to_cpu(pPayload->thermal_event); + u32 rht_lht = le32_to_cpu(pPayload->rht_lht); + + if (thermal_event & 0x40) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Local high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured local high temperature %d\n", + ((rht_lht & 0xFF00) >> 8))); + } + if (thermal_event & 0x10) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Remote high temperature violated!\n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Thermal Event: Measured remote high temperature %d\n", + ((rht_lht & 0xFF000000) >> 24))); + } + return 0; +} + +/** + * mpi_hw_event -The hw event has come. 
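+ * Decodes the event type, port id and phy id from the outbound IOMB and
+ * dispatches to the phy up/down handlers, acks the event to the firmware
+ * where required, or forwards it to libsas.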
+ * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + unsigned long flags; + struct hw_event_resp *pPayload = + (struct hw_event_resp *)(piomb + 4); + u32 lr_status_evt_portid = + le32_to_cpu(pPayload->lr_status_evt_portid); + u32 phyid_npip_portstate = le32_to_cpu(pPayload->phyid_npip_portstate); + u8 port_id = (u8)(lr_status_evt_portid & 0x000000FF); + u8 phy_id = + (u8)((phyid_npip_portstate & 0xFF0000) >> 16); + u16 eventType = + (u16)((lr_status_evt_portid & 0x00FFFF00) >> 8); + u8 status = + (u8)((lr_status_evt_portid & 0x0F000000) >> 24); + + struct sas_ha_struct *sas_ha = pm8001_ha->sas; + struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; + struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n", + port_id, phy_id, eventType, status)); + + switch (eventType) { + + case HW_EVENT_SAS_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_START_STATUS\n")); + hw_event_sas_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_PHY_UP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_PHY_UP\n")); + hw_event_sata_phy_up(pm8001_ha, piomb); + break; + case HW_EVENT_SATA_SPINUP_HOLD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); + break; + case HW_EVENT_PHY_DOWN: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_DOWN\n")); + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); + phy->phy_attached = 0; + phy->phy_state = 0; + hw_event_phy_down(pm8001_ha, piomb); + break; + case HW_EVENT_PORT_INVALID: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_INVALID\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + /* the broadcast change primitive received, tell the LIBSAS this event + to revalidate the sas domain*/ + case HW_EVENT_BROADCAST_CHANGE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, + port_id, phy_id, 1, 0); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_PHY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PHY_ERROR\n")); + sas_phy_disconnected(&phy->sas_phy); + phy->phy_attached = 0; + sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); + break; + case HW_EVENT_BROADCAST_EXP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_EXP\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_LINK_ERR_INVALID_DWORD: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_DISPARITY_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 
0, + HW_EVENT_LINK_ERR_DISPARITY_ERROR, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_CODE_VIOLATION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_CODE_VIOLATION, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_MALFUNCTION: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_MALFUNCTION\n")); + break; + case HW_EVENT_BROADCAST_SES: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_BROADCAST_SES\n")); + spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); + sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; + spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); + sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); + break; + case HW_EVENT_INBOUND_CRC_ERROR: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_INBOUND_CRC_ERROR, + port_id, phy_id, 0, 0); + break; + case HW_EVENT_HARD_RESET_RECEIVED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n")); + sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); + break; + case HW_EVENT_ID_FRAME_TIMEOUT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_LINK_ERR_PHY_RESET_FAILED, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RESET_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n")); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVERY_TIMER_TMO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n")); + pm80xx_hw_event_ack_req(pm8001_ha, 0, + HW_EVENT_PORT_RECOVERY_TIMER_TMO, + port_id, phy_id, 0, 0); + sas_phy_disconnected(sas_phy); + phy->phy_attached = 0; + sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); + break; + case HW_EVENT_PORT_RECOVER: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RECOVER\n")); + break; + case HW_EVENT_PORT_RESET_COMPLETE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n")); + break; + case EVENT_BROADCAST_ASYNCH_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); + break; + default: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Unknown event type 0x%x\n", eventType)); + break; + } + return 0; +} + +/** + * mpi_phy_stop_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message 
buffer + */ +static int mpi_phy_stop_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + struct phy_stop_resp *pPayload = + (struct phy_stop_resp *)(piomb + 4); + u32 status = + le32_to_cpu(pPayload->status); + u32 phyid = + le32_to_cpu(pPayload->phyid); + struct pm8001_phy *phy = &pm8001_ha->phy[phyid]; + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("phy:0x%x status:0x%x\n", + phyid, status)); + if (status == 0) + phy->phy_state = 0; + return 0; +} + +/** + * mpi_set_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct set_ctrl_cfg_resp *pPayload = + (struct set_ctrl_cfg_resp *)(piomb + 4); + u32 status = le32_to_cpu(pPayload->status); + u32 err_qlfr_pgcd = le32_to_cpu(pPayload->err_qlfr_pgcd); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "SET CONTROLLER RESP: status 0x%x qlfr_pgcd 0x%x\n", + status, err_qlfr_pgcd)); + + return 0; +} + +/** + * mpi_get_controller_config_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_controller_config_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_get_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_get_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_flash_op_ext_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_flash_op_ext_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_set_phy_profile_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_set_phy_profile_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * mpi_kek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_kek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + struct kek_mgmt_resp *pPayload = (struct kek_mgmt_resp *)(piomb + 4); + + u32 status = le32_to_cpu(pPayload->status); + u32 kidx_new_curr_ksop = le32_to_cpu(pPayload->kidx_new_curr_ksop); + u32 err_qlfr = le32_to_cpu(pPayload->err_qlfr); + + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "KEK MGMT RESP. 
Status 0x%x idx_ksop 0x%x err_qlfr 0x%x\n", + status, kidx_new_curr_ksop, err_qlfr)); + + return 0; +} + +/** + * mpi_dek_management_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int mpi_dek_management_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * ssp_coalesced_comp_resp - SPCv specific + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, + void *piomb) +{ + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk(" pm80xx_addition_functionality\n")); + + return 0; +} + +/** + * process_one_iomb - process one outbound Queue memory block + * @pm8001_ha: our hba card information + * @piomb: IO message buffer + */ +static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) +{ + __le32 pHeader = *(__le32 *)piomb; + u32 opc = (u32)((le32_to_cpu(pHeader)) & 0xFFF); + + switch (opc) { + case OPC_OUB_ECHO: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n")); + break; + case OPC_OUB_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_HW_EVENT\n")); + mpi_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_THERM_HW_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_THERMAL_EVENT\n")); + mpi_thermal_hw_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_COMP\n")); + mpi_ssp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SMP_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_COMP\n")); + mpi_smp_completion(pm8001_ha, piomb); + break; + case OPC_OUB_LOCAL_PHY_CNTRL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); + pm8001_mpi_local_phy_ctl(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_REGIST: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_REGIST\n")); + pm8001_mpi_reg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEREG_DEV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("unresgister the deviece\n")); + pm8001_mpi_dereg_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEV_HANDLE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n")); + break; + case OPC_OUB_SATA_COMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_COMP\n")); + mpi_sata_completion(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_EVENT\n")); + mpi_sata_event(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_EVENT\n")); + mpi_ssp_event(pm8001_ha, piomb); + break; + case OPC_OUB_DEV_HANDLE_ARRIV: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n")); + /*This is for target*/ + break; + case OPC_OUB_SSP_RECV_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n")); + /*This is for target*/ + break; + case OPC_OUB_FW_FLASH_UPDATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); + pm8001_mpi_fw_flash_update_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GPIO_RESPONSE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_RESPONSE\n")); + break; + case OPC_OUB_GPIO_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GPIO_EVENT\n")); + break; + case OPC_OUB_GENERAL_EVENT: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); + pm8001_mpi_general_event(pm8001_ha, piomb); 
+ break; + case OPC_OUB_SSP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SATA_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SAS_DIAG_MODE_START_END: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n")); + break; + case OPC_OUB_SAS_DIAG_EXECUTE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n")); + break; + case OPC_OUB_GET_TIME_STAMP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_TIME_STAMP\n")); + break; + case OPC_OUB_SAS_HW_EVENT_ACK: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n")); + break; + case OPC_OUB_PORT_CONTROL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_PORT_CONTROL\n")); + break; + case OPC_OUB_SMP_ABORT_RSP: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); + pm8001_mpi_task_abort_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); + pm8001_mpi_get_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_NVMD_DATA: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); + pm8001_mpi_set_nvmd_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEVICE_HANDLE_REMOVAL: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n")); + break; + case OPC_OUB_SET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); + pm8001_mpi_set_dev_state_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_DEVICE_STATE: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n")); + break; + case OPC_OUB_SET_DEV_INFO: + PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("OPC_OUB_SET_DEV_INFO\n")); + break; + /* spcv specifc commands */ + case OPC_OUB_PHY_START_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_START_RESP opcode:%x\n", opc)); + mpi_phy_start_resp(pm8001_ha, piomb); + break; + case OPC_OUB_PHY_STOP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_PHY_STOP_RESP opcode:%x\n", opc)); + mpi_phy_stop_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_set_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_CONTROLLER_CONFIG: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_CONTROLLER_CONFIG opcode:%x\n", opc)); + mpi_get_controller_config_resp(pm8001_ha, piomb); + break; + case OPC_OUB_GET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_GET_PHY_PROFILE opcode:%x\n", opc)); + mpi_get_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_FLASH_OP_EXT: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_FLASH_OP_EXT opcode:%x\n", opc)); + mpi_flash_op_ext_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SET_PHY_PROFILE: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SET_PHY_PROFILE opcode:%x\n", opc)); + mpi_set_phy_profile_resp(pm8001_ha, piomb); + break; + case OPC_OUB_KEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_KEK_MANAGEMENT_RESP opcode:%x\n", opc)); + mpi_kek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_DEK_MANAGEMENT_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_DEK_MANAGEMENT_RESP opcode:%x\n", opc)); + 
mpi_dek_management_resp(pm8001_ha, piomb); + break; + case OPC_OUB_SSP_COALESCED_COMP_RESP: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "OPC_OUB_SSP_COALESCED_COMP_RESP opcode:%x\n", opc)); + ssp_coalesced_comp_resp(pm8001_ha, piomb); + break; + default: + PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + "Unknown outbound Queue IOMB OPC = 0x%x\n", opc)); + break; + } +} + +static int process_oq(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + struct outbound_queue_table *circularQ; + void *pMsg1 = NULL; + u8 uninitialized_var(bc); + u32 ret = MPI_IO_STATUS_FAIL; + unsigned long flags; + + spin_lock_irqsave(&pm8001_ha->lock, flags); + circularQ = &pm8001_ha->outbnd_q_tbl[vec]; + do { + ret = pm8001_mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc); + if (MPI_IO_STATUS_SUCCESS == ret) { + /* process the outbound message */ + process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4)); + /* free the message from the outbound circular buffer */ + pm8001_mpi_msg_free_set(pm8001_ha, pMsg1, + circularQ, bc); + } + if (MPI_IO_STATUS_BUSY == ret) { + /* Update the producer index from SPC */ + circularQ->producer_index = + cpu_to_le32(pm8001_read_32(circularQ->pi_virt)); + if (le32_to_cpu(circularQ->producer_index) == + circularQ->consumer_idx) + /* OQ is empty */ + break; + } + } while (1); + spin_unlock_irqrestore(&pm8001_ha->lock, flags); + return ret; +} + +/* PCI_DMA_... to our direction translation. */ +static const u8 data_dir_flags[] = { + [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */ + [PCI_DMA_TODEVICE] = DATA_DIR_OUT,/* OUTBOUND */ + [PCI_DMA_FROMDEVICE] = DATA_DIR_IN,/* INBOUND */ + [PCI_DMA_NONE] = DATA_DIR_NONE,/* NO TRANSFER */ +}; + +static void build_smp_cmd(u32 deviceID, __le32 hTag, + struct smp_req *psmp_cmd, int mode, int length) +{ + psmp_cmd->tag = hTag; + psmp_cmd->device_id = cpu_to_le32(deviceID); + if (mode == SMP_DIRECT) { + length = length - 4; /* subtract crc */ + psmp_cmd->len_ip_ir = cpu_to_le32(length << 16); + } else { + psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1)); + } +} + +/** + * pm80xx_chip_smp_req - send a SMP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used.
+ */ +static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + int elem, rc; + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct scatterlist *sg_req, *sg_resp; + u32 req_len, resp_len; + struct smp_req smp_cmd; + u32 opc; + struct inbound_queue_table *circularQ; + char *preq_dma_addr = NULL; + __le64 tmp_addr; + u32 i, length; + + memset(&smp_cmd, 0, sizeof(smp_cmd)); + /* + * DMA-map SMP request, response buffers + */ + sg_req = &task->smp_task.smp_req; + elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE); + if (!elem) + return -ENOMEM; + req_len = sg_dma_len(sg_req); + + sg_resp = &task->smp_task.smp_resp; + elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE); + if (!elem) { + rc = -ENOMEM; + goto err_out; + } + resp_len = sg_dma_len(sg_resp); + /* must be in dwords */ + if ((req_len & 0x3) || (resp_len & 0x3)) { + rc = -EINVAL; + goto err_out_2; + } + + opc = OPC_INB_SMP_REQUEST; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + smp_cmd.tag = cpu_to_le32(ccb->ccb_tag); + + length = sg_req->length; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP Frame Length %d\n", sg_req->length)); + if (!(length - 8)) + pm8001_ha->smp_exp_mode = SMP_DIRECT; + else + pm8001_ha->smp_exp_mode = SMP_INDIRECT; + + /* DIRECT MODE support only in spcv/ve */ + pm8001_ha->smp_exp_mode = SMP_DIRECT; + + tmp_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); + preq_dma_addr = (char *)phys_to_virt(tmp_addr); + + /* INDIRECT MODE command settings. Use DMA */ + if (pm8001_ha->smp_exp_mode == SMP_INDIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST INDIRECT MODE\n")); + /* for SPCv indirect mode. Place the top 4 bytes of + * SMP Request header here. 
*/ + for (i = 0; i < 4; i++) + smp_cmd.smp_req16[i] = *(preq_dma_addr + i); + /* exclude top 4 bytes for SMP req header */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req) - 4); + /* exclude 4 bytes for SMP req header and CRC */ + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-8); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32((u32)sg_dma_len + (&task->smp_task.smp_resp)-4); + } else { /* DIRECT MODE */ + smp_cmd.long_smp_req.long_req_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_req)); + smp_cmd.long_smp_req.long_req_size = + cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); + smp_cmd.long_smp_req.long_resp_addr = + cpu_to_le64((u64)sg_dma_address + (&task->smp_task.smp_resp)); + smp_cmd.long_smp_req.long_resp_size = + cpu_to_le32 + ((u32)sg_dma_len(&task->smp_task.smp_resp)-4); + } + if (pm8001_ha->smp_exp_mode == SMP_DIRECT) { + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("SMP REQUEST DIRECT MODE\n")); + for (i = 0; i < length; i++) + if (i < 16) { + smp_cmd.smp_req16[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req16[i], + *(preq_dma_addr))); + } else { + smp_cmd.smp_req[i] = *(preq_dma_addr+i); + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Byte[%d]:%x (DMA data:%x)\n", + i, smp_cmd.smp_req[i], + *(preq_dma_addr))); + } + } + + build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, + &smp_cmd, pm8001_ha->smp_exp_mode, length); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd, 0); + return 0; + +err_out_2: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1, + PCI_DMA_FROMDEVICE); +err_out: + dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1, + PCI_DMA_TODEVICE); + return rc; +} + +static int check_enc_sas_cmd(struct sas_task *task) +{ + if ((task->ssp_task.cdb[0] == READ_10) + || (task->ssp_task.cdb[0] == WRITE_10) + || (task->ssp_task.cdb[0] == WRITE_VERIFY)) + return 1; + else + return 0; +} + +static int check_enc_sat_cmd(struct sas_task *task) +{ + int ret = 0; + switch (task->ata_task.fis.command) { + case ATA_CMD_FPDMA_READ: + case ATA_CMD_READ_EXT: + case ATA_CMD_READ: + case ATA_CMD_FPDMA_WRITE: + case ATA_CMD_WRITE_EXT: + case ATA_CMD_WRITE: + case ATA_CMD_PIO_READ: + case ATA_CMD_PIO_READ_EXT: + case ATA_CMD_PIO_WRITE: + case ATA_CMD_PIO_WRITE_EXT: + ret = 1; + break; + default: + ret = 0; + break; + } + return ret; +} + +/** + * pm80xx_chip_ssp_io_req - send a SSP task to FW + * @pm8001_ha: our hba card information. + * @ccb: the ccb information this request used. 
+ */ +static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_dev = dev->lldd_dev; + struct ssp_ini_io_start_req ssp_cmd; + u32 tag = ccb->ccb_tag; + int ret; + u64 phys_addr; + struct inbound_queue_table *circularQ; + static u32 inb; + static u32 outb; + u32 opc = OPC_INB_SSPINIIOSTART; + memset(&ssp_cmd, 0, sizeof(ssp_cmd)); + memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); + /* data address domain added for spcv; set to 0 by host, + * used internally by controller + * 0 for SAS 1.1 and SAS 2.0 compatible TLR + */ + ssp_cmd.dad_dir_m_tlr = + cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0); + ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); + ssp_cmd.tag = cpu_to_le32(tag); + if (task->ssp_task.enable_first_burst) + ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); + ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); + memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sas_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SAS command 0x%x\n", + task->ssp_task.cdb[0])); + opc = OPC_INB_SSP_INI_DIF_ENC_IO; + /* enable encryption. 0 for SAS 1.1 and SAS 2.0 compatible TLR*/ + ssp_cmd.dad_dir_m_tlr = cpu_to_le32 + ((data_dir_flags[task->data_dir] << 8) | 0x20 | 0x0); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.enc_esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.enc_addr_low = + cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.enc_addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.enc_addr_low = 0; + ssp_cmd.enc_addr_high = 0; + ssp_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + ssp_cmd.key_cmode = 0x6 << 4; + /* set tweak values. 
Should be the start lba */ + ssp_cmd.twk_val0 = cpu_to_le32((task->ssp_task.cdb[2] << 24) | + (task->ssp_task.cdb[3] << 16) | + (task->ssp_task.cdb[4] << 8) | + (task->ssp_task.cdb[5])); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SAS command 0x%x inb q %x\n", + task->ssp_task.cdb[0], inb)); + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, ccb->n_elem, + ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + ssp_cmd.addr_low = + cpu_to_le32(lower_32_bits(phys_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(phys_addr)); + ssp_cmd.esgl = cpu_to_le32(1<<31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); + ssp_cmd.addr_high = + cpu_to_le32(upper_32_bits(dma_addr)); + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + ssp_cmd.addr_low = 0; + ssp_cmd.addr_high = 0; + ssp_cmd.len = cpu_to_le32(task->total_xfer_len); + ssp_cmd.esgl = 0; + } + } + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, outb++); + + /* rotate the outb queue */ + outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + + return ret; +} + +static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_ccb_info *ccb) +{ + struct sas_task *task = ccb->task; + struct domain_device *dev = task->dev; + struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; + u32 tag = ccb->ccb_tag; + int ret; + static u32 inb; + static u32 outb; + struct sata_start_req sata_cmd; + u32 hdr_tag, ncg_tag = 0; + u64 phys_addr; + u32 ATAP = 0x0; + u32 dir; + struct inbound_queue_table *circularQ; + unsigned long flags; + u32 opc = OPC_INB_SATA_HOST_OPSTART; + memset(&sata_cmd, 0, sizeof(sata_cmd)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + if (task->data_dir == PCI_DMA_NONE) { + ATAP = 0x04; /* no data*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n")); + } else if (likely(!task->ata_task.device_control_reg_update)) { + if (task->ata_task.dma_xfer) { + ATAP = 0x06; /* DMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n")); + } else { + ATAP = 0x05; /* PIO*/ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); + } + if (task->ata_task.use_ncq && + dev->sata_dev.command_set != ATAPI_COMMAND_SET) { + ATAP = 0x07; /* FPDMA */ + PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); + } + } + if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) { + task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); + ncg_tag = hdr_tag; + } + dir = data_dir_flags[task->data_dir] << 8; + sata_cmd.tag = cpu_to_le32(tag); + sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); + sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); + + sata_cmd.sata_fis = task->ata_task.fis; + if (likely(!task->ata_task.device_control_reg_update)) + sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ + sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ + + /* Check if encryption is set */ + if (pm8001_ha->chip->encrypt && + !(pm8001_ha->encrypt_info.status) && check_enc_sat_cmd(task)) { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Encryption enabled.Sending Encrypt SATA cmd 0x%x\n", + sata_cmd.sata_fis.command)); + opc = OPC_INB_SATA_DIF_ENC_IO; + + /* set encryption bit */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16)| + ((ATAP & 0x3f) << 10) | 0x20 | dir); + /* dad (bit 0-1) is 0 */ + /* 
fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.enc_addr_low = lower_32_bits(phys_addr); + sata_cmd.enc_addr_high = upper_32_bits(phys_addr); + sata_cmd.enc_esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.enc_addr_low = lower_32_bits(dma_addr); + sata_cmd.enc_addr_high = upper_32_bits(dma_addr); + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.enc_addr_low = 0; + sata_cmd.enc_addr_high = 0; + sata_cmd.enc_len = cpu_to_le32(task->total_xfer_len); + sata_cmd.enc_esgl = 0; + } + /* XTS mode. All other fields are 0 */ + sata_cmd.key_index_mode = 0x6 << 4; + /* set tweak values. Should be the start lba */ + sata_cmd.twk_val0 = + cpu_to_le32((sata_cmd.sata_fis.lbal_exp << 24) | + (sata_cmd.sata_fis.lbah << 16) | + (sata_cmd.sata_fis.lbam << 8) | + (sata_cmd.sata_fis.lbal)); + sata_cmd.twk_val1 = + cpu_to_le32((sata_cmd.sata_fis.lbah_exp << 8) | + (sata_cmd.sata_fis.lbam_exp)); + } else { + PM8001_IO_DBG(pm8001_ha, pm8001_printk( + "Sending Normal SATA command 0x%x inb %x\n", + sata_cmd.sata_fis.command, inb)); + /* dad (bit 0-1) is 0 */ + sata_cmd.ncqtag_atap_dir_m_dad = + cpu_to_le32(((ncg_tag & 0xff)<<16) | + ((ATAP & 0x3f) << 10) | dir); + + /* fill in PRD (scatter/gather) table, if any */ + if (task->num_scatter > 1) { + pm8001_chip_make_sg(task->scatter, + ccb->n_elem, ccb->buf_prd); + phys_addr = ccb->ccb_dma_handle + + offsetof(struct pm8001_ccb_info, buf_prd[0]); + sata_cmd.addr_low = lower_32_bits(phys_addr); + sata_cmd.addr_high = upper_32_bits(phys_addr); + sata_cmd.esgl = cpu_to_le32(1 << 31); + } else if (task->num_scatter == 1) { + u64 dma_addr = sg_dma_address(task->scatter); + sata_cmd.addr_low = lower_32_bits(dma_addr); + sata_cmd.addr_high = upper_32_bits(dma_addr); + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } else if (task->num_scatter == 0) { + sata_cmd.addr_low = 0; + sata_cmd.addr_high = 0; + sata_cmd.len = cpu_to_le32(task->total_xfer_len); + sata_cmd.esgl = 0; + } + /* scsi cdb */ + sata_cmd.atapi_scsi_cdb[0] = + cpu_to_le32(((task->ata_task.atapi_packet[0]) | + (task->ata_task.atapi_packet[1] << 8) | + (task->ata_task.atapi_packet[2] << 16) | + (task->ata_task.atapi_packet[3] << 24))); + sata_cmd.atapi_scsi_cdb[1] = + cpu_to_le32(((task->ata_task.atapi_packet[4]) | + (task->ata_task.atapi_packet[5] << 8) | + (task->ata_task.atapi_packet[6] << 16) | + (task->ata_task.atapi_packet[7] << 24))); + sata_cmd.atapi_scsi_cdb[2] = + cpu_to_le32(((task->ata_task.atapi_packet[8]) | + (task->ata_task.atapi_packet[9] << 8) | + (task->ata_task.atapi_packet[10] << 16) | + (task->ata_task.atapi_packet[11] << 24))); + sata_cmd.atapi_scsi_cdb[3] = + cpu_to_le32(((task->ata_task.atapi_packet[12]) | + (task->ata_task.atapi_packet[13] << 8) | + (task->ata_task.atapi_packet[14] << 16) | + (task->ata_task.atapi_packet[15] << 24))); + } + + /* Check for read log for failed drive and return */ + if (sata_cmd.sata_fis.command == 0x2f) { + if (pm8001_ha_dev && ((pm8001_ha_dev->id & NCQ_READ_LOG_FLAG) || + (pm8001_ha_dev->id & NCQ_ABORT_ALL_FLAG) || + (pm8001_ha_dev->id & NCQ_2ND_RLE_FLAG))) { + struct task_status_struct *ts; + + pm8001_ha_dev->id &= 0xDFFFFFFF; + ts = &task->task_status; + + 
spin_lock_irqsave(&task->task_state_lock, flags); + ts->resp = SAS_TASK_COMPLETE; + ts->stat = SAM_STAT_GOOD; + task->task_state_flags &= ~SAS_TASK_STATE_PENDING; + task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; + task->task_state_flags |= SAS_TASK_STATE_DONE; + if (unlikely((task->task_state_flags & + SAS_TASK_STATE_ABORTED))) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("task 0x%p resp 0x%x " + " stat 0x%x but aborted by upper layer " + "\n", task, ts->resp, ts->stat)); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + return 0; + } else if (task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/* ditto */ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } else if (!task->uldd_task) { + spin_unlock_irqrestore(&task->task_state_lock, + flags); + pm8001_ccb_task_free(pm8001_ha, task, ccb, tag); + mb();/*ditto*/ + spin_unlock_irq(&pm8001_ha->lock); + task->task_done(task); + spin_lock_irq(&pm8001_ha->lock); + return 0; + } + } + } + + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, + &sata_cmd, outb++); + + /* rotate the outb queue */ + outb = outb%PM8001_MAX_SPCV_OUTB_NUM; + return ret; +} + +/** + * pm80xx_chip_phy_start_req - start phy via PHY_START COMMAND + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to start up. + */ +static int +pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) +{ + struct phy_start_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTART; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + + PM8001_INIT_DBG(pm8001_ha, + pm8001_printk("PHY START REQ for phy_id %d\n", phy_id)); + /* + ** [0:7] PHY Identifier + ** [8:11] link rate 1.5G, 3G, 6G + ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode + ** [14] 0b disable spin up hold; 1b enable spin up hold + ** [15] 0b no change in current PHY analog setup 1b enable using SPAST + */ + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | + LINKRATE_30 | LINKRATE_60 | phy_id); + /* SSC Disable and SAS Analog ST configuration */ + /** + payload.ase_sh_lm_slr_phyid = + cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | + LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | + phy_id); + Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need + **/ + + payload.sas_identify.dev_type = SAS_END_DEVICE; + payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; + memcpy(payload.sas_identify.sas_addr, + pm8001_ha->sas_addr, SAS_ADDR_SIZE); + payload.sas_identify.phy_id = phy_id; + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + return ret; +} + +/** + * pm80xx_chip_phy_stop_req - stop phy via PHY_STOP COMMAND + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to stop.
+ */ +static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, + u8 phy_id) +{ + struct phy_stop_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 tag = 0x01; + u32 opcode = OPC_INB_PHYSTOP; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + memset(&payload, 0, sizeof(payload)); + payload.tag = cpu_to_le32(tag); + payload.phy_id = cpu_to_le32(phy_id); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + return ret; +} + +/** + * see comments on pm8001_mpi_reg_resp. + */ +static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, + struct pm8001_device *pm8001_dev, u32 flag) +{ + struct reg_dev_req payload; + u32 opc; + u32 stp_sspsmp_sata = 0x4; + struct inbound_queue_table *circularQ; + u32 linkrate, phy_id; + int rc, tag = 0xdeadbeef; + struct pm8001_ccb_info *ccb; + u8 retryFlag = 0x1; + u16 firstBurstSize = 0; + u16 ITNT = 2000; + struct domain_device *dev = pm8001_dev->sas_device; + struct domain_device *parent_dev = dev->parent; + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + + memset(&payload, 0, sizeof(payload)); + rc = pm8001_tag_alloc(pm8001_ha, &tag); + if (rc) + return rc; + ccb = &pm8001_ha->ccb_info[tag]; + ccb->device = pm8001_dev; + ccb->ccb_tag = tag; + payload.tag = cpu_to_le32(tag); + + if (flag == 1) { + stp_sspsmp_sata = 0x02; /*direct attached sata */ + } else { + if (pm8001_dev->dev_type == SAS_SATA_DEV) + stp_sspsmp_sata = 0x00; /* stp*/ + else if (pm8001_dev->dev_type == SAS_END_DEVICE || + pm8001_dev->dev_type == SAS_EDGE_EXPANDER_DEVICE || + pm8001_dev->dev_type == SAS_FANOUT_EXPANDER_DEVICE) + stp_sspsmp_sata = 0x01; /*ssp or smp*/ + } + if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) + phy_id = parent_dev->ex_dev.ex_phy->phy_id; + else + phy_id = pm8001_dev->attached_phy; + + opc = OPC_INB_REG_DEV; + + linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ? + pm8001_dev->sas_device->linkrate : dev->port->linkrate; + + payload.phyid_portid = + cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0xFF) | + ((phy_id & 0xFF) << 8)); + + payload.dtype_dlr_mcn_ir_retry = cpu_to_le32((retryFlag & 0x01) | + ((linkrate & 0x0F) << 24) | + ((stp_sspsmp_sata & 0x03) << 28)); + payload.firstburstsize_ITNexustimeout = + cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); + + memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, + SAS_ADDR_SIZE); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + + return rc; +} + +/** + * pm80xx_chip_phy_ctl_req - support the local phy operation + * @pm8001_ha: our hba card information. + * @num: the inbound queue number + * @phy_id: the phy id which we wanted to operate + * @phy_op: + */ +static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, + u32 phyId, u32 phy_op) +{ + struct local_phy_ctl_req payload; + struct inbound_queue_table *circularQ; + int ret; + u32 opc = OPC_INB_LOCAL_PHY_CONTROL; + memset(&payload, 0, sizeof(payload)); + circularQ = &pm8001_ha->inbnd_q_tbl[0]; + payload.tag = cpu_to_le32(1); + payload.phyop_phyid = + cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return ret; +} + +static u32 pm80xx_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) +{ + u32 value; +#ifdef PM8001_USE_MSIX + return 1; +#endif + value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); + if (value) + return 1; + return 0; + +} + +/** + * pm8001_chip_isr - PM8001 isr handler. + * @pm8001_ha: our hba card information. + * @irq: irq number. + * @stat: stat. 
+ */ +static irqreturn_t +pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) +{ + pm80xx_chip_interrupt_disable(pm8001_ha, vec); + process_oq(pm8001_ha, vec); + pm80xx_chip_interrupt_enable(pm8001_ha, vec); + return IRQ_HANDLED; +} + +const struct pm8001_dispatch pm8001_80xx_dispatch = { + .name = "pmc80xx", + .chip_init = pm80xx_chip_init, + .chip_soft_rst = pm80xx_chip_soft_rst, + .chip_rst = pm80xx_hw_chip_rst, + .chip_iounmap = pm8001_chip_iounmap, + .isr = pm80xx_chip_isr, + .is_our_interupt = pm80xx_chip_is_our_interupt, + .isr_process_oq = process_oq, + .interrupt_enable = pm80xx_chip_interrupt_enable, + .interrupt_disable = pm80xx_chip_interrupt_disable, + .make_prd = pm8001_chip_make_sg, + .smp_req = pm80xx_chip_smp_req, + .ssp_io_req = pm80xx_chip_ssp_io_req, + .sata_req = pm80xx_chip_sata_req, + .phy_start_req = pm80xx_chip_phy_start_req, + .phy_stop_req = pm80xx_chip_phy_stop_req, + .reg_dev_req = pm80xx_chip_reg_dev_req, + .dereg_dev_req = pm8001_chip_dereg_dev_req, + .phy_ctl_req = pm80xx_chip_phy_ctl_req, + .task_abort = pm8001_chip_abort_task, + .ssp_tm_req = pm8001_chip_ssp_tm_req, + .get_nvmd_req = pm8001_chip_get_nvmd_req, + .set_nvmd_req = pm8001_chip_set_nvmd_req, + .fw_flash_update_req = pm8001_chip_fw_flash_update_req, + .set_dev_state_req = pm8001_chip_set_dev_state_req, +}; diff --git a/trunk/drivers/scsi/pm8001/pm80xx_hwi.h b/trunk/drivers/scsi/pm8001/pm80xx_hwi.h new file mode 100644 index 000000000000..2b760ba75d7b --- /dev/null +++ b/trunk/drivers/scsi/pm8001/pm80xx_hwi.h @@ -0,0 +1,1523 @@ +/* + * PMC-Sierra SPCv/ve 8088/8089 SAS/SATA based host adapters driver + * + * Copyright (c) 2008-2009 USI Co., Ltd. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification. + * 2. Redistributions in binary form must reproduce at minimum a disclaimer + * substantially similar to the "NO WARRANTY" disclaimer below + * ("Disclaimer") and any redistribution must be conditioned upon + * including a substantially similar Disclaimer requirement for further + * binary redistribution. + * 3. Neither the names of the above-listed copyright holders nor the names + * of any contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * Alternatively, this software may be distributed under the terms of the + * GNU General Public License ("GPL") version 2 as published by the Free + * Software Foundation. + * + * NO WARRANTY + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGES. 
+ * + */ + +#ifndef _PMC8001_REG_H_ +#define _PMC8001_REG_H_ + +#include +#include + +/* for Request Opcode of IOMB */ +#define OPC_INB_ECHO 1 /* 0x000 */ +#define OPC_INB_PHYSTART 4 /* 0x004 */ +#define OPC_INB_PHYSTOP 5 /* 0x005 */ +#define OPC_INB_SSPINIIOSTART 6 /* 0x006 */ +#define OPC_INB_SSPINITMSTART 7 /* 0x007 */ +/* 0x8 RESV IN SPCv */ +#define OPC_INB_RSVD 8 /* 0x008 */ +#define OPC_INB_DEV_HANDLE_ACCEPT 9 /* 0x009 */ +#define OPC_INB_SSPTGTIOSTART 10 /* 0x00A */ +#define OPC_INB_SSPTGTRSPSTART 11 /* 0x00B */ +/* 0xC, 0xD, 0xE removed in SPCv */ +#define OPC_INB_SSP_ABORT 15 /* 0x00F */ +#define OPC_INB_DEREG_DEV_HANDLE 16 /* 0x010 */ +#define OPC_INB_GET_DEV_HANDLE 17 /* 0x011 */ +#define OPC_INB_SMP_REQUEST 18 /* 0x012 */ +/* 0x13 SMP_RESPONSE is removed in SPCv */ +#define OPC_INB_SMP_ABORT 20 /* 0x014 */ +/* 0x16 RESV IN SPCv */ +#define OPC_INB_RSVD1 22 /* 0x016 */ +#define OPC_INB_SATA_HOST_OPSTART 23 /* 0x017 */ +#define OPC_INB_SATA_ABORT 24 /* 0x018 */ +#define OPC_INB_LOCAL_PHY_CONTROL 25 /* 0x019 */ +/* 0x1A RESV IN SPCv */ +#define OPC_INB_RSVD2 26 /* 0x01A */ +#define OPC_INB_FW_FLASH_UPDATE 32 /* 0x020 */ +#define OPC_INB_GPIO 34 /* 0x022 */ +#define OPC_INB_SAS_DIAG_MODE_START_END 35 /* 0x023 */ +#define OPC_INB_SAS_DIAG_EXECUTE 36 /* 0x024 */ +/* 0x25 RESV IN SPCv */ +#define OPC_INB_RSVD3 37 /* 0x025 */ +#define OPC_INB_GET_TIME_STAMP 38 /* 0x026 */ +#define OPC_INB_PORT_CONTROL 39 /* 0x027 */ +#define OPC_INB_GET_NVMD_DATA 40 /* 0x028 */ +#define OPC_INB_SET_NVMD_DATA 41 /* 0x029 */ +#define OPC_INB_SET_DEVICE_STATE 42 /* 0x02A */ +#define OPC_INB_GET_DEVICE_STATE 43 /* 0x02B */ +#define OPC_INB_SET_DEV_INFO 44 /* 0x02C */ +/* 0x2D RESV IN SPCv */ +#define OPC_INB_RSVD4 45 /* 0x02D */ +#define OPC_INB_SGPIO_REGISTER 46 /* 0x02E */ +#define OPC_INB_PCIE_DIAG_EXEC 47 /* 0x02F */ +#define OPC_INB_SET_CONTROLLER_CONFIG 48 /* 0x030 */ +#define OPC_INB_GET_CONTROLLER_CONFIG 49 /* 0x031 */ +#define OPC_INB_REG_DEV 50 /* 0x032 */ +#define OPC_INB_SAS_HW_EVENT_ACK 51 /* 0x033 */ +#define OPC_INB_GET_DEVICE_INFO 52 /* 0x034 */ +#define OPC_INB_GET_PHY_PROFILE 53 /* 0x035 */ +#define OPC_INB_FLASH_OP_EXT 54 /* 0x036 */ +#define OPC_INB_SET_PHY_PROFILE 55 /* 0x037 */ +#define OPC_INB_KEK_MANAGEMENT 256 /* 0x100 */ +#define OPC_INB_DEK_MANAGEMENT 257 /* 0x101 */ +#define OPC_INB_SSP_INI_DIF_ENC_IO 258 /* 0x102 */ +#define OPC_INB_SATA_DIF_ENC_IO 259 /* 0x103 */ + +/* for Response Opcode of IOMB */ +#define OPC_OUB_ECHO 1 /* 0x001 */ +#define OPC_OUB_RSVD 4 /* 0x004 */ +#define OPC_OUB_SSP_COMP 5 /* 0x005 */ +#define OPC_OUB_SMP_COMP 6 /* 0x006 */ +#define OPC_OUB_LOCAL_PHY_CNTRL 7 /* 0x007 */ +#define OPC_OUB_RSVD1 10 /* 0x00A */ +#define OPC_OUB_DEREG_DEV 11 /* 0x00B */ +#define OPC_OUB_GET_DEV_HANDLE 12 /* 0x00C */ +#define OPC_OUB_SATA_COMP 13 /* 0x00D */ +#define OPC_OUB_SATA_EVENT 14 /* 0x00E */ +#define OPC_OUB_SSP_EVENT 15 /* 0x00F */ +#define OPC_OUB_RSVD2 16 /* 0x010 */ +/* 0x11 - SMP_RECEIVED Notification removed in SPCv*/ +#define OPC_OUB_SSP_RECV_EVENT 18 /* 0x012 */ +#define OPC_OUB_RSVD3 19 /* 0x013 */ +#define OPC_OUB_FW_FLASH_UPDATE 20 /* 0x014 */ +#define OPC_OUB_GPIO_RESPONSE 22 /* 0x016 */ +#define OPC_OUB_GPIO_EVENT 23 /* 0x017 */ +#define OPC_OUB_GENERAL_EVENT 24 /* 0x018 */ +#define OPC_OUB_SSP_ABORT_RSP 26 /* 0x01A */ +#define OPC_OUB_SATA_ABORT_RSP 27 /* 0x01B */ +#define OPC_OUB_SAS_DIAG_MODE_START_END 28 /* 0x01C */ +#define OPC_OUB_SAS_DIAG_EXECUTE 29 /* 0x01D */ +#define OPC_OUB_GET_TIME_STAMP 30 /* 0x01E */ +#define OPC_OUB_RSVD4 31 /* 0x01F */ 
+#define OPC_OUB_PORT_CONTROL 32 /* 0x020 */ +#define OPC_OUB_SKIP_ENTRY 33 /* 0x021 */ +#define OPC_OUB_SMP_ABORT_RSP 34 /* 0x022 */ +#define OPC_OUB_GET_NVMD_DATA 35 /* 0x023 */ +#define OPC_OUB_SET_NVMD_DATA 36 /* 0x024 */ +#define OPC_OUB_DEVICE_HANDLE_REMOVAL 37 /* 0x025 */ +#define OPC_OUB_SET_DEVICE_STATE 38 /* 0x026 */ +#define OPC_OUB_GET_DEVICE_STATE 39 /* 0x027 */ +#define OPC_OUB_SET_DEV_INFO 40 /* 0x028 */ +#define OPC_OUB_RSVD5 41 /* 0x029 */ +#define OPC_OUB_HW_EVENT 1792 /* 0x700 */ +#define OPC_OUB_DEV_HANDLE_ARRIV 1824 /* 0x720 */ +#define OPC_OUB_THERM_HW_EVENT 1840 /* 0x730 */ +#define OPC_OUB_SGPIO_RESP 2094 /* 0x82E */ +#define OPC_OUB_PCIE_DIAG_EXECUTE 2095 /* 0x82F */ +#define OPC_OUB_DEV_REGIST 2098 /* 0x832 */ +#define OPC_OUB_SAS_HW_EVENT_ACK 2099 /* 0x833 */ +#define OPC_OUB_GET_DEVICE_INFO 2100 /* 0x834 */ +/* spcv specific commands */ +#define OPC_OUB_PHY_START_RESP 2052 /* 0x804 */ +#define OPC_OUB_PHY_STOP_RESP 2053 /* 0x805 */ +#define OPC_OUB_SET_CONTROLLER_CONFIG 2096 /* 0x830 */ +#define OPC_OUB_GET_CONTROLLER_CONFIG 2097 /* 0x831 */ +#define OPC_OUB_GET_PHY_PROFILE 2101 /* 0x835 */ +#define OPC_OUB_FLASH_OP_EXT 2102 /* 0x836 */ +#define OPC_OUB_SET_PHY_PROFILE 2103 /* 0x837 */ +#define OPC_OUB_KEK_MANAGEMENT_RESP 2304 /* 0x900 */ +#define OPC_OUB_DEK_MANAGEMENT_RESP 2305 /* 0x901 */ +#define OPC_OUB_SSP_COALESCED_COMP_RESP 2306 /* 0x902 */ + +/* for phy start*/ +#define SSC_DISABLE_15 (0x01 << 16) +#define SSC_DISABLE_30 (0x02 << 16) +#define SSC_DISABLE_60 (0x04 << 16) +#define SAS_ASE (0x01 << 15) +#define SPINHOLD_DISABLE (0x00 << 14) +#define SPINHOLD_ENABLE (0x01 << 14) +#define LINKMODE_SAS (0x01 << 12) +#define LINKMODE_DSATA (0x02 << 12) +#define LINKMODE_AUTO (0x03 << 12) +#define LINKRATE_15 (0x01 << 8) +#define LINKRATE_30 (0x02 << 8) +#define LINKRATE_60 (0x06 << 8) + +/* Thermal related */ +#define THERMAL_ENABLE 0x1 +#define THERMAL_LOG_ENABLE 0x1 +#define THERMAL_OP_CODE 0x6 +#define LTEMPHIL 70 +#define RTEMPHIL 100 + +/* Encryption info */ +#define SCRATCH_PAD3_ENC_DISABLED 0x00000000 +#define SCRATCH_PAD3_ENC_DIS_ERR 0x00000001 +#define SCRATCH_PAD3_ENC_ENA_ERR 0x00000002 +#define SCRATCH_PAD3_ENC_READY 0x00000003 +#define SCRATCH_PAD3_ENC_MASK SCRATCH_PAD3_ENC_READY + +#define SCRATCH_PAD3_XTS_ENABLED (1 << 14) +#define SCRATCH_PAD3_SMA_ENABLED (1 << 4) +#define SCRATCH_PAD3_SMB_ENABLED (1 << 5) +#define SCRATCH_PAD3_SMF_ENABLED 0 +#define SCRATCH_PAD3_SM_MASK 0x000000F0 +#define SCRATCH_PAD3_ERR_CODE 0x00FF0000 + +#define SEC_MODE_SMF 0x0 +#define SEC_MODE_SMA 0x100 +#define SEC_MODE_SMB 0x200 +#define CIPHER_MODE_ECB 0x00000001 +#define CIPHER_MODE_XTS 0x00000002 +#define KEK_MGMT_SUBOP_KEYCARDUPDATE 0x4 + +/* SAS protocol timer configuration page */ +#define SAS_PROTOCOL_TIMER_CONFIG_PAGE 0x04 +#define STP_MCT_TMO 32 +#define SSP_MCT_TMO 32 +#define SAS_MAX_OPEN_TIME 5 +#define SMP_MAX_CONN_TIMER 0xFF +#define STP_FRM_TIMER 0 +#define STP_IDLE_TIME 5 /* 5 us; controller default */ +#define SAS_MFD 0 +#define SAS_OPNRJT_RTRY_INTVL 2 +#define SAS_DOPNRJT_RTRY_TMO 128 +#define SAS_COPNRJT_RTRY_TMO 128 + +/* + Making ORR bigger than IT NEXUS LOSS which is 2000000us = 2 second. 
+ Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 + is DOPNRJT_RTRY_TMO +*/ +#define SAS_DOPNRJT_RTRY_THR 23438 +#define SAS_COPNRJT_RTRY_THR 23438 +#define SAS_MAX_AIP 0x200000 +#define IT_NEXUS_TIMEOUT 0x7D0 +#define PORT_RECOVERY_TIMEOUT ((IT_NEXUS_TIMEOUT/100) + 30) + +struct mpi_msg_hdr { + __le32 header; /* Bits [11:0] - Message operation code */ + /* Bits [15:12] - Message Category */ + /* Bits [21:16] - Outboundqueue ID for the + operation completion message */ + /* Bits [23:22] - Reserved */ + /* Bits [28:24] - Buffer Count, indicates how + many buffer are allocated for the massage */ + /* Bits [30:29] - Reserved */ + /* Bits [31] - Message Valid bit */ +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to describe enable the phy (128 bytes) + */ +struct phy_start_req { + __le32 tag; + __le32 ase_sh_lm_slr_phyid; + struct sas_identify_frame sas_identify; /* 28 Bytes */ + __le32 spasti; + u32 reserved[21]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY Start Command + * use to disable the phy (128 bytes) + */ +struct phy_stop_req { + __le32 tag; + __le32 phy_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* set device bits fis - device to host */ +struct set_dev_bits_fis { + u8 fis_type; /* 0xA1*/ + u8 n_i_pmport; + /* b7 : n Bit. Notification bit. If set device needs attention. */ + /* b6 : i Bit. Interrupt Bit */ + /* b5-b4: reserved2 */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u32 _r_a; +} __attribute__ ((packed)); +/* PIO setup FIS - device to host */ +struct pio_setup_fis { + u8 fis_type; /* 0x5f */ + u8 i_d_pmPort; + /* b7 : reserved */ + /* b6 : i bit. Interrupt bit */ + /* b5 : d bit. data transfer direction. 
set to 1 for device to host + xfer */ + /* b4 : reserved */ + /* b3-b0: PM Port */ + u8 status; + u8 error; + u8 lbal; + u8 lbam; + u8 lbah; + u8 device; + u8 lbal_exp; + u8 lbam_exp; + u8 lbah_exp; + u8 _r_a; + u8 sector_count; + u8 sector_count_exp; + u8 _r_b; + u8 e_status; + u8 _r_c[2]; + u8 transfer_count; +} __attribute__ ((packed)); + +/* + * brief the data structure of SATA Completion Response + * use to describe the sata task response (64 bytes) + */ +struct sata_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u32 sata_resp[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SAS HW Event Notification + * use to alert the host about the hardware event(64 bytes) + */ +/* updated outbound struct for spcv */ + +struct hw_event_resp { + __le32 lr_status_evt_portid; + __le32 evt_param; + __le32 phyid_npip_portstate; + struct sas_identify_frame sas_identify; + struct dev_to_host_fis sata_fis; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure for thermal event notification + */ + +struct thermal_hw_event { + __le32 thermal_event; + __le32 rht_lht; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of REGISTER DEVICE Command + * use to describe MPI REGISTER DEVICE Command (64 bytes) + */ + +struct reg_dev_req { + __le32 tag; + __le32 phyid_portid; + __le32 dtype_dlr_mcn_ir_retry; + __le32 firstburstsize_ITNexustimeout; + u8 sas_addr[SAS_ADDR_SIZE]; + __le32 upper_device_id; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEREGISTER DEVICE Command + * use to request spc to remove all internal resources associated + * with the device id (64 bytes) + */ + +struct dereg_dev_req { + __le32 tag; + __le32 device_id; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of DEVICE_REGISTRATION Response + * use to notify the completion of the device registration (64 bytes) + */ +struct dev_reg_resp { + __le32 tag; + __le32 status; + __le32 device_id; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of Local PHY Control Command + * use to issue PHY CONTROL to local phy (64 bytes) + */ +struct local_phy_ctl_req { + __le32 tag; + __le32 phyop_phyid; + u32 reserved1[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Local Phy Control Response + * use to describe MPI Local Phy Control Response (64 bytes) + */ + struct local_phy_ctl_resp { + __le32 tag; + __le32 phyop_phyid; + __le32 status; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +#define OP_BITS 0x0000FF00 +#define ID_BITS 0x000000FF + +/* + * brief the data structure of PORT Control Command + * use to control port properties (64 bytes) + */ + +struct port_ctl_req { + __le32 tag; + __le32 portop_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of HW Event Ack Command + * use to acknowledge receive HW event (64 bytes) + */ +struct hw_event_ack_req { + __le32 tag; + __le32 phyid_sea_portid; + __le32 param0; + __le32 param1; + u32 reserved1[27]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of PHY_START Response Command + * indicates the completion of PHY_START command (64 bytes) + */ +struct phy_start_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of 
PHY_STOP Response Command + * indicates the completion of PHY_STOP command (64 bytes) + */ +struct phy_stop_resp { + __le32 tag; + __le32 status; + __le32 phyid; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP Completion Response + * use to indicate a SSP Completion (n bytes) + */ +struct ssp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + __le32 ssptag_rescv_rescpad; + struct ssp_response_iu ssp_resp_iu; + __le32 residual_count; +} __attribute__((packed, aligned(4))); + +#define SSP_RESCV_BIT 0x00010000 + +/* + * brief the data structure of SATA EVNET response + * use to indicate a SATA Completion (64 bytes) + */ +struct sata_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + u32 reserved; + __le32 event_param0; + __le32 event_param1; + __le32 sata_addr_h32; + __le32 sata_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SSP EVNET esponse + * use to indicate a SSP Completion (64 bytes) + */ +struct ssp_event_resp { + __le32 tag; + __le32 event; + __le32 port_id; + __le32 device_id; + __le32 ssp_tag; + __le32 event_param0; + __le32 event_param1; + __le32 sas_addr_h32; + __le32 sas_addr_l32; + __le32 e_udt1_udt0_crc; + __le32 e_udt5_udt4_udt3_udt2; + __le32 a_udt1_udt0_crc; + __le32 a_udt5_udt4_udt3_udt2; + __le32 hwdevid_diferr; + __le32 err_framelen_byteoffset; + __le32 err_dataframe; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of General Event Notification Response + * use to describe MPI General Event Notification Response (64 bytes) + */ +struct general_event_resp { + __le32 status; + __le32 inb_IOMB_payload[14]; +} __attribute__((packed, aligned(4))); + +#define GENERAL_EVENT_PAYLOAD 14 +#define OPCODE_BITS 0x00000fff + +/* + * brief the data structure of SMP Request Command + * use to describe MPI SMP REQUEST Command (64 bytes) + */ +struct smp_req { + __le32 tag; + __le32 device_id; + __le32 len_ip_ir; + /* Bits [0] - Indirect response */ + /* Bits [1] - Indirect Payload */ + /* Bits [15:2] - Reserved */ + /* Bits [23:16] - direct payload Len */ + /* Bits [31:24] - Reserved */ + u8 smp_req16[16]; + union { + u8 smp_req[32]; + struct { + __le64 long_req_addr;/* sg dma address, LE */ + __le32 long_req_size;/* LE */ + u32 _r_a; + __le64 long_resp_addr;/* sg dma address, LE */ + __le32 long_resp_size;/* LE */ + u32 _r_b; + } long_smp_req;/* sequencer extension */ + }; + __le32 rsvd[16]; +} __attribute__((packed, aligned(4))); +/* + * brief the data structure of SMP Completion Response + * use to describe MPI SMP Completion Response (64 bytes) + */ +struct smp_completion_resp { + __le32 tag; + __le32 status; + __le32 param; + u8 _r_a[252]; +} __attribute__((packed, aligned(4))); + +/* + *brief the data structure of SSP SMP SATA Abort Command + * use to describe MPI SSP SMP & SATA Abort Command (64 bytes) + */ +struct task_abort_req { + __le32 tag; + __le32 device_id; + __le32 tag_to_abort; + __le32 abort_all; + u32 reserved[27]; +} __attribute__((packed, aligned(4))); + +/* These flags used for SSP SMP & SATA Abort */ +#define ABORT_MASK 0x3 +#define ABORT_SINGLE 0x0 +#define ABORT_ALL 0x1 + +/** + * brief the data structure of SSP SATA SMP Abort Response + * use to describe SSP SMP & SATA Abort Response ( 64 bytes) + */ 
+struct task_abort_resp { + __le32 tag; + __le32 status; + __le32 scp; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Command + * use to describe MPI SAS Diagnostic Start/End Command (64 bytes) + */ +struct sas_diag_start_end_req { + __le32 tag; + __le32 operation_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Command + * use to describe MPI SAS Diagnostic Execute Command (64 bytes) + */ +struct sas_diag_execute_req { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 pat1_pat2; + __le32 threshold; + __le32 codepat_errmsk; + __le32 pmon; + __le32 pERF1CTL; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +#define SAS_DIAG_PARAM_BYTES 24 + +/* + * brief the data structure of Set Device State Command + * use to describe MPI Set Device State Command (64 bytes) + */ +struct set_dev_state_req { + __le32 tag; + __le32 device_id; + __le32 nds; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/* + * brief the data structure of SATA Start Command + * use to describe MPI SATA IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ + +struct sata_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 ncqtag_atap_dir_m_dad; + struct host_to_dev_fis sata_fis; + u32 reserved1; + u32 reserved2; /* dword 11. rsvd for normal I/O. */ + /* EPLE Descl for enc I/O */ + u32 addr_low; /* dword 12. rsvd for enc I/O */ + u32 addr_high; /* dword 13. reserved for enc I/O */ + __le32 len; /* dword 14: length for normal I/O. */ + /* EPLE Desch for enc I/O */ + __le32 esgl; /* dword 15. rsvd for enc I/O */ + __le32 atapi_scsi_cdb[4]; /* dword 16-19. rsvd for enc I/O */ + /* The below fields are reserved for normal I/O */ + __le32 key_index_mode; /* dword 20 */ + __le32 sector_cnt_enss;/* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28. Encryption SGL address high */ + __le32 enc_addr_high; /* dword 29. Encryption SGL address low */ + __le32 enc_len; /* dword 30. Encryption length */ + __le32 enc_esgl; /* dword 31. Encryption esgl bit */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI TM Start Command + * use to describe MPI SSP INI TM Start Command (64 bytes) + */ +struct ssp_ini_tm_start_req { + __le32 tag; + __le32 device_id; + __le32 relate_tag; + __le32 tmf; + u8 lun[8]; + __le32 ds_ads_m; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +struct ssp_info_unit { + u8 lun[8];/* SCSI Logical Unit Number */ + u8 reserved1;/* reserved */ + u8 efb_prio_attr; + /* B7 : enabledFirstBurst */ + /* B6-3 : taskPriority */ + /* B2-0 : taskAttribute */ + u8 reserved2; /* reserved */ + u8 additional_cdb_len; + /* B7-2 : additional_cdb_len */ + /* B1-0 : reserved */ + u8 cdb[16];/* The SCSI CDB up to 16 bytes length */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SSP INI IO Start Command + * use to describe MPI SSP INI IO Start Command (64 bytes) + * Note: This structure is common for normal / encryption I/O + */ +struct ssp_ini_io_start_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dad_dir_m_tlr; + struct ssp_info_unit ssp_iu; + __le32 addr_low; /* dword 12: sgl low for normal I/O. 
*/ + /* epl_descl for encryption I/O */ + __le32 addr_high; /* dword 13: sgl hi for normal I/O */ + /* dpl_descl for encryption I/O */ + __le32 len; /* dword 14: len for normal I/O. */ + /* edpl_desch for encryption I/O */ + __le32 esgl; /* dword 15: ESGL bit for normal I/O. */ + /* user defined tag mask for enc I/O */ + /* The below fields are reserved for normal I/O */ + u8 udt[12]; /* dword 16-18 */ + __le32 sectcnt_ios; /* dword 19 */ + __le32 key_cmode; /* dword 20 */ + __le32 ks_enss; /* dword 21 */ + __le32 keytagl; /* dword 22 */ + __le32 keytagh; /* dword 23 */ + __le32 twk_val0; /* dword 24 */ + __le32 twk_val1; /* dword 25 */ + __le32 twk_val2; /* dword 26 */ + __le32 twk_val3; /* dword 27 */ + __le32 enc_addr_low; /* dword 28: Encryption sgl addr low */ + __le32 enc_addr_high; /* dword 29: Encryption sgl addr hi */ + __le32 enc_len; /* dword 30: Encryption length */ + __le32 enc_esgl; /* dword 31: ESGL bit for encryption */ +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SSP_INI_DIF_ENC_IO COMMAND + * use to initiate SSP I/O operation with optional DIF/ENC + */ +struct ssp_dif_enc_io_req { + __le32 tag; + __le32 device_id; + __le32 data_len; + __le32 dirMTlr; + __le32 sspiu0; + __le32 sspiu1; + __le32 sspiu2; + __le32 sspiu3; + __le32 sspiu4; + __le32 sspiu5; + __le32 sspiu6; + __le32 epl_des; + __le32 dpl_desl_ndplr; + __le32 dpl_desh; + __le32 uum_uuv_bss_difbits; + u8 udt[12]; + __le32 sectcnt_ios; + __le32 key_cmode; + __le32 ks_enss; + __le32 keytagl; + __le32 keytagh; + __le32 twk_val0; + __le32 twk_val1; + __le32 twk_val2; + __le32 twk_val3; + __le32 addr_low; + __le32 addr_high; + __le32 len; + __le32 esgl; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Firmware download + * use to describe MPI FW DOWNLOAD Command (64 bytes) + */ +struct fw_flash_Update_req { + __le32 tag; + __le32 cur_image_offset; + __le32 cur_image_len; + __le32 total_image_len; + u32 reserved0[7]; + __le32 sgl_addr_lo; + __le32 sgl_addr_hi; + __le32 len; + __le32 ext_reserved; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define FWFLASH_IOMB_RESERVED_LEN 0x07 +/** + * brief the data structure of FW_FLASH_UPDATE Response + * use to describe MPI FW_FLASH_UPDATE Response (64 bytes) + * + */ + struct fw_flash_Update_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Get NVM Data Command + * use to get data from NVM in HBA(64 bytes) + */ +struct get_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +struct set_nvm_data_req { + __le32 tag; + __le32 len_ir_vpdd; + __le32 vpd_offset; + u32 reserved[8]; + __le32 resp_addr_lo; + __le32 resp_addr_hi; + __le32 resp_len; + u32 reserved1[17]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_req { + __le32 tag; + __le32 cfg_pg[14]; + u32 reserved[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET CONTROLLER CONFIG COMMAND + * use to get controller configuration page + */ +struct get_ctrl_cfg_req { + __le32 tag; + __le32 pgcd; + __le32 int_vec; + u32 reserved[28]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for KEK_MANAGEMENT COMMAND + * 
use for KEK management + */ +struct kek_mgmt_req { + __le32 tag; + __le32 new_curidx_ksop; + u32 reserved; + __le32 kblob[12]; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for DEK_MANAGEMENT COMMAND + * use for DEK management + */ +struct dek_mgmt_req { + __le32 tag; + __le32 kidx_dsop; + __le32 dekidx; + __le32 addr_l; + __le32 addr_h; + __le32 nent; + __le32 dbf_tblsize; + u32 reserved[24]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for SET PHY PROFILE COMMAND + * use to retrieve phy specific information + */ +struct set_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + u32 reserved[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for GET PHY PROFILE COMMAND + * use to retrieve phy specific information + */ +struct get_phy_profile_req { + __le32 tag; + __le32 ppc_phyid; + __le32 profile[29]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure for EXT FLASH PARTITION + * use to manage ext flash partition + */ +struct ext_flash_partition_req { + __le32 tag; + __le32 cmd; + __le32 offset; + __le32 len; + u32 reserved[7]; + __le32 addr_low; + __le32 addr_high; + __le32 len1; + __le32 ext; + u32 reserved1[16]; +} __attribute__((packed, aligned(4))); + +#define TWI_DEVICE 0x0 +#define C_SEEPROM 0x1 +#define VPD_FLASH 0x4 +#define AAP1_RDUMP 0x5 +#define IOP_RDUMP 0x6 +#define EXPAN_ROM 0x7 + +#define IPMode 0x80000000 +#define NVMD_TYPE 0x0000000F +#define NVMD_STAT 0x0000FFFF +#define NVMD_LEN 0xFF000000 +/** + * brief the data structure of Get NVMD Data Response + * use to describe MPI Get NVMD Data Response (64 bytes) + */ +struct get_nvm_data_resp { + __le32 tag; + __le32 ir_tda_bn_dps_das_nvm; + __le32 dlen_status; + __le32 nvm_data[12]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Start/End Response + * use to describe MPI SAS Diagnostic Start/End Response (64 bytes) + * + */ +struct sas_diag_start_end_resp { + __le32 tag; + __le32 status; + u32 reserved[13]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of SAS Diagnostic Execute Response + * use to describe MPI SAS Diagnostic Execute Response (64 bytes) + * + */ +struct sas_diag_execute_resp { + __le32 tag; + __le32 cmdtype_cmddesc_phyid; + __le32 Status; + __le32 ReportData; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/** + * brief the data structure of Set Device State Response + * use to describe MPI Set Device State Response (64 bytes) + * + */ +struct set_dev_state_resp { + __le32 tag; + __le32 status; + __le32 device_id; + __le32 pds_nds; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - begins */ +/** + * brief the data structure for SET CONTROLLER CONFIG COMMAND + * use to modify controller configuration + */ +struct set_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr_pgcd; + u32 reserved[12]; +} __attribute__((packed, aligned(4))); + +struct get_ctrl_cfg_resp { + __le32 tag; + __le32 status; + __le32 err_qlfr; + __le32 confg_page[12]; +} __attribute__((packed, aligned(4))); + +struct kek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kidx_new_curr_ksop; + __le32 err_qlfr; + u32 reserved[11]; +} __attribute__((packed, aligned(4))); + +struct dek_mgmt_resp { + __le32 tag; + __le32 status; + __le32 kekidx_tbls_dsop; + __le32 dekidx; + __le32 err_qlfr; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct
get_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct flash_op_ext_resp { + __le32 tag; + __le32 cmd; + __le32 status; + __le32 epart_size; + __le32 epart_sect_size; + u32 reserved[10]; +} __attribute__((packed, aligned(4))); + +struct set_phy_profile_resp { + __le32 tag; + __le32 status; + __le32 ppc_phyid; + __le32 ppc_specific_rsp[12]; +} __attribute__((packed, aligned(4))); + +struct ssp_coalesced_comp_resp { + __le32 coal_cnt; + __le32 tag0; + __le32 ssp_tag0; + __le32 tag1; + __le32 ssp_tag1; + __le32 add_tag_ssp_tag[10]; +} __attribute__((packed, aligned(4))); + +/* new outbound structure for spcv - ends */ + +/* brief data structure for SAS protocol timer configuration page. + * + */ +struct SASProtocolTimerConfig { + __le32 pageCode; /* 0 */ + __le32 MST_MSI; /* 1 */ + __le32 STP_SSP_MCT_TMO; /* 2 */ + __le32 STP_FRM_TMO; /* 3 */ + __le32 STP_IDLE_TMO; /* 4 */ + __le32 OPNRJT_RTRY_INTVL; /* 5 */ + __le32 Data_Cmd_OPNRJT_RTRY_TMO; /* 6 */ + __le32 Data_Cmd_OPNRJT_RTRY_THR; /* 7 */ + __le32 MAX_AIP; /* 8 */ +} __attribute__((packed, aligned(4))); + +typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t; + +#define NDS_BITS 0x0F +#define PDS_BITS 0xF0 + +/* + * HW Events type + */ + +#define HW_EVENT_RESET_START 0x01 +#define HW_EVENT_CHIP_RESET_COMPLETE 0x02 +#define HW_EVENT_PHY_STOP_STATUS 0x03 +#define HW_EVENT_SAS_PHY_UP 0x04 +#define HW_EVENT_SATA_PHY_UP 0x05 +#define HW_EVENT_SATA_SPINUP_HOLD 0x06 +#define HW_EVENT_PHY_DOWN 0x07 +#define HW_EVENT_PORT_INVALID 0x08 +#define HW_EVENT_BROADCAST_CHANGE 0x09 +#define HW_EVENT_PHY_ERROR 0x0A +#define HW_EVENT_BROADCAST_SES 0x0B +#define HW_EVENT_INBOUND_CRC_ERROR 0x0C +#define HW_EVENT_HARD_RESET_RECEIVED 0x0D +#define HW_EVENT_MALFUNCTION 0x0E +#define HW_EVENT_ID_FRAME_TIMEOUT 0x0F +#define HW_EVENT_BROADCAST_EXP 0x10 +#define HW_EVENT_PHY_START_STATUS 0x11 +#define HW_EVENT_LINK_ERR_INVALID_DWORD 0x12 +#define HW_EVENT_LINK_ERR_DISPARITY_ERROR 0x13 +#define HW_EVENT_LINK_ERR_CODE_VIOLATION 0x14 +#define HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH 0x15 +#define HW_EVENT_LINK_ERR_PHY_RESET_FAILED 0x16 +#define HW_EVENT_PORT_RECOVERY_TIMER_TMO 0x17 +#define HW_EVENT_PORT_RECOVER 0x18 +#define HW_EVENT_PORT_RESET_TIMER_TMO 0x19 +#define HW_EVENT_PORT_RESET_COMPLETE 0x20 +#define EVENT_BROADCAST_ASYNCH_EVENT 0x21 + +/* port state */ +#define PORT_NOT_ESTABLISHED 0x00 +#define PORT_VALID 0x01 +#define PORT_LOSTCOMM 0x02 +#define PORT_IN_RESET 0x04 +#define PORT_3RD_PARTY_RESET 0x07 +#define PORT_INVALID 0x08 + +/* + * SSP/SMP/SATA IO Completion Status values + */ + +#define IO_SUCCESS 0x00 +#define IO_ABORTED 0x01 +#define IO_OVERFLOW 0x02 +#define IO_UNDERFLOW 0x03 +#define IO_FAILED 0x04 +#define IO_ABORT_RESET 0x05 +#define IO_NOT_VALID 0x06 +#define IO_NO_DEVICE 0x07 +#define IO_ILLEGAL_PARAMETER 0x08 +#define IO_LINK_FAILURE 0x09 +#define IO_PROG_ERROR 0x0A + +#define IO_EDC_IN_ERROR 0x0B +#define IO_EDC_OUT_ERROR 0x0C +#define IO_ERROR_HW_TIMEOUT 0x0D +#define IO_XFER_ERROR_BREAK 0x0E +#define IO_XFER_ERROR_PHY_NOT_READY 0x0F +#define IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED 0x10 +#define IO_OPEN_CNX_ERROR_ZONE_VIOLATION 0x11 +#define IO_OPEN_CNX_ERROR_BREAK 0x12 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS 0x13 +#define IO_OPEN_CNX_ERROR_BAD_DESTINATION 0x14 +#define IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED 0x15 +#define IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY 0x16 +#define IO_OPEN_CNX_ERROR_WRONG_DESTINATION 0x17 +/* This 
error code 0x18 is not used on SPCv */ +#define IO_OPEN_CNX_ERROR_UNKNOWN_ERROR 0x18 +#define IO_XFER_ERROR_NAK_RECEIVED 0x19 +#define IO_XFER_ERROR_ACK_NAK_TIMEOUT 0x1A +#define IO_XFER_ERROR_PEER_ABORTED 0x1B +#define IO_XFER_ERROR_RX_FRAME 0x1C +#define IO_XFER_ERROR_DMA 0x1D +#define IO_XFER_ERROR_CREDIT_TIMEOUT 0x1E +#define IO_XFER_ERROR_SATA_LINK_TIMEOUT 0x1F +#define IO_XFER_ERROR_SATA 0x20 + +/* This error code 0x22 is not used on SPCv */ +#define IO_XFER_ERROR_ABORTED_DUE_TO_SRST 0x22 +#define IO_XFER_ERROR_REJECTED_NCQ_MODE 0x21 +#define IO_XFER_ERROR_ABORTED_NCQ_MODE 0x23 +#define IO_XFER_OPEN_RETRY_TIMEOUT 0x24 +/* This error code 0x25 is not used on SPCv */ +#define IO_XFER_SMP_RESP_CONNECTION_ERROR 0x25 +#define IO_XFER_ERROR_UNEXPECTED_PHASE 0x26 +#define IO_XFER_ERROR_XFER_RDY_OVERRUN 0x27 +#define IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED 0x28 +#define IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT 0x30 + +/* The following error code 0x31 and 0x32 are not using (obsolete) */ +#define IO_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NAK 0x31 +#define IO_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK 0x32 + +#define IO_XFER_ERROR_OFFSET_MISMATCH 0x34 +#define IO_XFER_ERROR_XFER_ZERO_DATA_LEN 0x35 +#define IO_XFER_CMD_FRAME_ISSUED 0x36 +#define IO_ERROR_INTERNAL_SMP_RESOURCE 0x37 +#define IO_PORT_IN_RESET 0x38 +#define IO_DS_NON_OPERATIONAL 0x39 +#define IO_DS_IN_RECOVERY 0x3A +#define IO_TM_TAG_NOT_FOUND 0x3B +#define IO_XFER_PIO_SETUP_ERROR 0x3C +#define IO_SSP_EXT_IU_ZERO_LEN_ERROR 0x3D +#define IO_DS_IN_ERROR 0x3E +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY 0x3F +#define IO_ABORT_IN_PROGRESS 0x40 +#define IO_ABORT_DELAYED 0x41 +#define IO_INVALID_LENGTH 0x42 + +/********** additional response event values *****************/ + +#define IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY_ALT 0x43 +#define IO_XFER_OPEN_RETRY_BACKOFF_THRESHOLD_REACHED 0x44 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_TMO 0x45 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_NO_DEST 0x46 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_OPEN_COLLIDE 0x47 +#define IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS_PATHWAY_BLOCKED 0x48 +#define IO_DS_INVALID 0x49 +/* WARNING: the value is not contiguous from here */ +#define IO_XFER_ERR_LAST_PIO_DATAIN_CRC_ERR 0x52 +#define IO_XFER_DMA_ACTIVATE_TIMEOUT 0x53 +#define IO_XFER_ERROR_INTERNAL_CRC_ERROR 0x54 +#define MPI_IO_RQE_BUSY_FULL 0x55 +#define IO_XFER_ERR_EOB_DATA_OVERRUN 0x56 +#define IO_XFR_ERROR_INVALID_SSP_RSP_FRAME 0x57 +#define IO_OPEN_CNX_ERROR_OPEN_PREEMPTED 0x58 + +#define MPI_ERR_IO_RESOURCE_UNAVAILABLE 0x1004 +#define MPI_ERR_ATAPI_DEVICE_BUSY 0x1024 + +#define IO_XFR_ERROR_DEK_KEY_CACHE_MISS 0x2040 +/* + * An encryption IO request failed due to DEK Key Tag mismatch. + * The key tag supplied in the encryption IOMB does not match with + * the Key Tag in the referenced DEK Entry. + */ +#define IO_XFR_ERROR_DEK_KEY_TAG_MISMATCH 0x2041 +#define IO_XFR_ERROR_CIPHER_MODE_INVALID 0x2042 +/* + * An encryption I/O request failed because the initial value (IV) + * in the unwrapped DEK blob didn't match the IV used to unwrap it. + */ +#define IO_XFR_ERROR_DEK_IV_MISMATCH 0x2043 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. */ +#define IO_XFR_ERROR_DEK_RAM_INTERFACE_ERROR 0x2044 +/* An encryption I/O request failed due to an internal RAM ECC or + * interface error while unwrapping the DEK. 
*/ +#define IO_XFR_ERROR_INTERNAL_RAM 0x2045 +/* + * An encryption I/O request failed + * because the DEK index specified in the I/O was outside the bounds of + * the total number of entries in the host DEK table. + */ +#define IO_XFR_ERROR_DEK_INDEX_OUT_OF_BOUNDS0x2046 + +/* define DIF IO response error status code */ +#define IO_XFR_ERROR_DIF_MISMATCH 0x3000 +#define IO_XFR_ERROR_DIF_APPLICATION_TAG_MISMATCH 0x3001 +#define IO_XFR_ERROR_DIF_REFERENCE_TAG_MISMATCH 0x3002 +#define IO_XFR_ERROR_DIF_CRC_MISMATCH 0x3003 + +/* define operator management response status and error qualifier code */ +#define OPR_MGMT_OP_NOT_SUPPORTED 0x2060 +#define OPR_MGMT_MPI_ENC_ERR_OPR_PARAM_ILLEGAL 0x2061 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ID_NOT_FOUND 0x2062 +#define OPR_MGMT_MPI_ENC_ERR_OPR_ROLE_NOT_MATCH 0x2063 +#define OPR_MGMT_MPI_ENC_ERR_OPR_MAX_NUM_EXCEEDED 0x2064 +#define OPR_MGMT_MPI_ENC_ERR_KEK_UNWRAP_FAIL 0x2022 +#define OPR_MGMT_MPI_ENC_ERR_NVRAM_OPERATION_FAILURE 0x2023 +/***************** additional response event values ***************/ + +/* WARNING: This error code must always be the last number. + * If you add error code, modify this code also + * It is used as an index + */ +#define IO_ERROR_UNKNOWN_GENERIC 0x2023 + +/* MSGU CONFIGURATION TABLE*/ + +#define SPCv_MSGU_CFG_TABLE_UPDATE 0x01 +#define SPCv_MSGU_CFG_TABLE_RESET 0x02 +#define SPCv_MSGU_CFG_TABLE_FREEZE 0x04 +#define SPCv_MSGU_CFG_TABLE_UNFREEZE 0x08 +#define MSGU_IBDB_SET 0x00 +#define MSGU_HOST_INT_STATUS 0x08 +#define MSGU_HOST_INT_MASK 0x0C +#define MSGU_IOPIB_INT_STATUS 0x18 +#define MSGU_IOPIB_INT_MASK 0x1C +#define MSGU_IBDB_CLEAR 0x20 + +#define MSGU_MSGU_CONTROL 0x24 +#define MSGU_ODR 0x20 +#define MSGU_ODCR 0x28 + +#define MSGU_ODMR 0x30 +#define MSGU_ODMR_U 0x34 +#define MSGU_ODMR_CLR 0x38 +#define MSGU_ODMR_CLR_U 0x3C +#define MSGU_OD_RSVD 0x40 + +#define MSGU_SCRATCH_PAD_0 0x44 +#define MSGU_SCRATCH_PAD_1 0x48 +#define MSGU_SCRATCH_PAD_2 0x4C +#define MSGU_SCRATCH_PAD_3 0x50 +#define MSGU_HOST_SCRATCH_PAD_0 0x54 +#define MSGU_HOST_SCRATCH_PAD_1 0x58 +#define MSGU_HOST_SCRATCH_PAD_2 0x5C +#define MSGU_HOST_SCRATCH_PAD_3 0x60 +#define MSGU_HOST_SCRATCH_PAD_4 0x64 +#define MSGU_HOST_SCRATCH_PAD_5 0x68 +#define MSGU_HOST_SCRATCH_PAD_6 0x6C +#define MSGU_HOST_SCRATCH_PAD_7 0x70 + +/* bit definition for ODMR register */ +#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all + interrupt vector */ +#define ODMR_CLEAR_ALL 0 /* clear all + interrupt vector */ +/* bit definition for ODCR register */ +#define ODCR_CLEAR_ALL 0xFFFFFFFF /* mask all + interrupt vector*/ +/* MSIX Interupts */ +#define MSIX_TABLE_OFFSET 0x2000 +#define MSIX_TABLE_ELEMENT_SIZE 0x10 +#define MSIX_INTERRUPT_CONTROL_OFFSET 0xC +#define MSIX_TABLE_BASE (MSIX_TABLE_OFFSET + \ + MSIX_INTERRUPT_CONTROL_OFFSET) +#define MSIX_INTERRUPT_DISABLE 0x1 +#define MSIX_INTERRUPT_ENABLE 0x0 + +/* state definition for Scratch Pad1 register */ +#define SCRATCH_PAD_RAAE_READY 0x3 +#define SCRATCH_PAD_ILA_READY 0xC +#define SCRATCH_PAD_BOOT_LOAD_SUCCESS 0x0 +#define SCRATCH_PAD_IOP0_READY 0xC00 +#define SCRATCH_PAD_IOP1_READY 0x3000 + +/* boot loader state */ +#define SCRATCH_PAD1_BOOTSTATE_MASK 0x70 /* Bit 4-6 */ +#define SCRATCH_PAD1_BOOTSTATE_SUCESS 0x0 /* Load successful */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SEEPROM 0x10 /* HDA SEEPROM */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_BOOTSTRAP 0x20 /* HDA BootStrap Pins */ +#define SCRATCH_PAD1_BOOTSTATE_HDA_SOFTRESET 0x30 /* HDA Soft Reset */ +#define SCRATCH_PAD1_BOOTSTATE_CRIT_ERROR 0x40 /* HDA critical error */ +#define 
SCRATCH_PAD1_BOOTSTATE_R1 0x50 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_R2 0x60 /* Reserved */ +#define SCRATCH_PAD1_BOOTSTATE_FATAL 0x70 /* Fatal Error */ + + /* state definition for Scratch Pad2 register */ +#define SCRATCH_PAD2_POR 0x00 /* power on state */ +#define SCRATCH_PAD2_SFR 0x01 /* soft reset state */ +#define SCRATCH_PAD2_ERR 0x02 /* error state */ +#define SCRATCH_PAD2_RDY 0x03 /* ready state */ +#define SCRATCH_PAD2_FWRDY_RST 0x04 /* FW rdy for soft reset flag */ +#define SCRATCH_PAD2_IOPRDY_RST 0x08 /* IOP ready for soft reset */ +#define SCRATCH_PAD2_STATE_MASK 0xFFFFFFF4 /* ScratchPad 2 + Mask, bit1-0 State */ +#define SCRATCH_PAD2_RESERVED 0x000003FC/* Scratch Pad1 + Reserved bit 2 to 9 */ + +#define SCRATCH_PAD_ERROR_MASK 0xFFFFFC00 /* Error mask bits */ +#define SCRATCH_PAD_STATE_MASK 0x00000003 /* State Mask bits */ + +/* main configuration offset - byte offset */ +#define MAIN_SIGNATURE_OFFSET 0x00 /* DWORD 0x00 */ +#define MAIN_INTERFACE_REVISION 0x04 /* DWORD 0x01 */ +#define MAIN_FW_REVISION 0x08 /* DWORD 0x02 */ +#define MAIN_MAX_OUTSTANDING_IO_OFFSET 0x0C /* DWORD 0x03 */ +#define MAIN_MAX_SGL_OFFSET 0x10 /* DWORD 0x04 */ +#define MAIN_CNTRL_CAP_OFFSET 0x14 /* DWORD 0x05 */ +#define MAIN_GST_OFFSET 0x18 /* DWORD 0x06 */ +#define MAIN_IBQ_OFFSET 0x1C /* DWORD 0x07 */ +#define MAIN_OBQ_OFFSET 0x20 /* DWORD 0x08 */ +#define MAIN_IQNPPD_HPPD_OFFSET 0x24 /* DWORD 0x09 */ + +/* 0x28 - 0x4C - RSVD */ +#define MAIN_EVENT_CRC_CHECK 0x48 /* DWORD 0x12 */ +#define MAIN_EVENT_LOG_ADDR_HI 0x50 /* DWORD 0x14 */ +#define MAIN_EVENT_LOG_ADDR_LO 0x54 /* DWORD 0x15 */ +#define MAIN_EVENT_LOG_BUFF_SIZE 0x58 /* DWORD 0x16 */ +#define MAIN_EVENT_LOG_OPTION 0x5C /* DWORD 0x17 */ +#define MAIN_PCS_EVENT_LOG_ADDR_HI 0x60 /* DWORD 0x18 */ +#define MAIN_PCS_EVENT_LOG_ADDR_LO 0x64 /* DWORD 0x19 */ +#define MAIN_PCS_EVENT_LOG_BUFF_SIZE 0x68 /* DWORD 0x1A */ +#define MAIN_PCS_EVENT_LOG_OPTION 0x6C /* DWORD 0x1B */ +#define MAIN_FATAL_ERROR_INTERRUPT 0x70 /* DWORD 0x1C */ +#define MAIN_FATAL_ERROR_RDUMP0_OFFSET 0x74 /* DWORD 0x1D */ +#define MAIN_FATAL_ERROR_RDUMP0_LENGTH 0x78 /* DWORD 0x1E */ +#define MAIN_FATAL_ERROR_RDUMP1_OFFSET 0x7C /* DWORD 0x1F */ +#define MAIN_FATAL_ERROR_RDUMP1_LENGTH 0x80 /* DWORD 0x20 */ +#define MAIN_GPIO_LED_FLAGS_OFFSET 0x84 /* DWORD 0x21 */ +#define MAIN_ANALOG_SETUP_OFFSET 0x88 /* DWORD 0x22 */ + +#define MAIN_INT_VECTOR_TABLE_OFFSET 0x8C /* DWORD 0x23 */ +#define MAIN_SAS_PHY_ATTR_TABLE_OFFSET 0x90 /* DWORD 0x24 */ +#define MAIN_PORT_RECOVERY_TIMER 0x94 /* DWORD 0x25 */ +#define MAIN_INT_REASSERTION_DELAY 0x98 /* DWORD 0x26 */ + +/* Gereral Status Table offset - byte offset */ +#define GST_GSTLEN_MPIS_OFFSET 0x00 +#define GST_IQ_FREEZE_STATE0_OFFSET 0x04 +#define GST_IQ_FREEZE_STATE1_OFFSET 0x08 +#define GST_MSGUTCNT_OFFSET 0x0C +#define GST_IOPTCNT_OFFSET 0x10 +/* 0x14 - 0x34 - RSVD */ +#define GST_GPIO_INPUT_VAL 0x38 +/* 0x3c - 0x40 - RSVD */ +#define GST_RERRINFO_OFFSET0 0x44 +#define GST_RERRINFO_OFFSET1 0x48 +#define GST_RERRINFO_OFFSET2 0x4c +#define GST_RERRINFO_OFFSET3 0x50 +#define GST_RERRINFO_OFFSET4 0x54 +#define GST_RERRINFO_OFFSET5 0x58 +#define GST_RERRINFO_OFFSET6 0x5c +#define GST_RERRINFO_OFFSET7 0x60 + +/* General Status Table - MPI state */ +#define GST_MPI_STATE_UNINIT 0x00 +#define GST_MPI_STATE_INIT 0x01 +#define GST_MPI_STATE_TERMINATION 0x02 +#define GST_MPI_STATE_ERROR 0x03 +#define GST_MPI_STATE_MASK 0x07 + +/* Per SAS PHY Attributes */ + +#define PSPA_PHYSTATE0_OFFSET 0x00 /* Dword V */ +#define PSPA_OB_HW_EVENT_PID0_OFFSET 
0x04 /* DWORD V+1 */ +#define PSPA_PHYSTATE1_OFFSET 0x08 /* Dword V+2 */ +#define PSPA_OB_HW_EVENT_PID1_OFFSET 0x0C /* DWORD V+3 */ +#define PSPA_PHYSTATE2_OFFSET 0x10 /* Dword V+4 */ +#define PSPA_OB_HW_EVENT_PID2_OFFSET 0x14 /* DWORD V+5 */ +#define PSPA_PHYSTATE3_OFFSET 0x18 /* Dword V+6 */ +#define PSPA_OB_HW_EVENT_PID3_OFFSET 0x1C /* DWORD V+7 */ +#define PSPA_PHYSTATE4_OFFSET 0x20 /* Dword V+8 */ +#define PSPA_OB_HW_EVENT_PID4_OFFSET 0x24 /* DWORD V+9 */ +#define PSPA_PHYSTATE5_OFFSET 0x28 /* Dword V+10 */ +#define PSPA_OB_HW_EVENT_PID5_OFFSET 0x2C /* DWORD V+11 */ +#define PSPA_PHYSTATE6_OFFSET 0x30 /* Dword V+12 */ +#define PSPA_OB_HW_EVENT_PID6_OFFSET 0x34 /* DWORD V+13 */ +#define PSPA_PHYSTATE7_OFFSET 0x38 /* Dword V+14 */ +#define PSPA_OB_HW_EVENT_PID7_OFFSET 0x3C /* DWORD V+15 */ +#define PSPA_PHYSTATE8_OFFSET 0x40 /* DWORD V+16 */ +#define PSPA_OB_HW_EVENT_PID8_OFFSET 0x44 /* DWORD V+17 */ +#define PSPA_PHYSTATE9_OFFSET 0x48 /* DWORD V+18 */ +#define PSPA_OB_HW_EVENT_PID9_OFFSET 0x4C /* DWORD V+19 */ +#define PSPA_PHYSTATE10_OFFSET 0x50 /* DWORD V+20 */ +#define PSPA_OB_HW_EVENT_PID10_OFFSET 0x54 /* DWORD V+21 */ +#define PSPA_PHYSTATE11_OFFSET 0x58 /* DWORD V+22 */ +#define PSPA_OB_HW_EVENT_PID11_OFFSET 0x5C /* DWORD V+23 */ +#define PSPA_PHYSTATE12_OFFSET 0x60 /* DWORD V+24 */ +#define PSPA_OB_HW_EVENT_PID12_OFFSET 0x64 /* DWORD V+25 */ +#define PSPA_PHYSTATE13_OFFSET 0x68 /* DWORD V+26 */ +#define PSPA_OB_HW_EVENT_PID13_OFFSET 0x6c /* DWORD V+27 */ +#define PSPA_PHYSTATE14_OFFSET 0x70 /* DWORD V+28 */ +#define PSPA_OB_HW_EVENT_PID14_OFFSET 0x74 /* DWORD V+29 */ +#define PSPA_PHYSTATE15_OFFSET 0x78 /* DWORD V+30 */ +#define PSPA_OB_HW_EVENT_PID15_OFFSET 0x7c /* DWORD V+31 */ +/* end PSPA */ + +/* inbound queue configuration offset - byte offset */ +#define IB_PROPERITY_OFFSET 0x00 +#define IB_BASE_ADDR_HI_OFFSET 0x04 +#define IB_BASE_ADDR_LO_OFFSET 0x08 +#define IB_CI_BASE_ADDR_HI_OFFSET 0x0C +#define IB_CI_BASE_ADDR_LO_OFFSET 0x10 +#define IB_PIPCI_BAR 0x14 +#define IB_PIPCI_BAR_OFFSET 0x18 +#define IB_RESERVED_OFFSET 0x1C + +/* outbound queue configuration offset - byte offset */ +#define OB_PROPERITY_OFFSET 0x00 +#define OB_BASE_ADDR_HI_OFFSET 0x04 +#define OB_BASE_ADDR_LO_OFFSET 0x08 +#define OB_PI_BASE_ADDR_HI_OFFSET 0x0C +#define OB_PI_BASE_ADDR_LO_OFFSET 0x10 +#define OB_CIPCI_BAR 0x14 +#define OB_CIPCI_BAR_OFFSET 0x18 +#define OB_INTERRUPT_COALES_OFFSET 0x1C +#define OB_DYNAMIC_COALES_OFFSET 0x20 +#define OB_PROPERTY_INT_ENABLE 0x40000000 + +#define MBIC_NMI_ENABLE_VPE0_IOP 0x000418 +#define MBIC_NMI_ENABLE_VPE0_AAP1 0x000418 +/* PCIE registers - BAR2(0x18), BAR1(win) 0x010000 */ +#define PCIE_EVENT_INTERRUPT_ENABLE 0x003040 +#define PCIE_EVENT_INTERRUPT 0x003044 +#define PCIE_ERROR_INTERRUPT_ENABLE 0x003048 +#define PCIE_ERROR_INTERRUPT 0x00304C + +/* SPCV soft reset */ +#define SPC_REG_SOFT_RESET 0x00001000 +#define SPCv_NORMAL_RESET_VALUE 0x1 + +#define SPCv_SOFT_RESET_READ_MASK 0xC0 +#define SPCv_SOFT_RESET_NO_RESET 0x0 +#define SPCv_SOFT_RESET_NORMAL_RESET_OCCURED 0x40 +#define SPCv_SOFT_RESET_HDA_MODE_OCCURED 0x80 +#define SPCv_SOFT_RESET_CHIP_RESET_OCCURED 0xC0 + +/* signature definition for host scratch pad0 register */ +#define SPC_SOFT_RESET_SIGNATURE 0x252acbcd +/* Signature for Soft Reset */ + +/* SPC Reset register - BAR4(0x20), BAR2(win) (need dynamic mapping) */ +#define SPC_REG_RESET 0x000000/* reset register */ + +/* bit definition for SPC_RESET register */ +#define SPC_REG_RESET_OSSP 0x00000001 +#define SPC_REG_RESET_RAAE 0x00000002 +#define 
SPC_REG_RESET_PCS_SPBC 0x00000004 +#define SPC_REG_RESET_PCS_IOP_SS 0x00000008 +#define SPC_REG_RESET_PCS_AAP1_SS 0x00000010 +#define SPC_REG_RESET_PCS_AAP2_SS 0x00000020 +#define SPC_REG_RESET_PCS_LM 0x00000040 +#define SPC_REG_RESET_PCS 0x00000080 +#define SPC_REG_RESET_GSM 0x00000100 +#define SPC_REG_RESET_DDR2 0x00010000 +#define SPC_REG_RESET_BDMA_CORE 0x00020000 +#define SPC_REG_RESET_BDMA_SXCBI 0x00040000 +#define SPC_REG_RESET_PCIE_AL_SXCBI 0x00080000 +#define SPC_REG_RESET_PCIE_PWR 0x00100000 +#define SPC_REG_RESET_PCIE_SFT 0x00200000 +#define SPC_REG_RESET_PCS_SXCBI 0x00400000 +#define SPC_REG_RESET_LMS_SXCBI 0x00800000 +#define SPC_REG_RESET_PMIC_SXCBI 0x01000000 +#define SPC_REG_RESET_PMIC_CORE 0x02000000 +#define SPC_REG_RESET_PCIE_PC_SXCBI 0x04000000 +#define SPC_REG_RESET_DEVICE 0x80000000 + +/* registers for BAR Shifting - BAR2(0x18), BAR1(win) */ +#define SPCV_IBW_AXI_TRANSLATION_LOW 0x001010 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +/* Dynamic map through Bar4 - 0x00700000 */ +#define GSM_CONFIG_RESET 0x00000000 +#define RAM_ECC_DB_ERR 0x00000018 +#define GSM_READ_ADDR_PARITY_INDIC 0x00000058 +#define GSM_WRITE_ADDR_PARITY_INDIC 0x00000060 +#define GSM_WRITE_DATA_PARITY_INDIC 0x00000068 +#define GSM_READ_ADDR_PARITY_CHECK 0x00000038 +#define GSM_WRITE_ADDR_PARITY_CHECK 0x00000040 +#define GSM_WRITE_DATA_PARITY_CHECK 0x00000048 + +#define RB6_ACCESS_REG 0x6A0000 +#define HDAC_EXEC_CMD 0x0002 +#define HDA_C_PA 0xcb +#define HDA_SEQ_ID_BITS 0x00ff0000 +#define HDA_GSM_OFFSET_BITS 0x00FFFFFF +#define HDA_GSM_CMD_OFFSET_BITS 0x42C0 +#define HDA_GSM_RSP_OFFSET_BITS 0x42E0 + +#define MBIC_AAP1_ADDR_BASE 0x060000 +#define MBIC_IOP_ADDR_BASE 0x070000 +#define GSM_ADDR_BASE 0x0700000 +#define SPC_TOP_LEVEL_ADDR_BASE 0x000000 +#define GSM_CONFIG_RESET_VALUE 0x00003b00 +#define GPIO_ADDR_BASE 0x00090000 +#define GPIO_GPIO_0_0UTPUT_CTL_OFFSET 0x0000010c + +/* RB6 offset */ +#define SPC_RB6_OFFSET 0x80C0 +/* Magic number of soft reset for RB6 */ +#define RB6_MAGIC_NUMBER_RST 0x1234 + +/* Device Register status */ +#define DEVREG_SUCCESS 0x00 +#define DEVREG_FAILURE_OUT_OF_RESOURCE 0x01 +#define DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED 0x02 +#define DEVREG_FAILURE_INVALID_PHY_ID 0x03 +#define DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED 0x04 +#define DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE 0x05 +#define DEVREG_FAILURE_PORT_NOT_VALID_STATE 0x06 +#define DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID 0x07 + +#endif diff --git a/trunk/drivers/scsi/qla2xxx/Kconfig b/trunk/drivers/scsi/qla2xxx/Kconfig index 317a7fdc3b82..23d607218ae8 100644 --- a/trunk/drivers/scsi/qla2xxx/Kconfig +++ b/trunk/drivers/scsi/qla2xxx/Kconfig @@ -24,7 +24,9 @@ config SCSI_QLA_FC Firmware images can be retrieved from: - ftp://ftp.qlogic.com/outgoing/linux/firmware/ + http://ldriver.qlogic.com/firmware/ + + They are also included in the linux-firmware tree as well. 
config TCM_QLA2XXX tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs" diff --git a/trunk/drivers/scsi/qla2xxx/qla_mr.c b/trunk/drivers/scsi/qla2xxx/qla_mr.c index 729b74389f83..937fed8cb038 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_mr.c +++ b/trunk/drivers/scsi/qla2xxx/qla_mr.c @@ -3003,12 +3003,10 @@ qlafx00_build_scsi_iocbs(srb_t *sp, struct cmd_type_7_fx00 *cmd_pkt, /* Set transfer direction */ if (cmd->sc_data_direction == DMA_TO_DEVICE) { - lcmd_pkt->cntrl_flags = - __constant_cpu_to_le16(TMF_WRITE_DATA); + lcmd_pkt->cntrl_flags = TMF_WRITE_DATA; vha->qla_stats.output_bytes += scsi_bufflen(cmd); } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { - lcmd_pkt->cntrl_flags = - __constant_cpu_to_le16(TMF_READ_DATA); + lcmd_pkt->cntrl_flags = TMF_READ_DATA; vha->qla_stats.input_bytes += scsi_bufflen(cmd); } diff --git a/trunk/drivers/scsi/qla2xxx/qla_os.c b/trunk/drivers/scsi/qla2xxx/qla_os.c index 5307bf86d5e0..ad72c1d85111 100644 --- a/trunk/drivers/scsi/qla2xxx/qla_os.c +++ b/trunk/drivers/scsi/qla2xxx/qla_os.c @@ -644,7 +644,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr) qla2x00_rel_sp(sp->fcport->vha, sp); } -void +static void qla2x00_sp_compl(void *data, void *ptr, int res) { struct qla_hw_data *ha = (struct qla_hw_data *)data; diff --git a/trunk/drivers/scsi/qla4xxx/ql4_iocb.c b/trunk/drivers/scsi/qla4xxx/ql4_iocb.c index 14fec976f634..fad71ed067ec 100644 --- a/trunk/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/trunk/drivers/scsi/qla4xxx/ql4_iocb.c @@ -507,6 +507,7 @@ static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb, mrb->mbox_cmd = in_mbox[0]; wmb(); + ha->iocb_cnt += mrb->iocb_cnt; ha->isp_ops->queue_iocb(ha); exit_mbox_iocb: spin_unlock_irqrestore(&ha->hardware_lock, flags); diff --git a/trunk/drivers/scsi/qla4xxx/ql4_os.c b/trunk/drivers/scsi/qla4xxx/ql4_os.c index a47f99957ba8..4d231c12463e 100644 --- a/trunk/drivers/scsi/qla4xxx/ql4_os.c +++ b/trunk/drivers/scsi/qla4xxx/ql4_os.c @@ -2216,14 +2216,14 @@ static int qla4xxx_copy_to_fwddb_param(struct iscsi_bus_flash_session *sess, fw_ddb_entry->iscsi_def_time2retain = cpu_to_le16(sess->time2retain); fw_ddb_entry->tgt_portal_grp = cpu_to_le16(sess->tpgt); fw_ddb_entry->mss = cpu_to_le16(conn->max_segment_size); - fw_ddb_entry->tcp_xmt_wsf = cpu_to_le16(conn->tcp_xmit_wsf); - fw_ddb_entry->tcp_rcv_wsf = cpu_to_le16(conn->tcp_recv_wsf); + fw_ddb_entry->tcp_xmt_wsf = (uint8_t) cpu_to_le32(conn->tcp_xmit_wsf); + fw_ddb_entry->tcp_rcv_wsf = (uint8_t) cpu_to_le32(conn->tcp_recv_wsf); fw_ddb_entry->ipv4_tos = conn->ipv4_tos; fw_ddb_entry->ipv6_flow_lbl = cpu_to_le16(conn->ipv6_flow_label); fw_ddb_entry->ka_timeout = cpu_to_le16(conn->keepalive_timeout); fw_ddb_entry->lcl_port = cpu_to_le16(conn->local_port); - fw_ddb_entry->stat_sn = cpu_to_le16(conn->statsn); - fw_ddb_entry->exp_stat_sn = cpu_to_le16(conn->exp_statsn); + fw_ddb_entry->stat_sn = cpu_to_le32(conn->statsn); + fw_ddb_entry->exp_stat_sn = cpu_to_le32(conn->exp_statsn); fw_ddb_entry->ddb_link = cpu_to_le16(sess->discovery_parent_type); fw_ddb_entry->chap_tbl_idx = cpu_to_le16(sess->chap_out_idx); fw_ddb_entry->tsid = cpu_to_le16(sess->tsid); @@ -5504,9 +5504,9 @@ static int qla4xxx_sysfs_ddb_is_non_persistent(struct device *dev, void *data) * If this is invoked as a result of a userspace call then the entry is marked * as nonpersistent using flash_state field. 
**/ -int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, - struct dev_db_entry *fw_ddb_entry, - uint16_t *idx, int user) +static int qla4xxx_sysfs_ddb_tgt_create(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + uint16_t *idx, int user) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct iscsi_bus_flash_conn *fnode_conn = NULL; @@ -5605,6 +5605,7 @@ static int qla4xxx_sysfs_ddb_add(struct Scsi_Host *shost, const char *buf, ql4_printk(KERN_ERR, ha, "%s: A non-persistent entry %s found\n", __func__, dev->kobj.name); + put_device(dev); goto exit_ddb_add; } @@ -6112,8 +6113,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, int parent_type, parent_index = 0xffff; int rc = 0; - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) return -EIO; @@ -6276,8 +6276,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = sprintf(buf, "\n"); break; case ISCSI_FLASHNODE_DISCOVERY_PARENT_IDX: - if ((fnode_sess->discovery_parent_idx) >= 0 && - (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES)) + if (fnode_sess->discovery_parent_idx < MAX_DDB_ENTRIES) parent_index = fnode_sess->discovery_parent_idx; rc = sprintf(buf, "%u\n", parent_index); @@ -6287,8 +6286,7 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, parent_type = ISCSI_DISC_PARENT_ISNS; else if (fnode_sess->discovery_parent_type == DDB_NO_LINK) parent_type = ISCSI_DISC_PARENT_UNKNOWN; - else if (fnode_sess->discovery_parent_type >= 0 && - fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) + else if (fnode_sess->discovery_parent_type < MAX_DDB_ENTRIES) parent_type = ISCSI_DISC_PARENT_SENDTGT; else parent_type = ISCSI_DISC_PARENT_UNKNOWN; @@ -6349,6 +6347,8 @@ qla4xxx_sysfs_ddb_get_param(struct iscsi_bus_flash_session *fnode_sess, rc = -ENOSYS; break; } + + put_device(dev); return rc; } @@ -6368,20 +6368,11 @@ qla4xxx_sysfs_ddb_set_param(struct iscsi_bus_flash_session *fnode_sess, { struct Scsi_Host *shost = iscsi_flash_session_to_shost(fnode_sess); struct scsi_qla_host *ha = to_qla_host(shost); - struct dev_db_entry *fw_ddb_entry = NULL; struct iscsi_flashnode_param_info *fnode_param; struct nlattr *attr; int rc = QLA_ERROR; uint32_t rem = len; - fw_ddb_entry = kzalloc(sizeof(*fw_ddb_entry), GFP_KERNEL); - if (!fw_ddb_entry) { - DEBUG2(ql4_printk(KERN_ERR, ha, - "%s: Unable to allocate ddb buffer\n", - __func__)); - return -ENOMEM; - } - nla_for_each_attr(attr, data, len, rem) { fnode_param = nla_data(attr); @@ -6590,16 +6581,11 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; uint16_t *ddb_cookie = NULL; - size_t ddb_size; + size_t ddb_size = 0; void *pddb = NULL; int target_id; int rc = 0; - if (!fnode_sess) { - rc = -EINVAL; - goto exit_ddb_del; - } - if (fnode_sess->is_boot_target) { rc = -EPERM; DEBUG2(ql4_printk(KERN_ERR, ha, @@ -6631,8 +6617,7 @@ static int qla4xxx_sysfs_ddb_delete(struct iscsi_bus_flash_session *fnode_sess) dev_db_start_offset += (fnode_sess->target_id * sizeof(*fw_ddb_entry)); - dev_db_start_offset += (void *)&(fw_ddb_entry->cookie) - - (void *)fw_ddb_entry; + dev_db_start_offset += offsetof(struct dev_db_entry, cookie); ddb_size = sizeof(*ddb_cookie); } diff --git a/trunk/drivers/scsi/qla4xxx/ql4_version.h b/trunk/drivers/scsi/qla4xxx/ql4_version.h index 83e0fec35d56..fe873cf7570d 100644 --- 
a/trunk/drivers/scsi/qla4xxx/ql4_version.h +++ b/trunk/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. */ -#define QLA4XXX_DRIVER_VERSION "5.03.00-k8" +#define QLA4XXX_DRIVER_VERSION "5.03.00-k9" diff --git a/trunk/drivers/scsi/scsi_debug.c b/trunk/drivers/scsi/scsi_debug.c index 5add6f4e7928..0a537a0515ca 100644 --- a/trunk/drivers/scsi/scsi_debug.c +++ b/trunk/drivers/scsi/scsi_debug.c @@ -1997,24 +1997,39 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec, return ret; } -static unsigned int map_state(sector_t lba, unsigned int *num) +static unsigned long lba_to_map_index(sector_t lba) +{ + if (scsi_debug_unmap_alignment) { + lba += scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; + } + do_div(lba, scsi_debug_unmap_granularity); + + return lba; +} + +static sector_t map_index_to_lba(unsigned long index) { - unsigned int granularity, alignment, mapped; - sector_t block, next, end; + return index * scsi_debug_unmap_granularity - + scsi_debug_unmap_alignment; +} - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - block = lba + alignment; - do_div(block, granularity); +static unsigned int map_state(sector_t lba, unsigned int *num) +{ + sector_t end; + unsigned int mapped; + unsigned long index; + unsigned long next; - mapped = test_bit(block, map_storep); + index = lba_to_map_index(lba); + mapped = test_bit(index, map_storep); if (mapped) - next = find_next_zero_bit(map_storep, map_size, block); + next = find_next_zero_bit(map_storep, map_size, index); else - next = find_next_bit(map_storep, map_size, block); + next = find_next_bit(map_storep, map_size, index); - end = next * granularity - scsi_debug_unmap_alignment; + end = min_t(sector_t, sdebug_store_sectors, map_index_to_lba(next)); *num = end - lba; return mapped; @@ -2022,47 +2037,37 @@ static unsigned int map_state(sector_t lba, unsigned int *num) static void map_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (block < map_size) - set_bit(block, map_storep); + if (index < map_size) + set_bit(index, map_storep); - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } static void unmap_region(sector_t lba, unsigned int len) { - unsigned int granularity, alignment; sector_t end = lba + len; - granularity = scsi_debug_unmap_granularity; - alignment = granularity - scsi_debug_unmap_alignment; - while (lba < end) { - sector_t block, rem; - - block = lba + alignment; - rem = do_div(block, granularity); + unsigned long index = lba_to_map_index(lba); - if (rem == 0 && lba + granularity < end && block < map_size) { - clear_bit(block, map_storep); - if (scsi_debug_lbprz) + if (lba == map_index_to_lba(index) && + lba + scsi_debug_unmap_granularity <= end && + index < map_size) { + clear_bit(index, map_storep); + if (scsi_debug_lbprz) { memset(fake_storep + - block * scsi_debug_sector_size, 0, - scsi_debug_sector_size); + lba * scsi_debug_sector_size, 0, + scsi_debug_sector_size * + scsi_debug_unmap_granularity); + } } - lba += granularity - rem; + lba = map_index_to_lba(index + 1); } } @@ -2089,7 +2094,7 @@ static int resp_write(struct scsi_cmnd *SCpnt, unsigned long 
long lba, write_lock_irqsave(&atomic_rw, iflags); ret = do_device_access(SCpnt, devip, lba, num, 1); - if (scsi_debug_unmap_granularity) + if (scsi_debug_lbp()) map_region(lba, num); write_unlock_irqrestore(&atomic_rw, iflags); if (-1 == ret) @@ -2122,7 +2127,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, write_lock_irqsave(&atomic_rw, iflags); - if (unmap && scsi_debug_unmap_granularity) { + if (unmap && scsi_debug_lbp()) { unmap_region(lba, num); goto out; } @@ -2146,7 +2151,7 @@ static int resp_write_same(struct scsi_cmnd *scmd, unsigned long long lba, fake_storep + (lba * scsi_debug_sector_size), scsi_debug_sector_size); - if (scsi_debug_unmap_granularity) + if (scsi_debug_lbp()) map_region(lba, num); out: write_unlock_irqrestore(&atomic_rw, iflags); @@ -3389,8 +3394,6 @@ static int __init scsi_debug_init(void) /* Logical Block Provisioning */ if (scsi_debug_lbp()) { - unsigned int map_bytes; - scsi_debug_unmap_max_blocks = clamp(scsi_debug_unmap_max_blocks, 0U, 0xffffffffU); @@ -3401,16 +3404,16 @@ static int __init scsi_debug_init(void) clamp(scsi_debug_unmap_granularity, 1U, 0xffffffffU); if (scsi_debug_unmap_alignment && - scsi_debug_unmap_granularity < scsi_debug_unmap_alignment) { + scsi_debug_unmap_granularity <= + scsi_debug_unmap_alignment) { printk(KERN_ERR - "%s: ERR: unmap_granularity < unmap_alignment\n", + "%s: ERR: unmap_granularity <= unmap_alignment\n", __func__); return -EINVAL; } - map_size = (sdebug_store_sectors / scsi_debug_unmap_granularity); - map_bytes = map_size >> 3; - map_storep = vmalloc(map_bytes); + map_size = lba_to_map_index(sdebug_store_sectors - 1) + 1; + map_storep = vmalloc(BITS_TO_LONGS(map_size) * sizeof(long)); printk(KERN_INFO "scsi_debug_init: %lu provisioning blocks\n", map_size); @@ -3421,7 +3424,7 @@ static int __init scsi_debug_init(void) goto free_vm; } - memset(map_storep, 0x0, map_bytes); + bitmap_zero(map_storep, map_size); /* Map first 1KB for partition table */ if (scsi_debug_num_parts) diff --git a/trunk/drivers/scsi/scsi_error.c b/trunk/drivers/scsi/scsi_error.c index c1b05a83d403..f43de1e56420 100644 --- a/trunk/drivers/scsi/scsi_error.c +++ b/trunk/drivers/scsi/scsi_error.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -791,32 +792,48 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, struct scsi_device *sdev = scmd->device; struct Scsi_Host *shost = sdev->host; DECLARE_COMPLETION_ONSTACK(done); - unsigned long timeleft; + unsigned long timeleft = timeout; struct scsi_eh_save ses; + const unsigned long stall_for = msecs_to_jiffies(100); int rtn; +retry: scsi_eh_prep_cmnd(scmd, &ses, cmnd, cmnd_size, sense_bytes); shost->eh_action = &done; scsi_log_send(scmd); scmd->scsi_done = scsi_eh_done; - shost->hostt->queuecommand(shost, scmd); - - timeleft = wait_for_completion_timeout(&done, timeout); + rtn = shost->hostt->queuecommand(shost, scmd); + if (rtn) { + if (timeleft > stall_for) { + scsi_eh_restore_cmnd(scmd, &ses); + timeleft -= stall_for; + msleep(jiffies_to_msecs(stall_for)); + goto retry; + } + /* signal not to enter either branch of the if () below */ + timeleft = 0; + rtn = NEEDS_RETRY; + } else { + timeleft = wait_for_completion_timeout(&done, timeout); + } shost->eh_action = NULL; - scsi_log_completion(scmd, SUCCESS); + scsi_log_completion(scmd, rtn); SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, timeleft: %ld\n", __func__, scmd, timeleft)); /* - * If there is time left scsi_eh_done got called, and we will - * examine the actual 
status codes to see whether the command - * actually did complete normally, else tell the host to forget - * about this command. + * If there is time left scsi_eh_done got called, and we will examine + * the actual status codes to see whether the command actually did + * complete normally, else if we have a zero return and no time left, + * the command must still be pending, so abort it and return FAILED. + * If we never actually managed to issue the command, because + * ->queuecommand() kept returning non zero, use the rtn = FAILED + * value above (so don't execute either branch of the if) */ if (timeleft) { rtn = scsi_eh_completed_normally(scmd); @@ -837,7 +854,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, rtn = FAILED; break; } - } else { + } else if (!rtn) { scsi_abort_eh_cmnd(scmd); rtn = FAILED; } diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index c31187d79343..86d522004a20 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -276,11 +276,10 @@ int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd, } EXPORT_SYMBOL(scsi_execute); - -int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, +int scsi_execute_req_flags(struct scsi_device *sdev, const unsigned char *cmd, int data_direction, void *buffer, unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout, int retries, - int *resid) + int *resid, int flags) { char *sense = NULL; int result; @@ -291,14 +290,14 @@ int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd, return DRIVER_ERROR << 24; } result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen, - sense, timeout, retries, 0, resid); + sense, timeout, retries, flags, resid); if (sshdr) scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr); kfree(sense); return result; } -EXPORT_SYMBOL(scsi_execute_req); +EXPORT_SYMBOL(scsi_execute_req_flags); /* * Function: scsi_init_cmd_errh() diff --git a/trunk/drivers/scsi/scsi_pm.c b/trunk/drivers/scsi/scsi_pm.c index 8f6b12cbd224..42539ee2cb11 100644 --- a/trunk/drivers/scsi/scsi_pm.c +++ b/trunk/drivers/scsi/scsi_pm.c @@ -144,33 +144,83 @@ static int scsi_bus_restore(struct device *dev) #ifdef CONFIG_PM_RUNTIME +static int sdev_blk_runtime_suspend(struct scsi_device *sdev, + int (*cb)(struct device *)) +{ + int err; + + err = blk_pre_runtime_suspend(sdev->request_queue); + if (err) + return err; + if (cb) + err = cb(&sdev->sdev_gendev); + blk_post_runtime_suspend(sdev->request_queue, err); + + return err; +} + +static int sdev_runtime_suspend(struct device *dev) +{ + const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int (*cb)(struct device *) = pm ? pm->runtime_suspend : NULL; + struct scsi_device *sdev = to_scsi_device(dev); + int err; + + if (sdev->request_queue->dev) + return sdev_blk_runtime_suspend(sdev, cb); + + err = scsi_dev_type_suspend(dev, cb); + if (err == -EAGAIN) + pm_schedule_suspend(dev, jiffies_to_msecs( + round_jiffies_up_relative(HZ/10))); + return err; +} + static int scsi_runtime_suspend(struct device *dev) { int err = 0; - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; dev_dbg(dev, "scsi_runtime_suspend\n"); - if (scsi_is_sdev_device(dev)) { - err = scsi_dev_type_suspend(dev, - pm ? 
pm->runtime_suspend : NULL); - if (err == -EAGAIN) - pm_schedule_suspend(dev, jiffies_to_msecs( - round_jiffies_up_relative(HZ/10))); - } + if (scsi_is_sdev_device(dev)) + err = sdev_runtime_suspend(dev); /* Insert hooks here for targets, hosts, and transport classes */ return err; } -static int scsi_runtime_resume(struct device *dev) +static int sdev_blk_runtime_resume(struct scsi_device *sdev, + int (*cb)(struct device *)) { int err = 0; + + blk_pre_runtime_resume(sdev->request_queue); + if (cb) + err = cb(&sdev->sdev_gendev); + blk_post_runtime_resume(sdev->request_queue, err); + + return err; +} + +static int sdev_runtime_resume(struct device *dev) +{ + struct scsi_device *sdev = to_scsi_device(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; + int (*cb)(struct device *) = pm ? pm->runtime_resume : NULL; + + if (sdev->request_queue->dev) + return sdev_blk_runtime_resume(sdev, cb); + else + return scsi_dev_type_resume(dev, cb); +} + +static int scsi_runtime_resume(struct device *dev) +{ + int err = 0; dev_dbg(dev, "scsi_runtime_resume\n"); if (scsi_is_sdev_device(dev)) - err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL); + err = sdev_runtime_resume(dev); /* Insert hooks here for targets, hosts, and transport classes */ @@ -185,10 +235,18 @@ static int scsi_runtime_idle(struct device *dev) /* Insert hooks here for targets, hosts, and transport classes */ - if (scsi_is_sdev_device(dev)) - err = pm_schedule_suspend(dev, 100); - else + if (scsi_is_sdev_device(dev)) { + struct scsi_device *sdev = to_scsi_device(dev); + + if (sdev->request_queue->dev) { + pm_runtime_mark_last_busy(dev); + err = pm_runtime_autosuspend(dev); + } else { + err = pm_runtime_suspend(dev); + } + } else { err = pm_runtime_suspend(dev); + } return err; } diff --git a/trunk/drivers/scsi/scsi_transport_iscsi.c b/trunk/drivers/scsi/scsi_transport_iscsi.c index 47799a33d6ca..133926b1bb78 100644 --- a/trunk/drivers/scsi/scsi_transport_iscsi.c +++ b/trunk/drivers/scsi/scsi_transport_iscsi.c @@ -1019,8 +1019,7 @@ static int flashnode_match_index(struct device *dev, void *data) /** * iscsi_get_flashnode_by_index -finds flashnode session entry by index * @shost: pointer to host data - * @data: pointer to data containing value to use for comparison - * @fn: function pointer that does actual comparison + * @idx: index to match * * Finds the flashnode session object for the passed index * @@ -1029,13 +1028,13 @@ static int flashnode_match_index(struct device *dev, void *data) * %NULL on failure */ static struct iscsi_bus_flash_session * -iscsi_get_flashnode_by_index(struct Scsi_Host *shost, void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_get_flashnode_by_index(struct Scsi_Host *shost, uint32_t idx) { struct iscsi_bus_flash_session *fnode_sess = NULL; struct device *dev; - dev = device_find_child(&shost->shost_gendev, data, fn); + dev = device_find_child(&shost->shost_gendev, &idx, + flashnode_match_index); if (dev) fnode_sess = iscsi_dev_to_flash_session(dev); @@ -1059,18 +1058,13 @@ struct device * iscsi_find_flashnode_sess(struct Scsi_Host *shost, void *data, int (*fn)(struct device *dev, void *data)) { - struct device *dev; - - dev = device_find_child(&shost->shost_gendev, data, fn); - return dev; + return device_find_child(&shost->shost_gendev, data, fn); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); /** * iscsi_find_flashnode_conn - finds flashnode connection entry * @fnode_sess: pointer to parent flashnode session entry - * @data: pointer to data containing value to 
use for comparison - * @fn: function pointer that does actual comparison * * Finds the flashnode connection object comparing the data passed using logic * defined in passed function pointer @@ -1080,14 +1074,10 @@ EXPORT_SYMBOL_GPL(iscsi_find_flashnode_sess); * %NULL on failure */ struct device * -iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess, - void *data, - int (*fn)(struct device *dev, void *data)) +iscsi_find_flashnode_conn(struct iscsi_bus_flash_session *fnode_sess) { - struct device *dev; - - dev = device_find_child(&fnode_sess->dev, data, fn); - return dev; + return device_find_child(&fnode_sess->dev, NULL, + iscsi_is_flashnode_conn_dev); } EXPORT_SYMBOL_GPL(iscsi_find_flashnode_conn); @@ -2808,7 +2798,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->set_flashnode_param) { @@ -2824,25 +2814,27 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.set_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.set_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.set_flashnode.host_no); + __func__, idx, ev->u.set_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->set_flashnode_param(fnode_sess, fnode_conn, data, len); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2891,7 +2883,7 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport, { struct Scsi_Host *shost; struct iscsi_bus_flash_session *fnode_sess; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->del_flashnode) { @@ -2907,17 +2899,17 @@ static int iscsi_del_flashnode(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.del_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.del_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.del_flashnode.host_no); + __func__, idx, ev->u.del_flashnode.host_no); err = -ENODEV; goto put_host; } err = transport->del_flashnode(fnode_sess); + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2933,7 +2925,7 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->login_flashnode) { @@ -2949,25 +2941,27 @@ static int iscsi_login_flashnode(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.login_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.login_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, 
ev->u.login_flashnode.host_no); + __func__, idx, ev->u.login_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->login_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -2983,7 +2977,7 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport, struct iscsi_bus_flash_session *fnode_sess; struct iscsi_bus_flash_conn *fnode_conn; struct device *dev; - uint32_t *idx; + uint32_t idx; int err = 0; if (!transport->logout_flashnode) { @@ -2999,26 +2993,28 @@ static int iscsi_logout_flashnode(struct iscsi_transport *transport, goto put_host; } - idx = &ev->u.logout_flashnode.flashnode_idx; - fnode_sess = iscsi_get_flashnode_by_index(shost, idx, - flashnode_match_index); + idx = ev->u.logout_flashnode.flashnode_idx; + fnode_sess = iscsi_get_flashnode_by_index(shost, idx); if (!fnode_sess) { pr_err("%s could not find flashnode %u for host no %u\n", - __func__, *idx, ev->u.logout_flashnode.host_no); + __func__, idx, ev->u.logout_flashnode.host_no); err = -ENODEV; goto put_host; } - dev = iscsi_find_flashnode_conn(fnode_sess, NULL, - iscsi_is_flashnode_conn_dev); + dev = iscsi_find_flashnode_conn(fnode_sess); if (!dev) { err = -ENODEV; - goto put_host; + goto put_sess; } fnode_conn = iscsi_dev_to_flash_conn(dev); err = transport->logout_flashnode(fnode_sess, fnode_conn); + put_device(dev); + +put_sess: + put_device(&fnode_sess->dev); put_host: scsi_host_put(shost); @@ -3985,8 +3981,10 @@ static __init int iscsi_transport_init(void) } iscsi_eh_timer_workq = create_singlethread_workqueue("iscsi_eh"); - if (!iscsi_eh_timer_workq) + if (!iscsi_eh_timer_workq) { + err = -ENOMEM; goto release_nls; + } return 0; diff --git a/trunk/drivers/scsi/sd.c b/trunk/drivers/scsi/sd.c index e6689776b4f6..c1c555242d0d 100644 --- a/trunk/drivers/scsi/sd.c +++ b/trunk/drivers/scsi/sd.c @@ -142,6 +142,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, char *buffer_data; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + const char *temp = "temporary "; int len; if (sdp->type != TYPE_DISK) @@ -150,6 +151,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, * it's not worth the risk */ return -EINVAL; + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { + buf += sizeof(temp) - 1; + sdkp->cache_override = 1; + } else { + sdkp->cache_override = 0; + } + for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { len = strlen(sd_cache_types[i]); if (strncmp(sd_cache_types[i], buf, len) == 0 && @@ -162,6 +170,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, return -EINVAL; rcd = ct & 0x01 ? 1 : 0; wce = ct & 0x02 ? 1 : 0; + + if (sdkp->cache_override) { + sdkp->WCE = wce; + sdkp->RCD = rcd; + return count; + } + if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, SD_MAX_RETRIES, &data, NULL)) return -EINVAL; @@ -1121,10 +1136,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) sdev = sdkp->device; - retval = scsi_autopm_get_device(sdev); - if (retval) - goto error_autopm; - /* * If the device is in error recovery, wait until it is done. * If the device is offline, then disallow any access to it. 
@@ -1169,8 +1180,6 @@ static int sd_open(struct block_device *bdev, fmode_t mode) return 0; error_out: - scsi_autopm_put_device(sdev); -error_autopm: scsi_disk_put(sdkp); return retval; } @@ -1205,7 +1214,6 @@ static void sd_release(struct gendisk *disk, fmode_t mode) * XXX is followed by a "rmmod sd_mod"? */ - scsi_autopm_put_device(sdev); scsi_disk_put(sdkp); } @@ -1366,14 +1374,9 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing) retval = -ENODEV; if (scsi_block_when_processing_errors(sdp)) { - retval = scsi_autopm_get_device(sdp); - if (retval) - goto out; - sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL); retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES, sshdr); - scsi_autopm_put_device(sdp); } /* failed to execute TUR, assume media not present */ @@ -1423,8 +1426,9 @@ static int sd_sync_cache(struct scsi_disk *sdkp) * Leave the rest of the command zero to indicate * flush everything. */ - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_FLUSH_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, + &sshdr, SD_FLUSH_TIMEOUT, + SD_MAX_RETRIES, NULL, REQ_PM); if (res == 0) break; } @@ -2318,6 +2322,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; + + if (sdkp->cache_override) + return; + first_len = 4; if (sdp->skip_ms_page_8) { if (sdp->type == TYPE_RBC) @@ -2811,6 +2819,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sdkp->capacity = 0; sdkp->media_present = 1; sdkp->write_prot = 0; + sdkp->cache_override = 0; sdkp->WCE = 0; sdkp->RCD = 0; sdkp->ATO = 0; @@ -2837,6 +2846,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n", sdp->removable ? 
"removable " : ""); + blk_pm_runtime_init(sdp->request_queue, dev); scsi_autopm_put_device(sdp); put_device(&sdkp->dev); } @@ -3020,8 +3030,8 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start) if (!scsi_device_online(sdp)) return -ENODEV; - res = scsi_execute_req(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, - SD_TIMEOUT, SD_MAX_RETRIES, NULL); + res = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr, + SD_TIMEOUT, SD_MAX_RETRIES, NULL, REQ_PM); if (res) { sd_printk(KERN_WARNING, sdkp, "START_STOP FAILED\n"); sd_print_result(sdkp, res); diff --git a/trunk/drivers/scsi/sd.h b/trunk/drivers/scsi/sd.h index 74a1e4ca5401..2386aeb41fe8 100644 --- a/trunk/drivers/scsi/sd.h +++ b/trunk/drivers/scsi/sd.h @@ -73,6 +73,7 @@ struct scsi_disk { u8 protection_type;/* Data Integrity Field */ u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ diff --git a/trunk/drivers/scsi/sd_dif.c b/trunk/drivers/scsi/sd_dif.c index 04998f36e507..6174ca4ea275 100644 --- a/trunk/drivers/scsi/sd_dif.c +++ b/trunk/drivers/scsi/sd_dif.c @@ -93,14 +93,6 @@ static int sd_dif_type1_verify(struct blk_integrity_exchg *bix, csum_fn *fn) if (sdt->app_tag == 0xffff) return 0; - /* Bad ref tag received from disk */ - if (sdt->ref_tag == 0xffffffff) { - printk(KERN_ERR - "%s: bad phys ref tag on sector %lu\n", - bix->disk_name, (unsigned long)sector); - return -EIO; - } - if (be32_to_cpu(sdt->ref_tag) != (sector & 0xffffffff)) { printk(KERN_ERR "%s: ref tag error on sector %lu (rcvd %u)\n", diff --git a/trunk/drivers/scsi/ufs/Kconfig b/trunk/drivers/scsi/ufs/Kconfig index 0371047c5922..35faf24c6044 100644 --- a/trunk/drivers/scsi/ufs/Kconfig +++ b/trunk/drivers/scsi/ufs/Kconfig @@ -57,3 +57,14 @@ config SCSI_UFSHCD_PCI If you have a controller with this interface, say Y or M here. If unsure, say N. + +config SCSI_UFSHCD_PLATFORM + tristate "Platform bus based UFS Controller support" + depends on SCSI_UFSHCD + ---help--- + This selects the UFS host controller support. Select this if + you have an UFS controller on Platform bus. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. diff --git a/trunk/drivers/scsi/ufs/Makefile b/trunk/drivers/scsi/ufs/Makefile index 9eda0dfbd6df..1e5bd48457d6 100644 --- a/trunk/drivers/scsi/ufs/Makefile +++ b/trunk/drivers/scsi/ufs/Makefile @@ -1,3 +1,4 @@ # UFSHCD makefile obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o +obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o diff --git a/trunk/drivers/scsi/ufs/ufshcd-pltfrm.c b/trunk/drivers/scsi/ufs/ufshcd-pltfrm.c new file mode 100644 index 000000000000..03319acd9c72 --- /dev/null +++ b/trunk/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -0,0 +1,217 @@ +/* + * Universal Flash Storage Host controller Platform bus based glue driver + * + * This code is based on drivers/scsi/ufs/ufshcd-pltfrm.c + * Copyright (C) 2011-2013 Samsung India Software Operations + * + * Authors: + * Santosh Yaraganavi + * Vinayak Holikatti + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. 
+ * See the COPYING file in the top-level directory or visit + * + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This program is provided "AS IS" and "WITH ALL FAULTS" and + * without warranty of any kind. You are solely responsible for + * determining the appropriateness of using and distributing + * the program and assume all risks associated with your exercise + * of rights with respect to the program, including but not limited + * to infringement of third party rights, the risks and costs of + * program errors, damage to or loss of data, programs or equipment, + * and unavailability or interruption of operations. Under no + * circumstances will the contributor of this Program be liable for + * any damages of any kind arising from your use or distribution of + * this program. + */ + +#include "ufshcd.h" +#include + +#ifdef CONFIG_PM +/** + * ufshcd_pltfrm_suspend - suspend power management function + * @dev: pointer to device handle + * + * + * Returns 0 + */ +static int ufshcd_pltfrm_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_suspend + * 2. Do bus specific power management + */ + + disable_irq(hba->irq); + + return 0; +} + +/** + * ufshcd_pltfrm_resume - resume power management function + * @dev: pointer to device handle + * + * Returns 0 + */ +static int ufshcd_pltfrm_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ufs_hba *hba = platform_get_drvdata(pdev); + + /* + * TODO: + * 1. Call ufshcd_resume. + * 2. 
Do bus specific wake up + */ + + enable_irq(hba->irq); + + return 0; +} +#else +#define ufshcd_pltfrm_suspend NULL +#define ufshcd_pltfrm_resume NULL +#endif + +/** + * ufshcd_pltfrm_probe - probe routine of the driver + * @pdev: pointer to Platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_probe(struct platform_device *pdev) +{ + struct ufs_hba *hba; + void __iomem *mmio_base; + struct resource *mem_res; + struct resource *irq_res; + resource_size_t mem_size; + int err; + struct device *dev = &pdev->dev; + + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) { + dev_err(&pdev->dev, + "Memory resource not available\n"); + err = -ENODEV; + goto out_error; + } + + mem_size = resource_size(mem_res); + if (!request_mem_region(mem_res->start, mem_size, "ufshcd")) { + dev_err(&pdev->dev, + "Cannot reserve the memory resource\n"); + err = -EBUSY; + goto out_error; + } + + mmio_base = ioremap_nocache(mem_res->start, mem_size); + if (!mmio_base) { + dev_err(&pdev->dev, "memory map failed\n"); + err = -ENOMEM; + goto out_release_regions; + } + + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!irq_res) { + dev_err(&pdev->dev, "IRQ resource not available\n"); + err = -ENODEV; + goto out_iounmap; + } + + err = dma_set_coherent_mask(dev, dev->coherent_dma_mask); + if (err) { + dev_err(&pdev->dev, "set dma mask failed\n"); + goto out_iounmap; + } + + err = ufshcd_init(&pdev->dev, &hba, mmio_base, irq_res->start); + if (err) { + dev_err(&pdev->dev, "Intialization failed\n"); + goto out_iounmap; + } + + platform_set_drvdata(pdev, hba); + + return 0; + +out_iounmap: + iounmap(mmio_base); +out_release_regions: + release_mem_region(mem_res->start, mem_size); +out_error: + return err; +} + +/** + * ufshcd_pltfrm_remove - remove platform driver routine + * @pdev: pointer to platform device handle + * + * Returns 0 on success, non-zero value on failure + */ +static int ufshcd_pltfrm_remove(struct platform_device *pdev) +{ + struct resource *mem_res; + resource_size_t mem_size; + struct ufs_hba *hba = platform_get_drvdata(pdev); + + disable_irq(hba->irq); + + /* Some buggy controllers raise interrupt after + * the resources are removed. 
So first we unregister the + * irq handler and then the resources used by driver + */ + + free_irq(hba->irq, hba); + ufshcd_remove(hba); + mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem_res) + dev_err(&pdev->dev, "ufshcd: Memory resource not available\n"); + else { + mem_size = resource_size(mem_res); + release_mem_region(mem_res->start, mem_size); + } + platform_set_drvdata(pdev, NULL); + return 0; +} + +static const struct of_device_id ufs_of_match[] = { + { .compatible = "jedec,ufs-1.1"}, +}; + +static const struct dev_pm_ops ufshcd_dev_pm_ops = { + .suspend = ufshcd_pltfrm_suspend, + .resume = ufshcd_pltfrm_resume, +}; + +static struct platform_driver ufshcd_pltfrm_driver = { + .probe = ufshcd_pltfrm_probe, + .remove = ufshcd_pltfrm_remove, + .driver = { + .name = "ufshcd", + .owner = THIS_MODULE, + .pm = &ufshcd_dev_pm_ops, + .of_match_table = ufs_of_match, + }, +}; + +module_platform_driver(ufshcd_pltfrm_driver); + +MODULE_AUTHOR("Santosh Yaragnavi "); +MODULE_AUTHOR("Vinayak Holikatti "); +MODULE_DESCRIPTION("UFS host controller Pltform bus based glue driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(UFSHCD_DRIVER_VERSION); diff --git a/trunk/drivers/scsi/ufs/ufshcd.c b/trunk/drivers/scsi/ufs/ufshcd.c index 60fd40c4e4c2..c32a478df81b 100644 --- a/trunk/drivers/scsi/ufs/ufshcd.c +++ b/trunk/drivers/scsi/ufs/ufshcd.c @@ -478,7 +478,7 @@ static void ufshcd_compose_upiu(struct ufshcd_lrb *lrbp) ucd_cmd_ptr->header.dword_2 = 0; ucd_cmd_ptr->exp_data_transfer_len = - cpu_to_be32(lrbp->cmd->transfersize); + cpu_to_be32(lrbp->cmd->sdb.length); memcpy(ucd_cmd_ptr->cdb, lrbp->cmd->cmnd, diff --git a/trunk/drivers/spi/Kconfig b/trunk/drivers/spi/Kconfig index 141d8c10b764..92a9345d7a6b 100644 --- a/trunk/drivers/spi/Kconfig +++ b/trunk/drivers/spi/Kconfig @@ -62,7 +62,7 @@ config SPI_ALTERA config SPI_ATH79 tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver" - depends on ATH79 && GENERIC_GPIO + depends on ATH79 && GPIOLIB select SPI_BITBANG help This enables support for the SPI controller present on the @@ -175,7 +175,7 @@ config SPI_FALCON config SPI_GPIO tristate "GPIO-based bitbanging SPI Master" - depends on GENERIC_GPIO + depends on GPIOLIB select SPI_BITBANG help This simple GPIO bitbanging SPI master uses the arch-neutral GPIO @@ -259,7 +259,7 @@ config SPI_FSL_ESPI config SPI_OC_TINY tristate "OpenCores tiny SPI" - depends on GENERIC_GPIO + depends on GPIOLIB select SPI_BITBANG help This is the driver for OpenCores tiny SPI master controller. 
@@ -457,7 +457,7 @@ config SPI_TOPCLIFF_PCH config SPI_TXX9 tristate "Toshiba TXx9 SPI controller" - depends on GENERIC_GPIO && CPU_TX49XX + depends on GPIOLIB && CPU_TX49XX help SPI driver for Toshiba TXx9 MIPS SoCs diff --git a/trunk/drivers/spi/spi-atmel.c b/trunk/drivers/spi/spi-atmel.c index 787bd2c22bca..380387a47b1d 100644 --- a/trunk/drivers/spi/spi-atmel.c +++ b/trunk/drivers/spi/spi-atmel.c @@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master, } if (xfer->tx_buf) - spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); + if (xfer->bits_per_word > 8) + spi_writel(as, TDR, *(u16 *)(xfer->tx_buf)); + else + spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); else spi_writel(as, TDR, 0); dev_dbg(master->dev.parent, - " start pio xfer %p: len %u tx %p rx %p\n", - xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); + " start pio xfer %p: len %u tx %p rx %p bitpw %d\n", + xfer, xfer->len, xfer->tx_buf, xfer->rx_buf, + xfer->bits_per_word); /* Enable relevant interrupts */ spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); @@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer) { u8 *txp; u8 *rxp; + u16 *txp16; + u16 *rxp16; unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; if (xfer->rx_buf) { - rxp = ((u8 *)xfer->rx_buf) + xfer_pos; - *rxp = spi_readl(as, RDR); + if (xfer->bits_per_word > 8) { + rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos); + *rxp16 = spi_readl(as, RDR); + } else { + rxp = ((u8 *)xfer->rx_buf) + xfer_pos; + *rxp = spi_readl(as, RDR); + } } else { spi_readl(as, RDR); } - - as->current_remaining_bytes--; + if (xfer->bits_per_word > 8) { + as->current_remaining_bytes -= 2; + if (as->current_remaining_bytes < 0) + as->current_remaining_bytes = 0; + } else { + as->current_remaining_bytes--; + } if (as->current_remaining_bytes) { if (xfer->tx_buf) { - txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; - spi_writel(as, TDR, *txp); + if (xfer->bits_per_word > 8) { + txp16 = (u16 *)(((u8 *)xfer->tx_buf) + + xfer_pos + 2); + spi_writel(as, TDR, *txp16); + } else { + txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; + spi_writel(as, TDR, *txp); + } } else { spi_writel(as, TDR, 0); } @@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg) } } + if (xfer->bits_per_word > 8) { + if (xfer->len % 2) { + dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n"); + return -EINVAL; + } + } + /* FIXME implement these protocol options!! 
*/ - if (xfer->speed_hz) { - dev_dbg(&spi->dev, "no protocol options yet\n"); + if (xfer->speed_hz < spi->max_speed_hz) { + dev_dbg(&spi->dev, "can't change speed in transfer\n"); return -ENOPROTOOPT; } diff --git a/trunk/drivers/spi/spi-davinci.c b/trunk/drivers/spi/spi-davinci.c index 2e8f24a1fb95..50b13c9b1ab6 100644 --- a/trunk/drivers/spi/spi-davinci.c +++ b/trunk/drivers/spi/spi-davinci.c @@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = { }, { }, }; -MODULE_DEVICE_TABLE(of, davini_spi_of_match); +MODULE_DEVICE_TABLE(of, davinci_spi_of_match); /** * spi_davinci_get_pdata - Get platform data from DTS binding diff --git a/trunk/drivers/spi/spi-tegra20-sflash.c b/trunk/drivers/spi/spi-tegra20-sflash.c index d65c000efe35..09df8e22dba0 100644 --- a/trunk/drivers/spi/spi-tegra20-sflash.c +++ b/trunk/drivers/spi/spi-tegra20-sflash.c @@ -489,11 +489,6 @@ static int tegra_sflash_probe(struct platform_device *pdev) tegra_sflash_parse_dt(tsd); r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!r) { - dev_err(&pdev->dev, "No IO memory resource\n"); - ret = -ENODEV; - goto exit_free_master; - } tsd->base = devm_ioremap_resource(&pdev->dev, r); if (IS_ERR(tsd->base)) { ret = PTR_ERR(tsd->base); diff --git a/trunk/drivers/spi/spi.c b/trunk/drivers/spi/spi.c index 163fd802b7ac..32b7bb111eb6 100644 --- a/trunk/drivers/spi/spi.c +++ b/trunk/drivers/spi/spi.c @@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master) spi->dev.parent = &master->dev; spi->dev.bus = &spi_bus_type; spi->dev.release = spidev_release; - spi->cs_gpio = -EINVAL; + spi->cs_gpio = -ENOENT; device_initialize(&spi->dev); return spi; } @@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master) nb = of_gpio_named_count(np, "cs-gpios"); master->num_chipselect = max(nb, (int)master->num_chipselect); - if (nb < 1) + /* Return error only for an incorrectly formed cs-gpios property */ + if (nb == 0 || nb == -ENOENT) return 0; + else if (nb < 0) + return nb; cs = devm_kzalloc(&master->dev, sizeof(int) * master->num_chipselect, @@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master) return -ENOMEM; for (i = 0; i < master->num_chipselect; i++) - cs[i] = -EINVAL; + cs[i] = -ENOENT; for (i = 0; i < nb; i++) cs[i] = of_get_named_gpio(np, "cs-gpios", i); diff --git a/trunk/drivers/ssb/driver_mipscore.c b/trunk/drivers/ssb/driver_mipscore.c index fa385a368a56..09077067b0c8 100644 --- a/trunk/drivers/ssb/driver_mipscore.c +++ b/trunk/drivers/ssb/driver_mipscore.c @@ -18,7 +18,7 @@ #include "ssb_private.h" -static const char *part_probes[] = { "bcm47xxpart", NULL }; +static const char * const part_probes[] = { "bcm47xxpart", NULL }; static struct physmap_flash_data ssb_pflash_data = { .part_probe_types = part_probes, diff --git a/trunk/drivers/staging/Kconfig b/trunk/drivers/staging/Kconfig index 4e8a1794f50a..aefe820a8005 100644 --- a/trunk/drivers/staging/Kconfig +++ b/trunk/drivers/staging/Kconfig @@ -72,10 +72,10 @@ source "drivers/staging/sep/Kconfig" source "drivers/staging/iio/Kconfig" -source "drivers/staging/zram/Kconfig" - source "drivers/staging/zsmalloc/Kconfig" +source "drivers/staging/zram/Kconfig" + source "drivers/staging/wlags49_h2/Kconfig" source "drivers/staging/wlags49_h25/Kconfig" diff --git a/trunk/drivers/staging/android/Kconfig b/trunk/drivers/staging/android/Kconfig index 9f61d46da157..c0c95be0f969 100644 --- a/trunk/drivers/staging/android/Kconfig +++ b/trunk/drivers/staging/android/Kconfig @@ -54,7 +54,7 @@ 
config ANDROID_TIMED_OUTPUT config ANDROID_TIMED_GPIO tristate "Android timed gpio driver" - depends on GENERIC_GPIO && ANDROID_TIMED_OUTPUT + depends on GPIOLIB && ANDROID_TIMED_OUTPUT default n config ANDROID_LOW_MEMORY_KILLER diff --git a/trunk/drivers/staging/android/logger.c b/trunk/drivers/staging/android/logger.c index b040200a5a55..9bd874789ce5 100644 --- a/trunk/drivers/staging/android/logger.c +++ b/trunk/drivers/staging/android/logger.c @@ -242,7 +242,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log, * 'log->buffer' which contains the first entry readable by 'euid' */ static size_t get_next_entry_by_uid(struct logger_log *log, - size_t off, uid_t euid) + size_t off, kuid_t euid) { while (off != log->w_off) { struct logger_entry *entry; @@ -251,7 +251,7 @@ static size_t get_next_entry_by_uid(struct logger_log *log, entry = get_entry_header(log, off, &scratch); - if (entry->euid == euid) + if (uid_eq(entry->euid, euid)) return off; next_len = sizeof(struct logger_entry) + entry->len; diff --git a/trunk/drivers/staging/android/logger.h b/trunk/drivers/staging/android/logger.h index cc6bbd99c8e0..70af7d805dff 100644 --- a/trunk/drivers/staging/android/logger.h +++ b/trunk/drivers/staging/android/logger.h @@ -66,7 +66,7 @@ struct logger_entry { __s32 tid; __s32 sec; __s32 nsec; - uid_t euid; + kuid_t euid; char msg[0]; }; diff --git a/trunk/drivers/staging/comedi/Kconfig b/trunk/drivers/staging/comedi/Kconfig index 7871579bb83d..87e852a0ef49 100644 --- a/trunk/drivers/staging/comedi/Kconfig +++ b/trunk/drivers/staging/comedi/Kconfig @@ -981,6 +981,7 @@ config COMEDI_ME_DAQ config COMEDI_NI_6527 tristate "NI 6527 support" + depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for the National Instruments 6527 PCI card @@ -990,6 +991,7 @@ config COMEDI_NI_6527 config COMEDI_NI_65XX tristate "NI 65xx static dio PCI card support" + depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for National Instruments 65xx static dio boards. @@ -1003,6 +1005,7 @@ config COMEDI_NI_65XX config COMEDI_NI_660X tristate "NI 660x counter/timer PCI card support" + depends on HAS_DMA select COMEDI_NI_TIOCMD ---help--- Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602, @@ -1013,6 +1016,7 @@ config COMEDI_NI_660X config COMEDI_NI_670X tristate "NI 670x PCI card support" + depends on HAS_DMA select COMEDI_MITE ---help--- Enable support for National Instruments PCI-6703 and PCI-6704 @@ -1022,6 +1026,7 @@ config COMEDI_NI_670X config COMEDI_NI_LABPC_PCI tristate "NI Lab-PC PCI-1200 support" + depends on HAS_DMA select COMEDI_NI_LABPC select COMEDI_MITE ---help--- @@ -1032,6 +1037,7 @@ config COMEDI_NI_LABPC_PCI config COMEDI_NI_PCIDIO tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support" + depends on HAS_DMA select COMEDI_MITE select COMEDI_8255 ---help--- @@ -1043,6 +1049,7 @@ config COMEDI_NI_PCIDIO config COMEDI_NI_PCIMIO tristate "NI PCI-MIO-E series and M series support" + depends on HAS_DMA select COMEDI_NI_TIOCMD select COMEDI_8255 select COMEDI_FC @@ -1095,10 +1102,12 @@ config COMEDI_SSV_DNP called ssv_dnp. 
config COMEDI_MITE + depends on HAS_DMA tristate config COMEDI_NI_TIOCMD tristate + depends on HAS_DMA select COMEDI_NI_TIO select COMEDI_MITE diff --git a/trunk/drivers/staging/comedi/comedi_buf.c b/trunk/drivers/staging/comedi/comedi_buf.c index ca709901fb3e..d4be0e68509b 100644 --- a/trunk/drivers/staging/comedi/comedi_buf.c +++ b/trunk/drivers/staging/comedi/comedi_buf.c @@ -51,10 +51,12 @@ static void __comedi_buf_free(struct comedi_device *dev, clear_bit(PG_reserved, &(virt_to_page(buf->virt_addr)->flags)); if (s->async_dma_dir != DMA_NONE) { +#ifdef CONFIG_HAS_DMA dma_free_coherent(dev->hw_dev, PAGE_SIZE, buf->virt_addr, buf->dma_addr); +#endif } else { free_page((unsigned long)buf->virt_addr); } @@ -74,6 +76,12 @@ static void __comedi_buf_alloc(struct comedi_device *dev, struct comedi_buf_page *buf; unsigned i; + if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) { + dev_err(dev->class_dev, + "dma buffer allocation not supported\n"); + return; + } + async->buf_page_list = vzalloc(sizeof(*buf) * n_pages); if (async->buf_page_list) pages = vmalloc(sizeof(struct page *) * n_pages); @@ -84,11 +92,15 @@ static void __comedi_buf_alloc(struct comedi_device *dev, for (i = 0; i < n_pages; i++) { buf = &async->buf_page_list[i]; if (s->async_dma_dir != DMA_NONE) +#ifdef CONFIG_HAS_DMA buf->virt_addr = dma_alloc_coherent(dev->hw_dev, PAGE_SIZE, &buf->dma_addr, GFP_KERNEL | __GFP_COMP); +#else + break; +#endif else buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL); if (!buf->virt_addr) diff --git a/trunk/drivers/staging/comedi/comedi_fops.c b/trunk/drivers/staging/comedi/comedi_fops.c index 00f2547024ec..924c54c9c31f 100644 --- a/trunk/drivers/staging/comedi/comedi_fops.c +++ b/trunk/drivers/staging/comedi/comedi_fops.c @@ -246,9 +246,6 @@ static int resize_async_buffer(struct comedi_device *dev, return -EBUSY; } - if (!async->prealloc_buf) - return -EINVAL; - /* make sure buffer is an integral number of pages * (we round up) */ new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK; diff --git a/trunk/drivers/staging/comedi/drivers/ni_labpc.c b/trunk/drivers/staging/comedi/drivers/ni_labpc.c index 3d978f34d212..77a7bb632580 100644 --- a/trunk/drivers/staging/comedi/drivers/ni_labpc.c +++ b/trunk/drivers/staging/comedi/drivers/ni_labpc.c @@ -976,8 +976,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s) /* clear flip-flop to make sure 2-byte registers for * count and address get set correctly */ clear_dma_ff(devpriv->dma_chan); - set_dma_addr(devpriv->dma_chan, - virt_to_bus(devpriv->dma_buffer)); + set_dma_addr(devpriv->dma_chan, devpriv->dma_addr); /* set appropriate size of transfer */ devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd); if (cmd->stop_src == TRIG_COUNT && @@ -1089,7 +1088,7 @@ static void labpc_drain_dma(struct comedi_device *dev) devpriv->count -= num_points; /* set address and count for next transfer */ - set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer)); + set_dma_addr(devpriv->dma_chan, devpriv->dma_addr); set_dma_count(devpriv->dma_chan, leftover * sample_size); release_dma_lock(flags); @@ -1741,6 +1740,9 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it) unsigned long dma_flags; devpriv->dma_chan = dma_chan; + devpriv->dma_addr = + virt_to_bus(devpriv->dma_buffer); + dma_flags = claim_dma_lock(); disable_dma(devpriv->dma_chan); set_dma_mode(devpriv->dma_chan, DMA_MODE_READ); diff --git a/trunk/drivers/staging/comedi/drivers/ni_labpc.h 
b/trunk/drivers/staging/comedi/drivers/ni_labpc.h index 615f16f271c0..4b691f5a9965 100644 --- a/trunk/drivers/staging/comedi/drivers/ni_labpc.h +++ b/trunk/drivers/staging/comedi/drivers/ni_labpc.h @@ -82,6 +82,7 @@ struct labpc_private { unsigned int divisor_b1; unsigned int dma_chan; /* dma channel to use */ u16 *dma_buffer; /* buffer ai will dma into */ + phys_addr_t dma_addr; /* transfer size in bytes for current transfer */ unsigned int dma_transfer_size; /* we are using dma/fifo-half-full/etc. */ diff --git a/trunk/drivers/staging/comedi/drivers/ni_mio_common.c b/trunk/drivers/staging/comedi/drivers/ni_mio_common.c index a46d579016d9..8c5dee9b3b05 100644 --- a/trunk/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/trunk/drivers/staging/comedi/drivers/ni_mio_common.c @@ -310,9 +310,11 @@ static int ni_gpct_insn_read(struct comedi_device *dev, static int ni_gpct_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); +#ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s); static int ni_gpct_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd); +#endif static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s); static void handle_gpct_interrupt(struct comedi_device *dev, @@ -4617,9 +4619,7 @@ static int ni_E_init(struct comedi_device *dev) for (j = 0; j < NUM_GPCT; ++j) { s = &dev->subdevices[NI_GPCT_SUBDEV(j)]; s->type = COMEDI_SUBD_COUNTER; - s->subdev_flags = - SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_CMD_READ - /* | SDF_CMD_WRITE */ ; + s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL; s->n_chan = 3; if (board->reg_type & ni_reg_m_series_mask) s->maxdata = 0xffffffff; @@ -4628,11 +4628,14 @@ static int ni_E_init(struct comedi_device *dev) s->insn_read = &ni_gpct_insn_read; s->insn_write = &ni_gpct_insn_write; s->insn_config = &ni_gpct_insn_config; +#ifdef PCIDMA + s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */; s->do_cmd = &ni_gpct_cmd; s->len_chanlist = 1; s->do_cmdtest = &ni_gpct_cmdtest; s->cancel = &ni_gpct_cancel; s->async_dma_dir = DMA_BIDIRECTIONAL; +#endif s->private = &devpriv->counter_dev->counters[j]; devpriv->counter_dev->counters[j].chip_index = 0; @@ -5216,10 +5219,10 @@ static int ni_gpct_insn_write(struct comedi_device *dev, return ni_tio_winsn(counter, insn, data); } +#ifdef PCIDMA static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int retval; -#ifdef PCIDMA struct ni_gpct *counter = s->private; /* const struct comedi_cmd *cmd = &s->async->cmd; */ @@ -5233,23 +5236,20 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); ni_e_series_enable_second_irq(dev, counter->counter_index, 1); retval = ni_tio_cmd(counter, s->async); -#else - retval = -ENOTSUPP; -#endif return retval; } +#endif +#ifdef PCIDMA static int ni_gpct_cmdtest(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { -#ifdef PCIDMA struct ni_gpct *counter = s->private; return ni_tio_cmdtest(counter, cmd); -#else return -ENOTSUPP; -#endif } +#endif static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { diff --git a/trunk/drivers/staging/dwc2/Kconfig b/trunk/drivers/staging/dwc2/Kconfig index f0b4739c65a1..d15d9d58e5ac 100644 --- a/trunk/drivers/staging/dwc2/Kconfig +++ b/trunk/drivers/staging/dwc2/Kconfig @@ -2,7 +2,6 @@ config USB_DWC2 tristate "DesignWare 
USB2 DRD Core Support" depends on USB depends on VIRT_TO_BUS - select USB_OTG_UTILS help Say Y or M here if your system has a Dual Role HighSpeed USB controller based on the DesignWare HSOTG IP Core. @@ -39,6 +38,7 @@ config USB_DWC2_TRACK_MISSED_SOFS bool "Enable Missed SOF Tracking" help Say Y here to enable logging of missed SOF events to the dmesg log. + WARNING: This feature is still experimental. If in doubt, say N. config USB_DWC2_DEBUG_PERIODIC diff --git a/trunk/drivers/staging/dwc2/hcd_intr.c b/trunk/drivers/staging/dwc2/hcd_intr.c index 6e5dbed6ccec..e24062f0a49e 100644 --- a/trunk/drivers/staging/dwc2/hcd_intr.c +++ b/trunk/drivers/staging/dwc2/hcd_intr.c @@ -56,8 +56,6 @@ static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) { #ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS -#warning Compiling code to track missed SOFs - u16 curr_frame_number = hsotg->frame_number; if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) { diff --git a/trunk/drivers/staging/dwc2/platform.c b/trunk/drivers/staging/dwc2/platform.c index 1f3d581a1078..44cce2fa6361 100644 --- a/trunk/drivers/staging/dwc2/platform.c +++ b/trunk/drivers/staging/dwc2/platform.c @@ -95,6 +95,14 @@ static int dwc2_driver_probe(struct platform_device *dev) hsotg->dev = &dev->dev; + /* + * Use reasonable defaults so platforms don't have to provide these. + */ + if (!dev->dev.dma_mask) + dev->dev.dma_mask = &dev->dev.coherent_dma_mask; + if (!dev->dev.coherent_dma_mask) + dev->dev.coherent_dma_mask = DMA_BIT_MASK(32); + irq = platform_get_irq(dev, 0); if (irq < 0) { dev_err(&dev->dev, "missing IRQ resource\n"); @@ -102,11 +110,6 @@ static int dwc2_driver_probe(struct platform_device *dev) } res = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&dev->dev, "missing memory base resource\n"); - return -EINVAL; - } - hsotg->regs = devm_ioremap_resource(&dev->dev, res); if (IS_ERR(hsotg->regs)) return PTR_ERR(hsotg->regs); diff --git a/trunk/drivers/staging/gdm72xx/Kconfig b/trunk/drivers/staging/gdm72xx/Kconfig index 3c18efe31365..69059138de4a 100644 --- a/trunk/drivers/staging/gdm72xx/Kconfig +++ b/trunk/drivers/staging/gdm72xx/Kconfig @@ -39,7 +39,7 @@ if WIMAX_GDM72XX_USB config WIMAX_GDM72XX_USB_PM bool "Enable power managerment support" - depends on USB_SUSPEND + depends on PM_RUNTIME endif # WIMAX_GDM72XX_USB diff --git a/trunk/drivers/staging/iio/accel/Kconfig b/trunk/drivers/staging/iio/accel/Kconfig index e2e786dc9c7b..ad45dfbdf417 100644 --- a/trunk/drivers/staging/iio/accel/Kconfig +++ b/trunk/drivers/staging/iio/accel/Kconfig @@ -61,7 +61,7 @@ config LIS3L02DQ depends on SPI select IIO_TRIGGER if IIO_BUFFER depends on !IIO_BUFFER || IIO_KFIFO_BUF - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build SPI support for the ST microelectronics accelerometer. The driver supplies direct access via sysfs files diff --git a/trunk/drivers/staging/iio/adc/Kconfig b/trunk/drivers/staging/iio/adc/Kconfig index d990829008ff..cabc7a367db5 100644 --- a/trunk/drivers/staging/iio/adc/Kconfig +++ b/trunk/drivers/staging/iio/adc/Kconfig @@ -73,7 +73,7 @@ config AD7780 config AD7816 tristate "Analog Devices AD7816/7/8 temperature sensor and ADC driver" depends on SPI - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build support for Analog Devices AD7816/7/8 temperature sensors and ADC. 
diff --git a/trunk/drivers/staging/iio/adc/mxs-lradc.c b/trunk/drivers/staging/iio/adc/mxs-lradc.c index 2856b8fd44ad..163c638e4095 100644 --- a/trunk/drivers/staging/iio/adc/mxs-lradc.c +++ b/trunk/drivers/staging/iio/adc/mxs-lradc.c @@ -690,7 +690,6 @@ static void mxs_lradc_trigger_remove(struct iio_dev *iio) static int mxs_lradc_buffer_preenable(struct iio_dev *iio) { struct mxs_lradc *lradc = iio_priv(iio); - struct iio_buffer *buffer = iio->buffer; int ret = 0, chan, ofs = 0; unsigned long enable = 0; uint32_t ctrl4_set = 0; @@ -698,7 +697,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) uint32_t ctrl1_irq = 0; const uint32_t chan_value = LRADC_CH_ACCUMULATE | ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); - const int len = bitmap_weight(buffer->scan_mask, LRADC_MAX_TOTAL_CHANS); + const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS); if (!len) return -EINVAL; @@ -725,7 +724,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio) lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR); writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR); - for_each_set_bit(chan, buffer->scan_mask, LRADC_MAX_TOTAL_CHANS) { + for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) { ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs); ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs); diff --git a/trunk/drivers/staging/iio/addac/Kconfig b/trunk/drivers/staging/iio/addac/Kconfig index 698a8970b372..e6795e0bed1d 100644 --- a/trunk/drivers/staging/iio/addac/Kconfig +++ b/trunk/drivers/staging/iio/addac/Kconfig @@ -5,7 +5,7 @@ menu "Analog digital bi-direction converters" config ADT7316 tristate "Analog Devices ADT7316/7/8 ADT7516/7/9 temperature sensor, ADC and DAC driver" - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build support for Analog Devices ADT7316, ADT7317, ADT7318 and ADT7516, ADT7517, ADT7519 temperature sensors, ADC and DAC. 
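
The mxs-lradc hunk above stops reaching into the private buffer's scan_mask and instead walks iio->active_scan_mask, the mask the IIO core has already validated. A small sketch of that iteration in a buffer preenable callback follows; the foo driver, FOO_MAX_CHANNELS, and the channel-to-slot mapping comment are assumptions for illustration, and the real register programming is omitted.

/*
 * Sketch of the pattern the mxs-lradc hunk moves to: a buffer preenable
 * callback walking indio_dev->active_scan_mask rather than a private
 * buffer->scan_mask. Only the iteration is shown.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/iio/iio.h>

#define FOO_MAX_CHANNELS	16	/* hypothetical channel count */

static int foo_buffer_preenable(struct iio_dev *indio_dev)
{
	int nr_active, chan, slot = 0;

	nr_active = bitmap_weight(indio_dev->active_scan_mask,
				  FOO_MAX_CHANNELS);
	if (!nr_active)
		return -EINVAL;

	for_each_set_bit(chan, indio_dev->active_scan_mask,
			 FOO_MAX_CHANNELS) {
		/* map scan index 'chan' to hardware slot 'slot' here */
		slot++;
	}

	return 0;
}
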
diff --git a/trunk/drivers/staging/iio/light/tsl2x7x_core.c b/trunk/drivers/staging/iio/light/tsl2x7x_core.c index d060f2572512..c99f890cc6c6 100644 --- a/trunk/drivers/staging/iio/light/tsl2x7x_core.c +++ b/trunk/drivers/staging/iio/light/tsl2x7x_core.c @@ -1869,6 +1869,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp, dev_info(&chip->client->dev, "%s: i2c device found does not match expected id\n", __func__); + ret = -EINVAL; goto fail1; } @@ -1907,7 +1908,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp, if (ret) { dev_err(&clientp->dev, "%s: irq request failed", __func__); - goto fail2; + goto fail1; } } @@ -1920,17 +1921,17 @@ static int tsl2x7x_probe(struct i2c_client *clientp, if (ret) { dev_err(&clientp->dev, "%s: iio registration failed\n", __func__); - goto fail1; + goto fail2; } dev_info(&clientp->dev, "%s Light sensor found.\n", id->name); return 0; -fail1: +fail2: if (clientp->irq) free_irq(clientp->irq, indio_dev); -fail2: +fail1: iio_device_free(indio_dev); return ret; diff --git a/trunk/drivers/staging/iio/resolver/Kconfig b/trunk/drivers/staging/iio/resolver/Kconfig index 49f69ef986fc..ce360f163216 100644 --- a/trunk/drivers/staging/iio/resolver/Kconfig +++ b/trunk/drivers/staging/iio/resolver/Kconfig @@ -13,7 +13,7 @@ config AD2S90 config AD2S1200 tristate "Analog Devices ad2s1200/ad2s1205 driver" depends on SPI - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build support for Analog Devices spi resolver to digital converters, ad2s1200 and ad2s1205, provides direct access @@ -22,7 +22,7 @@ config AD2S1200 config AD2S1210 tristate "Analog Devices ad2s1210 driver" depends on SPI - depends on GENERIC_GPIO + depends on GPIOLIB help Say yes here to build support for Analog Devices spi resolver to digital converters, ad2s1210, provides direct access via sysfs. diff --git a/trunk/drivers/staging/iio/trigger/Kconfig b/trunk/drivers/staging/iio/trigger/Kconfig index d44d3ad26fa5..1a051da62505 100644 --- a/trunk/drivers/staging/iio/trigger/Kconfig +++ b/trunk/drivers/staging/iio/trigger/Kconfig @@ -14,7 +14,7 @@ config IIO_PERIODIC_RTC_TRIGGER config IIO_GPIO_TRIGGER tristate "GPIO trigger" - depends on GENERIC_GPIO + depends on GPIOLIB help Provides support for using GPIO pins as IIO triggers. diff --git a/trunk/drivers/staging/imx-drm/Kconfig b/trunk/drivers/staging/imx-drm/Kconfig index 8c9e40390f42..ef699f753186 100644 --- a/trunk/drivers/staging/imx-drm/Kconfig +++ b/trunk/drivers/staging/imx-drm/Kconfig @@ -1,6 +1,7 @@ config DRM_IMX tristate "DRM Support for Freescale i.MX" select DRM_KMS_HELPER + select VIDEOMODE_HELPERS select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) @@ -19,10 +20,12 @@ config DRM_IMX_FB_HELPER config DRM_IMX_PARALLEL_DISPLAY tristate "Support for parallel displays" depends on DRM_IMX + select VIDEOMODE_HELPERS config DRM_IMX_TVE tristate "Support for TV and VGA displays" depends on DRM_IMX + select REGMAP_MMIO help Choose this to enable the internal Television Encoder (TVe) found on i.MX53 processors. @@ -30,6 +33,7 @@ config DRM_IMX_TVE config DRM_IMX_IPUV3_CORE tristate "IPUv3 core support" depends on DRM_IMX + depends on RESET_CONTROLLER help Choose this if you have a i.MX5/6 system and want to use the IPU. This option only enables IPU base @@ -38,5 +42,6 @@ config DRM_IMX_IPUV3_CORE config DRM_IMX_IPUV3 tristate "DRM Support for i.MX IPUv3" depends on DRM_IMX + depends on DRM_IMX_IPUV3_CORE help Choose this if you have a i.MX5 or i.MX6 processor. 
diff --git a/trunk/drivers/staging/imx-drm/imx-tve.c b/trunk/drivers/staging/imx-drm/imx-tve.c index ac1634464407..03892de9bd7e 100644 --- a/trunk/drivers/staging/imx-drm/imx-tve.c +++ b/trunk/drivers/staging/imx-drm/imx-tve.c @@ -670,7 +670,9 @@ static int imx_tve_probe(struct platform_device *pdev) tve->dac_reg = devm_regulator_get(&pdev->dev, "dac"); if (!IS_ERR(tve->dac_reg)) { regulator_set_voltage(tve->dac_reg, 2750000, 2750000); - regulator_enable(tve->dac_reg); + ret = regulator_enable(tve->dac_reg); + if (ret) + return ret; } tve->clk = devm_clk_get(&pdev->dev, "tve"); diff --git a/trunk/drivers/staging/media/solo6x10/Kconfig b/trunk/drivers/staging/media/solo6x10/Kconfig index ec32776ff547..df6569b997b8 100644 --- a/trunk/drivers/staging/media/solo6x10/Kconfig +++ b/trunk/drivers/staging/media/solo6x10/Kconfig @@ -1,6 +1,7 @@ config SOLO6X10 tristate "Softlogic 6x10 MPEG codec cards" depends on PCI && VIDEO_DEV && SND && I2C + depends on FONTS select VIDEOBUF2_DMA_SG select VIDEOBUF2_DMA_CONTIG select SND_PCM diff --git a/trunk/drivers/staging/nvec/nvec.c b/trunk/drivers/staging/nvec/nvec.c index a88959f9a07a..197c393c4ca7 100644 --- a/trunk/drivers/staging/nvec/nvec.c +++ b/trunk/drivers/staging/nvec/nvec.c @@ -123,6 +123,20 @@ int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, } EXPORT_SYMBOL_GPL(nvec_register_notifier); +/** + * nvec_unregister_notifier - Unregister a notifier with nvec + * @nvec: A &struct nvec_chip + * @nb: The notifier block to unregister + * + * Unregisters a notifier with @nvec. The notifier will be removed from the + * atomic notifier chain. + */ +int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&nvec->notifier_list, nb); +} +EXPORT_SYMBOL_GPL(nvec_unregister_notifier); + /** * nvec_status_notifier - The final notifier * @@ -185,7 +199,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec, * * Free the given message */ -inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) +void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) { if (msg != &nvec->tx_scratch) dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool); @@ -800,11 +814,6 @@ static int tegra_nvec_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "no mem resource?\n"); - return -ENODEV; - } - base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -815,7 +824,7 @@ static int tegra_nvec_probe(struct platform_device *pdev) return -ENODEV; } - i2c_clk = clk_get(&pdev->dev, "div-clk"); + i2c_clk = devm_clk_get(&pdev->dev, "div-clk"); if (IS_ERR(i2c_clk)) { dev_err(nvec->dev, "failed to get controller clock\n"); return -ENODEV; @@ -902,8 +911,11 @@ static int tegra_nvec_remove(struct platform_device *pdev) nvec_toggle_global_events(nvec, false); mfd_remove_devices(nvec->dev); + nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier); cancel_work_sync(&nvec->rx_work); cancel_work_sync(&nvec->tx_work); + /* FIXME: needs check wether nvec is responsible for power off */ + pm_power_off = NULL; return 0; } diff --git a/trunk/drivers/staging/nvec/nvec.h b/trunk/drivers/staging/nvec/nvec.h index b7a14bc0ab91..2b1316d87470 100644 --- a/trunk/drivers/staging/nvec/nvec.h +++ b/trunk/drivers/staging/nvec/nvec.h @@ -197,9 +197,8 @@ extern int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb, unsigned int events); -extern int 
nvec_unregister_notifier(struct device *dev, - struct notifier_block *nb, - unsigned int events); +extern int nvec_unregister_notifier(struct nvec_chip *dev, + struct notifier_block *nb); extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg); diff --git a/trunk/drivers/staging/nvec/nvec_kbd.c b/trunk/drivers/staging/nvec/nvec_kbd.c index 7445ce6422bb..a0ec52a4114f 100644 --- a/trunk/drivers/staging/nvec/nvec_kbd.c +++ b/trunk/drivers/staging/nvec/nvec_kbd.c @@ -169,8 +169,15 @@ static int nvec_kbd_probe(struct platform_device *pdev) static int nvec_kbd_remove(struct platform_device *pdev) { + struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); + char disable_kbd[] = { NVEC_KBD, DISABLE_KBD }, + uncnfg_wake_key_reporting[] = { NVEC_KBD, CNFG_WAKE_KEY_REPORTING, + false }; + nvec_write_async(nvec, uncnfg_wake_key_reporting, 3); + nvec_write_async(nvec, disable_kbd, 2); + nvec_unregister_notifier(nvec, &keys_dev.notifier); + input_unregister_device(keys_dev.input); - input_free_device(keys_dev.input); return 0; } @@ -188,4 +195,5 @@ module_platform_driver(nvec_kbd_driver); MODULE_AUTHOR("Marc Dietrich "); MODULE_DESCRIPTION("NVEC keyboard driver"); +MODULE_ALIAS("platform:nvec-kbd"); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/staging/nvec/nvec_power.c b/trunk/drivers/staging/nvec/nvec_power.c index 296f7b9a8c8c..aacfcd6954a3 100644 --- a/trunk/drivers/staging/nvec/nvec_power.c +++ b/trunk/drivers/staging/nvec/nvec_power.c @@ -414,6 +414,7 @@ static int nvec_power_remove(struct platform_device *pdev) struct nvec_power *power = platform_get_drvdata(pdev); cancel_delayed_work_sync(&power->poller); + nvec_unregister_notifier(power->nvec, &power->notifier); switch (pdev->id) { case AC: power_supply_unregister(&nvec_psy); diff --git a/trunk/drivers/staging/nvec/nvec_ps2.c b/trunk/drivers/staging/nvec/nvec_ps2.c index aff6b9b9f9aa..06dbb02085a9 100644 --- a/trunk/drivers/staging/nvec/nvec_ps2.c +++ b/trunk/drivers/staging/nvec/nvec_ps2.c @@ -106,7 +106,7 @@ static int nvec_mouse_probe(struct platform_device *pdev) struct serio *ser_dev; char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; - ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); + ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL); if (ser_dev == NULL) return -ENOMEM; @@ -133,6 +133,11 @@ static int nvec_mouse_probe(struct platform_device *pdev) static int nvec_mouse_remove(struct platform_device *pdev) { + struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent); + + ps2_sendcommand(ps2_dev.ser_dev, DISABLE_MOUSE); + ps2_stopstreaming(ps2_dev.ser_dev); + nvec_unregister_notifier(nvec, &ps2_dev.notifier); serio_unregister_port(ps2_dev.ser_dev); return 0; @@ -179,4 +184,5 @@ module_platform_driver(nvec_mouse_driver); MODULE_DESCRIPTION("NVEC mouse driver"); MODULE_AUTHOR("Marc Dietrich "); +MODULE_ALIAS("platform:nvec-mouse"); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/staging/sep/Kconfig b/trunk/drivers/staging/sep/Kconfig index 185b676d858a..aab945a316ea 100644 --- a/trunk/drivers/staging/sep/Kconfig +++ b/trunk/drivers/staging/sep/Kconfig @@ -1,6 +1,6 @@ config DX_SEP tristate "Discretix SEP driver" - depends on PCI + depends on PCI && CRYPTO help Discretix SEP driver; used for the security processor subsystem on board the Intel Mobile Internet Device and adds SEP availability diff --git a/trunk/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/trunk/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c index fe667dde43ce..386362c9964f 100644 --- 
a/trunk/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c +++ b/trunk/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c @@ -1087,7 +1087,11 @@ static int synaptics_rmi4_resume(struct device *dev) unsigned char intr_status; struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); - regulator_enable(rmi4_data->regulator); + retval = regulator_enable(rmi4_data->regulator); + if (retval) { + dev_err(dev, "Regulator enable failed (%d)\n", retval); + return retval; + } enable_irq(rmi4_data->i2c_client->irq); rmi4_data->touch_stopped = false; diff --git a/trunk/drivers/staging/vt6656/hostap.c b/trunk/drivers/staging/vt6656/hostap.c index f4f1bf7a30fd..c699a3058b39 100644 --- a/trunk/drivers/staging/vt6656/hostap.c +++ b/trunk/drivers/staging/vt6656/hostap.c @@ -133,7 +133,7 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } - kfree(pDevice->apdev); + free_netdev(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = false; pDevice->bEnableHostWEP = false; diff --git a/trunk/drivers/staging/vt6656/iwctl.c b/trunk/drivers/staging/vt6656/iwctl.c index c335808211ee..d0cf7d8a20e5 100644 --- a/trunk/drivers/staging/vt6656/iwctl.c +++ b/trunk/drivers/staging/vt6656/iwctl.c @@ -1345,9 +1345,12 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, return rc; } + spin_lock_irq(&pDevice->lock); + if (wrq->disabled) { pDevice->ePSMode = WMAC_POWER_CAM; PSvDisablePowerSaving(pDevice); + spin_unlock_irq(&pDevice->lock); return rc; } if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { @@ -1358,6 +1361,9 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info, pDevice->ePSMode = WMAC_POWER_FAST; PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); } + + spin_unlock_irq(&pDevice->lock); + switch (wrq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n"); diff --git a/trunk/drivers/staging/zcache/ramster/ramster-howto.txt b/trunk/drivers/staging/zcache/ramster/ramster-howto.txt new file mode 100644 index 000000000000..7b1ee3bbfdd5 --- /dev/null +++ b/trunk/drivers/staging/zcache/ramster/ramster-howto.txt @@ -0,0 +1,366 @@ + RAMSTER HOW-TO + +Author: Dan Magenheimer +Ramster maintainer: Konrad Wilk + +This is a HOWTO document for ramster which, as of this writing, is in +the kernel as a subdirectory of zcache in drivers/staging, called ramster. +(Zcache can be built with or without ramster functionality.) If enabled +and properly configured, ramster allows memory capacity load balancing +across multiple machines in a cluster. Further, the ramster code serves +as an example of asynchronous access for zcache (as well as cleancache and +frontswap) that may prove useful for future transcendent memory +implementations, such as KVM and NVRAM. While ramster works today on +any network connection that supports kernel sockets, its features may +become more interesting on future high-speed fabrics/interconnects. + +Ramster requires both kernel and userland support. The userland support, +called ramster-tools, is known to work with EL6-based distros, but is a +set of poorly-hacked slightly-modified cluster tools based on ocfs2, which +includes an init file, a config file, and a userland binary that interfaces +to the kernel. 
This state of userland support reflects the abysmal userland +skills of this suitably-embarrassed author; any help/patches to turn +ramster-tools into more distributable rpms/debs useful for a wider range +of distros would be appreciated. The source RPM that can be used as a +starting point is available at: + http://oss.oracle.com/projects/tmem/files/RAMster/ + +As a result of this author's ignorance, userland setup described in this +HOWTO assumes an EL6 distro and is described in EL6 syntax. Apologies +if this offends anyone! + +Kernel support has only been tested on x86_64. Systems with an active +ocfs2 filesystem should work, but since ramster leverages a lot of +code from ocfs2, there may be latent issues. A kernel configuration that +includes CONFIG_OCFS2_FS should build OK, and should certainly run OK +if no ocfs2 filesystem is mounted. + +This HOWTO demonstrates memory capacity load balancing for a two-node +cluster, where one node called the "local" node becomes overcommitted +and the other node called the "remote" node provides additional RAM +capacity for use by the local node. Ramster is capable of more complex +topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES". + +If you find any terms in this HOWTO unfamiliar or don't understand the +motivation for ramster, the following LWN reading is recommended: +-- Transcendent Memory in a Nutshell (lwn.net/Articles/454795) +-- The future calculus of memory management (lwn.net/Articles/475681) +And since ramster is built on top of zcache, this article may be helpful: +-- In-kernel memory compression (lwn.net/Articles/545244) + +Now that you've memorized the contents of those articles, let's get started! + +A. PRELIMINARY + +1) Install two x86_64 Linux systems that are known to work when + upgraded to a recent upstream Linux kernel version. + +On each system: + +2) Configure, build and install, then boot Linux, just to ensure it + can be done with an unmodified upstream kernel. Confirm you booted + the upstream kernel with "uname -a". + +3) If you plan to do any performance testing or unless you plan to + test only swapping, the "WasActive" patch is also highly recommended. + (Search lkml.org for WasActive, apply the patch, rebuild your kernel.) + For a demo or simple testing, the patch can be ignored. + +4) Install ramster-tools as root. An x86_64 rpm for EL6-based systems + can be found at: + http://oss.oracle.com/projects/tmem/files/RAMster/ + (Sorry but for now, non-EL6 users must recreate ramster-tools on + their own from source. See above.) + +5) Ensure that debugfs is mounted at each boot. Examples below assume it + is mounted at /sys/kernel/debug. + +B. BUILDING RAMSTER INTO THE KERNEL + +Do the following on each system: + +1) Using the kernel configuration mechanism of your choice, change + your config to include: + + CONFIG_CLEANCACHE=y + CONFIG_FRONTSWAP=y + CONFIG_STAGING=y + CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m + CONFIG_ZCACHE=y + CONFIG_RAMSTER=y + + For a linux-3.10 or later kernel, you should also set: + + CONFIG_ZCACHE_DEBUG=y + CONFIG_RAMSTER_DEBUG=y + + Before building the kernel please doublecheck your kernel config + file to ensure all of the settings are correct. + +2) Build this kernel and change your boot file (e.g. /etc/grub.conf) + so that the new kernel will boot. + +3) Add "zcache" and "ramster" as kernel boot parameters for the new kernel. + +4) Reboot each system approximately simultaneously. 
+ +5) Check dmesg to ensure there are some messages from ramster, prefixed + by "ramster:" + + # dmesg | grep ramster + + You should also see a lot of files in: + + # ls /sys/kernel/debug/zcache + # ls /sys/kernel/debug/ramster + + These are mostly counters for various zcache and ramster activities. + You should also see files in: + + # ls /sys/kernel/mm/ramster + + These are sysfs files that control ramster as we shall see. + + Ramster now will act as a single-system zcache on each system + but doesn't yet know anything about the cluster so can't yet do + anything remotely. + +C. CONFIGURING THE RAMSTER CLUSTER + +This part can be error prone unless you are familiar with clustering +filesystems. We need to describe the cluster in a /etc/ramster.conf +file and the init scripts that parse it are extremely picky about +the syntax. + +1) Create a /etc/ramster.conf file and ensure it is identical on both + systems. This file mimics the ocfs2 format and there is a good amount + of documentation that can be searched for ocfs2.conf, but you can use: + + cluster: + name = ramster + node_count = 2 + node: + name = system1 + cluster = ramster + number = 0 + ip_address = my.ip.ad.r1 + ip_port = 7777 + node: + name = system2 + cluster = ramster + number = 1 + ip_address = my.ip.ad.r2 + ip_port = 7777 + + You must ensure that the "name" field in the file exactly matches + the output of "hostname" on each system; if "hostname" shows a + fully-qualified hostname, ensure the name is fully qualified in + /etc/ramster.conf. Obviously, substitute my.ip.ad.rx with proper + ip addresses. + +2) Enable the ramster service and configure it. If you used the + EL6 ramster-tools, this would be: + + # chkconfig --add ramster + # service ramster configure + + Set "load on boot" to "y", cluster to start is "ramster" (or whatever + name you chose in ramster.conf), heartbeat dead threshold as "500", + network idle timeout as "1000000". Leave the others as default. + +3) Reboot both systems. After reboot, try (assuming EL6 ramster-tools): + + # service ramster status + + You should see "Checking RAMSTER cluster "ramster": Online". If you do + not, something is wrong and ramster will not work. Note that you + should also see that the driver for "configfs" is loaded and mounted, + the driver for ocfs2_dlmfs is not loaded, and some numbers for network + parameters. You will also see "Checking RAMSTER heartbeat: Not active". + That's all OK. + +4) Now you need to start the cluster heartbeat; the cluster is not "up" + until all nodes detect a heartbeat. In a real cluster, heartbeat detection + is done via a cluster filesystem, but ramster doesn't require one. Some + hack-y kernel code in ramster can start the heartbeat for you though if + you tell it what nodes are "up". To enable the heartbeat, do: + + # echo 0 > /sys/kernel/mm/ramster/manual_node_up + # echo 1 > /sys/kernel/mm/ramster/manual_node_up + + This must be done on BOTH nodes and, to avoid timeouts, must be done + approximately concurrently on both nodes. On an EL6 system, it is + convenient to put these lines in /etc/rc.local. To confirm that the + cluster is now up, on both systems do: + + # dmesg | grep ramster + + You should see ramster "Accepted connection" messages in dmesg on both + nodes after this. Note that if you check userland status again with + + # service ramster status + + you will still see "Checking RAMSTER heartbeat: Not active". That's + still OK... the ramster kernel heartbeat hack doesn't communicate to + userland. 
+ +5) You now must tell each node the node to which it should "remotify" pages. + On this two node cluster, we will assume the "local" node, node 0, has + memory overcommitted and will use ramster to utilize RAM capacity on + the "remote node", node 1. To configure this, on node 0, you do: + + # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum + + You should see "ramster: node 1 set as remotification target" in dmesg + on node 0. Again, on EL6, /etc/rc.local is a good place to put this + on node 0 so you don't forget to do it at each boot. + +6) One more step: By default, the ramster code does not "remotify" any + pages; this is primarily for testing purposes, but sometimes it is + useful. This may change in the future, but for now, on node 0, you do: + + # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable + # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable + + The first enables remotifying swap (persistent, aka frontswap) pages, + the second enables remotifying of page cache (ephemeral, cleancache) + pages. + + On EL6, these lines can also be put in /etc/rc.local (AFTER the + node_up lines), or at the beginning of a script that runs a workload. + +7) Note that most testing has been done with both/all machines booted + roughly simultaneously to avoid cluster timeouts. Ideally, you should + do this too unless you are trying to break ramster rather than just + use it. ;-) + +D. TESTING RAMSTER + +1) Note that ramster has no value unless pages get "remotified". For + swap/frontswap/persistent pages, this doesn't happen unless/until + the workload would cause swapping to occur, at which point pages + are put into frontswap/zcache, and the remotification thread starts + working. To get to the point where the system swaps, you either + need a workload for which the working set exceeds the RAM in the + system; or you need to somehow reduce the amount of RAM one of + the system sees. This latter is easy when testing in a VM, but + harder on physical systems. In some cases, "mem=xxxM" on the + kernel command line restricts memory, but for some values of xxx + the kernel may fail to boot. One may also try creating a fixed + RAMdisk, doing nothing with it, but ensuring that it eats up a fixed + amount of RAM. + +2) To see if ramster is working, on the "remote node", node 1, try: + + # grep . /sys/kernel/debug/ramster/foreign_* + # # note, that is space-dot-space between grep and the pathname + + to monitor the number (and max) ephemeral and persistent pages + that ramster has sent. If these stay at zero, ramster is not working + either because the workload on the local node (node 0) isn't creating + enough memory pressure or because "remotifying" isn't working. On the + local system, node 0, you can watch lots of useful information also. + Try: + + grep . /sys/kernel/debug/zcache/*pageframes* \ + /sys/kernel/debug/zcache/*zbytes* \ + /sys/kernel/debug/zcache/*zpages* \ + /sys/kernel/debug/ramster/*remote* + + Of particular note are the remote_*_pages_succ_get counters. These + show how many disk reads and/or disk writes have been avoided on the + overcommitted local system by storing pages remotely using ramster. + + At the risk of information overload, you can also grep: + + /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/* + + These show, for example, how many disk reads and/or disk writes have + been avoided by using zcache to optimize RAM on the local system. 
+ + +AUTOMATIC SWAP REPATRIATION + +You may notice that while the systems are idle, the foreign persistent +page count on the remote machine slowly decreases. This is because +ramster implements "frontswap selfshrinking": When possible, swap +pages that have been remotified are slowly repatriated to the local +machine. This is so that local RAM can be used when possible and +so that, in case of remote machine crash, the probability of loss +of data is reduced. + +REBOOTING / POWEROFF + +If a system is shut down while some of its swap pages still reside +on a remote system, the system may lock up during the shutdown +sequence. This will occur if the network is shut down before the +swap mechansim is shut down, which is the default ordering on many +distros. To avoid this annoying problem, simply shut off the swap +subsystem before starting the shutdown sequence, e.g.: + + # swapoff -a + # reboot + +Ideally, this swapoff-before-ifdown ordering should be enforced permanently +using shutdown scripts. + +KNOWN PROBLEMS + +1) You may periodically see messages such as: + + ramster_r2net, message length problem + + This is harmless but indicates that a node is sending messages + containing compressed pages that exceed the maximum for zcache + (PAGE_SIZE*15/16). The sender side needs to be fixed. + +2) If you see a "No longer connected to node..." message or a "No connection + established with node X after N seconds", it is possible you may + be in an unrecoverable state. If you are certain all of the + appropriate cluster configuration steps described above have been + performed, try rebooting the two servers concurrently to see if + the cluster starts. + + Note that "Connection to node... shutdown, state 7" is an intermediate + connection state. As long as you later see "Accepted connection", the + intermediate states are harmless. + +3) There are known issues in counting certain values. As a result + you may see periodic warnings from the kernel. Almost always you + will see "ramster: bad accounting for XXX". There are also "WARN_ONCE" + messages. If you see kernel warnings with a tombstone, please report + them. They are harmless but reflect bugs that need to be eventually fixed. + +ADVANCED RAMSTER TOPOLOGIES + +The kernel code for ramster can support up to eight nodes in a cluster, +but no testing has been done with more than three nodes. + +In the example described above, the "remote" node serves as a RAM +overflow for the "local" node. This can be made symmetric by appropriate +settings of the sysfs remote_target_nodenum file. For example, by setting: + + # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum + +on node 0, and + + # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum + +on node 1, each node can serve as a RAM overflow for the other. + +For more than two nodes, a "RAM server" can be configured. For a +three node system, set: + + # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum + +on node 1, and + + # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum + +on node 2. Then node 0 is a RAM server for node 1 and node 2. + +In this implementation of ramster, any remote node is potentially a single +point of failure (SPOF). Though the probability of failure is reduced +by automatic swap repatriation (see above), a proposed future enhancement +to ramster improves high-availability for the cluster by sending a copy +of each page of date to two other nodes. Patches welcome! 
diff --git a/trunk/drivers/staging/zcache/zcache-main.c b/trunk/drivers/staging/zcache/zcache-main.c index 522cb8e55142..dcceed29d31a 100644 --- a/trunk/drivers/staging/zcache/zcache-main.c +++ b/trunk/drivers/staging/zcache/zcache-main.c @@ -1922,15 +1922,15 @@ static int zcache_init(void) #ifdef CONFIG_ZCACHE_MODULE #ifdef CONFIG_RAMSTER -module_param(ramster_enabled, int, S_IRUGO); +module_param(ramster_enabled, bool, S_IRUGO); module_param(disable_frontswap_selfshrink, int, S_IRUGO); #endif -module_param(disable_cleancache, int, S_IRUGO); -module_param(disable_frontswap, int, S_IRUGO); +module_param(disable_cleancache, bool, S_IRUGO); +module_param(disable_frontswap, bool, S_IRUGO); #ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS module_param(frontswap_has_exclusive_gets, bool, S_IRUGO); #endif -module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO); +module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO); module_param(zcache_comp_name, charp, S_IRUGO); module_init(zcache_init); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/target/iscsi/iscsi_target.c b/trunk/drivers/target/iscsi/iscsi_target.c index ffbc6a94be52..262ef1f23b38 100644 --- a/trunk/drivers/target/iscsi/iscsi_target.c +++ b/trunk/drivers/target/iscsi/iscsi_target.c @@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg( static void iscsit_do_crypto_hash_buf( struct hash_desc *hash, - unsigned char *buf, + const void *buf, u32 payload_length, u32 padding, u8 *pad_bytes, @@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->tx_size += ISCSI_CRC_LEN; pr_debug("Attaching CRC32C HeaderDigest to" @@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0], + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); tx_size += ISCSI_CRC_LEN; pr_debug("Attaching CRC32C HeaderDigest to" @@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - 
iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3040,9 +3035,8 @@ static int iscsit_send_r2t( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3557,11 +3548,11 @@ static int iscsit_send_reject( struct iscsi_cmd *cmd, struct iscsi_conn *conn) { - u32 iov_count = 0, tx_size = 0; - struct iscsi_reject *hdr; + struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0]; struct kvec *iov; + u32 iov_count = 0, tx_size; - iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]); + iscsit_build_reject(cmd, conn, hdr); iov = &cmd->iov_misc[0]; iov[iov_count].iov_base = cmd->pdu; @@ -3574,9 +3565,8 @@ static int iscsit_send_reject( if (conn->conn_ops->HeaderDigest) { u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)hdr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)header_digest); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest); iov[0].iov_len += ISCSI_CRC_LEN; tx_size += ISCSI_CRC_LEN; @@ -3585,9 +3575,8 @@ static int iscsit_send_reject( } if (conn->conn_ops->DataDigest) { - iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, - (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, - 0, NULL, (u8 *)&cmd->data_crc); + iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr, + ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc); iov[iov_count].iov_base = &cmd->data_crc; iov[iov_count++].iov_len = ISCSI_CRC_LEN; diff --git a/trunk/drivers/target/iscsi/iscsi_target_erl1.c b/trunk/drivers/target/iscsi/iscsi_target_erl1.c index 7816af6cdd12..40d9dbca987b 100644 --- 
a/trunk/drivers/target/iscsi/iscsi_target_erl1.c +++ b/trunk/drivers/target/iscsi/iscsi_target_erl1.c @@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn( /* * CmdSN is greater than the tail of the list. */ - if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) + if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn)) list_add_tail(&ooo_cmdsn->ooo_list, &sess->sess_ooo_cmdsn_list); else { @@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn( */ list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, ooo_list) { - if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) + if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn)) continue; + /* Insert before this entry */ list_add(&ooo_cmdsn->ooo_list, - &ooo_tmp->ooo_list); + ooo_tmp->ooo_list.prev); break; } } diff --git a/trunk/drivers/target/iscsi/iscsi_target_parameters.c b/trunk/drivers/target/iscsi/iscsi_target_parameters.c index f690be9e5293..c2185fc31136 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_parameters.c +++ b/trunk/drivers/target/iscsi/iscsi_target_parameters.c @@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr) /* * Extra parameters for ISER from RFC-5046 */ - param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS, + param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS, PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, TYPERANGE_BOOL_AND, USE_LEADING_ONLY); if (!param) @@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate( SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, OFMARKINT)) { SET_PSTATE_NEGOTIATE(param); - } else if (!strcmp(param->name, RDMAEXTENTIONS)) { + } else if (!strcmp(param->name, RDMAEXTENSIONS)) { if (iser == true) SET_PSTATE_NEGOTIATE(param); } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { @@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery( param->state &= ~PSTATE_NEGOTIATE; else if (!strcmp(param->name, OFMARKINT)) param->state &= ~PSTATE_NEGOTIATE; - else if (!strcmp(param->name, RDMAEXTENTIONS)) + else if (!strcmp(param->name, RDMAEXTENSIONS)) param->state &= ~PSTATE_NEGOTIATE; else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) param->state &= ~PSTATE_NEGOTIATE; @@ -1977,7 +1977,7 @@ void iscsi_set_session_parameters( ops->SessionType = !strcmp(param->value, DISCOVERY); pr_debug("SessionType: %s\n", param->value); - } else if (!strcmp(param->name, RDMAEXTENTIONS)) { + } else if (!strcmp(param->name, RDMAEXTENSIONS)) { ops->RDMAExtensions = !strcmp(param->value, YES); pr_debug("RDMAExtensions: %s\n", param->value); diff --git a/trunk/drivers/target/iscsi/iscsi_target_parameters.h b/trunk/drivers/target/iscsi/iscsi_target_parameters.h index f31b9c4b83f2..915b06798505 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_parameters.h +++ b/trunk/drivers/target/iscsi/iscsi_target_parameters.h @@ -91,7 +91,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, /* * Parameter names of iSCSI Extentions for RDMA (iSER). 
See RFC-5046 */ -#define RDMAEXTENTIONS "RDMAExtensions" +#define RDMAEXTENSIONS "RDMAExtensions" #define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" #define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" @@ -142,7 +142,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *, /* * Initial values for iSER parameters following RFC-5046 Section 6 */ -#define INITIAL_RDMAEXTENTIONS NO +#define INITIAL_RDMAEXTENSIONS NO #define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" #define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" diff --git a/trunk/drivers/target/target_core_configfs.c b/trunk/drivers/target/target_core_configfs.c index 43b7ac6c5b1c..4a8bd36d3958 100644 --- a/trunk/drivers/target/target_core_configfs.c +++ b/trunk/drivers/target/target_core_configfs.c @@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = { .store = target_core_store_dev_udev_path, }; +static ssize_t target_core_show_dev_enable(void *p, char *page) +{ + struct se_device *dev = p; + + return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED)); +} + static ssize_t target_core_store_dev_enable( void *p, const char *page, @@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable( static struct target_core_configfs_attribute target_core_attr_dev_enable = { .attr = { .ca_owner = THIS_MODULE, .ca_name = "enable", - .ca_mode = S_IWUSR }, - .show = NULL, + .ca_mode = S_IRUGO | S_IWUSR }, + .show = target_core_show_dev_enable, .store = target_core_store_dev_enable, }; diff --git a/trunk/drivers/target/target_core_device.c b/trunk/drivers/target/target_core_device.c index 2e4d655471bc..4630481b6043 100644 --- a/trunk/drivers/target/target_core_device.c +++ b/trunk/drivers/target/target_core_device.c @@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) struct se_dev_entry *deve = se_cmd->se_deve; deve->total_cmds++; - deve->total_bytes += se_cmd->data_length; if ((se_cmd->data_direction == DMA_TO_DEVICE) && (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { @@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun) else if (se_cmd->data_direction == DMA_FROM_DEVICE) deve->read_bytes += se_cmd->data_length; - deve->deve_cmds++; - se_lun = deve->se_lun; se_cmd->se_lun = deve->se_lun; se_cmd->pr_res_key = deve->pr_res_key; @@ -275,17 +272,6 @@ int core_free_device_list_for_node( return 0; } -void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd) -{ - struct se_dev_entry *deve; - unsigned long flags; - - spin_lock_irqsave(&se_nacl->device_list_lock, flags); - deve = se_nacl->device_list[se_cmd->orig_fe_lun]; - deve->deve_cmds--; - spin_unlock_irqrestore(&se_nacl->device_list_lock, flags); -} - void core_update_device_list_access( u32 mapped_lun, u32 lun_access, diff --git a/trunk/drivers/target/target_core_file.c b/trunk/drivers/target/target_core_file.c index 58ed683e04ae..1b1d544e927a 100644 --- a/trunk/drivers/target/target_core_file.c +++ b/trunk/drivers/target/target_core_file.c @@ -153,10 +153,6 @@ static int fd_configure_device(struct se_device *dev) struct request_queue *q = bdev_get_queue(inode->i_bdev); unsigned long long dev_size; - dev->dev_attrib.hw_block_size = - bdev_logical_block_size(inode->i_bdev); - dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); - /* * Determine the number of bytes from i_size_read() minus * one (1) logical sector from underlying struct block_device @@ -203,9 +199,6 @@ static int 
fd_configure_device(struct se_device *dev) goto fail; } - dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; - dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; - /* * Limit UNMAP emulation to 8k Number of LBAs (NoLB) */ @@ -226,6 +219,8 @@ static int fd_configure_device(struct se_device *dev) fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; + dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; + dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { diff --git a/trunk/drivers/target/target_core_iblock.c b/trunk/drivers/target/target_core_iblock.c index 07f5f94634bb..aa1620abec6d 100644 --- a/trunk/drivers/target/target_core_iblock.c +++ b/trunk/drivers/target/target_core_iblock.c @@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd) rw = WRITE_FUA; else if (!(q->flush_flags & REQ_FLUSH)) rw = WRITE_FUA; + else + rw = WRITE; } else { rw = WRITE; } diff --git a/trunk/drivers/target/target_core_internal.h b/trunk/drivers/target/target_core_internal.h index 853bab60e362..18d49df4d0ac 100644 --- a/trunk/drivers/target/target_core_internal.h +++ b/trunk/drivers/target/target_core_internal.h @@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp; struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); int core_free_device_list_for_node(struct se_node_acl *, struct se_portal_group *); -void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *); void core_update_device_list_access(u32, u32, struct se_node_acl *); int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32, u32, struct se_node_acl *, struct se_portal_group *); diff --git a/trunk/drivers/target/target_core_rd.c b/trunk/drivers/target/target_core_rd.c index e0b3c379aa14..0921a64b5550 100644 --- a/trunk/drivers/target/target_core_rd.c +++ b/trunk/drivers/target/target_core_rd.c @@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd) u32 src_len; u64 tmp; + if (dev->rd_flags & RDF_NULLIO) { + target_complete_cmd(cmd, SAM_STAT_GOOD); + return 0; + } + tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; rd_offset = do_div(tmp, PAGE_SIZE); rd_page = tmp; @@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd) } enum { - Opt_rd_pages, Opt_err + Opt_rd_pages, Opt_rd_nullio, Opt_err }; static match_table_t tokens = { {Opt_rd_pages, "rd_pages=%d"}, + {Opt_rd_nullio, "rd_nullio=%d"}, {Opt_err, NULL} }; @@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev, " Count: %u\n", rd_dev->rd_page_count); rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; break; + case Opt_rd_nullio: + match_int(args, &arg); + if (arg != 1) + break; + + pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg); + rd_dev->rd_flags |= RDF_NULLIO; + break; default: break; } @@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", rd_dev->rd_dev_id); bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" - " SG_table_count: %u\n", rd_dev->rd_page_count, - PAGE_SIZE, rd_dev->sg_table_count); + " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count, + PAGE_SIZE, rd_dev->sg_table_count, + !!(rd_dev->rd_flags & RDF_NULLIO)); return bl; } diff --git a/trunk/drivers/target/target_core_rd.h b/trunk/drivers/target/target_core_rd.h index 933b38b6e563..1789d1e14395 100644 --- a/trunk/drivers/target/target_core_rd.h +++ b/trunk/drivers/target/target_core_rd.h @@ -22,6 +22,7 @@ struct rd_dev_sg_table { } 
____cacheline_aligned; #define RDF_HAS_PAGE_COUNT 0x01 +#define RDF_NULLIO 0x02 struct rd_dev { struct se_device dev; diff --git a/trunk/drivers/target/target_core_transport.c b/trunk/drivers/target/target_core_transport.c index f8388b4024aa..4a793362309d 100644 --- a/trunk/drivers/target/target_core_transport.c +++ b/trunk/drivers/target/target_core_transport.c @@ -2163,8 +2163,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) if (wait_for_tasks) transport_wait_for_tasks(cmd); - core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); - if (cmd->se_lun) transport_lun_remove_cmd(cmd); @@ -2213,21 +2211,19 @@ static void target_release_cmd_kref(struct kref *kref) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct se_session *se_sess = se_cmd->se_sess; - unsigned long flags; - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); if (list_empty(&se_cmd->se_cmd_list)) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); se_cmd->se_tfo->release_cmd(se_cmd); return; } if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); complete(&se_cmd->cmd_wait_comp); return; } list_del(&se_cmd->se_cmd_list); - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + spin_unlock(&se_sess->sess_cmd_lock); se_cmd->se_tfo->release_cmd(se_cmd); } @@ -2238,7 +2234,8 @@ static void target_release_cmd_kref(struct kref *kref) */ int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) { - return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); + return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref, + &se_sess->sess_cmd_lock); } EXPORT_SYMBOL(target_put_sess_cmd); diff --git a/trunk/drivers/thermal/Kconfig b/trunk/drivers/thermal/Kconfig index a764f165b589..5e3c02554d99 100644 --- a/trunk/drivers/thermal/Kconfig +++ b/trunk/drivers/thermal/Kconfig @@ -67,15 +67,16 @@ config THERMAL_GOV_USER_SPACE Enable this to let the user space manage the platform thermals. config CPU_THERMAL - tristate "generic cpu cooling support" + bool "generic cpu cooling support" depends on CPU_FREQ select CPU_FREQ_TABLE help This implements the generic cpu cooling mechanism through frequency - reduction, cpu hotplug and any other ways of reducing temperature. An - ACPI version of this already exists(drivers/acpi/processor_thermal.c). + reduction. An ACPI version of this already exists + (drivers/acpi/processor_thermal.c). This will be useful for platforms using the generic thermal interface and not the ACPI interface. + If you want this support, you should say Y here. config THERMAL_EMULATION @@ -86,6 +87,10 @@ config THERMAL_EMULATION user can manually input temperature and test the different trip threshold behaviour for simulation purpose. + WARNING: Be careful while enabling this option on production systems, + because userland can easily disable the thermal policy by simply + flooding this sysfs node with low temperature values. + config SPEAR_THERMAL bool "SPEAr thermal sensor driver" depends on PLAT_SPEAR @@ -117,15 +122,6 @@ config EXYNOS_THERMAL If you say yes here you get support for TMU (Thermal Management Unit) on SAMSUNG EXYNOS series of SoC. -config EXYNOS_THERMAL_EMUL - bool "EXYNOS TMU emulation mode support" - depends on EXYNOS_THERMAL - help - Exynos 4412 and 4414 and 5 series has emulation mode on TMU. 
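The target_core_transport.c hunk above goes hand in hand with the switch to kref_put_spinlock_irqsave() in target_put_sess_cmd(): the final reference is dropped with sess_cmd_lock already held, so target_release_cmd_kref() no longer takes the lock itself and there is no window in which a command sits on the session list with a zero refcount. A user-space sketch of the idiom follows, with hypothetical types standing in for struct kref and the session lock (the real kref is atomic; here the count is simply guarded by the lock):

#include <pthread.h>

struct ref_obj {
        int refcount;           /* protected by *list_lock in this sketch */
};

/* Drop a reference with the list lock taken first: if this turns out to be
 * the last reference, the release callback runs while the lock is still held
 * and can unlink the object before any lookup can find it again. */
static void ref_put_locked(struct ref_obj *o, pthread_mutex_t *list_lock,
                           void (*release)(struct ref_obj *))
{
        pthread_mutex_lock(list_lock);
        if (--o->refcount > 0) {
                pthread_mutex_unlock(list_lock);
                return;
        }
        release(o);             /* runs under list_lock; the callee unlocks it */
}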
- Enable this option will be make sysfs node in exynos thermal platform - device directory to support emulation mode. With emulation mode sysfs - node, you can manually input temperature to TMU for simulation purpose. - config DOVE_THERMAL tristate "Temperature sensor on Marvell Dove SoCs" depends on ARCH_DOVE @@ -144,6 +140,14 @@ config DB8500_THERMAL created. Cooling devices can be bound to the trip points to cool this thermal zone if trip points reached. +config ARMADA_THERMAL + tristate "Armada 370/XP thermal management" + depends on ARCH_MVEBU + depends on OF + help + Enable this option if you want to have support for thermal management + controller present in Armada 370 and Armada XP SoC. + config DB8500_CPUFREQ_COOLING tristate "DB8500 cpufreq cooling" depends on ARCH_U8500 diff --git a/trunk/drivers/thermal/Makefile b/trunk/drivers/thermal/Makefile index d3a2b38c31e8..c054d410ac3f 100644 --- a/trunk/drivers/thermal/Makefile +++ b/trunk/drivers/thermal/Makefile @@ -3,14 +3,15 @@ # obj-$(CONFIG_THERMAL) += thermal_sys.o +thermal_sys-y += thermal_core.o # governors -obj-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o -obj-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o -obj-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o +thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o +thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o +thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o # cpufreq cooling -obj-$(CONFIG_CPU_THERMAL) += cpu_cooling.o +thermal_sys-$(CONFIG_CPU_THERMAL) += cpu_cooling.o # platform thermal drivers obj-$(CONFIG_SPEAR_THERMAL) += spear_thermal.o @@ -19,6 +20,7 @@ obj-$(CONFIG_KIRKWOOD_THERMAL) += kirkwood_thermal.o obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o obj-$(CONFIG_DOVE_THERMAL) += dove_thermal.o obj-$(CONFIG_DB8500_THERMAL) += db8500_thermal.o +obj-$(CONFIG_ARMADA_THERMAL) += armada_thermal.o obj-$(CONFIG_DB8500_CPUFREQ_COOLING) += db8500_cpufreq_cooling.o obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o diff --git a/trunk/drivers/thermal/armada_thermal.c b/trunk/drivers/thermal/armada_thermal.c new file mode 100644 index 000000000000..54ffd64ca3f7 --- /dev/null +++ b/trunk/drivers/thermal/armada_thermal.c @@ -0,0 +1,222 @@ +/* + * Marvell Armada 370/XP thermal sensor driver + * + * Copyright (C) 2013 Marvell + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define THERMAL_VALID_OFFSET 9 +#define THERMAL_VALID_MASK 0x1 +#define THERMAL_TEMP_OFFSET 10 +#define THERMAL_TEMP_MASK 0x1ff + +/* Thermal Manager Control and Status Register */ +#define PMU_TDC0_SW_RST_MASK (0x1 << 1) +#define PMU_TM_DISABLE_OFFS 0 +#define PMU_TM_DISABLE_MASK (0x1 << PMU_TM_DISABLE_OFFS) +#define PMU_TDC0_REF_CAL_CNT_OFFS 11 +#define PMU_TDC0_REF_CAL_CNT_MASK (0x1ff << PMU_TDC0_REF_CAL_CNT_OFFS) +#define PMU_TDC0_OTF_CAL_MASK (0x1 << 30) +#define PMU_TDC0_START_CAL_MASK (0x1 << 25) + +struct armada_thermal_ops; + +/* Marvell EBU Thermal Sensor Dev Structure */ +struct armada_thermal_priv { + void __iomem *sensor; + void __iomem *control; + struct armada_thermal_ops *ops; +}; + +struct armada_thermal_ops { + /* Initialize the sensor */ + void (*init_sensor)(struct armada_thermal_priv *); + + /* Test for a valid sensor value (optional) */ + bool (*is_valid)(struct armada_thermal_priv *); +}; + +static void armadaxp_init_sensor(struct armada_thermal_priv *priv) +{ + unsigned long reg; + + reg = readl_relaxed(priv->control); + reg |= PMU_TDC0_OTF_CAL_MASK; + writel(reg, priv->control); + + /* Reference calibration value */ + reg &= ~PMU_TDC0_REF_CAL_CNT_MASK; + reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS); + writel(reg, priv->control); + + /* Reset the sensor */ + reg = readl_relaxed(priv->control); + writel((reg | PMU_TDC0_SW_RST_MASK), priv->control); + + writel(reg, priv->control); + + /* Enable the sensor */ + reg = readl_relaxed(priv->sensor); + reg &= ~PMU_TM_DISABLE_MASK; + writel(reg, priv->sensor); +} + +static void armada370_init_sensor(struct armada_thermal_priv *priv) +{ + unsigned long reg; + + reg = readl_relaxed(priv->control); + reg |= PMU_TDC0_OTF_CAL_MASK; + writel(reg, priv->control); + + /* Reference calibration value */ + reg &= ~PMU_TDC0_REF_CAL_CNT_MASK; + reg |= (0xf1 << PMU_TDC0_REF_CAL_CNT_OFFS); + writel(reg, priv->control); + + reg &= ~PMU_TDC0_START_CAL_MASK; + writel(reg, priv->control); + + mdelay(10); +} + +static bool armada_is_valid(struct armada_thermal_priv *priv) +{ + unsigned long reg = readl_relaxed(priv->sensor); + + return (reg >> THERMAL_VALID_OFFSET) & THERMAL_VALID_MASK; +} + +static int armada_get_temp(struct thermal_zone_device *thermal, + unsigned long *temp) +{ + struct armada_thermal_priv *priv = thermal->devdata; + unsigned long reg; + + /* Valid check */ + if (priv->ops->is_valid && !priv->ops->is_valid(priv)) { + dev_err(&thermal->device, + "Temperature sensor reading not valid\n"); + return -EIO; + } + + reg = readl_relaxed(priv->sensor); + reg = (reg >> THERMAL_TEMP_OFFSET) & THERMAL_TEMP_MASK; + *temp = (3153000000UL - (10000000UL*reg)) / 13825; + return 0; +} + +static struct thermal_zone_device_ops ops = { + .get_temp = armada_get_temp, +}; + +static const struct armada_thermal_ops armadaxp_ops = { + .init_sensor = armadaxp_init_sensor, +}; + +static const struct armada_thermal_ops armada370_ops = { + .is_valid = armada_is_valid, + .init_sensor = armada370_init_sensor, +}; + +static const struct of_device_id armada_thermal_id_table[] = { + { + .compatible = "marvell,armadaxp-thermal", + .data = &armadaxp_ops, + }, + { + .compatible = "marvell,armada370-thermal", + .data = &armada370_ops, + }, + { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, armada_thermal_id_table); + +static int armada_thermal_probe(struct platform_device *pdev) +{ + struct thermal_zone_device *thermal; + const struct of_device_id 
*match; + struct armada_thermal_priv *priv; + struct resource *res; + + match = of_match_device(armada_thermal_id_table, &pdev->dev); + if (!match) + return -ENODEV; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->sensor = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->sensor)) + return PTR_ERR(priv->sensor); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->control = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->control)) + return PTR_ERR(priv->control); + + priv->ops = (struct armada_thermal_ops *)match->data; + priv->ops->init_sensor(priv); + + thermal = thermal_zone_device_register("armada_thermal", 0, 0, + priv, &ops, NULL, 0, 0); + if (IS_ERR(thermal)) { + dev_err(&pdev->dev, + "Failed to register thermal zone device\n"); + return PTR_ERR(thermal); + } + + platform_set_drvdata(pdev, thermal); + + return 0; +} + +static int armada_thermal_exit(struct platform_device *pdev) +{ + struct thermal_zone_device *armada_thermal = + platform_get_drvdata(pdev); + + thermal_zone_device_unregister(armada_thermal); + platform_set_drvdata(pdev, NULL); + + return 0; +} + +static struct platform_driver armada_thermal_driver = { + .probe = armada_thermal_probe, + .remove = armada_thermal_exit, + .driver = { + .name = "armada_thermal", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(armada_thermal_id_table), + }, +}; + +module_platform_driver(armada_thermal_driver); + +MODULE_AUTHOR("Ezequiel Garcia "); +MODULE_DESCRIPTION("Armada 370/XP thermal driver"); +MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/thermal/cpu_cooling.c b/trunk/drivers/thermal/cpu_cooling.c index 8dc44cbb3e09..c94bf2e5de62 100644 --- a/trunk/drivers/thermal/cpu_cooling.c +++ b/trunk/drivers/thermal/cpu_cooling.c @@ -20,10 +20,8 @@ * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#include #include #include -#include #include #include #include @@ -31,21 +29,19 @@ #include /** - * struct cpufreq_cooling_device + * struct cpufreq_cooling_device - data for cooling device with cpufreq * @id: unique integer value corresponding to each cpufreq_cooling_device * registered. - * @cool_dev: thermal_cooling_device pointer to keep track of the the - * egistered cooling device. + * @cool_dev: thermal_cooling_device pointer to keep track of the + * registered cooling device. * @cpufreq_state: integer value representing the current state of cpufreq * cooling devices. * @cpufreq_val: integer value representing the absolute value of the clipped * frequency. * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. - * @node: list_head to link all cpufreq_cooling_device together. * * This structure is required for keeping information of each - * cpufreq_cooling_device registered as a list whose head is represented by - * cooling_cpufreq_list. In order to prevent corruption of this list a + * cpufreq_cooling_device registered. In order to prevent corruption of this a * mutex lock cooling_cpufreq_lock is used. */ struct cpufreq_cooling_device { @@ -54,9 +50,7 @@ struct cpufreq_cooling_device { unsigned int cpufreq_state; unsigned int cpufreq_val; struct cpumask allowed_cpus; - struct list_head node; }; -static LIST_HEAD(cooling_cpufreq_list); static DEFINE_IDR(cpufreq_idr); static DEFINE_MUTEX(cooling_cpufreq_lock); @@ -70,6 +64,11 @@ static struct cpufreq_cooling_device *notify_device; * get_idr - function to get a unique id. 
* @idr: struct idr * handle used to create a id. * @id: int * value generated by this function. + * + * This function will populate @id with an unique + * id, using the idr API. + * + * Return: 0 on success, an error code on failure. */ static int get_idr(struct idr *idr, int *id) { @@ -81,6 +80,7 @@ static int get_idr(struct idr *idr, int *id) if (unlikely(ret < 0)) return ret; *id = ret; + return 0; } @@ -99,63 +99,162 @@ static void release_idr(struct idr *idr, int id) /* Below code defines functions to be used for cpufreq as cooling device */ /** - * is_cpufreq_valid - function to check if a cpu has frequency transition policy. + * is_cpufreq_valid - function to check frequency transitioning capability. * @cpu: cpu for which check is needed. + * + * This function will check the current state of the system if + * it is capable of changing the frequency for a given @cpu. + * + * Return: 0 if the system is not currently capable of changing + * the frequency of given cpu. !0 in case the frequency is changeable. */ static int is_cpufreq_valid(int cpu) { struct cpufreq_policy policy; + return !cpufreq_get_policy(&policy, cpu); } +enum cpufreq_cooling_property { + GET_LEVEL, + GET_FREQ, + GET_MAXL, +}; + /** - * get_cpu_frequency - get the absolute value of frequency from level. - * @cpu: cpu for which frequency is fetched. - * @level: level of frequency, equals cooling state of cpu cooling device - * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc + * get_property - fetch a property of interest for a give cpu. + * @cpu: cpu for which the property is required + * @input: query parameter + * @output: query return + * @property: type of query (frequency, level, max level) + * + * This is the common function to + * 1. get maximum cpu cooling states + * 2. translate frequency to cooling state + * 3. translate cooling state to frequency + * Note that the code may be not in good shape + * but it is written in this way in order to: + * a) reduce duplicate code as most of the code can be shared. + * b) make sure the logic is consistent when translating between + * cooling states and frequencies. + * + * Return: 0 on success, -EINVAL when invalid parameters are passed. 
*/ -static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) +static int get_property(unsigned int cpu, unsigned long input, + unsigned int *output, + enum cpufreq_cooling_property property) { - int ret = 0, i = 0; - unsigned long level_index; - bool descend = false; + int i, j; + unsigned long max_level = 0, level = 0; + unsigned int freq = CPUFREQ_ENTRY_INVALID; + int descend = -1; struct cpufreq_frequency_table *table = cpufreq_frequency_get_table(cpu); + + if (!output) + return -EINVAL; + if (!table) - return ret; + return -EINVAL; - while (table[i].frequency != CPUFREQ_TABLE_END) { + for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + /* ignore invalid entries */ if (table[i].frequency == CPUFREQ_ENTRY_INVALID) continue; - /*check if table in ascending or descending order*/ - if ((table[i + 1].frequency != CPUFREQ_TABLE_END) && - (table[i + 1].frequency < table[i].frequency) - && !descend) { - descend = true; - } + /* ignore duplicate entry */ + if (freq == table[i].frequency) + continue; + + /* get the frequency order */ + if (freq != CPUFREQ_ENTRY_INVALID && descend != -1) + descend = !!(freq > table[i].frequency); - /*return if level matched and table in descending order*/ - if (descend && i == level) - return table[i].frequency; - i++; + freq = table[i].frequency; + max_level++; } - i--; - if (level > i || descend) - return ret; - level_index = i - level; + /* get max level */ + if (property == GET_MAXL) { + *output = (unsigned int)max_level; + return 0; + } - /*Scan the table in reverse order and match the level*/ - while (i >= 0) { + if (property == GET_FREQ) + level = descend ? input : (max_level - input - 1); + + for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { + /* ignore invalid entry */ if (table[i].frequency == CPUFREQ_ENTRY_INVALID) continue; - /*return if level matched*/ - if (i == level_index) - return table[i].frequency; - i--; + + /* ignore duplicate entry */ + if (freq == table[i].frequency) + continue; + + /* now we have a valid frequency entry */ + freq = table[i].frequency; + + if (property == GET_LEVEL && (unsigned int)input == freq) { + /* get level by frequency */ + *output = descend ? j : (max_level - j - 1); + return 0; + } + if (property == GET_FREQ && level == j) { + /* get frequency by level */ + *output = freq; + return 0; + } + j++; } - return ret; + + return -EINVAL; +} + +/** + * cpufreq_cooling_get_level - for a give cpu, return the cooling level. + * @cpu: cpu for which the level is required + * @freq: the frequency of interest + * + * This function will match the cooling level corresponding to the + * requested @freq and return it. + * + * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID + * otherwise. + */ +unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) +{ + unsigned int val; + + if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL)) + return THERMAL_CSTATE_INVALID; + + return (unsigned long)val; +} +EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level); + +/** + * get_cpu_frequency - get the absolute value of frequency from level. + * @cpu: cpu for which frequency is fetched. + * @level: cooling level + * + * This function matches cooling level with frequency. Based on a cooling level + * of frequency, equals cooling state of cpu cooling device, it will return + * the corresponding frequency. + * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc + * + * Return: 0 on error, the corresponding frequency otherwise. 
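get_property() above folds the old per-purpose table walks into one routine that answers three queries (maximum level, level for a given frequency, frequency for a given level), with level 0 always meaning the highest valid frequency regardless of the table's sort order. A toy standalone illustration of that mapping, assuming a made-up descending four-entry table:

#include <stdio.h>

static const unsigned int freqs[] = { 1800000, 1500000, 1200000, 900000 };
#define NFREQS (sizeof(freqs) / sizeof(freqs[0]))

static unsigned int level_to_freq(unsigned long level)
{
        return level < NFREQS ? freqs[level] : 0;       /* table is descending */
}

static long freq_to_level(unsigned int freq)
{
        for (unsigned long i = 0; i < NFREQS; i++)
                if (freqs[i] == freq)
                        return i;
        return -1;                                      /* no such frequency */
}

int main(void)
{
        printf("max level: %zu\n", NFREQS);                     /* GET_MAXL  */
        printf("level 1  : %u kHz\n", level_to_freq(1));        /* GET_FREQ  */
        printf("1.2 GHz  : level %ld\n", freq_to_level(1200000)); /* GET_LEVEL */
        return 0;
}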
+ */ +static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) +{ + int ret = 0; + unsigned int freq; + + ret = get_property(cpu, level, &freq, GET_FREQ); + if (ret) + return 0; + + return freq; } /** @@ -163,13 +262,19 @@ static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level) * @cpufreq_device: cpufreq_cooling_device pointer containing frequency * clipping data. * @cooling_state: value of the cooling state. + * + * Function used to make sure the cpufreq layer is aware of current thermal + * limits. The limits are applied by updating the cpufreq policy. + * + * Return: 0 on success, an error code otherwise (-EINVAL in case wrong + * cooling state). */ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, - unsigned long cooling_state) + unsigned long cooling_state) { unsigned int cpuid, clip_freq; - struct cpumask *maskPtr = &cpufreq_device->allowed_cpus; - unsigned int cpu = cpumask_any(maskPtr); + struct cpumask *mask = &cpufreq_device->allowed_cpus; + unsigned int cpu = cpumask_any(mask); /* Check if the old cooling action is same as new cooling action */ @@ -184,7 +289,7 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, cpufreq_device->cpufreq_val = clip_freq; notify_device = cpufreq_device; - for_each_cpu(cpuid, maskPtr) { + for_each_cpu(cpuid, mask) { if (is_cpufreq_valid(cpuid)) cpufreq_update_policy(cpuid); } @@ -199,9 +304,15 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device, * @nb: struct notifier_block * with callback info. * @event: value showing cpufreq event for which this function invoked. * @data: callback-specific data + * + * Callback to highjack the notification on cpufreq policy transition. + * Every time there is a change in policy, we will intercept and + * update the cpufreq policy with thermal constraints. + * + * Return: 0 (success) */ static int cpufreq_thermal_notifier(struct notifier_block *nb, - unsigned long event, void *data) + unsigned long event, void *data) { struct cpufreq_policy *policy = data; unsigned long max_freq = 0; @@ -212,7 +323,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, if (cpumask_test_cpu(policy->cpu, ¬ify_device->allowed_cpus)) max_freq = notify_device->cpufreq_val; - /* Never exceed user_policy.max*/ + /* Never exceed user_policy.max */ if (max_freq > policy->user_policy.max) max_freq = policy->user_policy.max; @@ -222,50 +333,46 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, return 0; } -/* - * cpufreq cooling device callback functions are defined below - */ +/* cpufreq cooling device callback functions are defined below */ /** * cpufreq_get_max_state - callback function to get the max cooling state. * @cdev: thermal cooling device pointer. * @state: fill this variable with the max cooling state. + * + * Callback for the thermal cooling device to return the cpufreq + * max cooling state. + * + * Return: 0 on success, an error code otherwise. 
*/ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; - struct cpumask *maskPtr = &cpufreq_device->allowed_cpus; + struct cpumask *mask = &cpufreq_device->allowed_cpus; unsigned int cpu; - struct cpufreq_frequency_table *table; - unsigned long count = 0; - int i = 0; - - cpu = cpumask_any(maskPtr); - table = cpufreq_frequency_get_table(cpu); - if (!table) { - *state = 0; - return 0; - } + unsigned int count = 0; + int ret; - for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) - continue; - count++; - } + cpu = cpumask_any(mask); - if (count > 0) { - *state = --count; - return 0; - } + ret = get_property(cpu, 0, &count, GET_MAXL); - return -EINVAL; + if (count > 0) + *state = count; + + return ret; } /** * cpufreq_get_cur_state - callback function to get the current cooling state. * @cdev: thermal cooling device pointer. * @state: fill this variable with the current cooling state. + * + * Callback for the thermal cooling device to return the cpufreq + * current cooling state. + * + * Return: 0 on success, an error code otherwise. */ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) @@ -273,6 +380,7 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; *state = cpufreq_device->cpufreq_state; + return 0; } @@ -280,6 +388,11 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, * cpufreq_set_cur_state - callback function to set the current cooling state. * @cdev: thermal cooling device pointer. * @state: set this variable to the current cooling state. + * + * Callback for the thermal cooling device to change the cpufreq + * current cooling state. + * + * Return: 0 on success, an error code otherwise. */ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) @@ -304,9 +417,16 @@ static struct notifier_block thermal_cpufreq_notifier_block = { /** * cpufreq_cooling_register - function to create cpufreq cooling device. * @clip_cpus: cpumask of cpus where the frequency constraints will happen. + * + * This interface function registers the cpufreq cooling device with the name + * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq + * cooling devices. + * + * Return: a valid struct thermal_cooling_device pointer on success, + * on failure, it returns a corresponding ERR_PTR(). 
*/ -struct thermal_cooling_device *cpufreq_cooling_register( - const struct cpumask *clip_cpus) +struct thermal_cooling_device * +cpufreq_cooling_register(const struct cpumask *clip_cpus) { struct thermal_cooling_device *cool_dev; struct cpufreq_cooling_device *cpufreq_dev = NULL; @@ -315,9 +435,9 @@ struct thermal_cooling_device *cpufreq_cooling_register( int ret = 0, i; struct cpufreq_policy policy; - /*Verify that all the clip cpus have same freq_min, freq_max limit*/ + /* Verify that all the clip cpus have same freq_min, freq_max limit */ for_each_cpu(i, clip_cpus) { - /*continue if cpufreq policy not found and not return error*/ + /* continue if cpufreq policy not found and not return error */ if (!cpufreq_get_policy(&policy, i)) continue; if (min == 0 && max == 0) { @@ -325,12 +445,12 @@ struct thermal_cooling_device *cpufreq_cooling_register( max = policy.cpuinfo.max_freq; } else { if (min != policy.cpuinfo.min_freq || - max != policy.cpuinfo.max_freq) + max != policy.cpuinfo.max_freq) return ERR_PTR(-EINVAL); } } cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device), - GFP_KERNEL); + GFP_KERNEL); if (!cpufreq_dev) return ERR_PTR(-ENOMEM); @@ -342,10 +462,11 @@ struct thermal_cooling_device *cpufreq_cooling_register( return ERR_PTR(-EINVAL); } - sprintf(dev_name, "thermal-cpufreq-%d", cpufreq_dev->id); + snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", + cpufreq_dev->id); cool_dev = thermal_cooling_device_register(dev_name, cpufreq_dev, - &cpufreq_cooling_ops); + &cpufreq_cooling_ops); if (!cool_dev) { release_idr(&cpufreq_idr, cpufreq_dev->id); kfree(cpufreq_dev); @@ -358,17 +479,20 @@ struct thermal_cooling_device *cpufreq_cooling_register( /* Register the notifier for first cpufreq cooling device */ if (cpufreq_dev_count == 0) cpufreq_register_notifier(&thermal_cpufreq_notifier_block, - CPUFREQ_POLICY_NOTIFIER); + CPUFREQ_POLICY_NOTIFIER); cpufreq_dev_count++; mutex_unlock(&cooling_cpufreq_lock); + return cool_dev; } -EXPORT_SYMBOL(cpufreq_cooling_register); +EXPORT_SYMBOL_GPL(cpufreq_cooling_register); /** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. * @cdev: thermal cooling device pointer. + * + * This interface function unregisters the "thermal-cpufreq-%x" cooling device. 
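cpufreq_cooling_register() above installs the cpufreq policy notifier only when the first cooling device is created, and cpufreq_cooling_unregister() in the next hunk removes it when the last one disappears, both under cooling_cpufreq_lock. The shape of that first-user/last-user pattern in a small user-space sketch (names are invented; the hooks stand in for cpufreq_register_notifier()/cpufreq_unregister_notifier()):

#include <pthread.h>

static pthread_mutex_t hook_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int hook_users;         /* counts registered cooling devices */

/* Assumed hooks; declarations only, the real ones live elsewhere. */
void install_policy_hook(void);
void remove_policy_hook(void);

static void hook_get(void)
{
        pthread_mutex_lock(&hook_lock);
        if (hook_users++ == 0)
                install_policy_hook();  /* first user installs the hook */
        pthread_mutex_unlock(&hook_lock);
}

static void hook_put(void)
{
        pthread_mutex_lock(&hook_lock);
        if (--hook_users == 0)
                remove_policy_hook();   /* last user removes it */
        pthread_mutex_unlock(&hook_lock);
}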
*/ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { @@ -378,14 +502,13 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) cpufreq_dev_count--; /* Unregister the notifier for the last cpufreq cooling device */ - if (cpufreq_dev_count == 0) { + if (cpufreq_dev_count == 0) cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, - CPUFREQ_POLICY_NOTIFIER); - } + CPUFREQ_POLICY_NOTIFIER); mutex_unlock(&cooling_cpufreq_lock); thermal_cooling_device_unregister(cpufreq_dev->cool_dev); release_idr(&cpufreq_idr, cpufreq_dev->id); kfree(cpufreq_dev); } -EXPORT_SYMBOL(cpufreq_cooling_unregister); +EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister); diff --git a/trunk/drivers/thermal/db8500_cpufreq_cooling.c b/trunk/drivers/thermal/db8500_cpufreq_cooling.c index 21419851fc02..786d19263ab0 100644 --- a/trunk/drivers/thermal/db8500_cpufreq_cooling.c +++ b/trunk/drivers/thermal/db8500_cpufreq_cooling.c @@ -37,7 +37,7 @@ static int db8500_cpufreq_cooling_probe(struct platform_device *pdev) cpumask_set_cpu(0, &mask_val); cdev = cpufreq_cooling_register(&mask_val); - if (IS_ERR_OR_NULL(cdev)) { + if (IS_ERR(cdev)) { dev_err(&pdev->dev, "Failed to register cooling device\n"); return PTR_ERR(cdev); } diff --git a/trunk/drivers/thermal/db8500_thermal.c b/trunk/drivers/thermal/db8500_thermal.c index 61ce60a35921..1e3b3bf9f993 100644 --- a/trunk/drivers/thermal/db8500_thermal.c +++ b/trunk/drivers/thermal/db8500_thermal.c @@ -419,7 +419,8 @@ static int db8500_thermal_probe(struct platform_device *pdev) low_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_LOW"); if (low_irq < 0) { dev_err(&pdev->dev, "Get IRQ_HOTMON_LOW failed.\n"); - return low_irq; + ret = low_irq; + goto out_unlock; } ret = devm_request_threaded_irq(&pdev->dev, low_irq, NULL, @@ -427,13 +428,14 @@ static int db8500_thermal_probe(struct platform_device *pdev) "dbx500_temp_low", pzone); if (ret < 0) { dev_err(&pdev->dev, "Failed to allocate temp low irq.\n"); - return ret; + goto out_unlock; } high_irq = platform_get_irq_byname(pdev, "IRQ_HOTMON_HIGH"); if (high_irq < 0) { dev_err(&pdev->dev, "Get IRQ_HOTMON_HIGH failed.\n"); - return high_irq; + ret = high_irq; + goto out_unlock; } ret = devm_request_threaded_irq(&pdev->dev, high_irq, NULL, @@ -441,15 +443,16 @@ static int db8500_thermal_probe(struct platform_device *pdev) "dbx500_temp_high", pzone); if (ret < 0) { dev_err(&pdev->dev, "Failed to allocate temp high irq.\n"); - return ret; + goto out_unlock; } pzone->therm_dev = thermal_zone_device_register("db8500_thermal_zone", ptrips->num_trips, 0, pzone, &thdev_ops, NULL, 0, 0); - if (IS_ERR_OR_NULL(pzone->therm_dev)) { + if (IS_ERR(pzone->therm_dev)) { dev_err(&pdev->dev, "Register thermal zone device failed.\n"); - return PTR_ERR(pzone->therm_dev); + ret = PTR_ERR(pzone->therm_dev); + goto out_unlock; } dev_info(&pdev->dev, "Thermal zone device registered.\n"); @@ -461,9 +464,11 @@ static int db8500_thermal_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pzone); pzone->mode = THERMAL_DEVICE_ENABLED; + +out_unlock: mutex_unlock(&pzone->th_lock); - return 0; + return ret; } static int db8500_thermal_remove(struct platform_device *pdev) diff --git a/trunk/drivers/thermal/dove_thermal.c b/trunk/drivers/thermal/dove_thermal.c index 3078c403b42d..a088d1365ca5 100644 --- a/trunk/drivers/thermal/dove_thermal.c +++ b/trunk/drivers/thermal/dove_thermal.c @@ -107,12 +107,13 @@ static int dove_get_temp(struct thermal_zone_device *thermal, } /* - * Calculate temperature. 
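The db8500_thermal_probe() hunks above stop returning directly from error paths that run with pzone->th_lock held; every failure now funnels through a single out_unlock label so the mutex is always released. A compact user-space sketch of that shape, with hypothetical helpers standing in for the IRQ and thermal-zone registration steps:

#include <pthread.h>
#include <stdbool.h>

struct zone_state {
        pthread_mutex_t lock;
        bool ready;
};

/* Assumed helpers, each returning a negative value on failure. */
int claim_irqs(struct zone_state *z);
int register_zone(struct zone_state *z);

int probe_sketch(struct zone_state *z)
{
        int ret;

        pthread_mutex_lock(&z->lock);

        ret = claim_irqs(z);
        if (ret < 0)
                goto out_unlock;        /* was: return ret; */

        ret = register_zone(z);
        if (ret < 0)
                goto out_unlock;

        z->ready = true;
        ret = 0;
out_unlock:
        pthread_mutex_unlock(&z->lock);
        return ret;
}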
See Section 8.10.1 of 88AP510, - * Documentation/arm/Marvell/README + * Calculate temperature. According to Marvell internal + * documentation the formula for this is: + * Celsius = (322-reg)/1.3625 */ reg = readl_relaxed(priv->sensor); reg = (reg >> DOVE_THERMAL_TEMP_OFFSET) & DOVE_THERMAL_TEMP_MASK; - *temp = ((2281638UL - (7298*reg)) / 10); + *temp = ((3220000000UL - (10000000UL * reg)) / 13625); return 0; } @@ -148,10 +149,6 @@ static int dove_thermal_probe(struct platform_device *pdev) return PTR_ERR(priv->sensor); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(&pdev->dev, "Failed to get platform resource\n"); - return -ENODEV; - } priv->control = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->control)) return PTR_ERR(priv->control); diff --git a/trunk/drivers/thermal/exynos_thermal.c b/trunk/drivers/thermal/exynos_thermal.c index b777ae6f0a8f..788b1ddcac6c 100644 --- a/trunk/drivers/thermal/exynos_thermal.c +++ b/trunk/drivers/thermal/exynos_thermal.c @@ -98,13 +98,13 @@ #define IDLE_INTERVAL 10000 #define MCELSIUS 1000 -#ifdef CONFIG_EXYNOS_THERMAL_EMUL +#ifdef CONFIG_THERMAL_EMULATION #define EXYNOS_EMUL_TIME 0x57F0 #define EXYNOS_EMUL_TIME_SHIFT 16 #define EXYNOS_EMUL_DATA_SHIFT 8 #define EXYNOS_EMUL_DATA_MASK 0xFF #define EXYNOS_EMUL_ENABLE 0x1 -#endif /* CONFIG_EXYNOS_THERMAL_EMUL */ +#endif /* CONFIG_THERMAL_EMULATION */ /* CPU Zone information */ #define PANIC_ZONE 4 @@ -143,6 +143,7 @@ struct thermal_cooling_conf { struct thermal_sensor_conf { char name[SENSOR_NAME_LEN]; int (*read_temperature)(void *data); + int (*write_emul_temp)(void *drv_data, unsigned long temp); struct thermal_trip_point_conf trip_data; struct thermal_cooling_conf cooling_data; void *private_data; @@ -240,26 +241,6 @@ static int exynos_get_crit_temp(struct thermal_zone_device *thermal, return ret; } -static int exynos_get_frequency_level(unsigned int cpu, unsigned int freq) -{ - int i = 0, ret = -EINVAL; - struct cpufreq_frequency_table *table = NULL; -#ifdef CONFIG_CPU_FREQ - table = cpufreq_frequency_get_table(cpu); -#endif - if (!table) - return ret; - - while (table[i].frequency != CPUFREQ_TABLE_END) { - if (table[i].frequency == CPUFREQ_ENTRY_INVALID) - continue; - if (table[i].frequency == freq) - return i; - i++; - } - return ret; -} - /* Bind callback functions for thermal zone */ static int exynos_bind(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev) @@ -286,8 +267,8 @@ static int exynos_bind(struct thermal_zone_device *thermal, /* Bind the thermal zone to the cpufreq cooling device */ for (i = 0; i < tab_size; i++) { clip_data = (struct freq_clip_table *)&(tab_ptr[i]); - level = exynos_get_frequency_level(0, clip_data->freq_clip_max); - if (level < 0) + level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max); + if (level == THERMAL_CSTATE_INVALID) return 0; switch (GET_ZONE(i)) { case MONITOR_ZONE: @@ -367,6 +348,23 @@ static int exynos_get_temp(struct thermal_zone_device *thermal, return 0; } +/* Get temperature callback functions for thermal zone */ +static int exynos_set_emul_temp(struct thermal_zone_device *thermal, + unsigned long temp) +{ + void *data; + int ret = -EINVAL; + + if (!th_zone->sensor_conf) { + pr_info("Temperature sensor not initialised\n"); + return -EINVAL; + } + data = th_zone->sensor_conf->private_data; + if (th_zone->sensor_conf->write_emul_temp) + ret = th_zone->sensor_conf->write_emul_temp(data, temp); + return ret; +} + /* Get the temperature trend */ static int exynos_get_trend(struct 
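The dove_thermal conversion above (and the matching kirkwood_thermal hunk further down) replaces the old constants with an integer form of Celsius = (322 - reg) / 1.3625: scaling numerator and denominator by 10^7 keeps the math in unsigned long and yields millidegrees directly. A quick standalone check of the arithmetic, assuming a raw sensor reading of 280:

#include <stdio.h>

/* mC = (3220000000 - 10000000 * reg) / 13625, i.e. (322 - reg) / 1.3625
 * expressed in millidegrees Celsius with integer-only operations. */
int main(void)
{
        unsigned long reg = 280;
        unsigned long mcelsius = (3220000000UL - 10000000UL * reg) / 13625;

        printf("%lu mC\n", mcelsius);   /* prints 30825, i.e. about 30.8 C */
        return 0;
}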
thermal_zone_device *thermal, int trip, enum thermal_trend *trend) @@ -390,6 +388,7 @@ static struct thermal_zone_device_ops const exynos_dev_ops = { .bind = exynos_bind, .unbind = exynos_unbind, .get_temp = exynos_get_temp, + .set_emul_temp = exynos_set_emul_temp, .get_trend = exynos_get_trend, .get_mode = exynos_get_mode, .set_mode = exynos_set_mode, @@ -712,6 +711,47 @@ static int exynos_tmu_read(struct exynos_tmu_data *data) return temp; } +#ifdef CONFIG_THERMAL_EMULATION +static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp) +{ + struct exynos_tmu_data *data = drv_data; + unsigned int reg; + int ret = -EINVAL; + + if (data->soc == SOC_ARCH_EXYNOS4210) + goto out; + + if (temp && temp < MCELSIUS) + goto out; + + mutex_lock(&data->lock); + clk_enable(data->clk); + + reg = readl(data->base + EXYNOS_EMUL_CON); + + if (temp) { + temp /= MCELSIUS; + + reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) | + (temp_to_code(data, temp) + << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE; + } else { + reg &= ~EXYNOS_EMUL_ENABLE; + } + + writel(reg, data->base + EXYNOS_EMUL_CON); + + clk_disable(data->clk); + mutex_unlock(&data->lock); + return 0; +out: + return ret; +} +#else +static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp) + { return -EINVAL; } +#endif/*CONFIG_THERMAL_EMULATION*/ + static void exynos_tmu_work(struct work_struct *work) { struct exynos_tmu_data *data = container_of(work, @@ -745,6 +785,7 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id) static struct thermal_sensor_conf exynos_sensor_conf = { .name = "exynos-therm", .read_temperature = (int (*)(void *))exynos_tmu_read, + .write_emul_temp = exynos_tmu_set_emulation, }; #if defined(CONFIG_CPU_EXYNOS4210) @@ -813,6 +854,10 @@ static const struct of_device_id exynos_tmu_match[] = { .compatible = "samsung,exynos4210-tmu", .data = (void *)EXYNOS4210_TMU_DRV_DATA, }, + { + .compatible = "samsung,exynos4412-tmu", + .data = (void *)EXYNOS_TMU_DRV_DATA, + }, { .compatible = "samsung,exynos5250-tmu", .data = (void *)EXYNOS_TMU_DRV_DATA, @@ -851,93 +896,6 @@ static inline struct exynos_tmu_platform_data *exynos_get_driver_data( platform_get_device_id(pdev)->driver_data; } -#ifdef CONFIG_EXYNOS_THERMAL_EMUL -static ssize_t exynos_tmu_emulation_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); - struct exynos_tmu_data *data = platform_get_drvdata(pdev); - unsigned int reg; - u8 temp_code; - int temp = 0; - - if (data->soc == SOC_ARCH_EXYNOS4210) - goto out; - - mutex_lock(&data->lock); - clk_enable(data->clk); - reg = readl(data->base + EXYNOS_EMUL_CON); - clk_disable(data->clk); - mutex_unlock(&data->lock); - - if (reg & EXYNOS_EMUL_ENABLE) { - reg >>= EXYNOS_EMUL_DATA_SHIFT; - temp_code = reg & EXYNOS_EMUL_DATA_MASK; - temp = code_to_temp(data, temp_code); - } -out: - return sprintf(buf, "%d\n", temp * MCELSIUS); -} - -static ssize_t exynos_tmu_emulation_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - struct platform_device *pdev = container_of(dev, - struct platform_device, dev); - struct exynos_tmu_data *data = platform_get_drvdata(pdev); - unsigned int reg; - int temp; - - if (data->soc == SOC_ARCH_EXYNOS4210) - goto out; - - if (!sscanf(buf, "%d\n", &temp) || temp < 0) - return -EINVAL; - - mutex_lock(&data->lock); - clk_enable(data->clk); - - reg = readl(data->base + EXYNOS_EMUL_CON); - - if (temp) { - /* Both CELSIUS and MCELSIUS type are 
available for input */ - if (temp > MCELSIUS) - temp /= MCELSIUS; - - reg = (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT) | - (temp_to_code(data, (temp / MCELSIUS)) - << EXYNOS_EMUL_DATA_SHIFT) | EXYNOS_EMUL_ENABLE; - } else { - reg &= ~EXYNOS_EMUL_ENABLE; - } - - writel(reg, data->base + EXYNOS_EMUL_CON); - - clk_disable(data->clk); - mutex_unlock(&data->lock); - -out: - return count; -} - -static DEVICE_ATTR(emulation, 0644, exynos_tmu_emulation_show, - exynos_tmu_emulation_store); -static int create_emulation_sysfs(struct device *dev) -{ - return device_create_file(dev, &dev_attr_emulation); -} -static void remove_emulation_sysfs(struct device *dev) -{ - device_remove_file(dev, &dev_attr_emulation); -} -#else -static inline int create_emulation_sysfs(struct device *dev) { return 0; } -static inline void remove_emulation_sysfs(struct device *dev) {} -#endif - static int exynos_tmu_probe(struct platform_device *pdev) { struct exynos_tmu_data *data; @@ -967,11 +925,6 @@ static int exynos_tmu_probe(struct platform_device *pdev) INIT_WORK(&data->irq_work, exynos_tmu_work); data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!data->mem) { - dev_err(&pdev->dev, "Failed to get platform resource\n"); - return -ENOENT; - } - data->base = devm_ioremap_resource(&pdev->dev, data->mem); if (IS_ERR(data->base)) return PTR_ERR(data->base); @@ -983,12 +936,16 @@ static int exynos_tmu_probe(struct platform_device *pdev) return ret; } - data->clk = clk_get(NULL, "tmu_apbif"); + data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); if (IS_ERR(data->clk)) { dev_err(&pdev->dev, "Failed to get clock\n"); return PTR_ERR(data->clk); } + ret = clk_prepare(data->clk); + if (ret) + return ret; + if (pdata->type == SOC_ARCH_EXYNOS || pdata->type == SOC_ARCH_EXYNOS4210) data->soc = pdata->type; @@ -1037,14 +994,10 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_clk; } - ret = create_emulation_sysfs(&pdev->dev); - if (ret) - dev_err(&pdev->dev, "Failed to create emulation mode sysfs node\n"); - return 0; err_clk: platform_set_drvdata(pdev, NULL); - clk_put(data->clk); + clk_unprepare(data->clk); return ret; } @@ -1052,13 +1005,11 @@ static int exynos_tmu_remove(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - remove_emulation_sysfs(&pdev->dev); - exynos_tmu_control(pdev, false); exynos_unregister_thermal(); - clk_put(data->clk); + clk_unprepare(data->clk); platform_set_drvdata(pdev, NULL); diff --git a/trunk/drivers/thermal/fair_share.c b/trunk/drivers/thermal/fair_share.c index 792479f2b64b..944ba2f340c8 100644 --- a/trunk/drivers/thermal/fair_share.c +++ b/trunk/drivers/thermal/fair_share.c @@ -22,9 +22,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include #include #include "thermal_core.h" @@ -111,23 +108,15 @@ static int fair_share_throttle(struct thermal_zone_device *tz, int trip) static struct thermal_governor thermal_gov_fair_share = { .name = "fair_share", .throttle = fair_share_throttle, - .owner = THIS_MODULE, }; -static int __init thermal_gov_fair_share_init(void) +int thermal_gov_fair_share_register(void) { return thermal_register_governor(&thermal_gov_fair_share); } -static void __exit thermal_gov_fair_share_exit(void) +void thermal_gov_fair_share_unregister(void) { thermal_unregister_governor(&thermal_gov_fair_share); } -/* This should load after thermal framework */ -fs_initcall(thermal_gov_fair_share_init); 
-module_exit(thermal_gov_fair_share_exit); - -MODULE_AUTHOR("Durgadoss R"); -MODULE_DESCRIPTION("A simple weight based thermal throttling governor"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/thermal/kirkwood_thermal.c b/trunk/drivers/thermal/kirkwood_thermal.c index e5500edb5285..dfeceaffbc03 100644 --- a/trunk/drivers/thermal/kirkwood_thermal.c +++ b/trunk/drivers/thermal/kirkwood_thermal.c @@ -41,21 +41,21 @@ static int kirkwood_get_temp(struct thermal_zone_device *thermal, reg = readl_relaxed(priv->sensor); /* Valid check */ - if (!(reg >> KIRKWOOD_THERMAL_VALID_OFFSET) & - KIRKWOOD_THERMAL_VALID_MASK) { + if (!((reg >> KIRKWOOD_THERMAL_VALID_OFFSET) & + KIRKWOOD_THERMAL_VALID_MASK)) { dev_err(&thermal->device, "Temperature sensor reading not valid\n"); return -EIO; } /* - * Calculate temperature. See Section 8.10.1 of the 88AP510, - * datasheet, which has the same sensor. - * Documentation/arm/Marvell/README + * Calculate temperature. According to Marvell internal + * documentation the formula for this is: + * Celsius = (322-reg)/1.3625 */ reg = (reg >> KIRKWOOD_THERMAL_TEMP_OFFSET) & KIRKWOOD_THERMAL_TEMP_MASK; - *temp = ((2281638UL - (7298*reg)) / 10); + *temp = ((3220000000UL - (10000000UL * reg)) / 13625); return 0; } diff --git a/trunk/drivers/thermal/rcar_thermal.c b/trunk/drivers/thermal/rcar_thermal.c index 2cc5b6115e3e..8d7edd4c8228 100644 --- a/trunk/drivers/thermal/rcar_thermal.c +++ b/trunk/drivers/thermal/rcar_thermal.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -377,6 +378,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) spin_lock_init(&common->lock); common->dev = dev; + pm_runtime_enable(dev); + pm_runtime_get_sync(dev); + irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (irq) { int ret; @@ -419,12 +423,15 @@ static int rcar_thermal_probe(struct platform_device *pdev) priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) { dev_err(dev, "Could not allocate priv\n"); - return -ENOMEM; + ret = -ENOMEM; + goto error_unregister; } priv->base = devm_ioremap_resource(dev, res); - if (IS_ERR(priv->base)) - return PTR_ERR(priv->base); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto error_unregister; + } priv->common = common; priv->id = i; @@ -443,10 +450,10 @@ static int rcar_thermal_probe(struct platform_device *pdev) goto error_unregister; } - list_move_tail(&priv->list, &common->head); - if (rcar_has_irq_support(priv)) rcar_thermal_irq_enable(priv); + + list_move_tail(&priv->list, &common->head); } platform_set_drvdata(pdev, common); @@ -456,8 +463,14 @@ static int rcar_thermal_probe(struct platform_device *pdev) return 0; error_unregister: - rcar_thermal_for_each_priv(priv, common) + rcar_thermal_for_each_priv(priv, common) { thermal_zone_device_unregister(priv->zone); + if (rcar_has_irq_support(priv)) + rcar_thermal_irq_disable(priv); + } + + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return ret; } @@ -465,13 +478,20 @@ static int rcar_thermal_probe(struct platform_device *pdev) static int rcar_thermal_remove(struct platform_device *pdev) { struct rcar_thermal_common *common = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; struct rcar_thermal_priv *priv; - rcar_thermal_for_each_priv(priv, common) + rcar_thermal_for_each_priv(priv, common) { thermal_zone_device_unregister(priv->zone); + if (rcar_has_irq_support(priv)) + rcar_thermal_irq_disable(priv); + } platform_set_drvdata(pdev, NULL); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + 
return 0; } diff --git a/trunk/drivers/thermal/step_wise.c b/trunk/drivers/thermal/step_wise.c index 407cde3211c1..4d4ddae1a991 100644 --- a/trunk/drivers/thermal/step_wise.c +++ b/trunk/drivers/thermal/step_wise.c @@ -22,9 +22,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include #include #include "thermal_core.h" @@ -59,9 +56,12 @@ static unsigned long get_target_state(struct thermal_instance *instance, switch (trend) { case THERMAL_TREND_RAISING: - if (throttle) + if (throttle) { cur_state = cur_state < instance->upper ? (cur_state + 1) : instance->upper; + if (cur_state < instance->lower) + cur_state = instance->lower; + } break; case THERMAL_TREND_RAISE_FULL: if (throttle) @@ -71,8 +71,11 @@ static unsigned long get_target_state(struct thermal_instance *instance, if (cur_state == instance->lower) { if (!throttle) cur_state = -1; - } else + } else { cur_state -= 1; + if (cur_state > instance->upper) + cur_state = instance->upper; + } break; case THERMAL_TREND_DROP_FULL: if (cur_state == instance->lower) { @@ -180,23 +183,14 @@ static int step_wise_throttle(struct thermal_zone_device *tz, int trip) static struct thermal_governor thermal_gov_step_wise = { .name = "step_wise", .throttle = step_wise_throttle, - .owner = THIS_MODULE, }; -static int __init thermal_gov_step_wise_init(void) +int thermal_gov_step_wise_register(void) { return thermal_register_governor(&thermal_gov_step_wise); } -static void __exit thermal_gov_step_wise_exit(void) +void thermal_gov_step_wise_unregister(void) { thermal_unregister_governor(&thermal_gov_step_wise); } - -/* This should load after thermal framework */ -fs_initcall(thermal_gov_step_wise_init); -module_exit(thermal_gov_step_wise_exit); - -MODULE_AUTHOR("Durgadoss R"); -MODULE_DESCRIPTION("A step-by-step thermal throttling governor"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/thermal/thermal_sys.c b/trunk/drivers/thermal/thermal_core.c similarity index 89% rename from trunk/drivers/thermal/thermal_sys.c rename to trunk/drivers/thermal/thermal_core.c index 5b7863a03f98..d755440791b7 100644 --- a/trunk/drivers/thermal/thermal_sys.c +++ b/trunk/drivers/thermal/thermal_core.c @@ -40,7 +40,7 @@ MODULE_AUTHOR("Zhang Rui"); MODULE_DESCRIPTION("Generic thermal management sysfs support"); -MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); static DEFINE_IDR(thermal_tz_idr); static DEFINE_IDR(thermal_cdev_idr); @@ -99,7 +99,6 @@ int thermal_register_governor(struct thermal_governor *governor) return err; } -EXPORT_SYMBOL_GPL(thermal_register_governor); void thermal_unregister_governor(struct thermal_governor *governor) { @@ -127,7 +126,6 @@ void thermal_unregister_governor(struct thermal_governor *governor) mutex_unlock(&thermal_governor_lock); return; } -EXPORT_SYMBOL_GPL(thermal_unregister_governor); static int get_idr(struct idr *idr, struct mutex *lock, int *id) { @@ -371,16 +369,28 @@ static void handle_thermal_trip(struct thermal_zone_device *tz, int trip) monitor_thermal_zone(tz); } -static int thermal_zone_get_temp(struct thermal_zone_device *tz, - unsigned long *temp) +/** + * thermal_zone_get_temp() - returns its the temperature of thermal zone + * @tz: a valid pointer to a struct thermal_zone_device + * @temp: a valid pointer to where to store the resulting temperature. + * + * When a valid thermal zone reference is passed, it will fetch its + * temperature and fill @temp. 
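The step_wise hunks above clamp the stepped state into the instance's [lower, upper] window, so a rising trend never requests less cooling than the configured floor and a dropping trend never ends up above the ceiling. The same idea as a tiny standalone helper (illustrative only, not the governor's code):

static unsigned long clamp_state(unsigned long state,
                                 unsigned long lower, unsigned long upper)
{
        if (state < lower)
                return lower;
        if (state > upper)
                return upper;
        return state;
}

/* e.g. clamp_state(cur_state + 1, instance->lower, instance->upper) when the
 * temperature trend is rising and throttling is required. */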
+ * + * Return: On success returns 0, an error code otherwise + */ +int thermal_zone_get_temp(struct thermal_zone_device *tz, unsigned long *temp) { - int ret = 0; + int ret = -EINVAL; #ifdef CONFIG_THERMAL_EMULATION int count; unsigned long crit_temp = -1UL; enum thermal_trip_type type; #endif + if (!tz || IS_ERR(tz)) + goto exit; + mutex_lock(&tz->lock); ret = tz->ops->get_temp(tz, temp); @@ -404,8 +414,10 @@ static int thermal_zone_get_temp(struct thermal_zone_device *tz, skip_emul: #endif mutex_unlock(&tz->lock); +exit: return ret; } +EXPORT_SYMBOL_GPL(thermal_zone_get_temp); static void update_temperature(struct thermal_zone_device *tz) { @@ -434,7 +446,7 @@ void thermal_zone_device_update(struct thermal_zone_device *tz) for (count = 0; count < tz->trips; count++) handle_thermal_trip(tz, count); } -EXPORT_SYMBOL(thermal_zone_device_update); +EXPORT_SYMBOL_GPL(thermal_zone_device_update); static void thermal_zone_device_check(struct work_struct *work) { @@ -1097,13 +1109,23 @@ thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz) #endif /** - * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone - * @tz: thermal zone device + * thermal_zone_bind_cooling_device() - bind a cooling device to a thermal zone + * @tz: pointer to struct thermal_zone_device * @trip: indicates which trip point the cooling devices is * associated with in this thermal zone. - * @cdev: thermal cooling device + * @cdev: pointer to struct thermal_cooling_device + * @upper: the Maximum cooling state for this trip point. + * THERMAL_NO_LIMIT means no upper limit, + * and the cooling device can be in max_state. + * @lower: the Minimum cooling state can be used for this trip point. + * THERMAL_NO_LIMIT means no lower limit, + * and the cooling device can be in cooling state 0. * + * This interface function bind a thermal cooling device to the certain trip + * point of a thermal zone device. * This function is usually called in the thermal zone device .bind callback. + * + * Return: 0 on success, the proper error value otherwise. */ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, int trip, @@ -1197,16 +1219,21 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, kfree(dev); return result; } -EXPORT_SYMBOL(thermal_zone_bind_cooling_device); +EXPORT_SYMBOL_GPL(thermal_zone_bind_cooling_device); /** - * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone - * @tz: thermal zone device + * thermal_zone_unbind_cooling_device() - unbind a cooling device from a + * thermal zone. + * @tz: pointer to a struct thermal_zone_device. * @trip: indicates which trip point the cooling devices is * associated with in this thermal zone. - * @cdev: thermal cooling device + * @cdev: pointer to a struct thermal_cooling_device. * + * This interface function unbind a thermal cooling device from the certain + * trip point of a thermal zone device. * This function is usually called in the thermal zone device .unbind callback. + * + * Return: 0 on success, the proper error value otherwise. 
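The expanded kerneldoc above spells out the @upper and @lower bounds taken by thermal_zone_bind_cooling_device(). For reference, a short illustrative caller as it could appear in a zone's .bind callback in this kernel, passing THERMAL_NO_LIMIT for both bounds so the governor may use the cooling device's whole state range on trip 0 (tz and cdev are whatever the callback received):

#include <linux/thermal.h>

static int example_bind(struct thermal_zone_device *tz,
                        struct thermal_cooling_device *cdev)
{
        /* Trip 0, no upper or lower clamp on the cooling states. */
        return thermal_zone_bind_cooling_device(tz, 0, cdev,
                                                THERMAL_NO_LIMIT,
                                                THERMAL_NO_LIMIT);
}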
*/ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, int trip, @@ -1237,7 +1264,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, kfree(pos); return 0; } -EXPORT_SYMBOL(thermal_zone_unbind_cooling_device); +EXPORT_SYMBOL_GPL(thermal_zone_unbind_cooling_device); static void thermal_release(struct device *dev) { @@ -1260,10 +1287,17 @@ static struct class thermal_class = { }; /** - * thermal_cooling_device_register - register a new thermal cooling device + * thermal_cooling_device_register() - register a new thermal cooling device * @type: the thermal cooling device type. * @devdata: device private data. * @ops: standard thermal cooling devices callbacks. + * + * This interface function adds a new thermal cooling device (fan/processor/...) + * to /sys/class/thermal/ folder as cooling_device[0-*]. It tries to bind itself + * to all the thermal zone devices registered at the same time. + * + * Return: a pointer to the created struct thermal_cooling_device or an + * ERR_PTR. Caller must check return value with IS_ERR*() helpers. */ struct thermal_cooling_device * thermal_cooling_device_register(char *type, void *devdata, @@ -1289,7 +1323,7 @@ thermal_cooling_device_register(char *type, void *devdata, return ERR_PTR(result); } - strcpy(cdev->type, type ? : ""); + strlcpy(cdev->type, type ? : "", sizeof(cdev->type)); mutex_init(&cdev->lock); INIT_LIST_HEAD(&cdev->thermal_instances); cdev->ops = ops; @@ -1334,7 +1368,7 @@ thermal_cooling_device_register(char *type, void *devdata, device_unregister(&cdev->device); return ERR_PTR(result); } -EXPORT_SYMBOL(thermal_cooling_device_register); +EXPORT_SYMBOL_GPL(thermal_cooling_device_register); /** * thermal_cooling_device_unregister - removes the registered thermal cooling device @@ -1394,7 +1428,7 @@ void thermal_cooling_device_unregister(struct thermal_cooling_device *cdev) device_unregister(&cdev->device); return; } -EXPORT_SYMBOL(thermal_cooling_device_unregister); +EXPORT_SYMBOL_GPL(thermal_cooling_device_unregister); void thermal_cdev_update(struct thermal_cooling_device *cdev) { @@ -1420,7 +1454,7 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev) EXPORT_SYMBOL(thermal_cdev_update); /** - * notify_thermal_framework - Sensor drivers use this API to notify framework + * thermal_notify_framework - Sensor drivers use this API to notify framework * @tz: thermal zone device * @trip: indicates which trip point has been crossed * @@ -1431,16 +1465,21 @@ EXPORT_SYMBOL(thermal_cdev_update); * The throttling policy is based on the configured platform data; if no * platform data is provided, this uses the step_wise throttling policy. */ -void notify_thermal_framework(struct thermal_zone_device *tz, int trip) +void thermal_notify_framework(struct thermal_zone_device *tz, int trip) { handle_thermal_trip(tz, trip); } -EXPORT_SYMBOL(notify_thermal_framework); +EXPORT_SYMBOL_GPL(thermal_notify_framework); /** - * create_trip_attrs - create attributes for trip points + * create_trip_attrs() - create attributes for trip points * @tz: the thermal zone device * @mask: Writeable trip point bitmap. + * + * helper function to instantiate sysfs entries for every trip + * point and its properties of a struct thermal_zone_device. + * + * Return: 0 on success, the proper error value otherwise. 
*/ static int create_trip_attrs(struct thermal_zone_device *tz, int mask) { @@ -1541,7 +1580,7 @@ static void remove_trip_attrs(struct thermal_zone_device *tz) } /** - * thermal_zone_device_register - register a new thermal zone device + * thermal_zone_device_register() - register a new thermal zone device * @type: the thermal zone device type * @trips: the number of trip points the thermal zone support * @mask: a bit string indicating the writeablility of trip points @@ -1554,8 +1593,15 @@ static void remove_trip_attrs(struct thermal_zone_device *tz) * whether trip points have been crossed (0 for interrupt * driven systems) * + * This interface function adds a new thermal zone device (sensor) to the + * /sys/class/thermal folder as thermal_zone[0-*]. It tries to bind all the + * thermal cooling devices registered at the same time. * thermal_zone_device_unregister() must be called when the device is no * longer needed. The passive cooling depends on the .get_trend() return value. + * + * Return: a pointer to the created struct thermal_zone_device or, + * in case of error, an ERR_PTR. Caller must check return value with + * IS_ERR*() helpers. */ struct thermal_zone_device *thermal_zone_device_register(const char *type, int trips, int mask, void *devdata, @@ -1594,7 +1640,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, return ERR_PTR(result); } - strcpy(tz->type, type ? : ""); + strlcpy(tz->type, type ? : "", sizeof(tz->type)); tz->ops = ops; tz->tzp = tzp; tz->device.class = &thermal_class; @@ -1687,7 +1733,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, return ERR_PTR(result); } -EXPORT_SYMBOL(thermal_zone_device_register); +EXPORT_SYMBOL_GPL(thermal_zone_device_register); /** * thermal_device_unregister - removes the registered thermal zone device @@ -1754,7 +1800,45 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) device_unregister(&tz->device); return; } -EXPORT_SYMBOL(thermal_zone_device_unregister); +EXPORT_SYMBOL_GPL(thermal_zone_device_unregister); + +/** + * thermal_zone_get_zone_by_name() - search for a zone and return its ref + * @name: thermal zone name to look up + * + * When only one zone is found with the passed name, returns a reference to it. + * + * Return: On success returns a reference to the unique thermal zone whose + * name matches @name, an ERR_PTR otherwise (-EINVAL for invalid + * parameters, -ENODEV for not found and -EEXIST for multiple matches).
+ */ +struct thermal_zone_device *thermal_zone_get_zone_by_name(const char *name) +{ + struct thermal_zone_device *pos = NULL, *ref = ERR_PTR(-EINVAL); + unsigned int found = 0; + + if (!name) + goto exit; + + mutex_lock(&thermal_list_lock); + list_for_each_entry(pos, &thermal_tz_list, node) + if (!strnicmp(name, pos->type, THERMAL_NAME_LENGTH)) { + found++; + ref = pos; + } + mutex_unlock(&thermal_list_lock); + + /* nothing has been found, thus an error code for it */ + if (found == 0) + ref = ERR_PTR(-ENODEV); + else if (found > 1) + /* Success only when an unique zone is found */ + ref = ERR_PTR(-EEXIST); + +exit: + return ref; +} +EXPORT_SYMBOL_GPL(thermal_zone_get_zone_by_name); #ifdef CONFIG_NET static struct genl_family thermal_event_genl_family = { @@ -1832,7 +1916,7 @@ int thermal_generate_netlink_event(struct thermal_zone_device *tz, return result; } -EXPORT_SYMBOL(thermal_generate_netlink_event); +EXPORT_SYMBOL_GPL(thermal_generate_netlink_event); static int genetlink_init(void) { @@ -1858,30 +1942,69 @@ static inline int genetlink_init(void) { return 0; } static inline void genetlink_exit(void) {} #endif /* !CONFIG_NET */ +static int __init thermal_register_governors(void) +{ + int result; + + result = thermal_gov_step_wise_register(); + if (result) + return result; + + result = thermal_gov_fair_share_register(); + if (result) + return result; + + return thermal_gov_user_space_register(); +} + +static void thermal_unregister_governors(void) +{ + thermal_gov_step_wise_unregister(); + thermal_gov_fair_share_unregister(); + thermal_gov_user_space_unregister(); +} + static int __init thermal_init(void) { - int result = 0; + int result; + + result = thermal_register_governors(); + if (result) + goto error; result = class_register(&thermal_class); - if (result) { - idr_destroy(&thermal_tz_idr); - idr_destroy(&thermal_cdev_idr); - mutex_destroy(&thermal_idr_lock); - mutex_destroy(&thermal_list_lock); - return result; - } + if (result) + goto unregister_governors; + result = genetlink_init(); + if (result) + goto unregister_class; + + return 0; + +unregister_governors: + thermal_unregister_governors(); +unregister_class: + class_unregister(&thermal_class); +error: + idr_destroy(&thermal_tz_idr); + idr_destroy(&thermal_cdev_idr); + mutex_destroy(&thermal_idr_lock); + mutex_destroy(&thermal_list_lock); + mutex_destroy(&thermal_governor_lock); return result; } static void __exit thermal_exit(void) { + genetlink_exit(); class_unregister(&thermal_class); + thermal_unregister_governors(); idr_destroy(&thermal_tz_idr); idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); - genetlink_exit(); + mutex_destroy(&thermal_governor_lock); } fs_initcall(thermal_init); diff --git a/trunk/drivers/thermal/thermal_core.h b/trunk/drivers/thermal/thermal_core.h index 0d3205a18112..7cf2f6626251 100644 --- a/trunk/drivers/thermal/thermal_core.h +++ b/trunk/drivers/thermal/thermal_core.h @@ -50,4 +50,31 @@ struct thermal_instance { struct list_head cdev_node; /* node in cdev->thermal_instances */ }; +int thermal_register_governor(struct thermal_governor *); +void thermal_unregister_governor(struct thermal_governor *); + +#ifdef CONFIG_THERMAL_GOV_STEP_WISE +int thermal_gov_step_wise_register(void); +void thermal_gov_step_wise_unregister(void); +#else +static inline int thermal_gov_step_wise_register(void) { return 0; } +static inline void thermal_gov_step_wise_unregister(void) {} +#endif /* CONFIG_THERMAL_GOV_STEP_WISE */ + +#ifdef CONFIG_THERMAL_GOV_FAIR_SHARE 
+int thermal_gov_fair_share_register(void); +void thermal_gov_fair_share_unregister(void); +#else +static inline int thermal_gov_fair_share_register(void) { return 0; } +static inline void thermal_gov_fair_share_unregister(void) {} +#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */ + +#ifdef CONFIG_THERMAL_GOV_USER_SPACE +int thermal_gov_user_space_register(void); +void thermal_gov_user_space_unregister(void); +#else +static inline int thermal_gov_user_space_register(void) { return 0; } +static inline void thermal_gov_user_space_unregister(void) {} +#endif /* CONFIG_THERMAL_GOV_USER_SPACE */ + #endif /* __THERMAL_CORE_H__ */ diff --git a/trunk/drivers/thermal/user_space.c b/trunk/drivers/thermal/user_space.c index 6bbb380b6d19..10adcddc8821 100644 --- a/trunk/drivers/thermal/user_space.c +++ b/trunk/drivers/thermal/user_space.c @@ -22,9 +22,6 @@ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt - -#include #include #include "thermal_core.h" @@ -46,23 +43,15 @@ static int notify_user_space(struct thermal_zone_device *tz, int trip) static struct thermal_governor thermal_gov_user_space = { .name = "user_space", .throttle = notify_user_space, - .owner = THIS_MODULE, }; -static int __init thermal_gov_user_space_init(void) +int thermal_gov_user_space_register(void) { return thermal_register_governor(&thermal_gov_user_space); } -static void __exit thermal_gov_user_space_exit(void) +void thermal_gov_user_space_unregister(void) { thermal_unregister_governor(&thermal_gov_user_space); } -/* This should load after thermal framework */ -fs_initcall(thermal_gov_user_space_init); -module_exit(thermal_gov_user_space_exit); - -MODULE_AUTHOR("Durgadoss R"); -MODULE_DESCRIPTION("A user space Thermal notifier"); -MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/tty/ehv_bytechan.c b/trunk/drivers/tty/ehv_bytechan.c index 6d0c27cd03da..9bffcec5ad82 100644 --- a/trunk/drivers/tty/ehv_bytechan.c +++ b/trunk/drivers/tty/ehv_bytechan.c @@ -859,6 +859,7 @@ static int __init ehv_bc_init(void) */ static void __exit ehv_bc_exit(void) { + platform_driver_unregister(&ehv_bc_tty_driver); tty_unregister_driver(ehv_bc_driver); put_tty_driver(ehv_bc_driver); kfree(bcs); diff --git a/trunk/drivers/tty/mxser.c b/trunk/drivers/tty/mxser.c index 71d6eb2c93b1..4c4a23674569 100644 --- a/trunk/drivers/tty/mxser.c +++ b/trunk/drivers/tty/mxser.c @@ -1618,8 +1618,12 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp) if (ip->type == PORT_16550A) me->fifo[p] = 1; - opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2); - opmode &= OP_MODE_MASK; + if (ip->board->chip_flag == MOXA_MUST_MU860_HWID) { + opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2); + opmode &= OP_MODE_MASK; + } else { + opmode = RS232_MODE; + } me->iftype[p] = opmode; mutex_unlock(&port->mutex); } @@ -1676,6 +1680,9 @@ static int mxser_ioctl(struct tty_struct *tty, int shiftbit; unsigned char val, mask; + if (info->board->chip_flag != MOXA_MUST_MU860_HWID) + return -EFAULT; + p = tty->index % 4; if (cmd == MOXA_SET_OP_MODE) { if (get_user(opmode, (int __user *) argp)) diff --git a/trunk/drivers/tty/n_tty.c b/trunk/drivers/tty/n_tty.c index d655416087b7..6c7fe90ad72d 100644 --- a/trunk/drivers/tty/n_tty.c +++ b/trunk/drivers/tty/n_tty.c @@ -1573,6 +1573,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) ldata->real_raw = 0; } n_tty_set_room(tty); + /* + * Fix tty hang when I_IXON(tty) is cleared, but the tty + * been stopped by STOP_CHAR(tty) before it. 
+ */ + if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { + start_tty(tty); + } + /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->read_wait); diff --git a/trunk/drivers/tty/rocket.c b/trunk/drivers/tty/rocket.c index 82d35c5a58fd..354564ea47c5 100644 --- a/trunk/drivers/tty/rocket.c +++ b/trunk/drivers/tty/rocket.c @@ -150,12 +150,14 @@ static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = { AIOP_INTR_BIT_3 }; +#ifdef CONFIG_PCI static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = { UPCI_AIOP_INTR_BIT_0, UPCI_AIOP_INTR_BIT_1, UPCI_AIOP_INTR_BIT_2, UPCI_AIOP_INTR_BIT_3 }; +#endif static Byte_t RData[RDATASIZE] = { 0x00, 0x09, 0xf6, 0x82, @@ -227,7 +229,6 @@ static unsigned long nextLineNumber; static int __init init_ISA(int i); static void rp_wait_until_sent(struct tty_struct *tty, int timeout); static void rp_flush_buffer(struct tty_struct *tty); -static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model); static unsigned char GetLineNumber(int ctrl, int aiop, int ch); static unsigned char SetLineNumber(int ctrl, int aiop, int ch); static void rp_start(struct tty_struct *tty); @@ -241,11 +242,6 @@ static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags); static void sModemReset(CONTROLLER_T * CtlP, int chan, int on); static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on); static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data); -static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, - ByteIO_t * AiopIOList, int AiopIOListSize, - WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, - int PeriodicOnly, int altChanRingIndicator, - int UPCIRingInd); static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, ByteIO_t * AiopIOList, int AiopIOListSize, int IRQNum, Byte_t Frequency, int PeriodicOnly); @@ -1775,6 +1771,145 @@ static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = { }; MODULE_DEVICE_TABLE(pci, rocket_pci_ids); +/* Resets the speaker controller on RocketModem II and III devices */ +static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model) +{ + ByteIO_t addr; + + /* RocketModem II speaker control is at the 8th port location of offset 0x40 */ + if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) { + addr = CtlP->AiopIO[0] + 0x4F; + sOutB(addr, 0); + } + + /* RocketModem III speaker control is at the 1st port location of offset 0x80 */ + if ((model == MODEL_UPCI_RM3_8PORT) + || (model == MODEL_UPCI_RM3_4PORT)) { + addr = CtlP->AiopIO[0] + 0x88; + sOutB(addr, 0); + } +} + +/*************************************************************************** +Function: sPCIInitController +Purpose: Initialization of controller global registers and controller + structure. +Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize, + IRQNum,Frequency,PeriodicOnly) + CONTROLLER_T *CtlP; Ptr to controller structure + int CtlNum; Controller number + ByteIO_t *AiopIOList; List of I/O addresses for each AIOP. + This list must be in the order the AIOPs will be found on the + controller. Once an AIOP in the list is not found, it is + assumed that there are no more AIOPs on the controller. + int AiopIOListSize; Number of addresses in AiopIOList + int IRQNum; Interrupt Request number. 
Can be any of the following: + 0: Disable global interrupts + 3: IRQ 3 + 4: IRQ 4 + 5: IRQ 5 + 9: IRQ 9 + 10: IRQ 10 + 11: IRQ 11 + 12: IRQ 12 + 15: IRQ 15 + Byte_t Frequency: A flag identifying the frequency + of the periodic interrupt, can be any one of the following: + FREQ_DIS - periodic interrupt disabled + FREQ_137HZ - 137 Hertz + FREQ_69HZ - 69 Hertz + FREQ_34HZ - 34 Hertz + FREQ_17HZ - 17 Hertz + FREQ_9HZ - 9 Hertz + FREQ_4HZ - 4 Hertz + If IRQNum is set to 0 the Frequency parameter is + overidden, it is forced to a value of FREQ_DIS. + int PeriodicOnly: 1 if all interrupts except the periodic + interrupt are to be blocked. + 0 is both the periodic interrupt and + other channel interrupts are allowed. + If IRQNum is set to 0 the PeriodicOnly parameter is + overidden, it is forced to a value of 0. +Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller + initialization failed. + +Comments: + If periodic interrupts are to be disabled but AIOP interrupts + are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0. + + If interrupts are to be completely disabled set IRQNum to 0. + + Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an + invalid combination. + + This function performs initialization of global interrupt modes, + but it does not actually enable global interrupts. To enable + and disable global interrupts use functions sEnGlobalInt() and + sDisGlobalInt(). Enabling of global interrupts is normally not + done until all other initializations are complete. + + Even if interrupts are globally enabled, they must also be + individually enabled for each channel that is to generate + interrupts. + +Warnings: No range checking on any of the parameters is done. + + No context switches are allowed while executing this function. + + After this function all AIOPs on the controller are disabled, + they can be enabled with sEnAiop(). +*/ +static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, + ByteIO_t * AiopIOList, int AiopIOListSize, + WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, + int PeriodicOnly, int altChanRingIndicator, + int UPCIRingInd) +{ + int i; + ByteIO_t io; + + CtlP->AltChanRingIndicator = altChanRingIndicator; + CtlP->UPCIRingInd = UPCIRingInd; + CtlP->CtlNum = CtlNum; + CtlP->CtlID = CTLID_0001; /* controller release 1 */ + CtlP->BusType = isPCI; /* controller release 1 */ + + if (ConfigIO) { + CtlP->isUPCI = 1; + CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL; + CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL; + CtlP->AiopIntrBits = upci_aiop_intr_bits; + } else { + CtlP->isUPCI = 0; + CtlP->PCIIO = + (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC); + CtlP->AiopIntrBits = aiop_intr_bits; + } + + sPCIControllerEOI(CtlP); /* clear EOI if warm init */ + /* Init AIOPs */ + CtlP->NumAiop = 0; + for (i = 0; i < AiopIOListSize; i++) { + io = AiopIOList[i]; + CtlP->AiopIO[i] = (WordIO_t) io; + CtlP->AiopIntChanIO[i] = io + _INT_CHAN; + + CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */ + if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ + break; /* done looking for AIOPs */ + + CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */ + sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */ + sOutB(io + _INDX_DATA, sClockPrescale); + CtlP->NumAiop++; /* bump count of AIOPs */ + } + + if (CtlP->NumAiop == 0) + return (-1); + else + return (CtlP->NumAiop); +} + /* * Called when a PCI card is found. Retrieves and stores model information, * init's aiopic and serial port hardware. 
@@ -2519,147 +2654,6 @@ static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, return (CtlP->NumAiop); } -#ifdef CONFIG_PCI -/*************************************************************************** -Function: sPCIInitController -Purpose: Initialization of controller global registers and controller - structure. -Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize, - IRQNum,Frequency,PeriodicOnly) - CONTROLLER_T *CtlP; Ptr to controller structure - int CtlNum; Controller number - ByteIO_t *AiopIOList; List of I/O addresses for each AIOP. - This list must be in the order the AIOPs will be found on the - controller. Once an AIOP in the list is not found, it is - assumed that there are no more AIOPs on the controller. - int AiopIOListSize; Number of addresses in AiopIOList - int IRQNum; Interrupt Request number. Can be any of the following: - 0: Disable global interrupts - 3: IRQ 3 - 4: IRQ 4 - 5: IRQ 5 - 9: IRQ 9 - 10: IRQ 10 - 11: IRQ 11 - 12: IRQ 12 - 15: IRQ 15 - Byte_t Frequency: A flag identifying the frequency - of the periodic interrupt, can be any one of the following: - FREQ_DIS - periodic interrupt disabled - FREQ_137HZ - 137 Hertz - FREQ_69HZ - 69 Hertz - FREQ_34HZ - 34 Hertz - FREQ_17HZ - 17 Hertz - FREQ_9HZ - 9 Hertz - FREQ_4HZ - 4 Hertz - If IRQNum is set to 0 the Frequency parameter is - overidden, it is forced to a value of FREQ_DIS. - int PeriodicOnly: 1 if all interrupts except the periodic - interrupt are to be blocked. - 0 is both the periodic interrupt and - other channel interrupts are allowed. - If IRQNum is set to 0 the PeriodicOnly parameter is - overidden, it is forced to a value of 0. -Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller - initialization failed. - -Comments: - If periodic interrupts are to be disabled but AIOP interrupts - are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0. - - If interrupts are to be completely disabled set IRQNum to 0. - - Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an - invalid combination. - - This function performs initialization of global interrupt modes, - but it does not actually enable global interrupts. To enable - and disable global interrupts use functions sEnGlobalInt() and - sDisGlobalInt(). Enabling of global interrupts is normally not - done until all other initializations are complete. - - Even if interrupts are globally enabled, they must also be - individually enabled for each channel that is to generate - interrupts. - -Warnings: No range checking on any of the parameters is done. - - No context switches are allowed while executing this function. - - After this function all AIOPs on the controller are disabled, - they can be enabled with sEnAiop(). 
-*/ -static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum, - ByteIO_t * AiopIOList, int AiopIOListSize, - WordIO_t ConfigIO, int IRQNum, Byte_t Frequency, - int PeriodicOnly, int altChanRingIndicator, - int UPCIRingInd) -{ - int i; - ByteIO_t io; - - CtlP->AltChanRingIndicator = altChanRingIndicator; - CtlP->UPCIRingInd = UPCIRingInd; - CtlP->CtlNum = CtlNum; - CtlP->CtlID = CTLID_0001; /* controller release 1 */ - CtlP->BusType = isPCI; /* controller release 1 */ - - if (ConfigIO) { - CtlP->isUPCI = 1; - CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL; - CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL; - CtlP->AiopIntrBits = upci_aiop_intr_bits; - } else { - CtlP->isUPCI = 0; - CtlP->PCIIO = - (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC); - CtlP->AiopIntrBits = aiop_intr_bits; - } - - sPCIControllerEOI(CtlP); /* clear EOI if warm init */ - /* Init AIOPs */ - CtlP->NumAiop = 0; - for (i = 0; i < AiopIOListSize; i++) { - io = AiopIOList[i]; - CtlP->AiopIO[i] = (WordIO_t) io; - CtlP->AiopIntChanIO[i] = io + _INT_CHAN; - - CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */ - if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ - break; /* done looking for AIOPs */ - - CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */ - sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */ - sOutB(io + _INDX_DATA, sClockPrescale); - CtlP->NumAiop++; /* bump count of AIOPs */ - } - - if (CtlP->NumAiop == 0) - return (-1); - else - return (CtlP->NumAiop); -} - -/* Resets the speaker controller on RocketModem II and III devices */ -static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model) -{ - ByteIO_t addr; - - /* RocketModem II speaker control is at the 8th port location of offset 0x40 */ - if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) { - addr = CtlP->AiopIO[0] + 0x4F; - sOutB(addr, 0); - } - - /* RocketModem III speaker control is at the 1st port location of offset 0x80 */ - if ((model == MODEL_UPCI_RM3_8PORT) - || (model == MODEL_UPCI_RM3_4PORT)) { - addr = CtlP->AiopIO[0] + 0x88; - sOutB(addr, 0); - } -} -#endif - /*************************************************************************** Function: sReadAiopID Purpose: Read the AIOP idenfication number directly from an AIOP. 
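The rocket.c hunks above keep the PCI-only helpers (sPCIInitController(), rmSpeakerReset()) and the upci_aiop_intr_bits table next to their only PCI callers and drop the file-scope forward declarations, so builds without CONFIG_PCI see neither unused statics nor dead data. A minimal sketch of that shape, using illustrative names rather than the driver's own symbols:

#ifdef CONFIG_PCI
/* PCI-only data and helpers grouped in one place, referenced only here,
 * so no forward declarations are needed elsewhere in the file. */
static const unsigned int example_intr_bits[4] = { 0x01, 0x02, 0x04, 0x08 };

static int example_pci_setup(int aiop)
{
        return example_intr_bits[aiop & 3];
}

static int example_pci_probe(void)
{
        return example_pci_setup(0);
}
#endif /* CONFIG_PCI */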
diff --git a/trunk/drivers/tty/serial/68328serial.c b/trunk/drivers/tty/serial/68328serial.c index ef2e08e9b590..5dc9c4bfa66e 100644 --- a/trunk/drivers/tty/serial/68328serial.c +++ b/trunk/drivers/tty/serial/68328serial.c @@ -14,7 +14,6 @@ * 2.4/2.5 port David McCullough */ -#include #include #include #include diff --git a/trunk/drivers/tty/serial/8250/8250_dw.c b/trunk/drivers/tty/serial/8250/8250_dw.c index beaa283f5cc6..d07b6af3a937 100644 --- a/trunk/drivers/tty/serial/8250/8250_dw.c +++ b/trunk/drivers/tty/serial/8250/8250_dw.c @@ -338,7 +338,8 @@ static int dw8250_runtime_suspend(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); - clk_disable_unprepare(data->clk); + if (!IS_ERR(data->clk)) + clk_disable_unprepare(data->clk); return 0; } @@ -347,7 +348,8 @@ static int dw8250_runtime_resume(struct device *dev) { struct dw8250_data *data = dev_get_drvdata(dev); - clk_prepare_enable(data->clk); + if (!IS_ERR(data->clk)) + clk_prepare_enable(data->clk); return 0; } @@ -367,6 +369,7 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match); static const struct acpi_device_id dw8250_acpi_match[] = { { "INT33C4", 0 }, { "INT33C5", 0 }, + { "80860F0A", 0 }, { }, }; MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); diff --git a/trunk/drivers/tty/serial/amba-pl011.c b/trunk/drivers/tty/serial/amba-pl011.c index 8ab70a620919..e2774f9ecd59 100644 --- a/trunk/drivers/tty/serial/amba-pl011.c +++ b/trunk/drivers/tty/serial/amba-pl011.c @@ -332,7 +332,7 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port * dmaengine_slave_config(chan, &rx_conf); uap->dmarx.chan = chan; - if (plat->dma_rx_poll_enable) { + if (plat && plat->dma_rx_poll_enable) { /* Set poll rate if specified. */ if (plat->dma_rx_poll_rate) { uap->dmarx.auto_poll_rate = false; diff --git a/trunk/drivers/tty/serial/bcm63xx_uart.c b/trunk/drivers/tty/serial/bcm63xx_uart.c index 52a3ecd40421..6fa2ae77fffd 100644 --- a/trunk/drivers/tty/serial/bcm63xx_uart.c +++ b/trunk/drivers/tty/serial/bcm63xx_uart.c @@ -30,7 +30,6 @@ #include #include -#include #include #include #include diff --git a/trunk/drivers/tty/serial/mcf.c b/trunk/drivers/tty/serial/mcf.c index e956377a38fe..65be0c00c4bf 100644 --- a/trunk/drivers/tty/serial/mcf.c +++ b/trunk/drivers/tty/serial/mcf.c @@ -707,8 +707,10 @@ static int __init mcf_init(void) if (rc) return rc; rc = platform_driver_register(&mcf_platform_driver); - if (rc) + if (rc) { + uart_unregister_driver(&mcf_driver); return rc; + } return 0; } diff --git a/trunk/drivers/tty/serial/mpc52xx_uart.c b/trunk/drivers/tty/serial/mpc52xx_uart.c index 018bad922554..f51b280f3bf2 100644 --- a/trunk/drivers/tty/serial/mpc52xx_uart.c +++ b/trunk/drivers/tty/serial/mpc52xx_uart.c @@ -1497,18 +1497,23 @@ mpc52xx_uart_init(void) if (psc_ops && psc_ops->fifoc_init) { ret = psc_ops->fifoc_init(); if (ret) - return ret; + goto err_init; } ret = platform_driver_register(&mpc52xx_uart_of_driver); if (ret) { printk(KERN_ERR "%s: platform_driver_register failed (%i)\n", __FILE__, ret); - uart_unregister_driver(&mpc52xx_uart_driver); - return ret; + goto err_reg; } return 0; +err_reg: + if (psc_ops && psc_ops->fifoc_uninit) + psc_ops->fifoc_uninit(); +err_init: + uart_unregister_driver(&mpc52xx_uart_driver); + return ret; } static void __exit diff --git a/trunk/drivers/tty/serial/nwpserial.c b/trunk/drivers/tty/serial/nwpserial.c index 77287c54f331..549c70a2a63e 100644 --- a/trunk/drivers/tty/serial/nwpserial.c +++ b/trunk/drivers/tty/serial/nwpserial.c @@ -199,7 +199,7 @@ static void 
nwpserial_shutdown(struct uart_port *port) dcr_write(up->dcr_host, UART_IER, up->ier); /* free irq */ - free_irq(up->port.irq, port); + free_irq(up->port.irq, up); } static int nwpserial_verify_port(struct uart_port *port, diff --git a/trunk/drivers/tty/serial/samsung.c b/trunk/drivers/tty/serial/samsung.c index 074b9194144f..89429410a245 100644 --- a/trunk/drivers/tty/serial/samsung.c +++ b/trunk/drivers/tty/serial/samsung.c @@ -1803,6 +1803,7 @@ static int __init s3c24xx_serial_modinit(void) static void __exit s3c24xx_serial_modexit(void) { + platform_driver_unregister(&samsung_serial_driver); uart_unregister_driver(&s3c24xx_uart_drv); } diff --git a/trunk/drivers/tty/tty_audit.c b/trunk/drivers/tty/tty_audit.c index 6953dc82850c..a4fdce74f883 100644 --- a/trunk/drivers/tty/tty_audit.c +++ b/trunk/drivers/tty/tty_audit.c @@ -60,24 +60,22 @@ static void tty_audit_buf_put(struct tty_audit_buf *buf) tty_audit_buf_free(buf); } -static void tty_audit_log(const char *description, struct task_struct *tsk, - kuid_t loginuid, unsigned sessionid, int major, - int minor, unsigned char *data, size_t size) +static void tty_audit_log(const char *description, int major, int minor, + unsigned char *data, size_t size) { struct audit_buffer *ab; + struct task_struct *tsk = current; + uid_t uid = from_kuid(&init_user_ns, task_uid(tsk)); + uid_t loginuid = from_kuid(&init_user_ns, audit_get_loginuid(tsk)); + u32 sessionid = audit_get_sessionid(tsk); ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_TTY); if (ab) { char name[sizeof(tsk->comm)]; - kuid_t uid = task_uid(tsk); - - audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u " - "major=%d minor=%d comm=", description, - tsk->pid, - from_kuid(&init_user_ns, uid), - from_kuid(&init_user_ns, loginuid), - sessionid, - major, minor); + + audit_log_format(ab, "%s pid=%u uid=%u auid=%u ses=%u major=%d" + " minor=%d comm=", description, tsk->pid, uid, + loginuid, sessionid, major, minor); get_task_comm(name, tsk); audit_log_untrustedstring(ab, name); audit_log_format(ab, " data="); @@ -90,11 +88,9 @@ static void tty_audit_log(const char *description, struct task_struct *tsk, * tty_audit_buf_push - Push buffered data out * * Generate an audit message from the contents of @buf, which is owned by - * @tsk with @loginuid. @buf->mutex must be locked. + * the current task. @buf->mutex must be locked. */ -static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, - unsigned int sessionid, - struct tty_audit_buf *buf) +static void tty_audit_buf_push(struct tty_audit_buf *buf) { if (buf->valid == 0) return; @@ -102,24 +98,10 @@ static void tty_audit_buf_push(struct task_struct *tsk, kuid_t loginuid, buf->valid = 0; return; } - tty_audit_log("tty", tsk, loginuid, sessionid, buf->major, buf->minor, - buf->data, buf->valid); + tty_audit_log("tty", buf->major, buf->minor, buf->data, buf->valid); buf->valid = 0; } -/** - * tty_audit_buf_push_current - Push buffered data out - * - * Generate an audit message from the contents of @buf, which is owned by - * the current task. @buf->mutex must be locked. 
- */ -static void tty_audit_buf_push_current(struct tty_audit_buf *buf) -{ - kuid_t auid = audit_get_loginuid(current); - unsigned int sessionid = audit_get_sessionid(current); - tty_audit_buf_push(current, auid, sessionid, buf); -} - /** * tty_audit_exit - Handle a task exit * @@ -130,15 +112,13 @@ void tty_audit_exit(void) { struct tty_audit_buf *buf; - spin_lock_irq(¤t->sighand->siglock); buf = current->signal->tty_audit_buf; current->signal->tty_audit_buf = NULL; - spin_unlock_irq(¤t->sighand->siglock); if (!buf) return; mutex_lock(&buf->mutex); - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -151,9 +131,8 @@ void tty_audit_exit(void) */ void tty_audit_fork(struct signal_struct *sig) { - spin_lock_irq(¤t->sighand->siglock); sig->audit_tty = current->signal->audit_tty; - spin_unlock_irq(¤t->sighand->siglock); + sig->audit_tty_log_passwd = current->signal->audit_tty_log_passwd; } /** @@ -163,20 +142,21 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) { struct tty_audit_buf *buf; int major, minor, should_audit; + unsigned long flags; - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irqsave(¤t->sighand->siglock, flags); should_audit = current->signal->audit_tty; buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); major = tty->driver->major; minor = tty->driver->minor_start + tty->index; if (buf) { mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } @@ -187,24 +167,20 @@ void tty_audit_tiocsti(struct tty_struct *tty, char ch) auid = audit_get_loginuid(current); sessionid = audit_get_sessionid(current); - tty_audit_log("ioctl=TIOCSTI", current, auid, sessionid, major, - minor, &ch, 1); + tty_audit_log("ioctl=TIOCSTI", major, minor, &ch, 1); } } /** - * tty_audit_push_task - Flush task's pending audit data - * @tsk: task pointer - * @loginuid: sender login uid - * @sessionid: sender session id + * tty_audit_push_current - Flush current's pending audit data * - * Called with a ref on @tsk held. Try to lock sighand and get a - * reference to the tty audit buffer if available. + * Try to lock sighand and get a reference to the tty audit buffer if available. * Flush the buffer or return an appropriate error code. 
*/ -int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) +int tty_audit_push_current(void) { struct tty_audit_buf *buf = ERR_PTR(-EPERM); + struct task_struct *tsk = current; unsigned long flags; if (!lock_task_sighand(tsk, &flags)) @@ -225,7 +201,7 @@ int tty_audit_push_task(struct task_struct *tsk, kuid_t loginuid, u32 sessionid) return PTR_ERR(buf); mutex_lock(&buf->mutex); - tty_audit_buf_push(tsk, loginuid, sessionid, buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -243,10 +219,11 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, unsigned icanon) { struct tty_audit_buf *buf, *buf2; + unsigned long flags; buf = NULL; buf2 = NULL; - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irqsave(¤t->sighand->siglock, flags); if (likely(!current->signal->audit_tty)) goto out; buf = current->signal->tty_audit_buf; @@ -254,7 +231,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, atomic_inc(&buf->count); goto out; } - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); buf2 = tty_audit_buf_alloc(tty->driver->major, tty->driver->minor_start + tty->index, @@ -264,7 +241,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, return NULL; } - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irqsave(¤t->sighand->siglock, flags); if (!current->signal->audit_tty) goto out; buf = current->signal->tty_audit_buf; @@ -276,7 +253,7 @@ static struct tty_audit_buf *tty_audit_buf_get(struct tty_struct *tty, atomic_inc(&buf->count); /* Fall through */ out: - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); if (buf2) tty_audit_buf_free(buf2); return buf; @@ -292,10 +269,18 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, { struct tty_audit_buf *buf; int major, minor; + int audit_log_tty_passwd; + unsigned long flags; if (unlikely(size == 0)) return; + spin_lock_irqsave(¤t->sighand->siglock, flags); + audit_log_tty_passwd = current->signal->audit_tty_log_passwd; + spin_unlock_irqrestore(¤t->sighand->siglock, flags); + if (!audit_log_tty_passwd && icanon && !L_ECHO(tty)) + return; + if (tty->driver->type == TTY_DRIVER_TYPE_PTY && tty->driver->subtype == PTY_TYPE_MASTER) return; @@ -309,7 +294,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, minor = tty->driver->minor_start + tty->index; if (buf->major != major || buf->minor != minor || buf->icanon != icanon) { - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); buf->major = major; buf->minor = minor; buf->icanon = icanon; @@ -325,7 +310,7 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, data += run; size -= run; if (buf->valid == N_TTY_BUF_SIZE) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); } while (size != 0); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); @@ -339,16 +324,17 @@ void tty_audit_add_data(struct tty_struct *tty, unsigned char *data, void tty_audit_push(struct tty_struct *tty) { struct tty_audit_buf *buf; + unsigned long flags; - spin_lock_irq(¤t->sighand->siglock); + spin_lock_irqsave(¤t->sighand->siglock, flags); if (likely(!current->signal->audit_tty)) { - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); return; } buf = current->signal->tty_audit_buf; if (buf) atomic_inc(&buf->count); - spin_unlock_irq(¤t->sighand->siglock); + spin_unlock_irqrestore(¤t->sighand->siglock, flags); if 
(buf) { int major, minor; @@ -357,7 +343,7 @@ void tty_audit_push(struct tty_struct *tty) minor = tty->driver->minor_start + tty->index; mutex_lock(&buf->mutex); if (buf->major == major && buf->minor == minor) - tty_audit_buf_push_current(buf); + tty_audit_buf_push(buf); mutex_unlock(&buf->mutex); tty_audit_buf_put(buf); } diff --git a/trunk/drivers/tty/vt/vt.c b/trunk/drivers/tty/vt/vt.c index fbd447b390f7..740202d8a5c4 100644 --- a/trunk/drivers/tty/vt/vt.c +++ b/trunk/drivers/tty/vt/vt.c @@ -779,7 +779,6 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */ con_set_default_unimap(vc); vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); if (!vc->vc_screenbuf) { - tty_port_destroy(&vc->port); kfree(vc); vc_cons[currcons].d = NULL; return -ENOMEM; @@ -986,26 +985,25 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws) return ret; } -void vc_deallocate(unsigned int currcons) +struct vc_data *vc_deallocate(unsigned int currcons) { + struct vc_data *vc = NULL; + WARN_CONSOLE_UNLOCKED(); if (vc_cons_allocated(currcons)) { - struct vc_data *vc = vc_cons[currcons].d; - struct vt_notifier_param param = { .vc = vc }; + struct vt_notifier_param param; + param.vc = vc = vc_cons[currcons].d; atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, ¶m); vcs_remove_sysfs(currcons); vc->vc_sw->con_deinit(vc); put_pid(vc->vt_pid); module_put(vc->vc_sw->owner); kfree(vc->vc_screenbuf); - if (currcons >= MIN_NR_CONSOLES) { - tty_port_destroy(&vc->port); - kfree(vc); - } vc_cons[currcons].d = NULL; } + return vc; } /* diff --git a/trunk/drivers/tty/vt/vt_ioctl.c b/trunk/drivers/tty/vt/vt_ioctl.c index 98ff1735eafc..fc2c06c66e89 100644 --- a/trunk/drivers/tty/vt/vt_ioctl.c +++ b/trunk/drivers/tty/vt/vt_ioctl.c @@ -283,6 +283,51 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_ return 0; } +/* deallocate a single console, if possible (leave 0) */ +static int vt_disallocate(unsigned int vc_num) +{ + struct vc_data *vc = NULL; + int ret = 0; + + if (!vc_num) + return 0; + + console_lock(); + if (VT_BUSY(vc_num)) + ret = -EBUSY; + else + vc = vc_deallocate(vc_num); + console_unlock(); + + if (vc && vc_num >= MIN_NR_CONSOLES) { + tty_port_destroy(&vc->port); + kfree(vc); + } + + return ret; +} + +/* deallocate all unused consoles, but leave 0 */ +static void vt_disallocate_all(void) +{ + struct vc_data *vc[MAX_NR_CONSOLES]; + int i; + + console_lock(); + for (i = 1; i < MAX_NR_CONSOLES; i++) + if (!VT_BUSY(i)) + vc[i] = vc_deallocate(i); + else + vc[i] = NULL; + console_unlock(); + + for (i = 1; i < MAX_NR_CONSOLES; i++) { + if (vc[i] && i >= MIN_NR_CONSOLES) { + tty_port_destroy(&vc[i]->port); + kfree(vc[i]); + } + } +} /* @@ -769,24 +814,10 @@ int vt_ioctl(struct tty_struct *tty, ret = -ENXIO; break; } - if (arg == 0) { - /* deallocate all unused consoles, but leave 0 */ - console_lock(); - for (i=1; iphy; - if (!pdev->dev.dma_mask) { - pdev->dev.dma_mask = devm_kzalloc(&pdev->dev, - sizeof(*pdev->dev.dma_mask), GFP_KERNEL); - if (!pdev->dev.dma_mask) { - ret = -ENOMEM; - dev_err(&pdev->dev, "Failed to alloc dma_mask!\n"); - goto err; - } - *pdev->dev.dma_mask = DMA_BIT_MASK(32); - dma_set_coherent_mask(&pdev->dev, *pdev->dev.dma_mask); - } + if (!pdev->dev.dma_mask) + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if (usbmisc_ops && usbmisc_ops->init) { ret = usbmisc_ops->init(&pdev->dev); diff --git a/trunk/drivers/usb/chipidea/core.c 
b/trunk/drivers/usb/chipidea/core.c index 450107e5f657..49b098bedf9b 100644 --- a/trunk/drivers/usb/chipidea/core.c +++ b/trunk/drivers/usb/chipidea/core.c @@ -370,11 +370,6 @@ static int ci_hdrc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing resource\n"); - return -ENODEV; - } - base = devm_ioremap_resource(dev, res); if (IS_ERR(base)) return PTR_ERR(base); diff --git a/trunk/drivers/usb/core/Kconfig b/trunk/drivers/usb/core/Kconfig index 8772b3659296..db535b0aa172 100644 --- a/trunk/drivers/usb/core/Kconfig +++ b/trunk/drivers/usb/core/Kconfig @@ -51,7 +51,7 @@ config USB_DYNAMIC_MINORS config USB_OTG bool "OTG support" - depends on USB_SUSPEND + depends on PM_RUNTIME default n help The most notable feature of USB OTG is support for a diff --git a/trunk/drivers/usb/core/quirks.c b/trunk/drivers/usb/core/quirks.c index ab5638d9c707..a63598895077 100644 --- a/trunk/drivers/usb/core/quirks.c +++ b/trunk/drivers/usb/core/quirks.c @@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Edirol SD-20 */ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Alcor Micro Corp. Hub */ + { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, + /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/trunk/drivers/usb/dwc3/Kconfig b/trunk/drivers/usb/dwc3/Kconfig index ea5ee9c21c35..757aa18027d0 100644 --- a/trunk/drivers/usb/dwc3/Kconfig +++ b/trunk/drivers/usb/dwc3/Kconfig @@ -19,21 +19,21 @@ choice config USB_DWC3_HOST bool "Host only mode" - depends on USB + depends on USB=y || USB=USB_DWC3 help Select this when you want to use DWC3 in host mode only, thereby the gadget feature will be regressed. config USB_DWC3_GADGET bool "Gadget only mode" - depends on USB_GADGET + depends on USB_GADGET=y || USB_GADGET=USB_DWC3 help Select this when you want to use DWC3 in gadget mode only, thereby the host feature will be regressed. config USB_DWC3_DUAL_ROLE bool "Dual Role mode" - depends on (USB && USB_GADGET) + depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3)) help This is the default mode of working of DWC3 controller where both host and gadget features are enabled. diff --git a/trunk/drivers/usb/dwc3/dwc3-exynos.c b/trunk/drivers/usb/dwc3/dwc3-exynos.c index a8afe6e26621..929e7dd6e58b 100644 --- a/trunk/drivers/usb/dwc3/dwc3-exynos.c +++ b/trunk/drivers/usb/dwc3/dwc3-exynos.c @@ -95,8 +95,6 @@ static int dwc3_exynos_remove_child(struct device *dev, void *unused) return 0; } -static u64 dwc3_exynos_dma_mask = DMA_BIT_MASK(32); - static int dwc3_exynos_probe(struct platform_device *pdev) { struct dwc3_exynos *exynos; @@ -118,7 +116,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. */ if (!dev->dma_mask) - dev->dma_mask = &dwc3_exynos_dma_mask; + dev->dma_mask = &dev->coherent_dma_mask; + if (!dev->coherent_dma_mask) + dev->coherent_dma_mask = DMA_BIT_MASK(32); platform_set_drvdata(pdev, exynos); diff --git a/trunk/drivers/usb/gadget/Kconfig b/trunk/drivers/usb/gadget/Kconfig index 83300d94a893..f41aa0d0c414 100644 --- a/trunk/drivers/usb/gadget/Kconfig +++ b/trunk/drivers/usb/gadget/Kconfig @@ -146,7 +146,6 @@ config USB_LPC32XX depends on ARCH_LPC32XX depends on USB_PHY select USB_ISP1301 - select USB_OTG_UTILS help This option selects the USB device controller in the LPC32xx SoC. 
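The ci13xxx_imx and dwc3-exynos hunks above, like the EHCI/OHCI glue changes later in this patch, replace per-driver "static u64 foo_dma_mask" variables with the same probe-time fallback: point dev->dma_mask at the device's own coherent_dma_mask and default both to 32 bits. A minimal sketch of that pattern follows; example_probe() is illustrative and not taken from any one driver.

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        /*
         * Device-tree probing does not populate dma_mask yet, but the
         * shared USB code relies on it; reuse coherent_dma_mask as its
         * storage and give both a sane 32-bit default instead of keeping
         * a file-scope static u64 mask per driver.
         */
        if (!pdev->dev.dma_mask)
                pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
        if (!pdev->dev.coherent_dma_mask)
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

        /* ...rest of probe... */
        return 0;
}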
diff --git a/trunk/drivers/usb/gadget/atmel_usba_udc.c b/trunk/drivers/usb/gadget/atmel_usba_udc.c index f2a970f75bfa..5a5128a226f7 100644 --- a/trunk/drivers/usb/gadget/atmel_usba_udc.c +++ b/trunk/drivers/usb/gadget/atmel_usba_udc.c @@ -1992,8 +1992,6 @@ static int __init usba_udc_probe(struct platform_device *pdev) err_get_hclk: clk_put(pclk); - platform_set_drvdata(pdev, NULL); - return ret; } diff --git a/trunk/drivers/usb/gadget/bcm63xx_udc.c b/trunk/drivers/usb/gadget/bcm63xx_udc.c index 6e6518264c42..fd24cb4540a4 100644 --- a/trunk/drivers/usb/gadget/bcm63xx_udc.c +++ b/trunk/drivers/usb/gadget/bcm63xx_udc.c @@ -2334,21 +2334,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "error finding USBD resource\n"); - return -ENXIO; - } - udc->usbd_regs = devm_ioremap_resource(dev, res); if (IS_ERR(udc->usbd_regs)) return PTR_ERR(udc->usbd_regs); res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res) { - dev_err(dev, "error finding IUDMA resource\n"); - return -ENXIO; - } - udc->iudma_regs = devm_ioremap_resource(dev, res); if (IS_ERR(udc->iudma_regs)) return PTR_ERR(udc->iudma_regs); @@ -2420,7 +2410,6 @@ static int bcm63xx_udc_remove(struct platform_device *pdev) usb_del_gadget_udc(&udc->gadget); BUG_ON(udc->driver); - platform_set_drvdata(pdev, NULL); bcm63xx_uninit_udc_hw(udc); return 0; diff --git a/trunk/drivers/usb/gadget/configfs.c b/trunk/drivers/usb/gadget/configfs.c index 3d5cfc9c2c78..80e7f75a56c7 100644 --- a/trunk/drivers/usb/gadget/configfs.c +++ b/trunk/drivers/usb/gadget/configfs.c @@ -821,8 +821,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget, gi->gstrings[i] = NULL; s = usb_gstrings_attach(&gi->cdev, gi->gstrings, USB_GADGET_FIRST_AVAIL_IDX); - if (IS_ERR(s)) + if (IS_ERR(s)) { + ret = PTR_ERR(s); goto err_comp_cleanup; + } gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id; gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id; @@ -847,8 +849,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget, } cfg->gstrings[i] = NULL; s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1); - if (IS_ERR(s)) + if (IS_ERR(s)) { + ret = PTR_ERR(s); goto err_comp_cleanup; + } c->iConfiguration = s[0].id; } diff --git a/trunk/drivers/usb/gadget/dummy_hcd.c b/trunk/drivers/usb/gadget/dummy_hcd.c index a792e322f4f1..c588e8e486e5 100644 --- a/trunk/drivers/usb/gadget/dummy_hcd.c +++ b/trunk/drivers/usb/gadget/dummy_hcd.c @@ -1001,7 +1001,6 @@ static int dummy_udc_remove(struct platform_device *pdev) struct dummy *dum = platform_get_drvdata(pdev); usb_del_gadget_udc(&dum->gadget); - platform_set_drvdata(pdev, NULL); device_remove_file(&dum->gadget.dev, &dev_attr_function); return 0; } @@ -2661,8 +2660,10 @@ static int __init init(void) } for (i = 0; i < mod_data.num; i++) { dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL); - if (!dum[i]) + if (!dum[i]) { + retval = -ENOMEM; goto err_add_pdata; + } retval = platform_device_add_data(the_hcd_pdev[i], &dum[i], sizeof(void *)); if (retval) diff --git a/trunk/drivers/usb/gadget/f_ecm.c b/trunk/drivers/usb/gadget/f_ecm.c index d893d6929079..abf8a31ae146 100644 --- a/trunk/drivers/usb/gadget/f_ecm.c +++ b/trunk/drivers/usb/gadget/f_ecm.c @@ -816,6 +816,7 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f) * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded + * @dev: eth_dev structure 
* Context: single threaded during gadget setup * * Returns zero on success, else negative errno. diff --git a/trunk/drivers/usb/gadget/f_subset.c b/trunk/drivers/usb/gadget/f_subset.c index 185d6f5e4e4d..7be04b342494 100644 --- a/trunk/drivers/usb/gadget/f_subset.c +++ b/trunk/drivers/usb/gadget/f_subset.c @@ -373,6 +373,7 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f) * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded + * @dev: eth_dev structure * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. diff --git a/trunk/drivers/usb/gadget/f_uac2.c b/trunk/drivers/usb/gadget/f_uac2.c index c7468b6c07b0..03c1fb686644 100644 --- a/trunk/drivers/usb/gadget/f_uac2.c +++ b/trunk/drivers/usb/gadget/f_uac2.c @@ -456,8 +456,6 @@ static int snd_uac2_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, NULL); - if (card) return snd_card_free(card); diff --git a/trunk/drivers/usb/gadget/fusb300_udc.c b/trunk/drivers/usb/gadget/fusb300_udc.c index cec8871b77f9..b8632d40f8bf 100644 --- a/trunk/drivers/usb/gadget/fusb300_udc.c +++ b/trunk/drivers/usb/gadget/fusb300_udc.c @@ -1461,8 +1461,10 @@ static int __init fusb300_probe(struct platform_device *pdev) fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, GFP_KERNEL); - if (fusb300->ep0_req == NULL) + if (fusb300->ep0_req == NULL) { + ret = -ENOMEM; goto clean_up3; + } init_controller(fusb300); ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget); diff --git a/trunk/drivers/usb/gadget/imx_udc.c b/trunk/drivers/usb/gadget/imx_udc.c index b5cebd6b0d7a..9b2d24e4c95f 100644 --- a/trunk/drivers/usb/gadget/imx_udc.c +++ b/trunk/drivers/usb/gadget/imx_udc.c @@ -1511,8 +1511,6 @@ static int __exit imx_udc_remove(struct platform_device *pdev) if (pdata->exit) pdata->exit(&pdev->dev); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/trunk/drivers/usb/gadget/m66592-udc.c b/trunk/drivers/usb/gadget/m66592-udc.c index 866ef0999247..51cfe72da5bb 100644 --- a/trunk/drivers/usb/gadget/m66592-udc.c +++ b/trunk/drivers/usb/gadget/m66592-udc.c @@ -1660,8 +1660,10 @@ static int __init m66592_probe(struct platform_device *pdev) m66592->epaddr2ep[0] = &m66592->ep[0]; m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); - if (m66592->ep0_req == NULL) + if (m66592->ep0_req == NULL) { + ret = -ENOMEM; goto clean_up3; + } m66592->ep0_req->complete = nop_completion; init_controller(m66592); diff --git a/trunk/drivers/usb/gadget/pxa25x_udc.c b/trunk/drivers/usb/gadget/pxa25x_udc.c index ef47495dec8f..95c531d5aa4f 100644 --- a/trunk/drivers/usb/gadget/pxa25x_udc.c +++ b/trunk/drivers/usb/gadget/pxa25x_udc.c @@ -2236,7 +2236,6 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev) dev->transceiver = NULL; } - platform_set_drvdata(pdev, NULL); the_controller = NULL; return 0; } diff --git a/trunk/drivers/usb/gadget/r8a66597-udc.c b/trunk/drivers/usb/gadget/r8a66597-udc.c index 0b742d171843..7ff7d9cf2061 100644 --- a/trunk/drivers/usb/gadget/r8a66597-udc.c +++ b/trunk/drivers/usb/gadget/r8a66597-udc.c @@ -1977,8 +1977,10 @@ static int __init r8a66597_probe(struct platform_device *pdev) r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep, GFP_KERNEL); - if (r8a66597->ep0_req == NULL) + if (r8a66597->ep0_req == NULL) { + ret = -ENOMEM; goto clean_up3; + } r8a66597->ep0_req->complete = 
nop_completion; ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget); diff --git a/trunk/drivers/usb/gadget/s3c-hsotg.c b/trunk/drivers/usb/gadget/s3c-hsotg.c index a3cdc32115d5..af22f24046b2 100644 --- a/trunk/drivers/usb/gadget/s3c-hsotg.c +++ b/trunk/drivers/usb/gadget/s3c-hsotg.c @@ -437,7 +437,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg, if (hs_req->req.length == 0) return; - usb_gadget_unmap_request(&hsotg->gadget, hs_req, hs_ep->dir_in); + usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in); } /** diff --git a/trunk/drivers/usb/gadget/s3c2410_udc.c b/trunk/drivers/usb/gadget/s3c2410_udc.c index d0e75e1b3ccb..09c4f70c93c4 100644 --- a/trunk/drivers/usb/gadget/s3c2410_udc.c +++ b/trunk/drivers/usb/gadget/s3c2410_udc.c @@ -1851,6 +1851,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev) irq = gpio_to_irq(udc_info->vbus_pin); if (irq < 0) { dev_err(dev, "no irq for gpio vbus pin\n"); + retval = irq; goto err_gpio_claim; } @@ -1948,8 +1949,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev) iounmap(base_addr); release_mem_region(rsrc_start, rsrc_len); - platform_set_drvdata(pdev, NULL); - if (!IS_ERR(udc_clock) && udc_clock != NULL) { clk_disable(udc_clock); clk_put(udc_clock); diff --git a/trunk/drivers/usb/gadget/zero.c b/trunk/drivers/usb/gadget/zero.c index 2cd6262e8b71..0deb9d6cde26 100644 --- a/trunk/drivers/usb/gadget/zero.c +++ b/trunk/drivers/usb/gadget/zero.c @@ -284,12 +284,16 @@ static int __init zero_bind(struct usb_composite_dev *cdev) ss_opts->bulk_buflen = gzero_options.bulk_buflen; func_ss = usb_get_function(func_inst_ss); - if (IS_ERR(func_ss)) + if (IS_ERR(func_ss)) { + status = PTR_ERR(func_ss); goto err_put_func_inst_ss; + } func_inst_lb = usb_get_function_instance("Loopback"); - if (IS_ERR(func_inst_lb)) + if (IS_ERR(func_inst_lb)) { + status = PTR_ERR(func_inst_lb); goto err_put_func_ss; + } lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst); lb_opts->bulk_buflen = gzero_options.bulk_buflen; diff --git a/trunk/drivers/usb/host/Kconfig b/trunk/drivers/usb/host/Kconfig index de94f2699063..344d5e2f87d7 100644 --- a/trunk/drivers/usb/host/Kconfig +++ b/trunk/drivers/usb/host/Kconfig @@ -507,7 +507,7 @@ endif # USB_OHCI_HCD config USB_UHCI_HCD tristate "UHCI HCD (most Intel and VIA) support" - depends on PCI || SPARC_LEON || ARCH_VT8500 + depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC ---help--- The Universal Host Controller Interface is a standard by Intel for accessing the USB hardware in the PC (which is also called the USB @@ -524,26 +524,19 @@ config USB_UHCI_HCD config USB_UHCI_SUPPORT_NON_PCI_HC bool - depends on USB_UHCI_HCD - default y if (SPARC_LEON || ARCH_VT8500) + default y if (SPARC_LEON || USB_UHCI_PLATFORM) config USB_UHCI_PLATFORM - bool "Generic UHCI Platform Driver support" - depends on USB_UHCI_SUPPORT_NON_PCI_HC + bool default y if ARCH_VT8500 - ---help--- - Enable support for generic UHCI platform devices that require no - additional configuration. 
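Several of the gadget fixes in this series (configfs.c and zero.c for ERR_PTR results, fusb300_udc.c, m66592-udc.c and r8a66597-udc.c for NULL allocations) share one bug shape: the error path jumped to the unwind label without first loading the return code from the failed call, so the function could fall through reporting stale or zero status. A minimal sketch of the corrected ERR_PTR variant, with hypothetical helper names:

#include <linux/err.h>
#include <linux/errno.h>

struct widget;                                  /* hypothetical type */
struct widget *example_get_widget(void);        /* returns ptr or ERR_PTR() */
void example_cleanup(void);

static int example_bind(void)
{
        struct widget *w;
        int status;

        w = example_get_widget();
        if (IS_ERR(w)) {
                status = PTR_ERR(w);    /* previously left unset, hiding the error */
                goto err_cleanup;
        }

        return 0;

err_cleanup:
        example_cleanup();
        return status;
}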
config USB_UHCI_BIG_ENDIAN_MMIO bool - depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON - default y + default y if SPARC_LEON config USB_UHCI_BIG_ENDIAN_DESC bool - depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON - default y + default y if SPARC_LEON config USB_FHCI_HCD tristate "Freescale QE USB Host Controller support" diff --git a/trunk/drivers/usb/host/ehci-atmel.c b/trunk/drivers/usb/host/ehci-atmel.c index 66420097c242..02f4611faa62 100644 --- a/trunk/drivers/usb/host/ehci-atmel.c +++ b/trunk/drivers/usb/host/ehci-atmel.c @@ -63,8 +63,6 @@ static void atmel_stop_ehci(struct platform_device *pdev) /*-------------------------------------------------------------------------*/ -static u64 at91_ehci_dma_mask = DMA_BIT_MASK(32); - static int ehci_atmel_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd; @@ -93,7 +91,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &at91_ehci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) { diff --git a/trunk/drivers/usb/host/ehci-hcd.c b/trunk/drivers/usb/host/ehci-hcd.c index 312fc10da3c7..246e124e6ac5 100644 --- a/trunk/drivers/usb/host/ehci-hcd.c +++ b/trunk/drivers/usb/host/ehci-hcd.c @@ -1286,23 +1286,6 @@ MODULE_LICENSE ("GPL"); #define PLATFORM_DRIVER ehci_hcd_sead3_driver #endif -#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \ - !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \ - !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_OMAP) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_ORION) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_SPEAR) && \ - !IS_ENABLED(CONFIG_USB_EHCI_S5P) && \ - !IS_ENABLED(CONFIG_USB_EHCI_HCD_AT91) && \ - !IS_ENABLED(CONFIG_USB_EHCI_MSM) && \ - !defined(PLATFORM_DRIVER) && \ - !defined(PS3_SYSTEM_BUS_DRIVER) && \ - !defined(OF_PLATFORM_DRIVER) && \ - !defined(XILINX_OF_PLATFORM_DRIVER) -#error "missing bus glue for ehci-hcd" -#endif - static int __init ehci_hcd_init(void) { int retval = 0; diff --git a/trunk/drivers/usb/host/ehci-omap.c b/trunk/drivers/usb/host/ehci-omap.c index 3d1491b5f360..16d7150e8557 100644 --- a/trunk/drivers/usb/host/ehci-omap.c +++ b/trunk/drivers/usb/host/ehci-omap.c @@ -90,8 +90,6 @@ static const struct ehci_driver_overrides ehci_omap_overrides __initdata = { .extra_priv_size = sizeof(struct omap_hcd), }; -static u64 omap_ehci_dma_mask = DMA_BIT_MASK(32); - /** * ehci_hcd_omap_probe - initialize TI-based HCDs * @@ -146,8 +144,10 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. 
*/ - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &omap_ehci_dma_mask; + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + if (!dev->coherent_dma_mask) + dev->coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(&ehci_omap_hc_driver, dev, dev_name(dev)); diff --git a/trunk/drivers/usb/host/ehci-orion.c b/trunk/drivers/usb/host/ehci-orion.c index 54c579485150..efbc588b48c5 100644 --- a/trunk/drivers/usb/host/ehci-orion.c +++ b/trunk/drivers/usb/host/ehci-orion.c @@ -137,8 +137,6 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd, } } -static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32); - static int ehci_orion_drv_probe(struct platform_device *pdev) { struct orion_ehci_data *pd = pdev->dev.platform_data; @@ -183,7 +181,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev) * now. Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &ehci_orion_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); if (!request_mem_region(res->start, resource_size(res), ehci_orion_hc_driver.description)) { diff --git a/trunk/drivers/usb/host/ehci-s5p.c b/trunk/drivers/usb/host/ehci-s5p.c index 635775278c7f..379037f51a2f 100644 --- a/trunk/drivers/usb/host/ehci-s5p.c +++ b/trunk/drivers/usb/host/ehci-s5p.c @@ -71,8 +71,6 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev) dev_err(dev, "can't request ehci vbus gpio %d", gpio); } -static u64 ehci_s5p_dma_mask = DMA_BIT_MASK(32); - static int s5p_ehci_probe(struct platform_device *pdev) { struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; @@ -90,7 +88,7 @@ static int s5p_ehci_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &ehci_s5p_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); @@ -107,6 +105,7 @@ static int s5p_ehci_probe(struct platform_device *pdev) if (IS_ERR(phy)) { /* Fallback to pdata */ if (!pdata) { + usb_put_hcd(hcd); dev_warn(&pdev->dev, "no platform data or transceiver defined\n"); return -EPROBE_DEFER; } else { diff --git a/trunk/drivers/usb/host/ehci-spear.c b/trunk/drivers/usb/host/ehci-spear.c index 61ecfb3d52f5..bd3e5cbc6240 100644 --- a/trunk/drivers/usb/host/ehci-spear.c +++ b/trunk/drivers/usb/host/ehci-spear.c @@ -58,8 +58,6 @@ static int ehci_spear_drv_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend, ehci_spear_drv_resume); -static u64 spear_ehci_dma_mask = DMA_BIT_MASK(32); - static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) { struct usb_hcd *hcd ; @@ -84,7 +82,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. 
*/ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &spear_ehci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); usbh_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usbh_clk)) { diff --git a/trunk/drivers/usb/host/ehci-tegra.c b/trunk/drivers/usb/host/ehci-tegra.c index e3eddc31ac83..59d111bf44a9 100644 --- a/trunk/drivers/usb/host/ehci-tegra.c +++ b/trunk/drivers/usb/host/ehci-tegra.c @@ -637,8 +637,6 @@ static void tegra_ehci_set_phcd(struct usb_phy *x, bool enable) writel(val, base + TEGRA_USB_PORTSC1); } -static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32); - static int tegra_ehci_probe(struct platform_device *pdev) { struct resource *res; @@ -661,7 +659,9 @@ static int tegra_ehci_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &tegra_ehci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); setup_vbus_gpio(pdev, pdata); diff --git a/trunk/drivers/usb/host/ehci-tilegx.c b/trunk/drivers/usb/host/ehci-tilegx.c index 1d215cdb9dea..b083a350eea3 100644 --- a/trunk/drivers/usb/host/ehci-tilegx.c +++ b/trunk/drivers/usb/host/ehci-tilegx.c @@ -118,8 +118,10 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev) hcd = usb_create_hcd(&ehci_tilegx_hc_driver, &pdev->dev, dev_name(&pdev->dev)); - if (!hcd) - return -ENOMEM; + if (!hcd) { + ret = -ENOMEM; + goto err_hcd; + } /* * We don't use rsrc_start to map in our registers, but seems like @@ -176,6 +178,7 @@ static int ehci_hcd_tilegx_drv_probe(struct platform_device *pdev) err_no_irq: tilegx_stop_ehc(); usb_put_hcd(hcd); +err_hcd: gxio_usb_host_destroy(&pdata->usb_ctx); return ret; } diff --git a/trunk/drivers/usb/host/isp1760-hcd.c b/trunk/drivers/usb/host/isp1760-hcd.c index 125e261f5bfc..2facee53eab1 100644 --- a/trunk/drivers/usb/host/isp1760-hcd.c +++ b/trunk/drivers/usb/host/isp1760-hcd.c @@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf) int retval = 1; unsigned long flags; - /* if !USB_SUSPEND, root hub timers won't get shut down ... */ + /* if !PM_RUNTIME, root hub timers won't get shut down ... */ if (!HC_IS_RUNNING(hcd->state)) return 0; diff --git a/trunk/drivers/usb/host/isp1760-if.c b/trunk/drivers/usb/host/isp1760-if.c index bbb791bd7617..a13709ee4e5d 100644 --- a/trunk/drivers/usb/host/isp1760-if.c +++ b/trunk/drivers/usb/host/isp1760-if.c @@ -373,8 +373,10 @@ static int isp1760_plat_probe(struct platform_device *pdev) irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq_res) { pr_warning("isp1760: IRQ resource not available\n"); - return -ENODEV; + ret = -ENODEV; + goto cleanup; } + irqflags |= irq_res->flags & IRQF_TRIGGER_MASK; if (priv) { diff --git a/trunk/drivers/usb/host/ohci-at91.c b/trunk/drivers/usb/host/ohci-at91.c index a0cb44f0e724..2ee1496dbc1d 100644 --- a/trunk/drivers/usb/host/ohci-at91.c +++ b/trunk/drivers/usb/host/ohci-at91.c @@ -504,8 +504,6 @@ static const struct of_device_id at91_ohci_dt_ids[] = { MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids); -static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32); - static int ohci_at91_of_init(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -522,7 +520,9 @@ static int ohci_at91_of_init(struct platform_device *pdev) * Once we have dma capability bindings this can go away. 
*/ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &at91_ohci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) diff --git a/trunk/drivers/usb/host/ohci-exynos.c b/trunk/drivers/usb/host/ohci-exynos.c index 07592c00af26..b0b542c14e31 100644 --- a/trunk/drivers/usb/host/ohci-exynos.c +++ b/trunk/drivers/usb/host/ohci-exynos.c @@ -98,8 +98,6 @@ static const struct hc_driver exynos_ohci_hc_driver = { .start_port_reset = ohci_start_port_reset, }; -static u64 ohci_exynos_dma_mask = DMA_BIT_MASK(32); - static int exynos_ohci_probe(struct platform_device *pdev) { struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data; @@ -117,7 +115,7 @@ static int exynos_ohci_probe(struct platform_device *pdev) * Once we move to full device tree support this will vanish off. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &ohci_exynos_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; if (!pdev->dev.coherent_dma_mask) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); diff --git a/trunk/drivers/usb/host/ohci-hcd.c b/trunk/drivers/usb/host/ohci-hcd.c index 9e6de9586ae4..fc627fd54116 100644 --- a/trunk/drivers/usb/host/ohci-hcd.c +++ b/trunk/drivers/usb/host/ohci-hcd.c @@ -233,14 +233,14 @@ static int ohci_urb_enqueue ( urb->start_frame = frame; } } else if (ed->type == PIPE_ISOCHRONOUS) { - u16 next = ohci_frame_no(ohci) + 2; + u16 next = ohci_frame_no(ohci) + 1; u16 frame = ed->last_iso + ed->interval; /* Behind the scheduling threshold? */ if (unlikely(tick_before(frame, next))) { /* USB_ISO_ASAP: Round up to the first available slot */ - if (urb->transfer_flags & URB_ISO_ASAP) + if (urb->transfer_flags & URB_ISO_ASAP) { frame += (next - frame + ed->interval - 1) & -ed->interval; @@ -248,21 +248,25 @@ static int ohci_urb_enqueue ( * Not ASAP: Use the next slot in the stream. If * the entire URB falls before the threshold, fail. */ - else if (tick_before(frame + ed->interval * + } else { + if (tick_before(frame + ed->interval * (urb->number_of_packets - 1), next)) { - retval = -EXDEV; - usb_hcd_unlink_urb_from_ep(hcd, urb); - goto fail; - } + retval = -EXDEV; + usb_hcd_unlink_urb_from_ep(hcd, urb); + goto fail; + } - /* - * Some OHCI hardware doesn't handle late TDs - * correctly. After retiring them it proceeds to - * the next ED instead of the next TD. Therefore - * we have to omit the late TDs entirely. - */ - urb_priv->td_cnt = DIV_ROUND_UP(next - frame, - ed->interval); + /* + * Some OHCI hardware doesn't handle late TDs + * correctly. After retiring them it proceeds + * to the next ED instead of the next TD. + * Therefore we have to omit the late TDs + * entirely. 
+ */ + urb_priv->td_cnt = DIV_ROUND_UP( + (u16) (next - frame), + ed->interval); + } } urb->start_frame = frame; } diff --git a/trunk/drivers/usb/host/ohci-nxp.c b/trunk/drivers/usb/host/ohci-nxp.c index f4988fbe78e7..5d7eb72c5064 100644 --- a/trunk/drivers/usb/host/ohci-nxp.c +++ b/trunk/drivers/usb/host/ohci-nxp.c @@ -223,8 +223,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) isp1301_i2c_client = isp1301_get_client(isp1301_node); if (!isp1301_i2c_client) { - ret = -EPROBE_DEFER; - goto out; + return -EPROBE_DEFER; } pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); @@ -234,7 +233,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (usb_disabled()) { dev_err(&pdev->dev, "USB is disabled\n"); ret = -ENODEV; - goto out; + goto fail_disable; } /* Enable AHB slave USB clock, needed for further USB clock control */ @@ -245,19 +244,19 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_pll_clk)) { dev_err(&pdev->dev, "failed to acquire USB PLL\n"); ret = PTR_ERR(usb_pll_clk); - goto out1; + goto fail_pll; } ret = clk_enable(usb_pll_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB PLL\n"); - goto out2; + goto fail_pllen; } ret = clk_set_rate(usb_pll_clk, 48000); if (ret < 0) { dev_err(&pdev->dev, "failed to set USB clock rate\n"); - goto out3; + goto fail_rate; } /* Enable USB device clock */ @@ -265,13 +264,13 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_dev_clk)) { dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); ret = PTR_ERR(usb_dev_clk); - goto out4; + goto fail_dev; } ret = clk_enable(usb_dev_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); - goto out5; + goto fail_deven; } /* Enable USB otg clocks */ @@ -279,7 +278,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (IS_ERR(usb_otg_clk)) { dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); ret = PTR_ERR(usb_otg_clk); - goto out6; + goto fail_otg; } __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL); @@ -287,7 +286,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) ret = clk_enable(usb_otg_clk); if (ret < 0) { dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); - goto out7; + goto fail_otgen; } isp1301_configure(); @@ -296,20 +295,14 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) if (!hcd) { dev_err(&pdev->dev, "Failed to allocate HC buffer\n"); ret = -ENOMEM; - goto out8; + goto fail_hcd; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "Failed to get MEM resource\n"); - ret = -ENOMEM; - goto out8; - } - hcd->regs = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(hcd->regs)) { ret = PTR_ERR(hcd->regs); - goto out8; + goto fail_resource; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); @@ -317,7 +310,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; - goto out8; + goto fail_resource; } nxp_start_hc(); @@ -331,23 +324,24 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev) return ret; nxp_stop_hc(); -out8: +fail_resource: usb_put_hcd(hcd); -out7: +fail_hcd: clk_disable(usb_otg_clk); -out6: +fail_otgen: clk_put(usb_otg_clk); -out5: +fail_otg: clk_disable(usb_dev_clk); -out4: +fail_deven: clk_put(usb_dev_clk); -out3: +fail_dev: +fail_rate: clk_disable(usb_pll_clk); -out2: +fail_pllen: clk_put(usb_pll_clk); -out1: +fail_pll: +fail_disable: isp1301_i2c_client = NULL; -out: return ret; } diff 
--git a/trunk/drivers/usb/host/ohci-omap3.c b/trunk/drivers/usb/host/ohci-omap3.c index ddfc31427bc0..8663851c8d8e 100644 --- a/trunk/drivers/usb/host/ohci-omap3.c +++ b/trunk/drivers/usb/host/ohci-omap3.c @@ -114,8 +114,6 @@ static const struct hc_driver ohci_omap3_hc_driver = { /*-------------------------------------------------------------------------*/ -static u64 omap_ohci_dma_mask = DMA_BIT_MASK(32); - /* * configure so an HC device and id are always provided * always called with process context; sleeping is OK @@ -168,8 +166,10 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev) * Since shared usb code relies on it, set it here for now. * Once we have dma capability bindings this can go away. */ - if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &omap_ohci_dma_mask; + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + if (!dev->coherent_dma_mask) + dev->coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, dev_name(dev)); diff --git a/trunk/drivers/usb/host/ohci-pxa27x.c b/trunk/drivers/usb/host/ohci-pxa27x.c index efe71f3ca477..279b2ef17411 100644 --- a/trunk/drivers/usb/host/ohci-pxa27x.c +++ b/trunk/drivers/usb/host/ohci-pxa27x.c @@ -282,8 +282,6 @@ static const struct of_device_id pxa_ohci_dt_ids[] = { MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids); -static u64 pxa_ohci_dma_mask = DMA_BIT_MASK(32); - static int ohci_pxa_of_init(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -298,7 +296,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev) * Once we have dma capability bindings this can go away. */ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &pxa_ohci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) diff --git a/trunk/drivers/usb/host/ohci-spear.c b/trunk/drivers/usb/host/ohci-spear.c index 9020bf0e2eca..3e19e0170d11 100644 --- a/trunk/drivers/usb/host/ohci-spear.c +++ b/trunk/drivers/usb/host/ohci-spear.c @@ -91,8 +91,6 @@ static const struct hc_driver ohci_spear_hc_driver = { .start_port_reset = ohci_start_port_reset, }; -static u64 spear_ohci_dma_mask = DMA_BIT_MASK(32); - static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) { const struct hc_driver *driver = &ohci_spear_hc_driver; @@ -114,7 +112,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. 
*/ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &spear_ohci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); usbh_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usbh_clk)) { diff --git a/trunk/drivers/usb/host/ohci-tilegx.c b/trunk/drivers/usb/host/ohci-tilegx.c index 1ae7b28a71c2..ea73009de623 100644 --- a/trunk/drivers/usb/host/ohci-tilegx.c +++ b/trunk/drivers/usb/host/ohci-tilegx.c @@ -112,8 +112,10 @@ static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev) hcd = usb_create_hcd(&ohci_tilegx_hc_driver, &pdev->dev, dev_name(&pdev->dev)); - if (!hcd) - return -ENOMEM; + if (!hcd) { + ret = -ENOMEM; + goto err_hcd; + } /* * We don't use rsrc_start to map in our registers, but seems like @@ -165,6 +167,7 @@ static int ohci_hcd_tilegx_drv_probe(struct platform_device *pdev) err_no_irq: tilegx_stop_ohc(); usb_put_hcd(hcd); +err_hcd: gxio_usb_host_destroy(&pdata->usb_ctx); return ret; } diff --git a/trunk/drivers/usb/host/oxu210hp-hcd.c b/trunk/drivers/usb/host/oxu210hp-hcd.c index 4f0f0339532f..0f401dbfaf07 100644 --- a/trunk/drivers/usb/host/oxu210hp-hcd.c +++ b/trunk/drivers/usb/host/oxu210hp-hcd.c @@ -3084,7 +3084,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf) int ports, i, retval = 1; unsigned long flags; - /* if !USB_SUSPEND, root hub timers won't get shut down ... */ + /* if !PM_RUNTIME, root hub timers won't get shut down ... */ if (!HC_IS_RUNNING(hcd->state)) return 0; diff --git a/trunk/drivers/usb/host/sl811-hcd.c b/trunk/drivers/usb/host/sl811-hcd.c index ad4483efb6d6..b2ec7fe758dd 100644 --- a/trunk/drivers/usb/host/sl811-hcd.c +++ b/trunk/drivers/usb/host/sl811-hcd.c @@ -22,7 +22,7 @@ * and usb-storage. * * TODO: - * - usb suspend/resume triggered by sl811 (with USB_SUSPEND) + * - usb suspend/resume triggered by sl811 (with PM_RUNTIME) * - various issues noted in the code * - performance work; use both register banks; ... * - use urb->iso_frame_desc[] with ISO transfers diff --git a/trunk/drivers/usb/host/uhci-hub.c b/trunk/drivers/usb/host/uhci-hub.c index f87bee6d2789..9189bc984c98 100644 --- a/trunk/drivers/usb/host/uhci-hub.c +++ b/trunk/drivers/usb/host/uhci-hub.c @@ -225,7 +225,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf) /* auto-stop if nothing connected for 1 second */ if (any_ports_active(uhci)) uhci->rh_state = UHCI_RH_RUNNING; - else if (time_after_eq(jiffies, uhci->auto_stop_time)) + else if (time_after_eq(jiffies, uhci->auto_stop_time) && + !uhci->wait_for_hp) suspend_rh(uhci, UHCI_RH_AUTO_STOPPED); break; diff --git a/trunk/drivers/usb/host/uhci-platform.c b/trunk/drivers/usb/host/uhci-platform.c index 8c4dace4b14a..f1db61ada6a8 100644 --- a/trunk/drivers/usb/host/uhci-platform.c +++ b/trunk/drivers/usb/host/uhci-platform.c @@ -60,8 +60,6 @@ static const struct hc_driver uhci_platform_hc_driver = { .hub_control = uhci_hub_control, }; -static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32); - static int uhci_hcd_platform_probe(struct platform_device *pdev) { struct usb_hcd *hcd; @@ -78,7 +76,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev) * Once we have dma capability bindings this can go away. 
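The hunks above all make the same two-part change to the EHCI/OHCI/UHCI platform glue drivers: the per-driver static u64 DMA mask, a writable global shared by every device instance, is removed, and each probe routine instead points dev->dma_mask at the device's own coherent_dma_mask, defaulting both to a 32-bit mask. A rough sketch of the resulting idiom only; foo_usb_probe() and its surroundings are illustrative placeholders, not part of this series:

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int foo_usb_probe(struct platform_device *pdev)
{
	/*
	 * Device-tree probing does not set up the DMA masks yet, but the
	 * shared USB core expects them to be valid, so fill them in here
	 * until DMA capability bindings exist.
	 */
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	/* ... allocate and register the HCD exactly as before ... */
	return 0;
}

Pointing dma_mask at coherent_dma_mask keeps the mask storage inside struct device itself, so nothing driver-global is shared between otherwise independent platform devices.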
*/ if (!pdev->dev.dma_mask) - pdev->dev.dma_mask = &platform_uhci_dma_mask; + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; + if (!pdev->dev.coherent_dma_mask) + pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev, pdev->name); diff --git a/trunk/drivers/usb/host/uhci-q.c b/trunk/drivers/usb/host/uhci-q.c index f0976d8190bc..041c6ddb695c 100644 --- a/trunk/drivers/usb/host/uhci-q.c +++ b/trunk/drivers/usb/host/uhci-q.c @@ -1287,7 +1287,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, return -EINVAL; /* Can't change the period */ } else { - next = uhci->frame_number + 2; + next = uhci->frame_number + 1; /* Find the next unused frame */ if (list_empty(&qh->queue)) { diff --git a/trunk/drivers/usb/host/xhci-mem.c b/trunk/drivers/usb/host/xhci-mem.c index 965b539bc474..2cfc465925bd 100644 --- a/trunk/drivers/usb/host/xhci-mem.c +++ b/trunk/drivers/usb/host/xhci-mem.c @@ -1423,15 +1423,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); /* Set the max packet size and max burst */ + max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); + max_burst = 0; switch (udev->speed) { case USB_SPEED_SUPER: - max_packet = usb_endpoint_maxp(&ep->desc); - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); /* dig out max burst from ep companion desc */ - max_packet = ep->ss_ep_comp.bMaxBurst; - ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet)); + max_burst = ep->ss_ep_comp.bMaxBurst; break; case USB_SPEED_HIGH: + /* Some devices get this wrong */ + if (usb_endpoint_xfer_bulk(&ep->desc)) + max_packet = 512; /* bits 11:12 specify the number of additional transaction * opportunities per microframe (USB 2.0, section 9.6.6) */ @@ -1439,17 +1441,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, usb_endpoint_xfer_int(&ep->desc)) { max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11; - ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst)); } - /* Fall through */ + break; case USB_SPEED_FULL: case USB_SPEED_LOW: - max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc)); - ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet)); break; default: BUG(); } + ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) | + MAX_BURST(max_burst)); max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); diff --git a/trunk/drivers/usb/musb/musb_dsps.c b/trunk/drivers/usb/musb/musb_dsps.c index 3a18e44e9391..e1b661d04021 100644 --- a/trunk/drivers/usb/musb/musb_dsps.c +++ b/trunk/drivers/usb/musb/musb_dsps.c @@ -560,6 +560,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) if (!config) { dev_err(&pdev->dev, "failed to allocate musb hdrc config\n"); + ret = -ENOMEM; goto err2; } diff --git a/trunk/drivers/usb/musb/omap2430.c b/trunk/drivers/usb/musb/omap2430.c index 3551f1a30c65..628b93fe5ccc 100644 --- a/trunk/drivers/usb/musb/omap2430.c +++ b/trunk/drivers/usb/musb/omap2430.c @@ -549,7 +549,8 @@ static int omap2430_probe(struct platform_device *pdev) glue->control_otghs = omap_get_control_dev(); if (IS_ERR(glue->control_otghs)) { dev_vdbg(&pdev->dev, "Failed to get control device\n"); - return -ENODEV; + ret = PTR_ERR(glue->control_otghs); + goto err2; } } else { glue->control_otghs = ERR_PTR(-ENODEV); diff --git a/trunk/drivers/usb/phy/Kconfig b/trunk/drivers/usb/phy/Kconfig index aab2ab2fbc90..7ef3eb8617a6 100644 --- a/trunk/drivers/usb/phy/Kconfig +++ 
b/trunk/drivers/usb/phy/Kconfig @@ -25,7 +25,7 @@ config AB8500_USB config FSL_USB2_OTG bool "Freescale USB OTG Transceiver Driver" - depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND + depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME select USB_OTG help Enable this to support Freescale USB OTG transceiver. @@ -128,7 +128,7 @@ config TWL6030_USB config USB_GPIO_VBUS tristate "GPIO based peripheral-only VBUS sensing 'transceiver'" - depends on GENERIC_GPIO + depends on GPIOLIB help Provides simple GPIO VBUS sensing for controllers with an internal transceiver via the usb_phy interface, and @@ -139,7 +139,6 @@ config USB_ISP1301 tristate "NXP ISP1301 USB transceiver support" depends on USB || USB_GADGET depends on I2C - select USB_OTG_UTILS help Say Y here to add support for the NXP ISP1301 USB transceiver driver. This chip is typically used as USB transceiver for USB host, gadget @@ -162,7 +161,7 @@ config USB_MSM_OTG config USB_MV_OTG tristate "Marvell USB OTG support" - depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND + depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME select USB_OTG help Say Y here if you want to build Marvell USB OTG transciever diff --git a/trunk/drivers/usb/phy/phy-ab8500-usb.c b/trunk/drivers/usb/phy/phy-ab8500-usb.c index 4acef26a2ef5..e5eb1b5a04eb 100644 --- a/trunk/drivers/usb/phy/phy-ab8500-usb.c +++ b/trunk/drivers/usb/phy/phy-ab8500-usb.c @@ -892,8 +892,6 @@ static int ab8500_usb_remove(struct platform_device *pdev) else if (ab->mode == USB_PERIPHERAL) ab8500_usb_peri_phy_dis(ab); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/trunk/drivers/usb/phy/phy-fsl-usb.c b/trunk/drivers/usb/phy/phy-fsl-usb.c index 97b9308507c3..e771bafb9f1d 100644 --- a/trunk/drivers/usb/phy/phy-fsl-usb.c +++ b/trunk/drivers/usb/phy/phy-fsl-usb.c @@ -799,6 +799,7 @@ static int fsl_otg_conf(struct platform_device *pdev) /* initialize the otg structure */ fsl_otg_tc->phy.label = DRIVER_DESC; + fsl_otg_tc->phy.dev = &pdev->dev; fsl_otg_tc->phy.set_power = fsl_otg_set_power; fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy; diff --git a/trunk/drivers/usb/phy/phy-gpio-vbus-usb.c b/trunk/drivers/usb/phy/phy-gpio-vbus-usb.c index 4c76074e518d..8443335c2ea0 100644 --- a/trunk/drivers/usb/phy/phy-gpio-vbus-usb.c +++ b/trunk/drivers/usb/phy/phy-gpio-vbus-usb.c @@ -266,6 +266,7 @@ static int __init gpio_vbus_probe(struct platform_device *pdev) platform_set_drvdata(pdev, gpio_vbus); gpio_vbus->dev = &pdev->dev; gpio_vbus->phy.label = "gpio-vbus"; + gpio_vbus->phy.dev = gpio_vbus->dev; gpio_vbus->phy.set_power = gpio_vbus_set_power; gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend; gpio_vbus->phy.state = OTG_STATE_UNDEFINED; @@ -343,7 +344,6 @@ static int __init gpio_vbus_probe(struct platform_device *pdev) gpio_free(pdata->gpio_pullup); gpio_free(pdata->gpio_vbus); err_gpio: - platform_set_drvdata(pdev, NULL); kfree(gpio_vbus->phy.otg); kfree(gpio_vbus); return err; @@ -365,7 +365,6 @@ static int __exit gpio_vbus_remove(struct platform_device *pdev) if (gpio_is_valid(pdata->gpio_pullup)) gpio_free(pdata->gpio_pullup); gpio_free(gpio); - platform_set_drvdata(pdev, NULL); kfree(gpio_vbus->phy.otg); kfree(gpio_vbus); diff --git a/trunk/drivers/usb/phy/phy-isp1301.c b/trunk/drivers/usb/phy/phy-isp1301.c index 225ae6c97eeb..8a55b37d1a02 100644 --- a/trunk/drivers/usb/phy/phy-isp1301.c +++ b/trunk/drivers/usb/phy/phy-isp1301.c @@ -102,6 +102,7 @@ static int isp1301_probe(struct i2c_client *client, mutex_init(&isp->mutex); phy = &isp->phy; + phy->dev = &client->dev; phy->label = 
DRV_NAME; phy->init = isp1301_phy_init; phy->set_vbus = isp1301_phy_set_vbus; diff --git a/trunk/drivers/usb/phy/phy-mv-u3d-usb.c b/trunk/drivers/usb/phy/phy-mv-u3d-usb.c index f7838a43347c..1568ea63e338 100644 --- a/trunk/drivers/usb/phy/phy-mv-u3d-usb.c +++ b/trunk/drivers/usb/phy/phy-mv-u3d-usb.c @@ -278,11 +278,6 @@ static int mv_u3d_phy_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "missing mem resource\n"); - return -ENODEV; - } - phy_base = devm_ioremap_resource(dev, res); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/trunk/drivers/usb/phy/phy-mv-usb.c b/trunk/drivers/usb/phy/phy-mv-usb.c index c987bbe27851..4a6b03c73876 100644 --- a/trunk/drivers/usb/phy/phy-mv-usb.c +++ b/trunk/drivers/usb/phy/phy-mv-usb.c @@ -667,7 +667,6 @@ int mv_otg_remove(struct platform_device *pdev) mv_otg_disable(mvotg); usb_remove_phy(&mvotg->phy); - platform_set_drvdata(pdev, NULL); return 0; } @@ -850,8 +849,6 @@ static int mv_otg_probe(struct platform_device *pdev) flush_workqueue(mvotg->qwork); destroy_workqueue(mvotg->qwork); - platform_set_drvdata(pdev, NULL); - return retval; } diff --git a/trunk/drivers/usb/phy/phy-mxs-usb.c b/trunk/drivers/usb/phy/phy-mxs-usb.c index 9d4381e64d51..bd601c537c8d 100644 --- a/trunk/drivers/usb/phy/phy-mxs-usb.c +++ b/trunk/drivers/usb/phy/phy-mxs-usb.c @@ -130,11 +130,6 @@ static int mxs_phy_probe(struct platform_device *pdev) int ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(&pdev->dev, "can't get device resources\n"); - return -ENOENT; - } - base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); @@ -160,6 +155,7 @@ static int mxs_phy_probe(struct platform_device *pdev) mxs_phy->phy.set_suspend = mxs_phy_suspend; mxs_phy->phy.notify_connect = mxs_phy_on_connect; mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect; + mxs_phy->phy.type = USB_PHY_TYPE_USB2; ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier); @@ -180,8 +176,6 @@ static int mxs_phy_remove(struct platform_device *pdev) usb_remove_phy(&mxs_phy->phy); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/trunk/drivers/usb/phy/phy-nop.c b/trunk/drivers/usb/phy/phy-nop.c index 2b10cc969bbb..638cc5dade35 100644 --- a/trunk/drivers/usb/phy/phy-nop.c +++ b/trunk/drivers/usb/phy/phy-nop.c @@ -254,8 +254,6 @@ static int nop_usb_xceiv_remove(struct platform_device *pdev) usb_remove_phy(&nop->phy); - platform_set_drvdata(pdev, NULL); - return 0; } diff --git a/trunk/drivers/usb/phy/phy-samsung-usb2.c b/trunk/drivers/usb/phy/phy-samsung-usb2.c index 45ffe036dacc..9d5e273abcc7 100644 --- a/trunk/drivers/usb/phy/phy-samsung-usb2.c +++ b/trunk/drivers/usb/phy/phy-samsung-usb2.c @@ -363,11 +363,6 @@ static int samsung_usb2phy_probe(struct platform_device *pdev) int ret; phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!phy_mem) { - dev_err(dev, "%s: missing mem resource\n", __func__); - return -ENODEV; - } - phy_base = devm_ioremap_resource(dev, phy_mem); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/trunk/drivers/usb/phy/phy-samsung-usb3.c b/trunk/drivers/usb/phy/phy-samsung-usb3.c index 133f3d0c554f..5a9efcbcb532 100644 --- a/trunk/drivers/usb/phy/phy-samsung-usb3.c +++ b/trunk/drivers/usb/phy/phy-samsung-usb3.c @@ -239,11 +239,6 @@ static int samsung_usb3phy_probe(struct platform_device *pdev) int ret; phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!phy_mem) { - dev_err(dev, "%s: missing mem resource\n", 
__func__); - return -ENODEV; - } - phy_base = devm_ioremap_resource(dev, phy_mem); if (IS_ERR(phy_base)) return PTR_ERR(phy_base); diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index 242b5776648a..7260ec660347 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -189,6 +189,8 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, @@ -924,8 +926,8 @@ static int ftdi_tiocmset(struct tty_struct *tty, static int ftdi_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg); static void ftdi_break_ctl(struct tty_struct *tty, int break_state); -static int ftdi_chars_in_buffer(struct tty_struct *tty); -static int ftdi_get_modem_status(struct tty_struct *tty, +static bool ftdi_tx_empty(struct usb_serial_port *port); +static int ftdi_get_modem_status(struct usb_serial_port *port, unsigned char status[2]); static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base); @@ -961,7 +963,7 @@ static struct usb_serial_driver ftdi_sio_device = { .ioctl = ftdi_ioctl, .set_termios = ftdi_set_termios, .break_ctl = ftdi_break_ctl, - .chars_in_buffer = ftdi_chars_in_buffer, + .tx_empty = ftdi_tx_empty, }; static struct usb_serial_driver * const serial_drivers[] = { @@ -2056,27 +2058,18 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state) } -static int ftdi_chars_in_buffer(struct tty_struct *tty) +static bool ftdi_tx_empty(struct usb_serial_port *port) { - struct usb_serial_port *port = tty->driver_data; - int chars; unsigned char buf[2]; int ret; - chars = usb_serial_generic_chars_in_buffer(tty); - if (chars) - goto out; - - /* Check if hardware buffer is empty. */ - ret = ftdi_get_modem_status(tty, buf); + ret = ftdi_get_modem_status(port, buf); if (ret == 2) { if (!(buf[1] & FTDI_RS_TEMT)) - chars = 1; + return false; } -out: - dev_dbg(&port->dev, "%s - %d\n", __func__, chars); - return chars; + return true; } /* old_termios contains the original termios settings and tty->termios contains @@ -2268,10 +2261,9 @@ static void ftdi_set_termios(struct tty_struct *tty, * Returns the number of status bytes retrieved (device dependant), or * negative error code. 
*/ -static int ftdi_get_modem_status(struct tty_struct *tty, +static int ftdi_get_modem_status(struct usb_serial_port *port, unsigned char status[2]) { - struct usb_serial_port *port = tty->driver_data; struct ftdi_private *priv = usb_get_serial_port_data(port); unsigned char *buf; int len; @@ -2336,7 +2328,7 @@ static int ftdi_tiocmget(struct tty_struct *tty) unsigned char buf[2]; int ret; - ret = ftdi_get_modem_status(tty, buf); + ret = ftdi_get_modem_status(port, buf); if (ret < 0) return ret; diff --git a/trunk/drivers/usb/serial/ftdi_sio_ids.h b/trunk/drivers/usb/serial/ftdi_sio_ids.h index 98528270c43c..6dd79253205d 100644 --- a/trunk/drivers/usb/serial/ftdi_sio_ids.h +++ b/trunk/drivers/usb/serial/ftdi_sio_ids.h @@ -772,6 +772,8 @@ */ #define NEWPORT_VID 0x104D #define NEWPORT_AGILIS_PID 0x3000 +#define NEWPORT_CONEX_CC_PID 0x3002 +#define NEWPORT_CONEX_AGP_PID 0x3006 /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ diff --git a/trunk/drivers/usb/serial/generic.c b/trunk/drivers/usb/serial/generic.c index 297665fdd16d..ba45170c78e5 100644 --- a/trunk/drivers/usb/serial/generic.c +++ b/trunk/drivers/usb/serial/generic.c @@ -253,6 +253,37 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty) } EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer); +void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout) +{ + struct usb_serial_port *port = tty->driver_data; + unsigned int bps; + unsigned long period; + unsigned long expire; + + bps = tty_get_baud_rate(tty); + if (!bps) + bps = 9600; /* B0 */ + /* + * Use a poll-period of roughly the time it takes to send one + * character or at least one jiffy. + */ + period = max_t(unsigned long, (10 * HZ / bps), 1); + period = min_t(unsigned long, period, timeout); + + dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n", + __func__, jiffies_to_msecs(timeout), + jiffies_to_msecs(period)); + expire = jiffies + timeout; + while (!port->serial->type->tx_empty(port)) { + schedule_timeout_interruptible(period); + if (signal_pending(current)) + break; + if (time_after(jiffies, expire)) + break; + } +} +EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent); + static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port, int index, gfp_t mem_flags) { diff --git a/trunk/drivers/usb/serial/io_ti.c b/trunk/drivers/usb/serial/io_ti.c index 158bf4bc29cc..1be6ba7bee27 100644 --- a/trunk/drivers/usb/serial/io_ti.c +++ b/trunk/drivers/usb/serial/io_ti.c @@ -2019,8 +2019,6 @@ static int edge_chars_in_buffer(struct tty_struct *tty) struct edgeport_port *edge_port = usb_get_serial_port_data(port); int chars = 0; unsigned long flags; - int ret; - if (edge_port == NULL) return 0; @@ -2028,16 +2026,22 @@ static int edge_chars_in_buffer(struct tty_struct *tty) chars = kfifo_len(&edge_port->write_fifo); spin_unlock_irqrestore(&edge_port->ep_lock, flags); - if (!chars) { - ret = tx_active(edge_port); - if (ret > 0) - chars = ret; - } - dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars); return chars; } +static bool edge_tx_empty(struct usb_serial_port *port) +{ + struct edgeport_port *edge_port = usb_get_serial_port_data(port); + int ret; + + ret = tx_active(edge_port); + if (ret > 0) + return false; + + return true; +} + static void edge_throttle(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; @@ -2557,6 +2561,7 @@ static struct usb_serial_driver edgeport_1port_device = { .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = 
edge_chars_in_buffer, + .tx_empty = edge_tx_empty, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, @@ -2589,6 +2594,7 @@ static struct usb_serial_driver edgeport_2port_device = { .write = edge_write, .write_room = edge_write_room, .chars_in_buffer = edge_chars_in_buffer, + .tx_empty = edge_tx_empty, .break_ctl = edge_break, .read_int_callback = edge_interrupt_callback, .read_bulk_callback = edge_bulk_in_callback, diff --git a/trunk/drivers/usb/serial/option.c b/trunk/drivers/usb/serial/option.c index 734372846abb..93d02bc4eb52 100644 --- a/trunk/drivers/usb/serial/option.c +++ b/trunk/drivers/usb/serial/option.c @@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ +#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -341,8 +342,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 -#define CINTERION_PRODUCT_AH6 0x0055 -#define CINTERION_PRODUCT_PLS8 0x0060 +#define CINTERION_PRODUCT_AHXX 0x0055 +#define CINTERION_PRODUCT_PLXX 0x0060 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -771,6 +772,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -966,6 +968,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), @@ -1264,8 +1268,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, 
CINTERION_PRODUCT_HC25_MDM) }, diff --git a/trunk/drivers/usb/serial/ti_usb_3410_5052.c b/trunk/drivers/usb/serial/ti_usb_3410_5052.c index cac47aef2918..c92c5ed4e580 100644 --- a/trunk/drivers/usb/serial/ti_usb_3410_5052.c +++ b/trunk/drivers/usb/serial/ti_usb_3410_5052.c @@ -101,6 +101,7 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *data, int count); static int ti_write_room(struct tty_struct *tty); static int ti_chars_in_buffer(struct tty_struct *tty); +static bool ti_tx_empty(struct usb_serial_port *port); static void ti_throttle(struct tty_struct *tty); static void ti_unthrottle(struct tty_struct *tty); static int ti_ioctl(struct tty_struct *tty, @@ -222,6 +223,7 @@ static struct usb_serial_driver ti_1port_device = { .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, + .tx_empty = ti_tx_empty, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .ioctl = ti_ioctl, @@ -253,6 +255,7 @@ static struct usb_serial_driver ti_2port_device = { .write = ti_write, .write_room = ti_write_room, .chars_in_buffer = ti_chars_in_buffer, + .tx_empty = ti_tx_empty, .throttle = ti_throttle, .unthrottle = ti_unthrottle, .ioctl = ti_ioctl, @@ -684,8 +687,6 @@ static int ti_chars_in_buffer(struct tty_struct *tty) struct ti_port *tport = usb_get_serial_port_data(port); int chars = 0; unsigned long flags; - int ret; - u8 lsr; if (tport == NULL) return 0; @@ -694,16 +695,22 @@ static int ti_chars_in_buffer(struct tty_struct *tty) chars = kfifo_len(&tport->write_fifo); spin_unlock_irqrestore(&tport->tp_lock, flags); - if (!chars) { - ret = ti_get_lsr(tport, &lsr); - if (!ret && !(lsr & TI_LSR_TX_EMPTY)) - chars = 1; - } - dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars); return chars; } +static bool ti_tx_empty(struct usb_serial_port *port) +{ + struct ti_port *tport = usb_get_serial_port_data(port); + int ret; + u8 lsr; + + ret = ti_get_lsr(tport, &lsr); + if (!ret && !(lsr & TI_LSR_TX_EMPTY)) + return false; + + return true; +} static void ti_throttle(struct tty_struct *tty) { diff --git a/trunk/drivers/usb/serial/usb-serial.c b/trunk/drivers/usb/serial/usb-serial.c index cf75beb1251b..4753c005cfb6 100644 --- a/trunk/drivers/usb/serial/usb-serial.c +++ b/trunk/drivers/usb/serial/usb-serial.c @@ -359,20 +359,29 @@ static int serial_chars_in_buffer(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct usb_serial *serial = port->serial; - int count = 0; dev_dbg(tty->dev, "%s\n", __func__); - mutex_lock(&serial->disc_mutex); - /* if the device was unplugged then any remaining characters - fell out of the connector ;) */ if (serial->disconnected) - count = 0; - else - count = serial->type->chars_in_buffer(tty); - mutex_unlock(&serial->disc_mutex); + return 0; + + return serial->type->chars_in_buffer(tty); +} + +static void serial_wait_until_sent(struct tty_struct *tty, int timeout) +{ + struct usb_serial_port *port = tty->driver_data; + struct usb_serial *serial = port->serial; + + dev_dbg(tty->dev, "%s\n", __func__); + + if (!port->serial->type->wait_until_sent) + return; - return count; + mutex_lock(&serial->disc_mutex); + if (!serial->disconnected) + port->serial->type->wait_until_sent(tty, timeout); + mutex_unlock(&serial->disc_mutex); } static void serial_throttle(struct tty_struct *tty) @@ -1191,6 +1200,7 @@ static const struct tty_operations serial_ops = { .unthrottle = serial_unthrottle, .break_ctl = serial_break, .chars_in_buffer = serial_chars_in_buffer, + .wait_until_sent = 
serial_wait_until_sent, .tiocmget = serial_tiocmget, .tiocmset = serial_tiocmset, .get_icount = serial_get_icount, @@ -1316,6 +1326,8 @@ static void usb_serial_operations_init(struct usb_serial_driver *device) set_to_generic_if_null(device, close); set_to_generic_if_null(device, write_room); set_to_generic_if_null(device, chars_in_buffer); + if (device->tx_empty) + set_to_generic_if_null(device, wait_until_sent); set_to_generic_if_null(device, read_bulk_callback); set_to_generic_if_null(device, write_bulk_callback); set_to_generic_if_null(device, process_read_urb); diff --git a/trunk/drivers/usb/storage/realtek_cr.c b/trunk/drivers/usb/storage/realtek_cr.c index 8623577bbbe7..281be56d5648 100644 --- a/trunk/drivers/usb/storage/realtek_cr.c +++ b/trunk/drivers/usb/storage/realtek_cr.c @@ -105,8 +105,9 @@ struct rts51x_chip { int status_len; u32 flag; -#ifdef CONFIG_REALTEK_AUTOPM struct us_data *us; + +#ifdef CONFIG_REALTEK_AUTOPM struct timer_list rts51x_suspend_timer; unsigned long timer_expires; int pwr_state; @@ -988,6 +989,7 @@ static int init_realtek_cr(struct us_data *us) us->extra = chip; us->extra_destructor = realtek_cr_destructor; us->max_lun = chip->max_lun = rts51x_get_max_lun(us); + chip->us = us; usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun); @@ -1010,10 +1012,8 @@ static int init_realtek_cr(struct us_data *us) SET_AUTO_DELINK(chip); } #ifdef CONFIG_REALTEK_AUTOPM - if (ss_en) { - chip->us = us; + if (ss_en) realtek_cr_autosuspend_setup(us); - } #endif usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag); diff --git a/trunk/drivers/vhost/vringh.c b/trunk/drivers/vhost/vringh.c index bff0775e258c..5174ebac288d 100644 --- a/trunk/drivers/vhost/vringh.c +++ b/trunk/drivers/vhost/vringh.c @@ -3,6 +3,7 @@ * * Since these may be in userspace, we use (inline) accessors. */ +#include #include #include #include @@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh) return __vringh_need_notify(vrh, getu16_kern); } EXPORT_SYMBOL(vringh_need_notify_kern); + +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/video/Kconfig b/trunk/drivers/video/Kconfig index c04ccdf60eaa..d71d60f94fc1 100644 --- a/trunk/drivers/video/Kconfig +++ b/trunk/drivers/video/Kconfig @@ -2429,7 +2429,7 @@ config FB_MXS select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT select FB_MODE_HELPERS - select OF_VIDEOMODE + select VIDEOMODE_HELPERS help Framebuffer support for the MXS SoC. 
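Several of the usb-serial hunks above share one refactoring: the hardware-FIFO check moves out of chars_in_buffer() into a new tx_empty() callback, usb_serial_generic_wait_until_sent() polls that callback roughly once per character time, and usb_serial_operations_init() wires the generic wait_until_sent in whenever a driver supplies tx_empty. A minimal driver-side sketch, assuming a hypothetical foo_hw_tx_done() status read; none of the foo_* names are taken from this series:

#include <linux/module.h>
#include <linux/usb/serial.h>

static bool foo_hw_tx_done(struct usb_serial_port *port)
{
	/* Placeholder: a real driver would query a line-status register
	 * here, as the ftdi_sio and io_ti hunks above do. */
	return true;
}

static bool foo_tx_empty(struct usb_serial_port *port)
{
	/* Report false while hardware still holds unsent data; the generic
	 * wait_until_sent() keeps polling until this returns true or the
	 * caller's timeout expires. */
	return foo_hw_tx_done(port);
}

static struct usb_serial_driver foo_device = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "foo",
	},
	/* Software buffer accounting stays with the generic helper. */
	.chars_in_buffer	= usb_serial_generic_chars_in_buffer,
	/* With tx_empty set, usb_serial_operations_init() fills in
	 * wait_until_sent with the new generic implementation. */
	.tx_empty		= foo_tx_empty,
};

The core-side serial_wait_until_sent() shown a few hunks above only calls the driver's wait_until_sent under disc_mutex and after checking for disconnection, so the driver callback does not have to handle the device vanishing mid-poll.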
@@ -2483,7 +2483,7 @@ config FB_SSD1307 tristate "Solomon SSD1307 framebuffer support" depends on FB && I2C depends on OF - depends on GENERIC_GPIO + depends on GPIOLIB select FB_SYS_FOPS select FB_SYS_FILLRECT select FB_SYS_COPYAREA diff --git a/trunk/drivers/video/au1100fb.c b/trunk/drivers/video/au1100fb.c index ddabaa867b0d..700cac067b46 100644 --- a/trunk/drivers/video/au1100fb.c +++ b/trunk/drivers/video/au1100fb.c @@ -111,30 +111,16 @@ static int au1100fb_fb_blank(int blank_mode, struct fb_info *fbi) switch (blank_mode) { case VESA_NO_BLANKING: - /* Turn on panel */ - fbdev->regs->lcd_control |= LCD_CONTROL_GO; -#ifdef CONFIG_MIPS_PB1100 - if (fbdev->panel_idx == 1) { - au_writew(au_readw(PB1100_G_CONTROL) - | (PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), - PB1100_G_CONTROL); - } -#endif + /* Turn on panel */ + fbdev->regs->lcd_control |= LCD_CONTROL_GO; au_sync(); break; case VESA_VSYNC_SUSPEND: case VESA_HSYNC_SUSPEND: case VESA_POWERDOWN: - /* Turn off panel */ - fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; -#ifdef CONFIG_MIPS_PB1100 - if (fbdev->panel_idx == 1) { - au_writew(au_readw(PB1100_G_CONTROL) - & ~(PB1100_G_CONTROL_BL | PB1100_G_CONTROL_VDD), - PB1100_G_CONTROL); - } -#endif + /* Turn off panel */ + fbdev->regs->lcd_control &= ~LCD_CONTROL_GO; au_sync(); break; default: diff --git a/trunk/drivers/video/backlight/Kconfig b/trunk/drivers/video/backlight/Kconfig index 2e166c3fc4c3..d5ab6583f440 100644 --- a/trunk/drivers/video/backlight/Kconfig +++ b/trunk/drivers/video/backlight/Kconfig @@ -36,14 +36,14 @@ config LCD_CORGI config LCD_L4F00242T03 tristate "Epson L4F00242T03 LCD" - depends on SPI_MASTER && GENERIC_GPIO + depends on SPI_MASTER && GPIOLIB help SPI driver for Epson L4F00242T03. This provides basic support for init and powering the LCD up/down through a sysfs interface. config LCD_LMS283GF05 tristate "Samsung LMS283GF05 LCD" - depends on SPI_MASTER && GENERIC_GPIO + depends on SPI_MASTER && GPIOLIB help SPI driver for Samsung LMS283GF05. This provides basic support for powering the LCD up/down through a sysfs interface. diff --git a/trunk/drivers/video/console/Makefile b/trunk/drivers/video/console/Makefile index a862e9173ebe..48da25c96cd3 100644 --- a/trunk/drivers/video/console/Makefile +++ b/trunk/drivers/video/console/Makefile @@ -18,6 +18,8 @@ font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o font-objs += $(font-objs-y) +obj-$(CONFIG_FONTS) += font.o + # Each configuration option enables a list of files. obj-$(CONFIG_DUMMY_CONSOLE) += dummycon.o diff --git a/trunk/drivers/video/mxsfb.c b/trunk/drivers/video/mxsfb.c index 1b2c26d1658c..21223d475b39 100644 --- a/trunk/drivers/video/mxsfb.c +++ b/trunk/drivers/video/mxsfb.c @@ -42,7 +42,6 @@ #include #include #include -#include