From ff67c143824613489a26772a5b6964ce8366db32 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Mon, 1 Oct 2012 11:13:33 -0700
Subject: [PATCH]

--- yaml ---
r: 324026
b: refs/heads/master
c: 229993001282e128a49a59ec43d255614775703a
h: refs/heads/master
v: v3
---
 [refs] | 2 +-
 trunk/Documentation/ABI/testing/sysfs-bus-pci | 12 +
 .../ABI/testing/sysfs-class-regulator | 21 +
 .../ABI/testing/sysfs-driver-wacom | 13 +
 .../ABI/testing/sysfs-platform-ideapad-laptop | 11 +
 trunk/Documentation/DocBook/filesystems.tmpl | 4 +-
 .../DocBook/media/v4l/vidioc-g-tuner.xml | 2 +-
 trunk/Documentation/RCU/checklist.txt | 6 +
 trunk/Documentation/RCU/stallwarn.txt | 16 +-
 trunk/Documentation/RCU/trace.txt | 43 +-
 trunk/Documentation/RCU/whatisRCU.txt | 9 +-
 trunk/Documentation/accounting/getdelays.c | 5 +-
 trunk/Documentation/block/00-INDEX | 10 +-
 trunk/Documentation/block/cfq-iosched.txt | 77 +
 trunk/Documentation/block/queue-sysfs.txt | 64 +
 .../devicetree/bindings/mmc/fsl-imx-esdhc.txt | 8 +-
 .../bindings/regulator/regulator.txt | 5 +-
 .../bindings/regulator/tps65217.txt | 31 +-
 .../bindings/regulator/tps6586x.txt | 85 +-
 trunk/Documentation/dontdiff | 1 -
 .../feature-removal-schedule.txt | 2 +-
 trunk/Documentation/filesystems/Locking | 2 -
 trunk/Documentation/filesystems/porting | 5 +-
 trunk/Documentation/filesystems/vfat.txt | 11 +
 trunk/Documentation/filesystems/vfs.txt | 4 -
 trunk/Documentation/i2c/busses/i2c-i801 | 1 +
 trunk/Documentation/ia64/aliasing-test.c | 1 +
 trunk/Documentation/kernel-parameters.txt | 20 +-
 trunk/Documentation/laptops/laptop-mode.txt | 12 +-
 trunk/Documentation/networking/netconsole.txt | 19 +-
 trunk/Documentation/pinctrl.txt | 6 +-
 trunk/Documentation/power/swsusp.txt | 2 +-
 trunk/Documentation/scheduler/sched-arch.txt | 10 -
 trunk/Documentation/security/Yama.txt | 14 +-
 trunk/Documentation/sysctl/vm.txt | 14 +-
 trunk/Documentation/trace/kprobetrace.txt | 2 +-
 trunk/Documentation/vfio.txt | 2 +-
 trunk/Documentation/vm/hugetlbpage.txt | 10 +-
 trunk/Documentation/w1/slaves/w1_therm | 2 +
 .../watchdog/src/watchdog-test.c | 2 +-
 .../Documentation/x86/x86_64/boot-options.txt | 7 +
 trunk/MAINTAINERS | 92 +-
 trunk/Makefile | 10 +-
 trunk/arch/Kconfig | 32 +
 trunk/arch/alpha/Kconfig | 2 +
 trunk/arch/alpha/include/asm/atomic.h | 4 +-
 trunk/arch/alpha/include/asm/fpu.h | 2 +
 trunk/arch/alpha/include/asm/ptrace.h | 5 +-
 trunk/arch/alpha/include/asm/socket.h | 2 +
 trunk/arch/alpha/include/asm/uaccess.h | 34 +-
 trunk/arch/alpha/include/asm/unistd.h | 4 +-
 trunk/arch/alpha/include/asm/word-at-a-time.h | 55 +
 trunk/arch/alpha/kernel/alpha_ksyms.c | 3 -
 trunk/arch/alpha/kernel/entry.S | 161 -
 trunk/arch/alpha/kernel/osf_sys.c | 49 +
 trunk/arch/alpha/kernel/process.c | 25 +-
 trunk/arch/alpha/kernel/smp.c | 1 +
 trunk/arch/alpha/kernel/systbls.S | 4 +-
 trunk/arch/alpha/lib/Makefile | 2 -
 trunk/arch/alpha/lib/ev6-strncpy_from_user.S | 424 ---
 trunk/arch/alpha/lib/ev67-strlen_user.S | 107 -
 trunk/arch/alpha/lib/strlen_user.S | 91 -
 trunk/arch/alpha/lib/strncpy_from_user.S | 339 --
 trunk/arch/alpha/mm/fault.c | 36 +-
 trunk/arch/alpha/oprofile/common.c | 1 +
 trunk/arch/arm/Kconfig | 9 +-
 trunk/arch/arm/Kconfig.debug | 6 +-
 trunk/arch/arm/Makefile | 4 +-
 trunk/arch/arm/boot/compressed/head.S | 5 +
 trunk/arch/arm/boot/dts/am33xx.dtsi | 5 +
 trunk/arch/arm/boot/dts/at91sam9260.dtsi | 3 +
 trunk/arch/arm/boot/dts/at91sam9263.dtsi | 5 +
 trunk/arch/arm/boot/dts/at91sam9g25ek.dts | 2 +-
 trunk/arch/arm/boot/dts/at91sam9g45.dtsi | 5 +
 trunk/arch/arm/boot/dts/at91sam9n12.dtsi | 4 +
trunk/arch/arm/boot/dts/at91sam9x5.dtsi | 4 + trunk/arch/arm/boot/dts/imx23.dtsi | 52 +- trunk/arch/arm/boot/dts/imx27-3ds.dts | 2 +- trunk/arch/arm/boot/dts/imx27.dtsi | 6 + trunk/arch/arm/boot/dts/imx28.dtsi | 74 +- trunk/arch/arm/boot/dts/imx51-babbage.dts | 6 +- trunk/arch/arm/boot/dts/imx51.dtsi | 4 + trunk/arch/arm/boot/dts/imx53-ard.dts | 22 +- trunk/arch/arm/boot/dts/imx53.dtsi | 7 + trunk/arch/arm/boot/dts/imx6q-sabrelite.dts | 1 + trunk/arch/arm/boot/dts/imx6q.dtsi | 7 + trunk/arch/arm/boot/dts/kirkwood-iconnect.dts | 6 +- trunk/arch/arm/boot/dts/twl6030.dtsi | 3 + .../arm/configs/armadillo800eva_defconfig | 2 +- trunk/arch/arm/configs/imx_v6_v7_defconfig | 1 + trunk/arch/arm/configs/mxs_defconfig | 1 - trunk/arch/arm/configs/tct_hammer_defconfig | 2 +- trunk/arch/arm/configs/u8500_defconfig | 1 + trunk/arch/arm/include/asm/assembler.h | 8 + trunk/arch/arm/include/asm/dma-mapping.h | 7 + trunk/arch/arm/include/asm/memory.h | 3 + trunk/arch/arm/include/asm/pgtable.h | 40 +- trunk/arch/arm/include/asm/sched_clock.h | 2 + trunk/arch/arm/include/asm/tlb.h | 4 + trunk/arch/arm/include/asm/uaccess.h | 58 +- trunk/arch/arm/include/asm/unistd.h | 2 + trunk/arch/arm/kernel/calls.S | 1 + trunk/arch/arm/kernel/hw_breakpoint.c | 62 +- trunk/arch/arm/kernel/sched_clock.c | 24 + trunk/arch/arm/kernel/smp_twd.c | 48 +- trunk/arch/arm/kernel/topology.c | 2 +- trunk/arch/arm/kernel/traps.c | 11 +- trunk/arch/arm/lib/Makefile | 23 +- trunk/arch/arm/lib/delay.c | 1 + trunk/arch/arm/lib/getuser.S | 23 +- trunk/arch/arm/lib/io-readsw-armv3.S | 106 + trunk/arch/arm/lib/io-writesw-armv3.S | 126 + trunk/arch/arm/lib/putuser.S | 6 + trunk/arch/arm/lib/uaccess.S | 564 ++++ trunk/arch/arm/mach-at91/at91rm9200_time.c | 2 +- .../arch/arm/mach-at91/at91sam9260_devices.c | 6 +- .../arch/arm/mach-at91/at91sam9261_devices.c | 6 +- .../arch/arm/mach-at91/at91sam9263_devices.c | 10 +- .../arch/arm/mach-at91/at91sam9g45_devices.c | 6 +- trunk/arch/arm/mach-at91/at91sam9rl_devices.c | 6 +- trunk/arch/arm/mach-at91/clock.c | 12 + .../arch/arm/mach-davinci/board-neuros-osd2.c | 39 - trunk/arch/arm/mach-dove/common.c | 3 +- trunk/arch/arm/mach-exynos/mach-origen.c | 7 + trunk/arch/arm/mach-exynos/mach-smdkv310.c | 7 + trunk/arch/arm/mach-exynos/pm_domains.c | 2 +- trunk/arch/arm/mach-gemini/irq.c | 1 + trunk/arch/arm/mach-imx/Makefile | 10 +- trunk/arch/arm/mach-imx/clk-imx25.c | 8 +- trunk/arch/arm/mach-imx/clk-imx27.c | 8 +- trunk/arch/arm/mach-imx/clk-imx31.c | 2 +- trunk/arch/arm/mach-imx/clk-imx35.c | 6 +- trunk/arch/arm/mach-imx/clk-imx51-imx53.c | 1 + trunk/arch/arm/mach-imx/clk-imx6q.c | 8 +- .../arm/mach-imx/{head-v7.S => headsmp.S} | 0 trunk/arch/arm/mach-imx/hotplug.c | 23 +- trunk/arch/arm/mach-imx/mach-armadillo5x0.c | 3 +- trunk/arch/arm/mach-imx/mach-imx6q.c | 4 +- trunk/arch/arm/mach-integrator/core.c | 1 + .../arch/arm/mach-integrator/integrator_ap.c | 2 +- trunk/arch/arm/mach-kirkwood/Makefile.boot | 7 +- trunk/arch/arm/mach-kirkwood/common.c | 11 +- .../arm/mach-kirkwood/db88f6281-bp-setup.c | 1 + trunk/arch/arm/mach-mmp/sram.c | 2 +- trunk/arch/arm/mach-mv78xx0/addr-map.c | 2 +- trunk/arch/arm/mach-mv78xx0/common.c | 6 +- trunk/arch/arm/mach-mxs/Kconfig | 6 - trunk/arch/arm/mach-mxs/Makefile | 3 +- trunk/arch/arm/mach-mxs/mach-mxs.c | 2 +- trunk/arch/arm/mach-omap2/Kconfig | 6 +- trunk/arch/arm/mach-omap2/Makefile | 2 +- trunk/arch/arm/mach-omap2/board-igep0020.c | 2 + trunk/arch/arm/mach-omap2/board-omap3evm.c | 1 + trunk/arch/arm/mach-omap2/clock33xx_data.c | 14 +- 
.../arm/mach-omap2/clockdomain2xxx_3xxx.c | 50 +- trunk/arch/arm/mach-omap2/cm-regbits-34xx.h | 1 + .../arm/mach-omap2/common-board-devices.c | 11 - .../arm/mach-omap2/common-board-devices.h | 1 - trunk/arch/arm/mach-omap2/cpuidle44xx.c | 3 +- trunk/arch/arm/mach-omap2/mux.h | 1 - trunk/arch/arm/mach-omap2/omap-wakeupgen.c | 2 +- trunk/arch/arm/mach-omap2/omap_hwmod.c | 1 + .../arm/mach-omap2/omap_hwmod_3xxx_data.c | 15 +- .../arm/mach-omap2/omap_hwmod_44xx_data.c | 12 +- trunk/arch/arm/mach-omap2/opp4xxx_data.c | 2 +- trunk/arch/arm/mach-omap2/pm34xx.c | 19 +- trunk/arch/arm/mach-omap2/sleep44xx.S | 8 +- trunk/arch/arm/mach-omap2/timer.c | 7 + trunk/arch/arm/mach-omap2/twl-common.c | 1 + trunk/arch/arm/mach-orion5x/common.c | 10 +- trunk/arch/arm/mach-pxa/raumfeld.c | 2 +- trunk/arch/arm/mach-s3c24xx/Kconfig | 4 +- .../arch/arm/mach-s3c24xx/include/mach/dma.h | 3 +- trunk/arch/arm/mach-sa1100/leds-hackkit.c | 1 + .../arm/mach-shmobile/board-armadillo800eva.c | 13 +- trunk/arch/arm/mach-shmobile/board-kzm9g.c | 4 +- trunk/arch/arm/mach-shmobile/board-mackerel.c | 3 +- trunk/arch/arm/mach-shmobile/board-marzen.c | 2 +- trunk/arch/arm/mach-shmobile/intc-sh73a0.c | 4 +- .../arch/arm/mach-tegra/board-harmony-power.c | 40 +- trunk/arch/arm/mach-ux500/Kconfig | 1 - trunk/arch/arm/mach-ux500/board-mop500-msp.c | 10 +- trunk/arch/arm/mach-ux500/board-mop500.c | 4 + trunk/arch/arm/mm/context.c | 7 +- trunk/arch/arm/mm/dma-mapping.c | 128 +- trunk/arch/arm/mm/flush.c | 2 - trunk/arch/arm/mm/mm.h | 3 + trunk/arch/arm/mm/mmu.c | 8 +- trunk/arch/arm/mm/tlb-v7.S | 6 +- trunk/arch/arm/plat-mxc/include/mach/mx25.h | 1 + trunk/arch/arm/plat-omap/dmtimer.c | 6 +- trunk/arch/arm/plat-omap/include/plat/cpu.h | 3 +- trunk/arch/arm/plat-omap/include/plat/multi.h | 9 + .../arm/plat-omap/include/plat/uncompress.h | 4 +- trunk/arch/arm/plat-omap/sram.c | 11 +- trunk/arch/arm/plat-orion/common.c | 8 +- .../arch/arm/plat-orion/include/plat/common.h | 6 +- trunk/arch/arm/plat-s3c24xx/dma.c | 2 +- trunk/arch/arm/plat-samsung/Kconfig | 3 +- trunk/arch/arm/plat-samsung/clock.c | 10 +- trunk/arch/arm/plat-samsung/devs.c | 29 +- .../arch/arm/plat-samsung/include/plat/hdmi.h | 16 + trunk/arch/arm/plat-samsung/pm.c | 2 +- trunk/arch/arm/vfp/vfpmodule.c | 2 + trunk/arch/blackfin/Kconfig | 1 + trunk/arch/blackfin/Makefile | 1 - trunk/arch/blackfin/include/asm/smp.h | 2 + trunk/arch/blackfin/kernel/setup.c | 1 - trunk/arch/blackfin/mach-common/smp.c | 223 +- trunk/arch/c6x/Kconfig | 1 + trunk/arch/c6x/include/asm/Kbuild | 1 + trunk/arch/c6x/include/asm/barrier.h | 27 - trunk/arch/c6x/include/asm/cache.h | 16 +- trunk/arch/cris/kernel/process.c | 3 + trunk/arch/frv/kernel/process.c | 3 + trunk/arch/h8300/kernel/process.c | 3 + trunk/arch/ia64/Kconfig | 12 +- trunk/arch/ia64/configs/generic_defconfig | 1 - trunk/arch/ia64/configs/gensparse_defconfig | 1 - trunk/arch/ia64/include/asm/switch_to.h | 8 - trunk/arch/ia64/kernel/acpi.c | 5 +- trunk/arch/ia64/kernel/process.c | 3 + trunk/arch/ia64/kernel/time.c | 66 +- trunk/arch/m32r/kernel/process.c | 3 + trunk/arch/m68k/Kconfig | 13 +- trunk/arch/m68k/Kconfig.cpu | 19 +- trunk/arch/m68k/apollo/config.c | 16 +- trunk/arch/m68k/include/asm/Kbuild | 25 + trunk/arch/m68k/include/asm/MC68332.h | 152 - trunk/arch/m68k/include/asm/apollodma.h | 248 -- trunk/arch/m68k/include/asm/apollohw.h | 2 +- trunk/arch/m68k/include/asm/bitsperlong.h | 1 - trunk/arch/m68k/include/asm/cputime.h | 6 - trunk/arch/m68k/include/asm/delay.h | 2 +- trunk/arch/m68k/include/asm/device.h | 7 - 
.../arch/m68k/include/asm/emergency-restart.h | 6 - trunk/arch/m68k/include/asm/errno.h | 6 - trunk/arch/m68k/include/asm/futex.h | 6 - trunk/arch/m68k/include/asm/ioctl.h | 1 - trunk/arch/m68k/include/asm/ipcbuf.h | 1 - trunk/arch/m68k/include/asm/irq_regs.h | 1 - trunk/arch/m68k/include/asm/kdebug.h | 1 - trunk/arch/m68k/include/asm/kmap_types.h | 6 - trunk/arch/m68k/include/asm/kvm_para.h | 1 - trunk/arch/m68k/include/asm/local.h | 6 - trunk/arch/m68k/include/asm/local64.h | 1 - trunk/arch/m68k/include/asm/mac_mouse.h | 23 - trunk/arch/m68k/include/asm/mcfmbus.h | 77 - trunk/arch/m68k/include/asm/mman.h | 1 - trunk/arch/m68k/include/asm/mutex.h | 9 - trunk/arch/m68k/include/asm/percpu.h | 6 - trunk/arch/m68k/include/asm/resource.h | 6 - trunk/arch/m68k/include/asm/sbus.h | 45 - trunk/arch/m68k/include/asm/scatterlist.h | 6 - trunk/arch/m68k/include/asm/sections.h | 8 - trunk/arch/m68k/include/asm/shm.h | 31 - trunk/arch/m68k/include/asm/siginfo.h | 6 - trunk/arch/m68k/include/asm/statfs.h | 6 - trunk/arch/m68k/include/asm/topology.h | 6 - trunk/arch/m68k/include/asm/types.h | 22 - trunk/arch/m68k/include/asm/unaligned.h | 4 +- trunk/arch/m68k/include/asm/xor.h | 1 - trunk/arch/m68k/kernel/process.c | 3 + trunk/arch/m68k/kernel/setup_no.c | 11 +- trunk/arch/m68k/kernel/sys_m68k.c | 8 +- trunk/arch/m68k/kernel/vmlinux-nommu.lds | 2 - trunk/arch/m68k/kernel/vmlinux-std.lds | 2 - trunk/arch/m68k/kernel/vmlinux-sun3.lds | 2 - trunk/arch/m68k/lib/muldi3.c | 2 +- trunk/arch/m68k/mm/init_mm.c | 2 +- trunk/arch/m68k/mm/init_no.c | 2 +- trunk/arch/m68k/platform/68328/head-de2.S | 8 +- trunk/arch/m68k/platform/68328/head-pilot.S | 10 +- trunk/arch/m68k/platform/68328/head-ram.S | 4 +- trunk/arch/m68k/platform/68328/head-rom.S | 6 +- trunk/arch/m68k/platform/68360/head-ram.S | 6 +- trunk/arch/m68k/platform/68360/head-rom.S | 8 +- trunk/arch/m68k/platform/coldfire/clk.c | 6 - trunk/arch/m68k/platform/coldfire/head.S | 10 +- trunk/arch/m68k/sun3/prom/init.c | 48 +- trunk/arch/microblaze/include/asm/sections.h | 4 - .../arch/microblaze/kernel/microblaze_ksyms.c | 3 - trunk/arch/microblaze/kernel/setup.c | 4 +- trunk/arch/microblaze/kernel/vmlinux.lds.S | 1 - trunk/arch/mips/Kconfig | 1 + trunk/arch/mips/alchemy/board-mtx1.c | 2 + trunk/arch/mips/ath79/dev-usb.c | 2 + trunk/arch/mips/ath79/gpio.c | 6 +- trunk/arch/mips/bcm63xx/dev-spi.c | 4 + trunk/arch/mips/cavium-octeon/octeon-irq.c | 89 +- .../mips/include/asm/mach-ath79/ar71xx_regs.h | 3 +- .../asm/mach-ath79/cpu-feature-overrides.h | 1 - .../asm/mach-bcm63xx/bcm63xx_dev_spi.h | 2 + .../include/asm/mach-bcm63xx/bcm63xx_regs.h | 13 +- .../mips/include/asm/mach-cavium-octeon/irq.h | 10 +- trunk/arch/mips/include/asm/module.h | 1 + trunk/arch/mips/include/asm/r4k-timer.h | 8 +- trunk/arch/mips/kernel/module.c | 43 +- trunk/arch/mips/kernel/smp-cmp.c | 2 +- trunk/arch/mips/kernel/smp.c | 4 +- trunk/arch/mips/kernel/sync-r4k.c | 26 +- trunk/arch/mips/mm/gup.c | 2 + trunk/arch/mips/mti-malta/malta-int.c | 9 +- trunk/arch/mips/mti-malta/malta-pci.c | 13 - trunk/arch/mips/mti-malta/malta-platform.c | 5 - trunk/arch/mips/pci/pci-ar724x.c | 22 + trunk/arch/mn10300/kernel/process.c | 3 + trunk/arch/parisc/include/asm/atomic.h | 4 +- trunk/arch/parisc/kernel/process.c | 5 +- trunk/arch/parisc/kernel/sys_parisc.c | 8 +- trunk/arch/powerpc/boot/.gitignore | 4 - .../powerpc/boot/dts/fsl/p4080si-post.dtsi | 7 + .../powerpc/configs/85xx/p1023rds_defconfig | 31 +- .../powerpc/configs/corenet32_smp_defconfig | 29 +- .../powerpc/configs/corenet64_smp_defconfig | 1 + 
trunk/arch/powerpc/configs/g5_defconfig | 103 +- trunk/arch/powerpc/configs/mpc83xx_defconfig | 18 +- trunk/arch/powerpc/configs/mpc85xx_defconfig | 33 +- .../powerpc/configs/mpc85xx_smp_defconfig | 32 +- trunk/arch/powerpc/include/asm/cputable.h | 2 - trunk/arch/powerpc/include/asm/kvm_host.h | 1 + trunk/arch/powerpc/include/asm/kvm_ppc.h | 12 + trunk/arch/powerpc/include/asm/mpic_msgr.h | 1 + trunk/arch/powerpc/include/asm/processor.h | 1 + trunk/arch/powerpc/include/asm/time.h | 6 - trunk/arch/powerpc/kernel/asm-offsets.c | 1 + trunk/arch/powerpc/kernel/dbell.c | 2 + trunk/arch/powerpc/kernel/dma-iommu.c | 9 +- trunk/arch/powerpc/kernel/entry_64.S | 23 +- trunk/arch/powerpc/kernel/exceptions-64s.S | 3 +- trunk/arch/powerpc/kernel/hw_breakpoint.c | 2 +- trunk/arch/powerpc/kernel/idle_power7.S | 2 + trunk/arch/powerpc/kernel/kgdb.c | 27 +- trunk/arch/powerpc/kernel/process.c | 15 +- trunk/arch/powerpc/kernel/smp.c | 11 +- trunk/arch/powerpc/kernel/syscalls.c | 8 +- trunk/arch/powerpc/kernel/sysfs.c | 10 + trunk/arch/powerpc/kernel/time.c | 64 +- trunk/arch/powerpc/kernel/traps.c | 3 +- trunk/arch/powerpc/kvm/book3s_32_mmu_host.c | 3 + trunk/arch/powerpc/kvm/book3s_64_mmu_host.c | 2 + trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S | 12 +- trunk/arch/powerpc/kvm/e500_tlb.c | 11 +- trunk/arch/powerpc/lib/code-patching.c | 2 +- trunk/arch/powerpc/lib/copyuser_power7.S | 35 +- trunk/arch/powerpc/lib/memcpy_power7.S | 4 +- trunk/arch/powerpc/mm/mem.c | 1 + trunk/arch/powerpc/mm/numa.c | 7 +- trunk/arch/powerpc/perf/core-book3s.c | 2 +- trunk/arch/powerpc/platforms/Kconfig.cputype | 16 +- trunk/arch/powerpc/platforms/powernv/smp.c | 10 +- trunk/arch/powerpc/sysdev/fsl_pci.c | 13 +- trunk/arch/powerpc/sysdev/mpic_msgr.c | 3 + trunk/arch/powerpc/sysdev/xics/icp-hv.c | 6 +- trunk/arch/powerpc/xmon/xmon.c | 84 +- trunk/arch/s390/Kconfig | 6 +- trunk/arch/s390/include/asm/cputime.h | 3 + trunk/arch/s390/include/asm/elf.h | 3 +- trunk/arch/s390/include/asm/hugetlb.h | 24 +- trunk/arch/s390/include/asm/posix_types.h | 3 +- trunk/arch/s390/include/asm/smp.h | 1 + trunk/arch/s390/include/asm/sparsemem.h | 2 - trunk/arch/s390/include/asm/switch_to.h | 4 - trunk/arch/s390/include/asm/syscall.h | 10 + trunk/arch/s390/include/asm/tlbflush.h | 2 - trunk/arch/s390/kernel/compat_linux.c | 2 - trunk/arch/s390/kernel/compat_wrapper.S | 4 +- trunk/arch/s390/kernel/ptrace.c | 7 +- trunk/arch/s390/kernel/setup.c | 2 + trunk/arch/s390/kernel/sys_s390.c | 9 +- trunk/arch/s390/kernel/vtime.c | 8 +- trunk/arch/s390/lib/uaccess_pt.c | 142 +- trunk/arch/s390/oprofile/init.c | 10 +- trunk/arch/score/kernel/process.c | 4 +- trunk/arch/sh/drivers/dma/dma-sh.c | 2 +- trunk/arch/sh/include/asm/sections.h | 1 - trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h | 36 +- trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c | 195 +- trunk/arch/sh/kernel/cpu/sh5/entry.S | 2 +- trunk/arch/sh/kernel/entry-common.S | 2 +- trunk/arch/sh/kernel/setup.c | 2 +- trunk/arch/sh/kernel/sh_ksyms_32.c | 1 - trunk/arch/sh/kernel/vmlinux.lds.S | 1 - trunk/arch/sh/lib/mcount.S | 8 +- trunk/arch/sparc/kernel/module.c | 13 +- trunk/arch/sparc/kernel/sys_sparc_64.c | 10 +- trunk/arch/sparc/mm/init_64.c | 28 +- trunk/arch/tile/include/asm/topology.h | 1 - trunk/arch/tile/include/gxio/iorpc_trio.h | 24 +- trunk/arch/um/drivers/mconsole_kern.c | 1 + trunk/arch/um/include/asm/processor-generic.h | 9 - trunk/arch/um/include/shared/common-offsets.h | 10 - trunk/arch/um/include/shared/user.h | 11 + trunk/arch/um/kernel/exec.c | 25 +- trunk/arch/um/kernel/process.c | 8 +- 
trunk/arch/um/kernel/signal.c | 6 +- trunk/arch/um/kernel/syscall.c | 24 +- trunk/arch/um/os-Linux/time.c | 2 +- trunk/arch/um/scripts/Makefile.rules | 2 +- trunk/arch/x86/Kconfig | 77 +- trunk/arch/x86/Kconfig.cpu | 5 +- trunk/arch/x86/Makefile | 6 +- trunk/arch/x86/boot/Makefile | 2 +- trunk/arch/x86/boot/compressed/Makefile | 3 + trunk/arch/x86/boot/compressed/eboot.c | 34 +- trunk/arch/x86/boot/compressed/eboot.h | 4 + trunk/arch/x86/boot/header.S | 4 - trunk/arch/x86/configs/i386_defconfig | 23 +- trunk/arch/x86/configs/x86_64_defconfig | 23 +- trunk/arch/x86/ia32/ia32_signal.c | 20 +- trunk/arch/x86/ia32/sys_ia32.c | 2 +- trunk/arch/x86/include/asm/alternative.h | 4 +- trunk/arch/x86/include/asm/bitops.h | 14 +- trunk/arch/x86/include/asm/calling.h | 48 +- trunk/arch/x86/include/asm/cpufeature.h | 4 + trunk/arch/x86/include/asm/fpu-internal.h | 390 ++- trunk/arch/x86/include/asm/ftrace.h | 56 +- trunk/arch/x86/include/asm/hpet.h | 2 - trunk/arch/x86/include/asm/i387.h | 29 +- trunk/arch/x86/include/asm/iommu_table.h | 6 +- trunk/arch/x86/include/asm/kprobes.h | 1 + trunk/arch/x86/include/asm/kvm.h | 16 + trunk/arch/x86/include/asm/kvm_host.h | 16 - trunk/arch/x86/include/asm/mce.h | 21 +- trunk/arch/x86/include/asm/perf_event.h | 13 +- trunk/arch/x86/include/asm/perf_regs.h | 33 + trunk/arch/x86/include/asm/processor.h | 3 +- trunk/arch/x86/include/asm/rcu.h | 32 + trunk/arch/x86/include/asm/signal.h | 4 + trunk/arch/x86/include/asm/spinlock.h | 3 +- trunk/arch/x86/include/asm/svm.h | 205 +- trunk/arch/x86/include/asm/sys_ia32.h | 2 +- trunk/arch/x86/include/asm/thread_info.h | 10 +- trunk/arch/x86/include/asm/uprobes.h | 3 +- trunk/arch/x86/include/asm/vdso.h | 3 +- trunk/arch/x86/include/asm/vmx.h | 127 +- trunk/arch/x86/include/asm/xen/page.h | 3 +- trunk/arch/x86/include/asm/xor_32.h | 56 +- trunk/arch/x86/include/asm/xor_64.h | 61 +- trunk/arch/x86/include/asm/xor_avx.h | 54 +- trunk/arch/x86/include/asm/xsave.h | 13 +- trunk/arch/x86/kernel/Makefile | 2 + trunk/arch/x86/kernel/acpi/boot.c | 2 +- trunk/arch/x86/kernel/acpi/sleep.c | 4 - trunk/arch/x86/kernel/acpi/sleep.h | 2 - trunk/arch/x86/kernel/acpi/wakeup_32.S | 4 +- trunk/arch/x86/kernel/acpi/wakeup_64.S | 4 +- trunk/arch/x86/kernel/alternative.c | 115 +- trunk/arch/x86/kernel/apic/apic.c | 2 +- trunk/arch/x86/kernel/apic/io_apic.c | 14 +- trunk/arch/x86/kernel/cpu/bugs.c | 7 +- trunk/arch/x86/kernel/cpu/common.c | 16 +- trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c | 8 + .../arch/x86/kernel/cpu/mcheck/mce-internal.h | 12 + .../arch/x86/kernel/cpu/mcheck/mce-severity.c | 7 - trunk/arch/x86/kernel/cpu/mcheck/mce.c | 137 +- trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c | 168 +- trunk/arch/x86/kernel/cpu/perf_event.c | 89 +- trunk/arch/x86/kernel/cpu/perf_event.h | 22 + .../arch/x86/kernel/cpu/perf_event_amd_ibs.c | 16 +- trunk/arch/x86/kernel/cpu/perf_event_intel.c | 35 +- .../arch/x86/kernel/cpu/perf_event_intel_ds.c | 21 +- .../x86/kernel/cpu/perf_event_intel_lbr.c | 3 +- .../x86/kernel/cpu/perf_event_intel_uncore.c | 289 +- .../x86/kernel/cpu/perf_event_intel_uncore.h | 54 +- trunk/arch/x86/kernel/cpu/proc.c | 5 +- trunk/arch/x86/kernel/cpuid.c | 5 + trunk/arch/x86/kernel/devicetree.c | 51 +- trunk/arch/x86/kernel/entry_32.S | 74 +- trunk/arch/x86/kernel/entry_64.S | 161 +- trunk/arch/x86/kernel/ftrace.c | 73 +- trunk/arch/x86/kernel/i387.c | 292 +- trunk/arch/x86/kernel/i8259.c | 2 +- trunk/arch/x86/kernel/irq.c | 3 +- trunk/arch/x86/kernel/kdebugfs.c | 6 +- trunk/arch/x86/kernel/kprobes.c | 67 + 
trunk/arch/x86/kernel/microcode_amd.c | 7 +- trunk/arch/x86/kernel/microcode_core.c | 3 + trunk/arch/x86/kernel/msr.c | 5 + trunk/arch/x86/kernel/perf_regs.c | 105 + trunk/arch/x86/kernel/probe_roms.c | 2 +- trunk/arch/x86/kernel/process.c | 22 +- trunk/arch/x86/kernel/process_32.c | 4 - trunk/arch/x86/kernel/process_64.c | 4 - trunk/arch/x86/kernel/ptrace.c | 8 +- trunk/arch/x86/kernel/signal.c | 215 +- trunk/arch/x86/kernel/smpboot.c | 20 +- trunk/arch/x86/kernel/step.c | 53 +- trunk/arch/x86/kernel/traps.c | 174 +- trunk/arch/x86/kernel/uprobes.c | 52 +- trunk/arch/x86/kernel/x8664_ksyms_64.c | 6 +- trunk/arch/x86/kernel/xsave.c | 517 ++-- trunk/arch/x86/kvm/emulate.c | 30 +- trunk/arch/x86/kvm/i8259.c | 17 + trunk/arch/x86/kvm/mmu.c | 13 +- trunk/arch/x86/kvm/trace.h | 89 - trunk/arch/x86/kvm/vmx.c | 55 +- trunk/arch/x86/kvm/x86.c | 25 +- trunk/arch/x86/mm/fault.c | 13 +- trunk/arch/x86/mm/hugetlbpage.c | 21 +- trunk/arch/x86/mm/init.c | 2 +- trunk/arch/x86/mm/pageattr.c | 10 +- trunk/arch/x86/mm/srat.c | 15 +- trunk/arch/x86/pci/mmconfig-shared.c | 2 +- trunk/arch/x86/platform/efi/Makefile | 1 + trunk/arch/x86/platform/efi/efi-bgrt.c | 76 + trunk/arch/x86/platform/efi/efi.c | 96 +- trunk/arch/x86/realmode/rm/Makefile | 2 +- trunk/arch/x86/syscalls/syscall_64.tbl | 8 +- trunk/arch/x86/um/Kconfig | 1 + .../x86/um/shared/sysdep/kernel-offsets.h | 3 - trunk/arch/x86/um/shared/sysdep/syscalls.h | 2 + trunk/arch/x86/um/signal.c | 6 - trunk/arch/x86/um/sys_call_table_32.c | 2 +- trunk/arch/x86/um/syscalls_32.c | 27 +- trunk/arch/x86/um/syscalls_64.c | 23 +- trunk/arch/x86/xen/enlighten.c | 122 +- trunk/arch/x86/xen/mmu.c | 2 +- trunk/arch/x86/xen/p2m.c | 121 +- trunk/arch/x86/xen/setup.c | 13 +- trunk/arch/x86/xen/smp.c | 6 +- trunk/arch/x86/xen/suspend.c | 2 +- trunk/arch/x86/xen/xen-ops.h | 2 +- trunk/arch/xtensa/kernel/process.c | 3 + trunk/block/blk-core.c | 8 +- trunk/block/blk-lib.c | 41 +- trunk/block/blk-merge.c | 117 +- trunk/block/genhd.c | 2 +- trunk/block/ioctl.c | 2 +- trunk/crypto/authenc.c | 4 +- trunk/drivers/acpi/Kconfig | 4 +- trunk/drivers/acpi/ac.c | 4 + trunk/drivers/acpi/acpica/achware.h | 12 +- trunk/drivers/acpi/acpica/hwesleep.c | 19 +- trunk/drivers/acpi/acpica/hwsleep.c | 20 +- trunk/drivers/acpi/acpica/hwxfsleep.c | 22 +- trunk/drivers/acpi/acpica/tbxface.c | 1 + trunk/drivers/acpi/battery.c | 2 + trunk/drivers/acpi/bgrt.c | 76 +- trunk/drivers/acpi/bus.c | 10 + trunk/drivers/acpi/button.c | 4 + trunk/drivers/acpi/fan.c | 4 + trunk/drivers/acpi/numa.c | 12 +- trunk/drivers/acpi/pci_root.c | 11 +- trunk/drivers/acpi/power.c | 40 +- trunk/drivers/acpi/processor_driver.c | 2 +- trunk/drivers/acpi/sbs.c | 2 + trunk/drivers/acpi/sleep.c | 75 +- trunk/drivers/acpi/sysfs.c | 4 +- trunk/drivers/acpi/thermal.c | 4 + trunk/drivers/ata/Kconfig | 2 +- trunk/drivers/ata/ahci.c | 18 +- trunk/drivers/ata/ahci.h | 1 + trunk/drivers/ata/ata_piix.c | 8 + trunk/drivers/ata/libahci.c | 3 +- trunk/drivers/ata/libata-acpi.c | 15 +- trunk/drivers/ata/libata-core.c | 3 +- trunk/drivers/ata/pata_atiixp.c | 16 + trunk/drivers/atm/iphase.c | 2 +- trunk/drivers/base/core.c | 9 +- trunk/drivers/base/dma-contiguous.c | 2 +- trunk/drivers/base/power/clock_ops.c | 3 +- trunk/drivers/base/power/common.c | 4 +- trunk/drivers/base/power/runtime.c | 13 +- trunk/drivers/base/regmap/regmap-irq.c | 92 +- trunk/drivers/base/regmap/regmap.c | 13 +- trunk/drivers/bcma/host_pci.c | 1 + trunk/drivers/bcma/sprom.c | 4 +- trunk/drivers/block/aoe/aoecmd.c | 1 + trunk/drivers/block/cciss_scsi.c | 12 +- 
trunk/drivers/block/drbd/drbd_bitmap.c | 15 +- trunk/drivers/block/drbd/drbd_int.h | 1 + trunk/drivers/block/drbd/drbd_main.c | 32 +- trunk/drivers/block/drbd/drbd_nl.c | 4 +- trunk/drivers/block/drbd/drbd_req.c | 36 +- trunk/drivers/block/mtip32xx/mtip32xx.c | 38 +- trunk/drivers/block/mtip32xx/mtip32xx.h | 10 +- trunk/drivers/block/nbd.c | 9 + trunk/drivers/block/nvme.c | 153 +- trunk/drivers/block/rbd.c | 7 +- trunk/drivers/block/xen-blkback/blkback.c | 2 +- trunk/drivers/bluetooth/ath3k.c | 4 + trunk/drivers/bluetooth/btusb.c | 12 +- trunk/drivers/char/agp/intel-agp.h | 40 +- trunk/drivers/char/agp/intel-gtt.c | 107 +- trunk/drivers/char/hw_random/omap-rng.c | 2 +- trunk/drivers/char/tpm/tpm_tis.c | 2 + trunk/drivers/clk/Makefile | 1 + trunk/drivers/clk/clk-devres.c | 55 + trunk/drivers/clk/clkdev.c | 45 - trunk/drivers/clocksource/cs5535-clockevt.c | 4 +- trunk/drivers/cpufreq/omap-cpufreq.c | 4 +- trunk/drivers/cpufreq/pcc-cpufreq.c | 1 + trunk/drivers/cpufreq/powernow-k8.c | 63 +- trunk/drivers/cpuidle/coupled.c | 14 +- trunk/drivers/crypto/caam/jr.c | 10 +- trunk/drivers/crypto/caam/key_gen.c | 1 + trunk/drivers/crypto/hifn_795x.c | 4 +- trunk/drivers/dma/at_hdmac.c | 21 +- trunk/drivers/dma/ep93xx_dma.c | 2 +- trunk/drivers/dma/fsldma.c | 2 +- trunk/drivers/dma/imx-dma.c | 40 +- trunk/drivers/dma/intel_mid_dma.c | 2 +- trunk/drivers/dma/intel_mid_dma_regs.h | 6 +- trunk/drivers/dma/ioat/hw.h | 4 - trunk/drivers/dma/pl330.c | 23 +- trunk/drivers/dma/ppc4xx/adma.c | 2 +- trunk/drivers/dma/ste_dma40_ll.h | 2 +- trunk/drivers/dma/tegra20-apb-dma.c | 18 +- trunk/drivers/edac/edac_mc.c | 57 +- trunk/drivers/edac/i3200_edac.c | 2 +- trunk/drivers/edac/i5000_edac.c | 4 + trunk/drivers/edac/sb_edac.c | 7 +- trunk/drivers/extcon/extcon-arizona.c | 5 + trunk/drivers/extcon/extcon-max77693.c | 19 +- trunk/drivers/extcon/extcon_gpio.c | 3 +- trunk/drivers/gpio/Kconfig | 2 +- trunk/drivers/gpio/gpio-em.c | 6 +- trunk/drivers/gpio/gpio-langwell.c | 7 +- trunk/drivers/gpio/gpio-lpc32xx.c | 5 + trunk/drivers/gpio/gpio-msic.c | 2 +- trunk/drivers/gpio/gpio-mxc.c | 5 +- trunk/drivers/gpio/gpio-pxa.c | 30 +- trunk/drivers/gpio/gpio-rdc321x.c | 1 + trunk/drivers/gpio/gpio-samsung.c | 14 +- trunk/drivers/gpio/gpio-sch.c | 3 +- trunk/drivers/gpio/gpiolib-of.c | 2 +- trunk/drivers/gpu/drm/Kconfig | 1 + trunk/drivers/gpu/drm/ast/ast_drv.c | 3 + trunk/drivers/gpu/drm/ast/ast_mode.c | 2 +- trunk/drivers/gpu/drm/cirrus/cirrus_drv.c | 3 + trunk/drivers/gpu/drm/drm_crtc.c | 2 +- trunk/drivers/gpu/drm/drm_edid.c | 3 + trunk/drivers/gpu/drm/drm_edid_load.c | 8 +- trunk/drivers/gpu/drm/drm_modes.c | 3 - trunk/drivers/gpu/drm/drm_proc.c | 4 +- trunk/drivers/gpu/drm/exynos/Kconfig | 2 +- .../gpu/drm/exynos/exynos_drm_dmabuf.c | 7 + trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c | 5 +- .../drivers/gpu/drm/exynos/exynos_drm_fimd.c | 5 - trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c | 52 +- trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c | 4 +- .../drivers/gpu/drm/exynos/exynos_drm_hdmi.c | 3 +- .../drivers/gpu/drm/exynos/exynos_drm_plane.c | 1 - .../drivers/gpu/drm/exynos/exynos_drm_vidi.c | 4 +- trunk/drivers/gpu/drm/exynos/exynos_hdmi.c | 11 +- trunk/drivers/gpu/drm/exynos/exynos_mixer.c | 6 +- .../drivers/gpu/drm/gma500/oaktrail_device.c | 2 + .../gpu/drm/gma500/psb_intel_display.c | 3 + trunk/drivers/gpu/drm/i810/i810_dma.c | 3 + trunk/drivers/gpu/drm/i810/i810_drv.c | 3 + trunk/drivers/gpu/drm/i915/i915_dma.c | 1 + trunk/drivers/gpu/drm/i915/i915_drv.c | 31 +- trunk/drivers/gpu/drm/i915/i915_gem.c | 11 +- 
trunk/drivers/gpu/drm/i915/i915_gem_context.c | 1 - .../gpu/drm/i915/i915_gem_execbuffer.c | 20 +- trunk/drivers/gpu/drm/i915/i915_gem_gtt.c | 10 +- trunk/drivers/gpu/drm/i915/i915_irq.c | 3 - trunk/drivers/gpu/drm/i915/i915_reg.h | 1 + trunk/drivers/gpu/drm/i915/i915_sysfs.c | 12 + trunk/drivers/gpu/drm/i915/intel_crt.c | 36 +- trunk/drivers/gpu/drm/i915/intel_display.c | 42 +- trunk/drivers/gpu/drm/i915/intel_dp.c | 25 +- trunk/drivers/gpu/drm/i915/intel_drv.h | 22 +- trunk/drivers/gpu/drm/i915/intel_hdmi.c | 2 +- trunk/drivers/gpu/drm/i915/intel_i2c.c | 10 +- trunk/drivers/gpu/drm/i915/intel_lvds.c | 8 + trunk/drivers/gpu/drm/i915/intel_modes.c | 31 +- trunk/drivers/gpu/drm/i915/intel_panel.c | 46 +- trunk/drivers/gpu/drm/i915/intel_pm.c | 24 +- trunk/drivers/gpu/drm/i915/intel_ringbuffer.c | 48 +- trunk/drivers/gpu/drm/i915/intel_sdvo.c | 21 +- trunk/drivers/gpu/drm/i915/intel_sprite.c | 4 +- trunk/drivers/gpu/drm/mgag200/mgag200_drv.c | 3 + trunk/drivers/gpu/drm/mgag200/mgag200_mode.c | 12 +- trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c | 2 +- trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c | 6 - .../drivers/gpu/drm/nouveau/nouveau_display.c | 2 +- trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c | 2 +- trunk/drivers/gpu/drm/nouveau/nouveau_state.c | 7 +- trunk/drivers/gpu/drm/nouveau/nv50_gpio.c | 16 + trunk/drivers/gpu/drm/nouveau/nv84_fifo.c | 9 + trunk/drivers/gpu/drm/nouveau/nvc0_fb.c | 1 + trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c | 3 +- trunk/drivers/gpu/drm/nouveau/nvc0_pm.c | 2 +- trunk/drivers/gpu/drm/nouveau/nvd0_display.c | 6 +- trunk/drivers/gpu/drm/nouveau/nve0_fifo.c | 40 +- trunk/drivers/gpu/drm/radeon/atombios_crtc.c | 81 +- trunk/drivers/gpu/drm/radeon/atombios_dp.c | 29 +- .../gpu/drm/radeon/atombios_encoders.c | 140 +- trunk/drivers/gpu/drm/radeon/evergreen.c | 71 +- trunk/drivers/gpu/drm/radeon/evergreen_cs.c | 13 +- trunk/drivers/gpu/drm/radeon/evergreend.h | 2 + trunk/drivers/gpu/drm/radeon/ni.c | 14 +- trunk/drivers/gpu/drm/radeon/r100.c | 3 +- trunk/drivers/gpu/drm/radeon/r600.c | 20 + trunk/drivers/gpu/drm/radeon/r600_cs.c | 196 +- trunk/drivers/gpu/drm/radeon/r600d.h | 28 + trunk/drivers/gpu/drm/radeon/radeon.h | 27 +- trunk/drivers/gpu/drm/radeon/radeon_asic.h | 10 +- .../drivers/gpu/drm/radeon/radeon_atombios.c | 51 +- .../gpu/drm/radeon/radeon_atpx_handler.c | 56 +- trunk/drivers/gpu/drm/radeon/radeon_bios.c | 138 +- trunk/drivers/gpu/drm/radeon/radeon_combios.c | 57 +- trunk/drivers/gpu/drm/radeon/radeon_cs.c | 32 +- trunk/drivers/gpu/drm/radeon/radeon_cursor.c | 6 +- trunk/drivers/gpu/drm/radeon/radeon_device.c | 6 +- trunk/drivers/gpu/drm/radeon/radeon_drv.c | 7 +- trunk/drivers/gpu/drm/radeon/radeon_fence.c | 8 +- trunk/drivers/gpu/drm/radeon/radeon_gart.c | 26 +- trunk/drivers/gpu/drm/radeon/radeon_gem.c | 13 +- trunk/drivers/gpu/drm/radeon/radeon_kms.c | 35 +- .../gpu/drm/radeon/radeon_legacy_crtc.c | 4 + trunk/drivers/gpu/drm/radeon/radeon_mode.h | 1 + trunk/drivers/gpu/drm/radeon/radeon_object.c | 9 +- trunk/drivers/gpu/drm/radeon/radeon_ring.c | 1 + trunk/drivers/gpu/drm/radeon/reg_srcs/r600 | 9 - trunk/drivers/gpu/drm/radeon/rv515.c | 13 - trunk/drivers/gpu/drm/radeon/si.c | 35 +- trunk/drivers/gpu/drm/radeon/sid.h | 3 + trunk/drivers/gpu/drm/savage/savage_drv.c | 3 + trunk/drivers/gpu/drm/sis/sis_drv.c | 3 + trunk/drivers/gpu/drm/tdfx/tdfx_drv.c | 3 + trunk/drivers/gpu/drm/udl/Kconfig | 1 + trunk/drivers/gpu/drm/udl/udl_connector.c | 7 + trunk/drivers/gpu/drm/udl/udl_drv.c | 3 + trunk/drivers/gpu/drm/udl/udl_gem.c | 2 +- 
trunk/drivers/gpu/drm/udl/udl_modeset.c | 3 +- trunk/drivers/gpu/drm/via/via_drv.c | 3 + trunk/drivers/gpu/drm/vmwgfx/Kconfig | 8 + trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 8 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 10 + trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 2 +- trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 6 +- .../drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 73 + trunk/drivers/gpu/vga/vga_switcheroo.c | 61 +- trunk/drivers/hid/Kconfig | 22 +- trunk/drivers/hid/Makefile | 21 + trunk/drivers/hid/hid-a4tech.c | 1 - trunk/drivers/hid/hid-apple.c | 1 - trunk/drivers/hid/hid-aureal.c | 1 - trunk/drivers/hid/hid-belkin.c | 1 - trunk/drivers/hid/hid-cherry.c | 1 - trunk/drivers/hid/hid-core.c | 46 +- trunk/drivers/hid/hid-cypress.c | 1 - trunk/drivers/hid/hid-debug.c | 12 +- trunk/drivers/hid/hid-ezkey.c | 1 - trunk/drivers/hid/hid-gyration.c | 1 - trunk/drivers/hid/hid-holtekff.c | 3 +- trunk/drivers/hid/hid-ids.h | 16 +- trunk/drivers/hid/hid-input.c | 11 +- trunk/drivers/hid/hid-lcpower.c | 2 +- trunk/drivers/hid/hid-lenovo-tpkbd.c | 149 +- trunk/drivers/hid/hid-lg.c | 20 +- trunk/drivers/hid/hid-lg.h | 4 + trunk/drivers/hid/hid-lg4ff.c | 198 +- trunk/drivers/hid/hid-logitech-dj.c | 49 +- trunk/drivers/hid/hid-logitech-dj.h | 1 + trunk/drivers/hid/hid-magicmouse.c | 2 +- trunk/drivers/hid/hid-microsoft.c | 1 - trunk/drivers/hid/hid-monterey.c | 1 - trunk/drivers/hid/hid-multitouch.c | 252 +- trunk/drivers/hid/hid-ntrig.c | 8 +- trunk/drivers/hid/hid-petalynx.c | 1 - trunk/drivers/hid/hid-picolcd.c | 2748 ----------------- trunk/drivers/hid/hid-picolcd.h | 309 ++ trunk/drivers/hid/hid-picolcd_backlight.c | 122 + trunk/drivers/hid/hid-picolcd_cir.c | 152 + trunk/drivers/hid/hid-picolcd_core.c | 689 +++++ trunk/drivers/hid/hid-picolcd_debugfs.c | 899 ++++++ trunk/drivers/hid/hid-picolcd_fb.c | 615 ++++ trunk/drivers/hid/hid-picolcd_lcd.c | 107 + trunk/drivers/hid/hid-picolcd_leds.c | 175 ++ trunk/drivers/hid/hid-primax.c | 25 - trunk/drivers/hid/hid-prodikeys.c | 18 +- trunk/drivers/hid/hid-ps3remote.c | 215 ++ trunk/drivers/hid/hid-samsung.c | 1 - trunk/drivers/hid/hid-sony.c | 1 - trunk/drivers/hid/hid-sunplus.c | 1 - trunk/drivers/hid/hid-uclogic.c | 98 + trunk/drivers/hid/hid-wacom.c | 170 +- trunk/drivers/hid/hid-waltop.c | 29 - trunk/drivers/hid/hid-wiimote-ext.c | 97 + trunk/drivers/hid/hidraw.c | 84 +- trunk/drivers/hid/usbhid/hid-core.c | 6 +- trunk/drivers/hid/usbhid/hid-quirks.c | 4 +- trunk/drivers/hwmon/ad7314.c | 8 + trunk/drivers/hwmon/ads7871.c | 9 + trunk/drivers/hwmon/applesmc.c | 4 +- trunk/drivers/hwmon/asus_atk0110.c | 6 + trunk/drivers/hwmon/coretemp.c | 7 +- trunk/drivers/hwmon/fam15h_power.c | 15 +- trunk/drivers/hwmon/ina2xx.c | 30 +- trunk/drivers/hwmon/twl4030-madc-hwmon.c | 9 +- trunk/drivers/hwmon/via-cputemp.c | 5 + trunk/drivers/hwmon/w83627hf.c | 2 +- trunk/drivers/hwspinlock/hwspinlock_core.c | 3 +- trunk/drivers/i2c/algos/i2c-algo-pca.c | 6 +- trunk/drivers/i2c/busses/Kconfig | 6 + trunk/drivers/i2c/busses/Makefile | 5 +- .../drivers/i2c/busses/i2c-designware-core.c | 11 + trunk/drivers/i2c/busses/i2c-diolan-u2c.c | 1 + trunk/drivers/i2c/busses/i2c-i801.c | 3 + trunk/drivers/i2c/busses/i2c-mxs.c | 13 +- trunk/drivers/i2c/busses/i2c-nomadik.c | 28 +- trunk/drivers/i2c/busses/i2c-omap.c | 2 +- trunk/drivers/i2c/busses/i2c-pnx.c | 53 +- trunk/drivers/i2c/busses/i2c-tegra.c | 2 +- trunk/drivers/i2c/i2c-core.c | 22 +- trunk/drivers/ide/ide-pm.c | 4 +- trunk/drivers/idle/intel_idle.c | 3 +- trunk/drivers/iio/adc/at91_adc.c | 2 +- 
trunk/drivers/iio/frequency/adf4350.c | 24 +- trunk/drivers/iio/light/adjd_s311.c | 7 +- trunk/drivers/iio/light/lm3533-als.c | 4 +- trunk/drivers/infiniband/core/ucma.c | 2 +- .../drivers/infiniband/hw/amso1100/c2_rnic.c | 2 +- trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c | 2 +- trunk/drivers/infiniband/hw/cxgb4/cm.c | 2 +- trunk/drivers/infiniband/hw/ehca/ehca_irq.c | 250 +- trunk/drivers/infiniband/hw/ehca/ehca_irq.h | 6 +- trunk/drivers/infiniband/hw/mlx4/mad.c | 16 +- trunk/drivers/infiniband/hw/mlx4/main.c | 5 +- trunk/drivers/infiniband/hw/mlx4/qp.c | 6 +- .../infiniband/hw/ocrdma/ocrdma_main.c | 16 +- .../infiniband/hw/ocrdma/ocrdma_verbs.c | 8 +- trunk/drivers/infiniband/hw/qib/qib_iba7322.c | 4 +- trunk/drivers/infiniband/hw/qib/qib_mad.c | 3 +- trunk/drivers/infiniband/hw/qib/qib_sd7220.c | 2 +- trunk/drivers/infiniband/ulp/ipoib/ipoib.h | 5 +- trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c | 3 + .../drivers/infiniband/ulp/ipoib/ipoib_main.c | 95 +- .../infiniband/ulp/ipoib/ipoib_multicast.c | 2 - trunk/drivers/infiniband/ulp/srp/ib_srp.c | 87 +- trunk/drivers/infiniband/ulp/srpt/ib_srpt.c | 2 +- trunk/drivers/input/evdev.c | 78 +- trunk/drivers/input/input-mt.c | 305 +- trunk/drivers/input/input.c | 254 +- trunk/drivers/input/keyboard/imx_keypad.c | 3 + trunk/drivers/input/misc/ab8500-ponkey.c | 4 +- trunk/drivers/input/misc/uinput.c | 2 +- trunk/drivers/input/mouse/alps.c | 2 +- trunk/drivers/input/mouse/bcm5974.c | 348 +-- trunk/drivers/input/mouse/elantech.c | 4 +- trunk/drivers/input/mouse/sentelic.c | 13 +- trunk/drivers/input/mouse/synaptics.c | 4 +- trunk/drivers/input/serio/ambakmi.c | 6 +- trunk/drivers/input/serio/i8042-x86ia64io.h | 20 + trunk/drivers/input/tablet/wacom_wac.c | 12 +- .../drivers/input/touchscreen/atmel_mxt_ts.c | 2 +- trunk/drivers/input/touchscreen/cyttsp_core.c | 2 +- trunk/drivers/input/touchscreen/edt-ft5x06.c | 13 +- trunk/drivers/input/touchscreen/eeti_ts.c | 21 +- trunk/drivers/input/touchscreen/egalax_ts.c | 2 +- trunk/drivers/input/touchscreen/ili210x.c | 2 +- trunk/drivers/input/touchscreen/mms114.c | 2 +- trunk/drivers/input/touchscreen/penmount.c | 2 +- .../input/touchscreen/usbtouchscreen.c | 40 + trunk/drivers/input/touchscreen/wacom_w8001.c | 2 +- trunk/drivers/iommu/amd_iommu.c | 31 +- trunk/drivers/iommu/amd_iommu_init.c | 8 +- trunk/drivers/iommu/exynos-iommu.c | 6 +- trunk/drivers/iommu/intel-iommu.c | 26 +- trunk/drivers/iommu/intel_irq_remapping.c | 18 +- trunk/drivers/iommu/tegra-smmu.c | 17 +- trunk/drivers/isdn/hardware/mISDN/avmfritz.c | 3 +- trunk/drivers/isdn/hardware/mISDN/hfcmulti.c | 2 + trunk/drivers/isdn/hardware/mISDN/mISDNipac.c | 3 +- trunk/drivers/isdn/hardware/mISDN/mISDNisar.c | 3 +- trunk/drivers/isdn/hardware/mISDN/netjet.c | 3 +- trunk/drivers/isdn/hardware/mISDN/w6692.c | 3 +- trunk/drivers/isdn/isdnloop/isdnloop.c | 12 - trunk/drivers/isdn/mISDN/hwchannel.c | 9 +- trunk/drivers/isdn/mISDN/layer2.c | 2 +- trunk/drivers/leds/led-triggers.c | 2 +- trunk/drivers/leds/leds-lp8788.c | 2 +- trunk/drivers/leds/leds-renesas-tpu.c | 2 +- trunk/drivers/lguest/x86/core.c | 10 +- trunk/drivers/md/dm-mpath.c | 11 +- trunk/drivers/md/dm-table.c | 61 +- trunk/drivers/md/dm-thin.c | 135 +- trunk/drivers/md/dm-verity.c | 8 +- trunk/drivers/md/dm.c | 71 +- trunk/drivers/md/dm.h | 1 + trunk/drivers/md/md.c | 15 +- trunk/drivers/md/raid10.c | 38 +- trunk/drivers/md/raid10.h | 2 +- trunk/drivers/md/raid5.c | 7 +- trunk/drivers/media/dvb/siano/smsusb.c | 2 +- trunk/drivers/media/radio/radio-shark.c | 151 +- 
trunk/drivers/media/radio/radio-shark2.c | 137 +- .../media/radio/si470x/radio-si470x-common.c | 3 + .../media/radio/si470x/radio-si470x-i2c.c | 5 +- .../media/radio/si470x/radio-si470x-usb.c | 2 +- trunk/drivers/media/rc/Kconfig | 1 + trunk/drivers/media/video/gspca/jl2005bcd.c | 2 +- trunk/drivers/media/video/gspca/spca506.c | 2 +- trunk/drivers/media/video/mem2mem_testdev.c | 2 +- trunk/drivers/media/video/mx1_camera.c | 4 +- trunk/drivers/media/video/mx2_camera.c | 47 +- trunk/drivers/media/video/mx3_camera.c | 22 +- trunk/drivers/media/video/soc_camera.c | 3 +- trunk/drivers/media/video/soc_mediabus.c | 6 + trunk/drivers/media/video/uvc/uvc_queue.c | 1 + trunk/drivers/media/video/v4l2-ioctl.c | 10 +- trunk/drivers/mfd/88pm800.c | 5 +- trunk/drivers/mfd/88pm805.c | 3 +- trunk/drivers/mfd/88pm860x-core.c | 21 +- trunk/drivers/mfd/Kconfig | 3 +- trunk/drivers/mfd/aat2870-core.c | 2 +- trunk/drivers/mfd/ab3100-core.c | 2 +- trunk/drivers/mfd/ab8500-core.c | 10 +- trunk/drivers/mfd/ab8500-gpadc.c | 2 +- trunk/drivers/mfd/arizona-core.c | 6 +- trunk/drivers/mfd/asic3.c | 7 +- trunk/drivers/mfd/cs5535-mfd.c | 2 +- trunk/drivers/mfd/da9052-core.c | 2 +- trunk/drivers/mfd/davinci_voicecodec.c | 2 +- trunk/drivers/mfd/db8500-prcmu.c | 2 +- trunk/drivers/mfd/ezx-pcap.c | 2 +- trunk/drivers/mfd/htc-pasic3.c | 5 +- trunk/drivers/mfd/intel_msic.c | 4 +- trunk/drivers/mfd/janz-cmodio.c | 2 +- trunk/drivers/mfd/jz4740-adc.c | 3 +- trunk/drivers/mfd/lm3533-core.c | 7 +- trunk/drivers/mfd/lpc_ich.c | 24 +- trunk/drivers/mfd/lpc_sch.c | 6 +- trunk/drivers/mfd/max77686.c | 2 +- trunk/drivers/mfd/max77693-irq.c | 36 +- trunk/drivers/mfd/max77693.c | 16 +- trunk/drivers/mfd/max8925-core.c | 12 +- trunk/drivers/mfd/max8997.c | 2 +- trunk/drivers/mfd/max8998.c | 8 +- trunk/drivers/mfd/mc13xxx-core.c | 2 +- trunk/drivers/mfd/mfd-core.c | 12 +- trunk/drivers/mfd/palmas.c | 3 +- trunk/drivers/mfd/rc5t583.c | 4 +- trunk/drivers/mfd/rdc321x-southbridge.c | 5 +- trunk/drivers/mfd/sec-core.c | 8 +- trunk/drivers/mfd/sta2x11-mfd.c | 4 +- trunk/drivers/mfd/stmpe.c | 2 +- trunk/drivers/mfd/t7l66xb.c | 2 +- trunk/drivers/mfd/tc3589x.c | 8 +- trunk/drivers/mfd/tc6387xb.c | 2 +- trunk/drivers/mfd/tc6393xb.c | 4 +- trunk/drivers/mfd/ti-ssp.c | 2 +- trunk/drivers/mfd/timberdale.c | 12 +- trunk/drivers/mfd/tps6105x.c | 2 +- trunk/drivers/mfd/tps6507x.c | 2 +- trunk/drivers/mfd/tps65090.c | 2 +- trunk/drivers/mfd/tps65217.c | 130 +- trunk/drivers/mfd/tps6586x.c | 16 +- trunk/drivers/mfd/tps65910.c | 2 +- trunk/drivers/mfd/tps65911-comparator.c | 2 +- trunk/drivers/mfd/tps65912-core.c | 2 +- trunk/drivers/mfd/twl4030-audio.c | 2 +- trunk/drivers/mfd/twl6040-core.c | 2 +- trunk/drivers/mfd/vx855.c | 2 +- trunk/drivers/mfd/wl1273-core.c | 2 +- trunk/drivers/mfd/wm831x-core.c | 16 +- trunk/drivers/mfd/wm8400-core.c | 2 +- trunk/drivers/mfd/wm8994-core.c | 4 +- trunk/drivers/mfd/wm8994-irq.c | 1 + trunk/drivers/misc/mei/interrupt.c | 2 +- trunk/drivers/misc/mei/main.c | 27 + trunk/drivers/misc/sgi-xp/xpc_uv.c | 84 +- trunk/drivers/misc/ti-st/st_ll.c | 2 +- trunk/drivers/mmc/card/block.c | 26 +- trunk/drivers/mmc/core/sdio.c | 2 +- trunk/drivers/mmc/host/at91_mci.c | 2 +- trunk/drivers/mmc/host/atmel-mci.c | 8 +- trunk/drivers/mmc/host/bfin_sdh.c | 7 - trunk/drivers/mmc/host/dw_mmc.c | 85 +- trunk/drivers/mmc/host/mxs-mmc.c | 14 +- trunk/drivers/mmc/host/omap.c | 14 +- trunk/drivers/mmc/host/omap_hsmmc.c | 2 +- trunk/drivers/mmc/host/sdhci-esdhc-imx.c | 2 +- trunk/drivers/mmc/host/sdhci-esdhc.h | 6 +- 
trunk/drivers/mmc/host/vub300.c | 2 +- trunk/drivers/mtd/maps/uclinux.c | 5 +- trunk/drivers/mtd/mtdchar.c | 48 +- trunk/drivers/mtd/nand/Kconfig | 2 +- trunk/drivers/mtd/nand/omap2.c | 7 +- trunk/drivers/mtd/ubi/vtbl.c | 4 +- trunk/drivers/net/appletalk/cops.c | 4 +- trunk/drivers/net/appletalk/ltpc.c | 4 +- trunk/drivers/net/bonding/bond_main.c | 12 +- trunk/drivers/net/can/janz-ican3.c | 4 +- trunk/drivers/net/can/mcp251x.c | 11 +- .../net/can/sja1000/sja1000_platform.c | 4 +- trunk/drivers/net/can/softing/softing_fw.c | 7 +- trunk/drivers/net/can/ti_hecc.c | 2 +- trunk/drivers/net/cris/eth_v10.c | 2 +- trunk/drivers/net/ethernet/3com/typhoon.c | 2 +- trunk/drivers/net/ethernet/broadcom/bnx2.c | 2 +- .../net/ethernet/broadcom/bnx2x/bnx2x.h | 7 +- .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 16 +- .../net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 15 +- .../net/ethernet/broadcom/bnx2x/bnx2x_dump.h | 25 +- .../ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 10 +- .../net/ethernet/broadcom/bnx2x/bnx2x_link.c | 18 +- .../net/ethernet/broadcom/bnx2x/bnx2x_main.c | 122 +- .../net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 1 + .../net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2 +- .../net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 15 +- .../drivers/net/ethernet/cadence/at91_ether.c | 2 +- trunk/drivers/net/ethernet/cirrus/cs89x0.c | 10 +- .../net/ethernet/emulex/benet/be_cmds.c | 6 +- .../net/ethernet/emulex/benet/be_main.c | 4 +- .../ethernet/freescale/fs_enet/mii-bitbang.c | 4 +- .../net/ethernet/freescale/fs_enet/mii-fec.c | 8 +- .../drivers/net/ethernet/freescale/gianfar.c | 2 +- .../net/ethernet/freescale/gianfar_ethtool.c | 1 + .../net/ethernet/freescale/gianfar_ptp.c | 4 +- trunk/drivers/net/ethernet/i825xx/znet.c | 13 +- trunk/drivers/net/ethernet/ibm/ibmveth.c | 26 +- .../net/ethernet/intel/e1000/e1000_main.c | 11 + .../drivers/net/ethernet/intel/e1000e/82571.c | 10 +- .../drivers/net/ethernet/intel/e1000e/e1000.h | 1 + .../net/ethernet/intel/e1000e/netdev.c | 84 +- .../net/ethernet/intel/igb/e1000_82575.c | 16 +- .../net/ethernet/intel/igb/e1000_regs.h | 8 +- .../net/ethernet/intel/igb/igb_ethtool.c | 28 +- .../drivers/net/ethernet/intel/igb/igb_main.c | 19 +- .../net/ethernet/intel/ixgbe/ixgbe_82599.c | 3 +- .../net/ethernet/mellanox/mlx4/en_rx.c | 4 +- .../net/ethernet/mellanox/mlx4/en_tx.c | 17 +- .../drivers/net/ethernet/mellanox/mlx4/icm.c | 39 +- .../drivers/net/ethernet/mellanox/mlx4/icm.h | 12 +- .../drivers/net/ethernet/mellanox/mlx4/main.c | 19 +- .../drivers/net/ethernet/mellanox/mlx4/mcg.c | 110 +- .../drivers/net/ethernet/mellanox/mlx4/mlx4.h | 80 +- .../net/ethernet/mellanox/mlx4/mlx4_en.h | 1 - trunk/drivers/net/ethernet/mellanox/mlx4/mr.c | 27 +- .../net/ethernet/mellanox/mlx4/profile.c | 4 +- .../ethernet/mellanox/mlx4/resource_tracker.c | 116 + .../net/ethernet/mellanox/mlx4/sense.c | 14 - trunk/drivers/net/ethernet/nxp/lpc_eth.c | 13 - .../drivers/net/ethernet/octeon/octeon_mgmt.c | 4 +- .../drivers/net/ethernet/pasemi/pasemi_mac.c | 4 +- .../ethernet/qlogic/netxen/netxen_nic_main.c | 4 + .../net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 4 +- trunk/drivers/net/ethernet/renesas/Kconfig | 4 +- trunk/drivers/net/ethernet/renesas/sh_eth.c | 11 +- trunk/drivers/net/ethernet/seeq/sgiseeq.c | 1 + trunk/drivers/net/ethernet/sfc/efx.c | 6 + trunk/drivers/net/ethernet/sfc/efx.h | 14 +- trunk/drivers/net/ethernet/sfc/ethtool.c | 20 +- trunk/drivers/net/ethernet/sfc/tx.c | 19 + .../net/ethernet/stmicro/stmmac/common.h | 5 + .../net/ethernet/stmicro/stmmac/descs.h | 6 + 
.../net/ethernet/stmicro/stmmac/descs_com.h | 5 + .../net/ethernet/stmicro/stmmac/dwmac100.h | 5 + .../net/ethernet/stmicro/stmmac/dwmac1000.h | 5 +- .../net/ethernet/stmicro/stmmac/dwmac_dma.h | 5 + .../drivers/net/ethernet/stmicro/stmmac/mmc.h | 5 + .../net/ethernet/stmicro/stmmac/mmc_core.c | 6 +- .../net/ethernet/stmicro/stmmac/stmmac.h | 5 + .../net/ethernet/stmicro/stmmac/stmmac_main.c | 12 +- .../ethernet/stmicro/stmmac/stmmac_platform.c | 2 +- .../ethernet/stmicro/stmmac/stmmac_timer.c | 8 +- .../ethernet/stmicro/stmmac/stmmac_timer.h | 4 + trunk/drivers/net/ethernet/ti/davinci_cpdma.c | 3 +- trunk/drivers/net/ethernet/ti/davinci_mdio.c | 4 +- .../drivers/net/ethernet/xscale/ixp4xx_eth.c | 1 + trunk/drivers/net/fddi/skfp/pmf.c | 2 +- trunk/drivers/net/hyperv/netvsc.c | 7 - trunk/drivers/net/hyperv/rndis_filter.c | 11 + trunk/drivers/net/irda/bfin_sir.c | 8 +- trunk/drivers/net/irda/ks959-sir.c | 1 + trunk/drivers/net/irda/ksdazzle-sir.c | 1 + trunk/drivers/net/irda/sh_sir.c | 2 +- trunk/drivers/net/macvtap.c | 3 +- trunk/drivers/net/netconsole.c | 6 - trunk/drivers/net/phy/bcm87xx.c | 2 + trunk/drivers/net/phy/mdio-mux-gpio.c | 1 - trunk/drivers/net/phy/mdio-mux.c | 2 +- trunk/drivers/net/phy/micrel.c | 45 +- trunk/drivers/net/phy/smsc.c | 28 +- trunk/drivers/net/ppp/pppoe.c | 2 +- trunk/drivers/net/ppp/pptp.c | 4 +- trunk/drivers/net/team/team.c | 60 +- trunk/drivers/net/tun.c | 1 - trunk/drivers/net/usb/asix_devices.c | 4 + trunk/drivers/net/usb/cdc-phonet.c | 1 + trunk/drivers/net/usb/cdc_ncm.c | 20 + trunk/drivers/net/usb/qmi_wwan.c | 271 +- trunk/drivers/net/usb/sierra_net.c | 54 +- trunk/drivers/net/usb/smsc75xx.c | 1 + trunk/drivers/net/usb/usbnet.c | 18 +- trunk/drivers/net/vmxnet3/vmxnet3_drv.c | 2 +- trunk/drivers/net/wan/dscc4.c | 5 +- trunk/drivers/net/wan/ixp4xx_hss.c | 1 + trunk/drivers/net/wimax/i2400m/fw.c | 4 +- trunk/drivers/net/wireless/at76c50x-usb.c | 2 +- trunk/drivers/net/wireless/ath/ath5k/base.c | 6 +- trunk/drivers/net/wireless/ath/ath5k/eeprom.c | 2 +- trunk/drivers/net/wireless/ath/ath5k/eeprom.h | 1 + .../net/wireless/ath/ath5k/mac80211-ops.c | 5 +- .../net/wireless/ath/ath9k/ar9003_eeprom.c | 4 + .../net/wireless/ath/ath9k/ar9003_paprd.c | 105 +- .../net/wireless/ath/ath9k/ar9003_phy.h | 4 + trunk/drivers/net/wireless/ath/ath9k/debug.c | 2 + trunk/drivers/net/wireless/ath/ath9k/gpio.c | 3 +- trunk/drivers/net/wireless/ath/ath9k/hw.c | 12 +- trunk/drivers/net/wireless/ath/ath9k/hw.h | 5 +- trunk/drivers/net/wireless/ath/ath9k/link.c | 20 +- trunk/drivers/net/wireless/ath/ath9k/mac.c | 18 +- trunk/drivers/net/wireless/ath/ath9k/mac.h | 1 + trunk/drivers/net/wireless/ath/ath9k/main.c | 4 +- trunk/drivers/net/wireless/ath/ath9k/pci.c | 2 + trunk/drivers/net/wireless/ath/ath9k/recv.c | 2 +- trunk/drivers/net/wireless/ath/ath9k/xmit.c | 3 + trunk/drivers/net/wireless/b43/Kconfig | 4 +- trunk/drivers/net/wireless/b43/main.c | 21 +- .../brcm80211/brcmfmac/bcmsdh_sdmmc.c | 2 + .../wireless/brcm80211/brcmfmac/dhd_common.c | 26 +- .../net/wireless/brcm80211/brcmfmac/usb.c | 30 +- .../wireless/brcm80211/brcmfmac/wl_cfg80211.c | 21 +- .../net/wireless/brcm80211/brcmsmac/channel.c | 7 +- .../wireless/brcm80211/brcmsmac/mac80211_if.c | 6 +- trunk/drivers/net/wireless/ipw2x00/ipw2100.c | 3 +- .../net/wireless/iwlwifi/dvm/debugfs.c | 3 + trunk/drivers/net/wireless/iwlwifi/dvm/rs.c | 13 +- .../net/wireless/iwlwifi/pcie/internal.h | 2 +- trunk/drivers/net/wireless/iwlwifi/pcie/rx.c | 2 +- .../drivers/net/wireless/iwlwifi/pcie/trans.c | 31 +- 
trunk/drivers/net/wireless/libertas/cfg.c | 1 + trunk/drivers/net/wireless/libertas/if_sdio.c | 6 + trunk/drivers/net/wireless/libertas/main.c | 5 +- trunk/drivers/net/wireless/mwifiex/cmdevt.c | 15 +- trunk/drivers/net/wireless/p54/p54usb.c | 2 +- trunk/drivers/net/wireless/rndis_wlan.c | 6 +- trunk/drivers/net/wireless/rt2x00/rt2400pci.c | 9 + trunk/drivers/net/wireless/rt2x00/rt2400pci.h | 1 + trunk/drivers/net/wireless/rt2x00/rt2500pci.c | 9 + trunk/drivers/net/wireless/rt2x00/rt2500usb.c | 11 +- trunk/drivers/net/wireless/rt2x00/rt2500usb.h | 17 +- trunk/drivers/net/wireless/rt2x00/rt2800lib.c | 69 + trunk/drivers/net/wireless/rt2x00/rt2800pci.c | 80 +- trunk/drivers/net/wireless/rt2x00/rt2800usb.c | 22 +- trunk/drivers/net/wireless/rt2x00/rt2x00dev.c | 2 +- trunk/drivers/net/wireless/rt2x00/rt61pci.c | 12 +- trunk/drivers/net/wireless/rt2x00/rt61pci.h | 1 + trunk/drivers/net/wireless/rt2x00/rt73usb.c | 9 + trunk/drivers/net/wireless/rt2x00/rt73usb.h | 3 + .../net/wireless/rtl818x/rtl8187/dev.c | 2 +- .../net/wireless/rtlwifi/rtl8192c/fw_common.c | 6 +- .../net/wireless/rtlwifi/rtl8192ce/def.h | 1 + .../net/wireless/rtlwifi/rtl8192ce/hw.c | 12 +- .../net/wireless/rtlwifi/rtl8192ce/sw.c | 6 +- .../net/wireless/rtlwifi/rtl8192de/fw.c | 6 +- trunk/drivers/net/xen-netfront.c | 39 +- trunk/drivers/of/base.c | 27 + trunk/drivers/oprofile/cpu_buffer.c | 11 +- trunk/drivers/pci/.gitignore | 4 - trunk/drivers/pci/pci-acpi.c | 4 +- trunk/drivers/pci/pci-driver.c | 13 + trunk/drivers/pci/pci-sysfs.c | 42 + trunk/drivers/pci/pci.c | 1 + trunk/drivers/pci/pcie/portdrv_pci.c | 14 + trunk/drivers/pci/probe.c | 31 +- trunk/drivers/pinctrl/core.c | 13 +- trunk/drivers/pinctrl/pinctrl-imx23.c | 2 +- trunk/drivers/pinctrl/pinctrl-imx28.c | 2 +- trunk/drivers/pinctrl/pinctrl-imx51.c | 2 +- .../drivers/pinctrl/pinctrl-nomadik-db8500.c | 7 +- trunk/drivers/pinctrl/pinctrl-nomadik.c | 3 +- trunk/drivers/pinctrl/pinctrl-sirf.c | 1 - trunk/drivers/pinctrl/pinctrl-u300.c | 8 +- trunk/drivers/platform/x86/Kconfig | 6 +- trunk/drivers/platform/x86/acer-wmi.c | 2 - trunk/drivers/platform/x86/apple-gmux.c | 432 ++- trunk/drivers/platform/x86/asus-laptop.c | 10 +- trunk/drivers/platform/x86/asus-wmi.c | 25 +- trunk/drivers/platform/x86/asus-wmi.h | 1 + trunk/drivers/platform/x86/classmate-laptop.c | 16 +- trunk/drivers/platform/x86/dell-laptop.c | 12 +- trunk/drivers/platform/x86/eeepc-laptop.c | 10 +- trunk/drivers/platform/x86/fujitsu-tablet.c | 2 + trunk/drivers/platform/x86/hdaps.c | 2 + trunk/drivers/platform/x86/hp_accel.c | 2 +- trunk/drivers/platform/x86/ideapad-laptop.c | 110 +- trunk/drivers/platform/x86/msi-laptop.c | 4 + trunk/drivers/platform/x86/panasonic-laptop.c | 4 + trunk/drivers/platform/x86/samsung-laptop.c | 4 - trunk/drivers/platform/x86/sony-laptop.c | 12 +- trunk/drivers/platform/x86/thinkpad_acpi.c | 22 +- trunk/drivers/platform/x86/toshiba_acpi.c | 2 + .../drivers/platform/x86/toshiba_bluetooth.c | 4 + trunk/drivers/platform/x86/xo15-ebook.c | 2 + trunk/drivers/pwm/Kconfig | 31 +- trunk/drivers/pwm/core.c | 12 +- trunk/drivers/pwm/pwm-samsung.c | 1 + trunk/drivers/pwm/pwm-tegra.c | 4 +- trunk/drivers/pwm/pwm-tiecap.c | 11 +- trunk/drivers/pwm/pwm-tiehrpwm.c | 33 +- trunk/drivers/pwm/pwm-vt8500.c | 2 +- trunk/drivers/rapidio/devices/tsi721.c | 12 +- trunk/drivers/regulator/Kconfig | 38 +- trunk/drivers/regulator/Makefile | 2 + trunk/drivers/regulator/aat2870-regulator.c | 2 +- trunk/drivers/regulator/ab3100.c | 7 +- trunk/drivers/regulator/ab8500.c | 36 +- 
trunk/drivers/regulator/anatop-regulator.c | 5 +- trunk/drivers/regulator/arizona-ldo1.c | 6 +- trunk/drivers/regulator/arizona-micsupp.c | 5 + trunk/drivers/regulator/core.c | 155 +- trunk/drivers/regulator/da9052-regulator.c | 4 +- trunk/drivers/regulator/dummy.c | 2 +- trunk/drivers/regulator/fan53555.c | 322 ++ trunk/drivers/regulator/gpio-regulator.c | 38 +- trunk/drivers/regulator/isl6271a-regulator.c | 6 - trunk/drivers/regulator/lp872x.c | 88 +- trunk/drivers/regulator/lp8788-buck.c | 80 +- trunk/drivers/regulator/lp8788-ldo.c | 8 + trunk/drivers/regulator/max77686.c | 30 +- trunk/drivers/regulator/max8907-regulator.c | 408 +++ trunk/drivers/regulator/mc13783-regulator.c | 89 +- trunk/drivers/regulator/mc13892-regulator.c | 77 +- .../regulator/mc13xxx-regulator-core.c | 17 +- trunk/drivers/regulator/mc13xxx.h | 1 - trunk/drivers/regulator/of_regulator.c | 25 +- trunk/drivers/regulator/palmas-regulator.c | 68 +- trunk/drivers/regulator/s2mps11.c | 27 +- trunk/drivers/regulator/tps65217-regulator.c | 124 +- trunk/drivers/regulator/tps6524x-regulator.c | 10 +- trunk/drivers/regulator/tps6586x-regulator.c | 104 +- trunk/drivers/regulator/twl-regulator.c | 115 +- trunk/drivers/regulator/wm831x-dcdc.c | 11 +- trunk/drivers/regulator/wm831x-ldo.c | 12 + trunk/drivers/regulator/wm8400-regulator.c | 7 +- trunk/drivers/rpmsg/virtio_rpmsg_bus.c | 6 +- trunk/drivers/rtc/interface.c | 2 + trunk/drivers/rtc/rtc-at91sam9.c | 22 +- trunk/drivers/rtc/rtc-cmos.c | 1 - trunk/drivers/rtc/rtc-pcf2123.c | 2 + trunk/drivers/rtc/rtc-rs5c348.c | 7 +- trunk/drivers/rtc/rtc-twl.c | 5 + trunk/drivers/s390/block/dasd.c | 17 +- trunk/drivers/s390/block/dasd_alias.c | 27 +- trunk/drivers/s390/block/dasd_eckd.c | 34 +- trunk/drivers/s390/block/dasd_ioctl.c | 7 +- trunk/drivers/s390/char/sclp_sdias.c | 2 + trunk/drivers/s390/cio/device.c | 7 +- trunk/drivers/scsi/aic7xxx/aic79xx_core.c | 2 +- trunk/drivers/scsi/bfa/bfa_ioc.c | 2 +- trunk/drivers/scsi/bfa/bfa_ioc.h | 2 +- trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 4 +- trunk/drivers/scsi/bnx2i/bnx2i_hwi.c | 3 + trunk/drivers/scsi/gdth.h | 9 - trunk/drivers/scsi/hpsa.c | 3 +- trunk/drivers/scsi/ipr.c | 66 +- trunk/drivers/scsi/isci/host.c | 2 +- trunk/drivers/scsi/isci/init.c | 2 +- trunk/drivers/scsi/isci/port.c | 2 +- trunk/drivers/scsi/isci/request.c | 2 +- trunk/drivers/scsi/isci/task.c | 2 +- trunk/drivers/scsi/lpfc/lpfc_init.c | 2 +- trunk/drivers/scsi/lpfc/lpfc_sli.c | 4 +- trunk/drivers/scsi/megaraid.c | 5 +- trunk/drivers/scsi/megaraid.h | 35 - .../drivers/scsi/megaraid/megaraid_sas_base.c | 3 +- trunk/drivers/scsi/mpt2sas/mpt2sas_base.c | 20 +- trunk/drivers/scsi/mvumi.c | 6 +- trunk/drivers/scsi/qla4xxx/ql4_os.c | 6 +- trunk/drivers/scsi/scsi_error.c | 10 + trunk/drivers/scsi/scsi_lib.c | 5 +- trunk/drivers/scsi/scsi_scan.c | 10 + trunk/drivers/scsi/virtio_scsi.c | 2 +- trunk/drivers/scsi/vmw_pvscsi.c | 4 +- trunk/drivers/sh/intc/core.c | 27 +- trunk/drivers/sh/pfc/pinctrl.c | 5 +- trunk/drivers/spi/spi-au1550.c | 2 +- trunk/drivers/spi/spi-bcm63xx.c | 35 +- trunk/drivers/spi/spi-bfin-sport.c | 2 +- trunk/drivers/spi/spi-coldfire-qspi.c | 5 +- trunk/drivers/spi/spi-oc-tiny.c | 2 +- trunk/drivers/spi/spi-omap2-mcspi.c | 6 +- trunk/drivers/spi/spi-pl022.c | 1 - trunk/drivers/spi/spi-ppc4xx.c | 4 +- trunk/drivers/spi/spi-s3c64xx.c | 14 +- trunk/drivers/spi/spi-topcliff-pch.c | 2 +- trunk/drivers/staging/android/android_alarm.h | 4 +- trunk/drivers/staging/comedi/drivers.c | 2 +- .../staging/comedi/drivers/adv_pci1710.c | 3 - 
.../staging/comedi/drivers/adv_pci1723.c | 2 - .../staging/comedi/drivers/adv_pci_dio.c | 2 - .../staging/comedi/drivers/amplc_dio200.c | 7 + .../staging/comedi/drivers/amplc_pc236.c | 7 + .../staging/comedi/drivers/amplc_pc263.c | 7 + .../staging/comedi/drivers/amplc_pci224.c | 7 + .../staging/comedi/drivers/amplc_pci230.c | 7 + .../staging/comedi/drivers/daqboard2000.c | 17 +- trunk/drivers/staging/comedi/drivers/das08.c | 11 +- trunk/drivers/staging/comedi/drivers/dt3000.c | 6 +- trunk/drivers/staging/comedi/drivers/rtd520.c | 26 +- trunk/drivers/staging/comedi/drivers/usbdux.c | 4 +- .../staging/comedi/drivers/usbduxfast.c | 4 +- .../staging/comedi/drivers/usbduxsigma.c | 4 +- trunk/drivers/staging/csr/Kconfig | 2 +- .../staging/iio/accel/lis3l02dq_ring.c | 4 +- trunk/drivers/staging/iio/adc/ad7192.c | 50 +- trunk/drivers/staging/iio/adc/ad7298_ring.c | 2 +- trunk/drivers/staging/iio/adc/ad7780.c | 10 +- trunk/drivers/staging/iio/adc/ad7793.c | 99 +- .../drivers/staging/iio/gyro/adis16260_core.c | 2 + .../drivers/staging/iio/imu/adis16400_core.c | 2 + trunk/drivers/staging/iio/meter/ade7753.c | 2 + trunk/drivers/staging/iio/meter/ade7754.c | 2 + trunk/drivers/staging/iio/meter/ade7759.c | 2 + trunk/drivers/staging/nvec/nvec.c | 2 +- .../drivers/staging/omapdrm/omap_connector.c | 41 +- trunk/drivers/staging/ozwpan/ozcdev.c | 3 +- trunk/drivers/staging/rtl8712/recv_linux.c | 7 +- trunk/drivers/staging/vt6656/dpc.c | 2 +- trunk/drivers/staging/vt6656/main_usb.c | 2 +- trunk/drivers/staging/vt6656/rxtx.c | 38 +- trunk/drivers/staging/winbond/wbusb.c | 2 +- trunk/drivers/staging/wlan-ng/cfg80211.c | 4 +- trunk/drivers/staging/zcache/zcache-main.c | 7 +- .../drivers/target/iscsi/iscsi_target_login.c | 11 +- .../target/iscsi/iscsi_target_parameters.c | 4 +- trunk/drivers/target/target_core_alua.c | 7 + trunk/drivers/target/target_core_device.c | 7 + trunk/drivers/target/target_core_iblock.c | 17 +- trunk/drivers/target/target_core_pr.c | 8 + trunk/drivers/target/target_core_pscsi.c | 36 +- trunk/drivers/target/target_core_spc.c | 35 +- trunk/drivers/target/target_core_transport.c | 161 +- trunk/drivers/target/tcm_fc/tcm_fc.h | 1 + trunk/drivers/target/tcm_fc/tfc_cmd.c | 8 +- trunk/drivers/target/tcm_fc/tfc_sess.c | 4 +- trunk/drivers/tty/serial/Kconfig | 10 +- trunk/drivers/tty/serial/ifx6x60.c | 2 +- trunk/drivers/tty/serial/imx.c | 30 +- trunk/drivers/tty/serial/mxs-auart.c | 14 +- trunk/drivers/tty/serial/pmac_zilog.c | 12 +- trunk/drivers/usb/Kconfig | 2 +- trunk/drivers/usb/chipidea/Kconfig | 9 +- trunk/drivers/usb/chipidea/udc.c | 59 +- trunk/drivers/usb/class/cdc-acm.c | 3 +- trunk/drivers/usb/class/cdc-wdm.c | 12 +- trunk/drivers/usb/core/devices.c | 2 +- trunk/drivers/usb/core/hcd.c | 6 +- trunk/drivers/usb/core/quirks.c | 4 + trunk/drivers/usb/dwc3/core.c | 9 +- trunk/drivers/usb/dwc3/ep0.c | 1 - trunk/drivers/usb/dwc3/gadget.c | 19 +- trunk/drivers/usb/early/ehci-dbgp.c | 2 +- trunk/drivers/usb/gadget/at91_udc.c | 6 +- trunk/drivers/usb/gadget/dummy_hcd.c | 41 +- trunk/drivers/usb/gadget/f_fs.c | 4 + trunk/drivers/usb/gadget/s3c-hsotg.c | 3 + trunk/drivers/usb/gadget/u_ether.c | 6 + trunk/drivers/usb/gadget/u_serial.c | 4 + trunk/drivers/usb/host/ehci-omap.c | 167 +- trunk/drivers/usb/host/ehci-q.c | 12 +- trunk/drivers/usb/host/ehci-sead3.c | 2 +- trunk/drivers/usb/host/ehci-tegra.c | 3 +- trunk/drivers/usb/host/isp1362-hcd.c | 8 +- trunk/drivers/usb/host/ohci-at91.c | 13 +- trunk/drivers/usb/host/ohci-omap.c | 2 - trunk/drivers/usb/host/pci-quirks.c | 49 +- 
trunk/drivers/usb/host/pci-quirks.h | 2 + trunk/drivers/usb/host/xhci-hub.c | 42 + trunk/drivers/usb/host/xhci-pci.c | 10 + trunk/drivers/usb/host/xhci-plat.c | 2 +- trunk/drivers/usb/host/xhci-ring.c | 40 +- trunk/drivers/usb/host/xhci.c | 129 +- trunk/drivers/usb/host/xhci.h | 9 + trunk/drivers/usb/misc/emi62.c | 2 +- trunk/drivers/usb/musb/Kconfig | 4 +- trunk/drivers/usb/musb/musb_dsps.c | 19 +- trunk/drivers/usb/musb/musb_host.c | 2 +- trunk/drivers/usb/musb/musbhsdma.c | 2 +- trunk/drivers/usb/musb/tusb6010.c | 2 +- trunk/drivers/usb/renesas_usbhs/common.c | 6 +- trunk/drivers/usb/renesas_usbhs/fifo.c | 4 +- trunk/drivers/usb/renesas_usbhs/mod_host.c | 8 + trunk/drivers/usb/serial/bus.c | 15 +- trunk/drivers/usb/serial/ftdi_sio.c | 23 +- trunk/drivers/usb/serial/ftdi_sio_ids.h | 36 +- trunk/drivers/usb/serial/ipw.c | 3 +- trunk/drivers/usb/serial/mos7840.c | 16 +- trunk/drivers/usb/serial/option.c | 287 +- trunk/drivers/usb/serial/qcserial.c | 47 +- trunk/drivers/usb/serial/usb-wwan.h | 3 +- trunk/drivers/usb/serial/usb_wwan.c | 68 +- trunk/drivers/vfio/pci/vfio_pci_intrs.c | 76 +- trunk/drivers/vfio/vfio.c | 19 +- trunk/drivers/vhost/Kconfig | 3 + trunk/drivers/vhost/Kconfig.tcm | 6 + trunk/drivers/vhost/Makefile | 2 + trunk/drivers/vhost/tcm_vhost.c | 1649 ++++++++++ trunk/drivers/vhost/tcm_vhost.h | 103 + trunk/drivers/video/auo_k190x.c | 2 - trunk/drivers/video/backlight/88pm860x_bl.c | 1 - trunk/drivers/video/console/bitblit.c | 2 +- trunk/drivers/video/console/fbcon.c | 11 +- trunk/drivers/video/efifb.c | 4 +- trunk/drivers/video/exynos/exynos_mipi_dsi.c | 2 +- trunk/drivers/video/mb862xx/mb862xxfbdrv.c | 2 + trunk/drivers/video/omap2/dss/sdi.c | 14 + .../drivers/video/omap2/omapfb/omapfb-main.c | 2 +- trunk/drivers/video/tmiofb.c | 4 + trunk/drivers/w1/masters/ds1wm.c | 2 +- trunk/drivers/w1/slaves/w1_therm.c | 9 + trunk/drivers/w1/w1_family.h | 1 + trunk/drivers/watchdog/booke_wdt.c | 7 +- trunk/drivers/watchdog/da9052_wdt.c | 1 - trunk/drivers/watchdog/hpwdt.c | 3 + trunk/drivers/watchdog/watchdog_core.c | 3 +- trunk/drivers/xen/gntdev.c | 5 +- trunk/drivers/xen/grant-table.c | 6 +- trunk/drivers/xen/platform-pci.c | 15 - trunk/drivers/xen/swiotlb-xen.c | 2 +- trunk/drivers/xen/xen-pciback/pci_stub.c | 8 +- trunk/drivers/zorro/zorro.c | 2 - trunk/fs/autofs4/expire.c | 36 +- trunk/fs/bio.c | 13 +- trunk/fs/block_dev.c | 3 + trunk/fs/btrfs/backref.c | 4 +- trunk/fs/btrfs/compression.c | 1 + trunk/fs/btrfs/ctree.c | 9 +- trunk/fs/btrfs/ctree.h | 5 +- trunk/fs/btrfs/delayed-inode.c | 12 +- trunk/fs/btrfs/delayed-ref.c | 163 +- trunk/fs/btrfs/delayed-ref.h | 6 +- trunk/fs/btrfs/disk-io.c | 53 +- trunk/fs/btrfs/disk-io.h | 2 +- trunk/fs/btrfs/extent-tree.c | 123 +- trunk/fs/btrfs/extent_io.c | 17 +- trunk/fs/btrfs/file-item.c | 4 +- trunk/fs/btrfs/inode.c | 333 +- trunk/fs/btrfs/ioctl.c | 7 +- trunk/fs/btrfs/locking.c | 2 +- trunk/fs/btrfs/ordered-data.c | 2 +- trunk/fs/btrfs/qgroup.c | 4 +- trunk/fs/btrfs/root-tree.c | 4 +- trunk/fs/btrfs/super.c | 19 +- trunk/fs/btrfs/transaction.c | 3 +- trunk/fs/btrfs/volumes.c | 37 +- trunk/fs/btrfs/volumes.h | 2 - trunk/fs/buffer.c | 66 +- trunk/fs/ceph/debugfs.c | 1 + trunk/fs/ceph/inode.c | 15 +- trunk/fs/ceph/ioctl.c | 3 +- trunk/fs/cifs/cifs_unicode.c | 2 +- trunk/fs/cifs/cifssmb.c | 11 +- trunk/fs/cifs/dir.c | 9 +- trunk/fs/cifs/file.c | 2 +- trunk/fs/cifs/inode.c | 24 +- trunk/fs/cifs/link.c | 2 + trunk/fs/cifs/smb2misc.c | 16 +- trunk/fs/cifs/smb2pdu.h | 10 +- trunk/fs/cifs/transport.c | 9 +- trunk/fs/compat.c | 10 +- 
trunk/fs/dcache.c | 12 +- trunk/fs/debugfs/file.c | 76 +- trunk/fs/direct-io.c | 5 + trunk/fs/ecryptfs/file.c | 10 +- trunk/fs/ecryptfs/inode.c | 5 + trunk/fs/ecryptfs/main.c | 1 + trunk/fs/eventpoll.c | 2 +- trunk/fs/exofs/inode.c | 27 +- trunk/fs/exofs/ore.c | 14 +- trunk/fs/exofs/super.c | 11 - trunk/fs/ext2/balloc.c | 2 +- trunk/fs/ext3/balloc.c | 2 +- trunk/fs/ext3/inode.c | 27 +- trunk/fs/ext3/super.c | 11 - trunk/fs/ext4/balloc.c | 62 +- trunk/fs/ext4/bitmap.c | 1 - trunk/fs/ext4/extents.c | 1 + trunk/fs/ext4/inode.c | 24 +- trunk/fs/ext4/mballoc.c | 2 +- trunk/fs/ext4/super.c | 17 +- trunk/fs/fs-writeback.c | 4 - trunk/fs/fuse/control.c | 4 +- trunk/fs/fuse/cuse.c | 4 +- trunk/fs/fuse/dev.c | 1 + trunk/fs/fuse/dir.c | 3 + trunk/fs/fuse/file.c | 15 +- trunk/fs/fuse/fuse_i.h | 3 + trunk/fs/fuse/inode.c | 44 +- trunk/fs/gfs2/aops.c | 11 +- trunk/fs/gfs2/bmap.c | 2 +- trunk/fs/gfs2/file.c | 35 +- trunk/fs/gfs2/glock.c | 60 +- trunk/fs/gfs2/glops.c | 1 + trunk/fs/gfs2/incore.h | 30 +- trunk/fs/gfs2/inode.c | 28 +- trunk/fs/gfs2/meta_io.c | 2 +- trunk/fs/gfs2/ops_fstype.c | 8 + trunk/fs/gfs2/quota.c | 11 +- trunk/fs/gfs2/rgrp.c | 1221 ++++---- trunk/fs/gfs2/rgrp.h | 28 +- trunk/fs/gfs2/super.c | 9 +- trunk/fs/gfs2/trace_gfs2.h | 20 +- trunk/fs/gfs2/trans.h | 7 +- trunk/fs/gfs2/xattr.c | 96 +- trunk/fs/hfs/mdb.c | 4 +- trunk/fs/jbd/journal.c | 9 +- trunk/fs/jbd2/journal.c | 7 +- trunk/fs/libfs.c | 2 +- trunk/fs/lockd/svclock.c | 3 +- trunk/fs/logfs/dev_bdev.c | 15 +- trunk/fs/logfs/inode.c | 18 +- trunk/fs/logfs/journal.c | 2 +- trunk/fs/logfs/readwrite.c | 1 - trunk/fs/logfs/segment.c | 2 +- trunk/fs/namei.c | 10 +- trunk/fs/namespace.c | 10 +- trunk/fs/nfs/Makefile | 18 +- trunk/fs/nfs/client.c | 2 +- trunk/fs/nfs/file.c | 4 +- trunk/fs/nfs/idmap.c | 62 +- trunk/fs/nfs/inode.c | 2 +- trunk/fs/nfs/nfs3proc.c | 4 +- trunk/fs/nfs/nfs4_fs.h | 3 + trunk/fs/nfs/nfs4client.c | 2 +- trunk/fs/nfs/nfs4file.c | 4 +- trunk/fs/nfs/nfs4proc.c | 127 +- trunk/fs/nfs/nfs4super.c | 15 - trunk/fs/nfs/nfs4xdr.c | 41 +- trunk/fs/nfs/objlayout/objio_osd.c | 55 +- trunk/fs/nfs/pagelist.c | 2 + trunk/fs/nfs/pnfs.c | 39 +- trunk/fs/nfs/pnfs.h | 2 +- trunk/fs/nfs/super.c | 43 +- trunk/fs/nfs/write.c | 15 +- trunk/fs/nfsd/nfs4callback.c | 4 +- trunk/fs/nfsd/state.h | 1 - trunk/fs/nilfs2/super.c | 4 - trunk/fs/nilfs2/the_nilfs.h | 2 - trunk/fs/open.c | 9 +- trunk/fs/proc/proc_sysctl.c | 5 +- trunk/fs/quota/dquot.c | 2 +- trunk/fs/reiserfs/bitmap.c | 2 - trunk/fs/reiserfs/inode.c | 2 +- trunk/fs/stat.c | 2 +- trunk/fs/super.c | 40 - trunk/fs/ubifs/debug.h | 2 +- trunk/fs/ubifs/file.c | 10 +- trunk/fs/ubifs/lpt.c | 5 +- trunk/fs/ubifs/recovery.c | 2 +- trunk/fs/ubifs/replay.c | 3 +- trunk/fs/ubifs/super.c | 5 +- trunk/fs/udf/file.c | 35 +- trunk/fs/udf/inode.c | 5 +- trunk/fs/udf/super.c | 7 +- trunk/fs/xfs/xfs_buf.c | 5 +- trunk/fs/xfs/xfs_buf.h | 41 +- trunk/fs/xfs/xfs_discard.c | 6 +- trunk/fs/xfs/xfs_ialloc.c | 17 +- trunk/fs/xfs/xfs_rtalloc.c | 2 +- trunk/fs/xfs/xfs_super.c | 1 + trunk/include/acpi/acpixf.h | 4 +- trunk/include/acpi/actypes.h | 2 +- trunk/include/asm-generic/mutex-xchg.h | 11 +- trunk/include/asm-generic/unistd.h | 4 +- trunk/include/drm/drm_crtc.h | 5 +- trunk/include/drm/drm_fourcc.h | 6 +- trunk/include/drm/drm_mode.h | 5 +- trunk/include/drm/drm_pciids.h | 3 + trunk/include/drm/radeon_drm.h | 2 + trunk/include/linux/Kbuild | 1 + trunk/include/linux/acpi.h | 2 +- trunk/include/linux/atmel-ssc.h | 1 + trunk/include/linux/backing-dev.h | 1 - .../linux/bcma/bcma_driver_chipcommon.h | 6 + 
trunk/include/linux/blkdev.h | 14 +- trunk/include/linux/can.h | 25 +- trunk/include/linux/compaction.h | 4 +- trunk/include/linux/compiler-gcc4.h | 7 + trunk/include/linux/compiler.h | 4 + trunk/include/linux/cpuidle.h | 4 + trunk/include/linux/dcache.h | 2 + trunk/include/linux/efi-bgrt.h | 21 + trunk/include/linux/efi.h | 10 + trunk/include/linux/fs.h | 3 - trunk/include/linux/ftrace.h | 158 +- trunk/include/linux/ftrace_event.h | 5 +- trunk/include/linux/fuse.h | 19 +- trunk/include/linux/hardirq.h | 10 +- trunk/include/linux/hid.h | 5 +- trunk/include/linux/i2c-pnx.h | 1 + trunk/include/linux/if_team.h | 30 +- trunk/include/linux/iio/frequency/adf4350.h | 2 + trunk/include/linux/input.h | 35 +- trunk/include/linux/input/eeti_ts.h | 1 + trunk/include/linux/input/mt.h | 57 +- trunk/include/linux/interrupt.h | 2 + trunk/include/linux/iommu.h | 44 +- trunk/include/linux/ipv6.h | 1 + trunk/include/linux/irq.h | 1 + trunk/include/linux/irqdesc.h | 2 - trunk/include/linux/jbd2.h | 1 + trunk/include/linux/jiffies.h | 29 +- trunk/include/linux/kdb.h | 2 - trunk/include/linux/kernel.h | 12 +- trunk/include/linux/kernel_stat.h | 8 + trunk/include/linux/kobject.h | 2 +- trunk/include/linux/kprobes.h | 27 + trunk/include/linux/kref.h | 18 + trunk/include/linux/kthread.h | 11 +- trunk/include/linux/ktime.h | 7 - trunk/include/linux/kvm_host.h | 4 +- trunk/include/linux/mISDNhw.h | 2 +- trunk/include/linux/memory.h | 2 +- trunk/include/linux/mfd/core.h | 4 +- trunk/include/linux/mfd/ezx-pcap.h | 1 + trunk/include/linux/mfd/max77686.h | 1 + trunk/include/linux/mfd/max8998.h | 2 +- trunk/include/linux/mfd/tps65217.h | 12 +- trunk/include/linux/mfd/tps6586x.h | 1 + trunk/include/linux/micrel_phy.h | 19 +- trunk/include/linux/mlx4/device.h | 13 + trunk/include/linux/mmc/card.h | 1 + trunk/include/linux/mv643xx_eth.h | 2 + trunk/include/linux/netdevice.h | 7 +- .../linux/netfilter/nf_conntrack_sip.h | 2 +- trunk/include/linux/netpoll.h | 42 +- trunk/include/linux/nfs_fs.h | 5 - trunk/include/linux/nfs_page.h | 1 + trunk/include/linux/nfs_xdr.h | 3 +- trunk/include/linux/nvme.h | 2 + trunk/include/linux/of.h | 7 + trunk/include/linux/pci_ids.h | 2 +- trunk/include/linux/perf_event.h | 69 +- trunk/include/linux/perf_regs.h | 25 + trunk/include/linux/pinctrl/consumer.h | 1 + trunk/include/linux/rcupdate.h | 21 +- trunk/include/linux/regmap.h | 3 + trunk/include/linux/regulator/consumer.h | 15 + trunk/include/linux/regulator/driver.h | 18 +- trunk/include/linux/regulator/fan53555.h | 60 + trunk/include/linux/regulator/machine.h | 2 + trunk/include/linux/sched.h | 30 +- trunk/include/linux/screen_info.h | 2 + trunk/include/linux/security.h | 3 +- trunk/include/linux/smpboot.h | 43 + trunk/include/linux/string.h | 2 +- trunk/include/linux/sunrpc/xprt.h | 3 + trunk/include/linux/task_work.h | 3 +- trunk/include/linux/time.h | 29 +- trunk/include/linux/timer.h | 165 +- trunk/include/linux/timex.h | 2 +- trunk/include/linux/topology.h | 3 +- trunk/include/linux/tracepoint.h | 28 +- trunk/include/linux/uprobes.h | 15 +- trunk/include/linux/writeback.h | 1 - trunk/include/linux/xfrm.h | 2 + trunk/include/net/bluetooth/smp.h | 2 +- trunk/include/net/cfg80211.h | 2 + trunk/include/net/codel.h | 8 +- trunk/include/net/dst.h | 2 +- trunk/include/net/inet_connection_sock.h | 1 + trunk/include/net/inet_sock.h | 9 - trunk/include/net/ip.h | 2 +- trunk/include/net/ip6_fib.h | 5 +- trunk/include/net/llc.h | 2 +- trunk/include/net/net_namespace.h | 10 + .../net/netfilter/nf_conntrack_ecache.h | 1 + 
trunk/include/net/netns/ipv4.h | 1 - trunk/include/net/route.h | 2 +- trunk/include/net/scm.h | 4 +- trunk/include/net/sock.h | 4 +- trunk/include/net/tcp.h | 1 + trunk/include/net/xfrm.h | 9 + trunk/include/sound/pcm.h | 3 +- trunk/include/target/target_core_backend.h | 4 +- trunk/include/target/target_core_base.h | 3 +- trunk/include/trace/define_trace.h | 2 +- trunk/include/trace/events/kmem.h | 4 +- trunk/include/trace/events/sched.h | 4 + trunk/include/trace/ftrace.h | 6 +- trunk/include/xen/events.h | 2 - trunk/include/xen/grant_table.h | 3 +- trunk/init/Kconfig | 175 +- trunk/init/main.c | 13 +- trunk/ipc/mqueue.c | 61 +- trunk/kernel/Kconfig.locks | 103 +- trunk/kernel/Makefile | 5 +- trunk/kernel/audit_tree.c | 19 +- trunk/kernel/cpu.c | 21 +- trunk/kernel/debug/kdb/kdb_debugger.c | 4 + trunk/kernel/debug/kdb/kdb_io.c | 11 - trunk/kernel/debug/kdb/kdb_main.c | 15 +- trunk/kernel/events/callchain.c | 35 +- trunk/kernel/events/core.c | 308 +- trunk/kernel/events/hw_breakpoint.c | 11 +- trunk/kernel/events/internal.h | 85 +- trunk/kernel/events/ring_buffer.c | 10 +- trunk/kernel/events/uprobes.c | 248 +- trunk/kernel/fork.c | 10 +- trunk/kernel/futex.c | 17 +- trunk/kernel/irq/chip.c | 1 + trunk/kernel/irq/dummychip.c | 2 + trunk/kernel/irq/manage.c | 15 +- trunk/kernel/kprobes.c | 247 +- trunk/kernel/kthread.c | 185 +- trunk/kernel/lockdep.c | 39 + trunk/kernel/pid_namespace.c | 6 +- trunk/kernel/power/suspend.c | 3 - trunk/kernel/printk.c | 2 + trunk/kernel/rcupdate.c | 4 + trunk/kernel/rcutiny.c | 33 +- trunk/kernel/rcutiny_plugin.h | 10 +- trunk/kernel/rcutorture.c | 159 +- trunk/kernel/rcutree.c | 916 +++--- trunk/kernel/rcutree.h | 50 +- trunk/kernel/rcutree_plugin.h | 597 ++-- trunk/kernel/rcutree_trace.c | 22 +- trunk/kernel/sched/Makefile | 2 +- trunk/kernel/sched/core.c | 766 +---- trunk/kernel/sched/cpupri.c | 10 +- trunk/kernel/sched/cputime.c | 530 ++++ trunk/kernel/sched/fair.c | 158 +- trunk/kernel/sched/features.h | 10 +- trunk/kernel/sched/rt.c | 19 +- trunk/kernel/sched/sched.h | 78 +- trunk/kernel/sched/stop_task.c | 22 +- trunk/kernel/signal.c | 18 +- trunk/kernel/smpboot.c | 233 ++ trunk/kernel/smpboot.h | 4 + trunk/kernel/softirq.c | 117 +- trunk/kernel/sysctl.c | 6 +- trunk/kernel/task_work.c | 112 +- trunk/kernel/time/jiffies.c | 2 +- trunk/kernel/time/ntp.c | 2 +- trunk/kernel/time/tick-sched.c | 7 +- trunk/kernel/time/timekeeping.c | 455 +-- trunk/kernel/timer.c | 117 +- trunk/kernel/trace/Kconfig | 10 + trunk/kernel/trace/Makefile | 8 +- trunk/kernel/trace/ftrace.c | 322 +- trunk/kernel/trace/ring_buffer.c | 4 +- trunk/kernel/trace/trace.c | 12 +- trunk/kernel/trace/trace.h | 3 +- trunk/kernel/trace/trace_event_perf.c | 5 +- trunk/kernel/trace/trace_events.c | 116 +- trunk/kernel/trace/trace_events_filter.c | 2 +- trunk/kernel/trace/trace_functions.c | 14 +- trunk/kernel/trace/trace_functions_graph.c | 5 +- trunk/kernel/trace/trace_irqsoff.c | 5 +- trunk/kernel/trace/trace_kprobe.c | 6 +- trunk/kernel/trace/trace_sched_wakeup.c | 5 +- trunk/kernel/trace/trace_selftest.c | 304 +- trunk/kernel/trace/trace_stack.c | 4 +- trunk/kernel/trace/trace_syscalls.c | 10 +- trunk/kernel/trace/trace_uprobe.c | 2 +- trunk/kernel/watchdog.c | 280 +- trunk/kernel/workqueue.c | 147 +- trunk/lib/Kconfig.debug | 14 + trunk/lib/digsig.c | 6 +- trunk/lib/flex_proportions.c | 2 +- trunk/mm/backing-dev.c | 52 - trunk/mm/bootmem.c | 2 +- trunk/mm/compaction.c | 156 +- trunk/mm/filemap.c | 7 - trunk/mm/huge_memory.c | 1 - trunk/mm/internal.h | 1 + trunk/mm/kmemleak.c | 6 +- 
trunk/mm/memblock.c | 2 +- trunk/mm/memory_hotplug.c | 16 +- trunk/mm/mempolicy.c | 2 +- trunk/mm/mmap.c | 7 +- trunk/mm/page-writeback.c | 1 - trunk/mm/page_alloc.c | 40 +- trunk/mm/slab.c | 7 +- trunk/mm/slub.c | 15 +- trunk/mm/vmscan.c | 1 + trunk/net/8021q/vlan_dev.c | 52 +- trunk/net/8021q/vlanproc.c | 2 +- trunk/net/atm/common.c | 1 + trunk/net/atm/pvc.c | 1 + trunk/net/batman-adv/bat_iv_ogm.c | 13 +- trunk/net/batman-adv/bitarray.h | 6 +- trunk/net/batman-adv/gateway_client.c | 6 +- trunk/net/batman-adv/soft-interface.c | 7 +- trunk/net/batman-adv/translation-table.c | 1 + trunk/net/bluetooth/bnep/sock.c | 4 +- trunk/net/bluetooth/cmtp/sock.c | 4 +- trunk/net/bluetooth/hci_conn.c | 4 + trunk/net/bluetooth/hci_core.c | 2 + trunk/net/bluetooth/hci_event.c | 28 +- trunk/net/bluetooth/hci_sock.c | 18 +- trunk/net/bluetooth/hidp/sock.c | 4 +- trunk/net/bluetooth/l2cap_core.c | 14 +- trunk/net/bluetooth/l2cap_sock.c | 5 +- trunk/net/bluetooth/mgmt.c | 16 + trunk/net/bluetooth/rfcomm/sock.c | 2 + trunk/net/bluetooth/rfcomm/tty.c | 2 +- trunk/net/bluetooth/sco.c | 19 +- trunk/net/bluetooth/smp.c | 15 +- trunk/net/bridge/br_device.c | 30 +- trunk/net/bridge/br_forward.c | 2 +- trunk/net/bridge/br_if.c | 6 +- trunk/net/bridge/br_private.h | 4 +- trunk/net/bridge/netfilter/ebt_log.c | 2 +- trunk/net/caif/cfsrvl.c | 5 +- trunk/net/caif/chnl_net.c | 4 + trunk/net/ceph/ceph_common.c | 1 - trunk/net/ceph/debugfs.c | 4 + trunk/net/ceph/messenger.c | 16 +- trunk/net/ceph/mon_client.c | 51 +- trunk/net/core/dev.c | 44 +- trunk/net/core/dst.c | 10 +- trunk/net/core/netpoll.c | 99 +- trunk/net/core/netprio_cgroup.c | 30 +- trunk/net/core/pktgen.c | 2 +- trunk/net/core/scm.c | 4 + trunk/net/core/skbuff.c | 4 +- trunk/net/core/sock.c | 13 +- trunk/net/dccp/ccid.h | 4 +- trunk/net/dccp/ccids/ccid3.c | 1 + trunk/net/ipv4/arp.c | 2 +- trunk/net/ipv4/devinet.c | 10 +- trunk/net/ipv4/fib_frontend.c | 20 +- trunk/net/ipv4/fib_rules.c | 2 +- trunk/net/ipv4/fib_trie.c | 8 +- trunk/net/ipv4/inet_connection_sock.c | 7 +- trunk/net/ipv4/inetpeer.c | 5 +- trunk/net/ipv4/ip_output.c | 10 +- trunk/net/ipv4/ipmr.c | 14 +- trunk/net/ipv4/netfilter/nf_nat_sip.c | 14 +- trunk/net/ipv4/raw.c | 14 +- trunk/net/ipv4/route.c | 54 +- trunk/net/ipv4/tcp.c | 27 +- trunk/net/ipv4/tcp_cong.c | 3 +- trunk/net/ipv4/tcp_input.c | 24 +- trunk/net/ipv4/tcp_ipv4.c | 23 +- trunk/net/ipv4/tcp_metrics.c | 12 + trunk/net/ipv4/tcp_minisocks.c | 2 - trunk/net/ipv4/tcp_output.c | 37 +- trunk/net/ipv4/tcp_timer.c | 6 +- trunk/net/ipv4/udp.c | 7 +- trunk/net/ipv6/addrconf.c | 4 +- trunk/net/ipv6/esp6.c | 6 +- trunk/net/ipv6/inet6_connection_sock.c | 23 +- trunk/net/ipv6/ip6_fib.c | 4 + trunk/net/ipv6/mip6.c | 20 +- trunk/net/ipv6/proc.c | 4 +- trunk/net/ipv6/raw.c | 21 +- trunk/net/ipv6/route.c | 19 +- trunk/net/ipv6/tcp_ipv6.c | 34 +- trunk/net/ipv6/udp.c | 11 + trunk/net/ipv6/xfrm6_policy.c | 8 + trunk/net/l2tp/l2tp_core.c | 7 +- trunk/net/l2tp/l2tp_core.h | 1 + trunk/net/l2tp/l2tp_eth.c | 2 +- trunk/net/l2tp/l2tp_ip6.c | 1 + trunk/net/l2tp/l2tp_netlink.c | 12 +- trunk/net/llc/af_llc.c | 8 +- trunk/net/llc/llc_input.c | 21 +- trunk/net/llc/llc_station.c | 29 +- trunk/net/mac80211/cfg.c | 9 +- trunk/net/mac80211/mesh.c | 3 + trunk/net/mac80211/mlme.c | 6 + trunk/net/mac80211/scan.c | 3 +- trunk/net/mac80211/tx.c | 38 +- trunk/net/netfilter/ipvs/ip_vs_ctl.c | 5 +- trunk/net/netfilter/nf_conntrack_core.c | 16 +- trunk/net/netfilter/nf_conntrack_expect.c | 29 +- trunk/net/netfilter/nf_conntrack_netlink.c | 10 +- 
trunk/net/netfilter/nf_conntrack_proto_tcp.c | 29 +- trunk/net/netfilter/nf_conntrack_sip.c | 92 +- trunk/net/netfilter/nfnetlink_log.c | 20 +- trunk/net/netfilter/xt_LOG.c | 37 +- trunk/net/netfilter/xt_limit.c | 8 +- trunk/net/netlink/af_netlink.c | 6 +- trunk/net/netrom/af_netrom.c | 9 +- trunk/net/openvswitch/actions.c | 2 +- trunk/net/openvswitch/datapath.c | 6 +- trunk/net/openvswitch/flow.h | 8 +- trunk/net/packet/af_packet.c | 12 +- trunk/net/sched/act_gact.c | 14 +- trunk/net/sched/act_ipt.c | 7 +- trunk/net/sched/act_mirred.c | 11 +- trunk/net/sched/act_pedit.c | 5 +- trunk/net/sched/act_simple.c | 5 +- trunk/net/sched/sch_cbq.c | 5 +- trunk/net/sched/sch_fq_codel.c | 2 +- trunk/net/sched/sch_gred.c | 38 +- trunk/net/sched/sch_qfq.c | 100 +- trunk/net/sctp/output.c | 21 +- trunk/net/socket.c | 5 +- trunk/net/sunrpc/svc_xprt.c | 10 +- trunk/net/sunrpc/svcsock.c | 2 +- trunk/net/sunrpc/xprt.c | 34 +- trunk/net/sunrpc/xprtrdma/transport.c | 1 + trunk/net/sunrpc/xprtsock.c | 3 + trunk/net/unix/af_unix.c | 4 +- trunk/net/wireless/core.c | 5 + trunk/net/wireless/core.h | 1 + trunk/net/wireless/nl80211.c | 4 +- trunk/net/wireless/reg.c | 31 +- trunk/net/wireless/util.c | 2 +- trunk/net/xfrm/xfrm_input.c | 2 +- trunk/net/xfrm/xfrm_policy.c | 5 +- trunk/net/xfrm/xfrm_replay.c | 15 + trunk/net/xfrm/xfrm_state.c | 25 +- trunk/net/xfrm/xfrm_user.c | 57 +- trunk/scripts/Makefile.fwinst | 2 +- trunk/scripts/checkpatch.pl | 3 +- trunk/scripts/checksyscalls.sh | 2 +- .../scripts/coccinelle/api/memdup_user.cocci | 4 +- trunk/scripts/decodecode | 2 +- trunk/scripts/kconfig/streamline_config.pl | 50 +- trunk/scripts/kernel-doc | 1 + trunk/scripts/link-vmlinux.sh | 9 +- trunk/scripts/recordmcount.h | 4 +- trunk/security/apparmor/.gitignore | 1 - trunk/security/keys/keyctl.c | 2 - trunk/security/selinux/include/xfrm.h | 1 + trunk/security/yama/yama_lsm.c | 43 +- trunk/sound/arm/pxa2xx-ac97.c | 4 +- trunk/sound/atmel/abdac.c | 3 +- trunk/sound/atmel/ac97c.c | 14 +- trunk/sound/core/compress_offload.c | 8 +- trunk/sound/core/sgbuf.c | 2 +- trunk/sound/drivers/aloop.c | 2 +- trunk/sound/drivers/dummy.c | 2 +- trunk/sound/drivers/pcsp/pcsp.c | 4 +- trunk/sound/isa/als100.c | 2 +- trunk/sound/oss/.gitignore | 1 - trunk/sound/oss/sb_audio.c | 4 +- trunk/sound/pci/cs46xx/cs46xx_lib.c | 2 +- trunk/sound/pci/ctxfi/ctatc.c | 4 +- trunk/sound/pci/emu10k1/memory.c | 5 +- trunk/sound/pci/hda/hda_auto_parser.c | 5 +- trunk/sound/pci/hda/hda_beep.c | 29 +- trunk/sound/pci/hda/hda_codec.c | 85 +- trunk/sound/pci/hda/hda_codec.h | 2 + trunk/sound/pci/hda/hda_intel.c | 11 + trunk/sound/pci/hda/hda_proc.c | 2 +- trunk/sound/pci/hda/patch_ca0132.c | 174 +- trunk/sound/pci/hda/patch_conexant.c | 6 - trunk/sound/pci/hda/patch_hdmi.c | 12 +- trunk/sound/pci/hda/patch_realtek.c | 8 +- trunk/sound/pci/hda/patch_sigmatel.c | 15 +- trunk/sound/pci/hda/patch_via.c | 8 + trunk/sound/pci/ice1712/prodigy_hifi.c | 3 +- trunk/sound/pci/lx6464es/lx6464es.c | 2 + trunk/sound/pci/rme9652/hdspm.c | 2 +- trunk/sound/pci/sis7019.c | 5 +- trunk/sound/ppc/powermac.c | 2 +- trunk/sound/ppc/snd_ps3.c | 1 + trunk/sound/soc/blackfin/bf6xx-sport.c | 7 + trunk/sound/soc/codecs/ab8500-codec.c | 4 + trunk/sound/soc/codecs/ad1980.c | 1 + trunk/sound/soc/codecs/arizona.c | 2 +- trunk/sound/soc/codecs/mc13783.c | 10 +- trunk/sound/soc/codecs/sgtl5000.c | 3 +- trunk/sound/soc/codecs/stac9766.c | 1 + trunk/sound/soc/codecs/wm2000.c | 2 +- trunk/sound/soc/codecs/wm5102.c | 25 +- trunk/sound/soc/codecs/wm5110.c | 12 + trunk/sound/soc/codecs/wm8904.c | 2 
+- trunk/sound/soc/codecs/wm8962.c | 18 +- trunk/sound/soc/codecs/wm8994.c | 17 +- trunk/sound/soc/codecs/wm9712.c | 22 +- trunk/sound/soc/codecs/wm9713.c | 1 + trunk/sound/soc/davinci/davinci-mcasp.c | 10 +- trunk/sound/soc/fsl/imx-sgtl5000.c | 2 +- trunk/sound/soc/fsl/imx-ssi.c | 5 +- trunk/sound/soc/mxs/Kconfig | 2 +- trunk/sound/soc/mxs/mxs-saif.c | 24 + trunk/sound/soc/omap/am3517evm.c | 2 +- trunk/sound/soc/omap/mcbsp.c | 2 +- trunk/sound/soc/omap/omap-mcbsp.c | 1 + trunk/sound/soc/omap/omap-pcm.c | 1 + trunk/sound/soc/samsung/dma.c | 8 +- trunk/sound/soc/samsung/pcm.c | 2 +- trunk/sound/soc/soc-core.c | 12 +- trunk/sound/soc/soc-dapm.c | 5 +- trunk/sound/soc/soc-jack.c | 2 +- trunk/sound/soc/spear/spear_pcm.c | 2 +- trunk/sound/soc/tegra/tegra_alc5632.c | 3 +- trunk/sound/soc/tegra/tegra_pcm.c | 4 +- trunk/sound/soc/tegra/tegra_wm8903.c | 10 +- trunk/sound/soc/ux500/ux500_msp_dai.c | 2 +- trunk/sound/soc/ux500/ux500_msp_i2s.c | 27 +- trunk/sound/soc/ux500/ux500_msp_i2s.h | 2 +- trunk/sound/usb/card.c | 4 +- trunk/sound/usb/endpoint.c | 32 +- trunk/sound/usb/endpoint.h | 3 +- trunk/sound/usb/pcm.c | 67 +- trunk/tools/lib/traceevent/Makefile | 2 +- trunk/tools/lib/traceevent/event-parse.c | 754 +++-- trunk/tools/lib/traceevent/event-parse.h | 46 +- trunk/tools/lib/traceevent/event-utils.h | 6 + trunk/tools/perf/.gitignore | 2 + trunk/tools/perf/Documentation/Makefile | 6 +- .../perf/Documentation/jit-interface.txt | 15 + .../perf/Documentation/perf-annotate.txt | 3 + trunk/tools/perf/Documentation/perf-diff.txt | 3 + trunk/tools/perf/Documentation/perf-kvm.txt | 30 +- trunk/tools/perf/Documentation/perf-list.txt | 48 +- .../tools/perf/Documentation/perf-report.txt | 3 + .../perf/Documentation/perf-script-perl.txt | 4 +- .../perf/Documentation/perf-script-python.txt | 10 +- trunk/tools/perf/Documentation/perf-trace.txt | 53 + trunk/tools/perf/MANIFEST | 4 + trunk/tools/perf/Makefile | 180 +- trunk/tools/perf/arch/x86/Makefile | 3 + trunk/tools/perf/arch/x86/include/perf_regs.h | 80 + trunk/tools/perf/arch/x86/util/unwind.c | 111 + trunk/tools/perf/bash_completion | 26 + trunk/tools/perf/bench/bench.h | 3 +- trunk/tools/perf/bench/mem-memcpy.c | 2 +- trunk/tools/perf/bench/mem-memset.c | 2 +- trunk/tools/perf/bench/sched-messaging.c | 2 +- trunk/tools/perf/bench/sched-pipe.c | 10 +- trunk/tools/perf/builtin-annotate.c | 4 +- trunk/tools/perf/builtin-bench.c | 2 +- trunk/tools/perf/builtin-buildid-cache.c | 10 +- trunk/tools/perf/builtin-buildid-list.c | 7 +- trunk/tools/perf/builtin-diff.c | 96 +- trunk/tools/perf/builtin-evlist.c | 2 +- trunk/tools/perf/builtin-help.c | 50 +- trunk/tools/perf/builtin-inject.c | 29 +- trunk/tools/perf/builtin-kmem.c | 234 +- trunk/tools/perf/builtin-kvm.c | 838 ++++- trunk/tools/perf/builtin-list.c | 16 +- trunk/tools/perf/builtin-lock.c | 414 ++- trunk/tools/perf/builtin-probe.c | 24 +- trunk/tools/perf/builtin-record.c | 309 +- trunk/tools/perf/builtin-report.c | 52 +- trunk/tools/perf/builtin-sched.c | 1522 ++++----- trunk/tools/perf/builtin-script.c | 229 +- trunk/tools/perf/builtin-stat.c | 136 +- trunk/tools/perf/builtin-test.c | 368 ++- trunk/tools/perf/builtin-timechart.c | 70 +- trunk/tools/perf/builtin-top.c | 54 +- trunk/tools/perf/builtin-trace.c | 310 ++ trunk/tools/perf/builtin.h | 2 + trunk/tools/perf/command-list.txt | 3 +- trunk/tools/perf/config/feature-tests.mak | 50 + trunk/tools/perf/perf-archive.sh | 6 +- trunk/tools/perf/perf.c | 75 +- trunk/tools/perf/perf.h | 9 +- .../lib/Perf/Trace/EventClass.py | 94 + 
.../python/bin/event_analyzing_sample-record | 8 + .../python/bin/event_analyzing_sample-report | 3 + .../scripts/python/event_analyzing_sample.py | 189 ++ trunk/tools/perf/ui/browser.c | 7 +- trunk/tools/perf/ui/browsers/annotate.c | 6 +- trunk/tools/perf/ui/browsers/hists.c | 133 +- trunk/tools/perf/ui/gtk/browser.c | 111 +- trunk/tools/perf/ui/gtk/gtk.h | 3 + trunk/tools/perf/ui/gtk/helpline.c | 56 + trunk/tools/perf/ui/gtk/setup.c | 6 +- trunk/tools/perf/ui/gtk/util.c | 9 +- trunk/tools/perf/ui/helpline.c | 56 +- trunk/tools/perf/ui/helpline.h | 33 +- trunk/tools/perf/ui/hist.c | 390 +++ trunk/tools/perf/ui/setup.c | 10 +- trunk/tools/perf/ui/stdio/hist.c | 498 +++ trunk/tools/perf/ui/tui/helpline.c | 57 + trunk/tools/perf/ui/tui/setup.c | 10 +- trunk/tools/perf/util/alias.c | 3 +- trunk/tools/perf/util/annotate.c | 19 +- trunk/tools/perf/util/annotate.h | 15 +- trunk/tools/perf/util/build-id.c | 11 +- trunk/tools/perf/util/cache.h | 6 +- trunk/tools/perf/util/callchain.c | 6 +- trunk/tools/perf/util/callchain.h | 2 +- trunk/tools/perf/util/cgroup.c | 4 +- trunk/tools/perf/util/config.c | 6 +- trunk/tools/perf/util/cpumap.c | 22 +- trunk/tools/perf/util/cpumap.h | 13 +- trunk/tools/perf/util/debug.c | 4 +- trunk/tools/perf/util/debug.h | 17 +- trunk/tools/perf/util/dso-test-data.c | 2 +- trunk/tools/perf/util/dwarf-aux.c | 2 + trunk/tools/perf/util/event.c | 71 +- trunk/tools/perf/util/event.h | 17 +- trunk/tools/perf/util/evlist.c | 144 +- trunk/tools/perf/util/evlist.h | 36 +- trunk/tools/perf/util/evsel.c | 291 +- trunk/tools/perf/util/evsel.h | 69 +- trunk/tools/perf/util/generate-cmdlist.sh | 15 + trunk/tools/perf/util/header.c | 1072 +++++-- trunk/tools/perf/util/header.h | 29 +- trunk/tools/perf/util/help.c | 4 +- trunk/tools/perf/util/hist.c | 721 +---- trunk/tools/perf/util/hist.h | 75 +- trunk/tools/perf/util/include/linux/bitops.h | 4 + .../tools/perf/util/include/linux/compiler.h | 9 +- trunk/tools/perf/util/include/linux/kernel.h | 17 +- trunk/tools/perf/util/include/linux/magic.h | 12 + trunk/tools/perf/util/include/linux/rbtree.h | 1 + trunk/tools/perf/util/include/linux/string.h | 2 + trunk/tools/perf/util/include/linux/types.h | 8 + trunk/tools/perf/util/intlist.c | 101 + trunk/tools/perf/util/intlist.h | 75 + trunk/tools/perf/util/map.c | 47 +- trunk/tools/perf/util/map.h | 9 +- trunk/tools/perf/util/parse-events-test.c | 438 ++- trunk/tools/perf/util/parse-events.c | 254 +- trunk/tools/perf/util/parse-events.h | 18 +- trunk/tools/perf/util/parse-events.l | 56 +- trunk/tools/perf/util/parse-events.y | 125 +- trunk/tools/perf/util/parse-options.c | 6 +- trunk/tools/perf/util/perf_regs.h | 14 + trunk/tools/perf/util/pmu.c | 80 +- trunk/tools/perf/util/pmu.h | 3 + trunk/tools/perf/util/pmu.y | 6 +- trunk/tools/perf/util/probe-event.c | 69 +- trunk/tools/perf/util/probe-finder.c | 28 +- trunk/tools/perf/util/python-ext-sources | 4 +- trunk/tools/perf/util/python.c | 21 +- trunk/tools/perf/util/rblist.c | 107 + trunk/tools/perf/util/rblist.h | 47 + .../util/scripting-engines/trace-event-perl.c | 50 +- .../scripting-engines/trace-event-python.c | 113 +- trunk/tools/perf/util/session.c | 236 +- trunk/tools/perf/util/session.h | 34 +- trunk/tools/perf/util/sort.c | 25 +- trunk/tools/perf/util/sort.h | 2 +- trunk/tools/perf/util/stat.c | 57 + trunk/tools/perf/util/stat.h | 16 + trunk/tools/perf/util/string.c | 18 +- trunk/tools/perf/util/strlist.c | 130 +- trunk/tools/perf/util/strlist.h | 11 +- trunk/tools/perf/util/symbol-elf.c | 841 +++++ 
trunk/tools/perf/util/symbol-minimal.c | 307 ++ trunk/tools/perf/util/symbol.c | 956 +----- trunk/tools/perf/util/symbol.h | 67 +- trunk/tools/perf/util/target.c | 6 +- trunk/tools/perf/util/thread.h | 2 + trunk/tools/perf/util/top.c | 3 +- trunk/tools/perf/util/top.h | 1 + trunk/tools/perf/util/trace-event-parse.c | 54 +- trunk/tools/perf/util/trace-event-scripting.c | 34 +- trunk/tools/perf/util/trace-event.h | 12 +- trunk/tools/perf/util/unwind.c | 571 ++++ trunk/tools/perf/util/unwind.h | 35 + trunk/tools/perf/util/util.c | 25 + trunk/tools/perf/util/util.h | 9 +- trunk/tools/perf/util/vdso.c | 111 + trunk/tools/perf/util/vdso.h | 18 + trunk/tools/perf/util/wrapper.c | 3 +- trunk/tools/scripts/Makefile.include | 6 +- .../ktest/examples/include/defaults.conf | 2 +- .../testing/ktest/examples/include/tests.conf | 2 +- trunk/tools/testing/ktest/ktest.pl | 4 +- trunk/tools/testing/selftests/vm/run_vmtests | 6 +- trunk/virt/kvm/kvm_main.c | 7 +- 2152 files changed, 43112 insertions(+), 25443 deletions(-) create mode 100644 trunk/arch/alpha/include/asm/word-at-a-time.h delete mode 100644 trunk/arch/alpha/lib/ev6-strncpy_from_user.S delete mode 100644 trunk/arch/alpha/lib/ev67-strlen_user.S delete mode 100644 trunk/arch/alpha/lib/strlen_user.S delete mode 100644 trunk/arch/alpha/lib/strncpy_from_user.S create mode 100644 trunk/arch/arm/lib/io-readsw-armv3.S create mode 100644 trunk/arch/arm/lib/io-writesw-armv3.S create mode 100644 trunk/arch/arm/lib/uaccess.S rename trunk/arch/arm/mach-imx/{head-v7.S => headsmp.S} (100%) create mode 100644 trunk/arch/arm/plat-samsung/include/plat/hdmi.h delete mode 100644 trunk/arch/c6x/include/asm/barrier.h delete mode 100644 trunk/arch/m68k/include/asm/MC68332.h delete mode 100644 trunk/arch/m68k/include/asm/apollodma.h delete mode 100644 trunk/arch/m68k/include/asm/bitsperlong.h delete mode 100644 trunk/arch/m68k/include/asm/cputime.h delete mode 100644 trunk/arch/m68k/include/asm/device.h delete mode 100644 trunk/arch/m68k/include/asm/emergency-restart.h delete mode 100644 trunk/arch/m68k/include/asm/errno.h delete mode 100644 trunk/arch/m68k/include/asm/futex.h delete mode 100644 trunk/arch/m68k/include/asm/ioctl.h delete mode 100644 trunk/arch/m68k/include/asm/ipcbuf.h delete mode 100644 trunk/arch/m68k/include/asm/irq_regs.h delete mode 100644 trunk/arch/m68k/include/asm/kdebug.h delete mode 100644 trunk/arch/m68k/include/asm/kmap_types.h delete mode 100644 trunk/arch/m68k/include/asm/kvm_para.h delete mode 100644 trunk/arch/m68k/include/asm/local.h delete mode 100644 trunk/arch/m68k/include/asm/local64.h delete mode 100644 trunk/arch/m68k/include/asm/mac_mouse.h delete mode 100644 trunk/arch/m68k/include/asm/mcfmbus.h delete mode 100644 trunk/arch/m68k/include/asm/mman.h delete mode 100644 trunk/arch/m68k/include/asm/mutex.h delete mode 100644 trunk/arch/m68k/include/asm/percpu.h delete mode 100644 trunk/arch/m68k/include/asm/resource.h delete mode 100644 trunk/arch/m68k/include/asm/sbus.h delete mode 100644 trunk/arch/m68k/include/asm/scatterlist.h delete mode 100644 trunk/arch/m68k/include/asm/sections.h delete mode 100644 trunk/arch/m68k/include/asm/shm.h delete mode 100644 trunk/arch/m68k/include/asm/siginfo.h delete mode 100644 trunk/arch/m68k/include/asm/statfs.h delete mode 100644 trunk/arch/m68k/include/asm/topology.h delete mode 100644 trunk/arch/m68k/include/asm/types.h delete mode 100644 trunk/arch/m68k/include/asm/xor.h create mode 100644 trunk/arch/x86/include/asm/perf_regs.h create mode 100644 trunk/arch/x86/include/asm/rcu.h create 
mode 100644 trunk/arch/x86/kernel/perf_regs.c create mode 100644 trunk/arch/x86/platform/efi/efi-bgrt.c create mode 100644 trunk/drivers/clk/clk-devres.c delete mode 100644 trunk/drivers/hid/hid-picolcd.c create mode 100644 trunk/drivers/hid/hid-picolcd.h create mode 100644 trunk/drivers/hid/hid-picolcd_backlight.c create mode 100644 trunk/drivers/hid/hid-picolcd_cir.c create mode 100644 trunk/drivers/hid/hid-picolcd_core.c create mode 100644 trunk/drivers/hid/hid-picolcd_debugfs.c create mode 100644 trunk/drivers/hid/hid-picolcd_fb.c create mode 100644 trunk/drivers/hid/hid-picolcd_lcd.c create mode 100644 trunk/drivers/hid/hid-picolcd_leds.c create mode 100644 trunk/drivers/hid/hid-ps3remote.c delete mode 100644 trunk/drivers/pci/.gitignore create mode 100644 trunk/drivers/regulator/fan53555.c create mode 100644 trunk/drivers/regulator/max8907-regulator.c create mode 100644 trunk/drivers/vhost/Kconfig.tcm create mode 100644 trunk/drivers/vhost/tcm_vhost.c create mode 100644 trunk/drivers/vhost/tcm_vhost.h create mode 100644 trunk/include/linux/efi-bgrt.h create mode 100644 trunk/include/linux/perf_regs.h create mode 100644 trunk/include/linux/regulator/fan53555.h create mode 100644 trunk/include/linux/smpboot.h create mode 100644 trunk/kernel/sched/cputime.c create mode 100644 trunk/tools/perf/Documentation/jit-interface.txt create mode 100644 trunk/tools/perf/Documentation/perf-trace.txt create mode 100644 trunk/tools/perf/arch/x86/include/perf_regs.h create mode 100644 trunk/tools/perf/arch/x86/util/unwind.c create mode 100644 trunk/tools/perf/bash_completion create mode 100644 trunk/tools/perf/builtin-trace.c create mode 100755 trunk/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py create mode 100644 trunk/tools/perf/scripts/python/bin/event_analyzing_sample-record create mode 100644 trunk/tools/perf/scripts/python/bin/event_analyzing_sample-report create mode 100644 trunk/tools/perf/scripts/python/event_analyzing_sample.py create mode 100644 trunk/tools/perf/ui/gtk/helpline.c create mode 100644 trunk/tools/perf/ui/hist.c create mode 100644 trunk/tools/perf/ui/stdio/hist.c create mode 100644 trunk/tools/perf/ui/tui/helpline.c create mode 100644 trunk/tools/perf/util/include/linux/magic.h create mode 100644 trunk/tools/perf/util/intlist.c create mode 100644 trunk/tools/perf/util/intlist.h create mode 100644 trunk/tools/perf/util/perf_regs.h create mode 100644 trunk/tools/perf/util/rblist.c create mode 100644 trunk/tools/perf/util/rblist.h create mode 100644 trunk/tools/perf/util/stat.c create mode 100644 trunk/tools/perf/util/stat.h create mode 100644 trunk/tools/perf/util/symbol-elf.c create mode 100644 trunk/tools/perf/util/symbol-minimal.c create mode 100644 trunk/tools/perf/util/unwind.c create mode 100644 trunk/tools/perf/util/unwind.h create mode 100644 trunk/tools/perf/util/vdso.c create mode 100644 trunk/tools/perf/util/vdso.h diff --git a/[refs] b/[refs] index b598ee722a8f..5f7676d31f4b 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: fd0f5869724ff6195c6e7f12f8287c66a132e0ba +refs/heads/master: 229993001282e128a49a59ec43d255614775703a diff --git a/trunk/Documentation/ABI/testing/sysfs-bus-pci b/trunk/Documentation/ABI/testing/sysfs-bus-pci index 34f51100f029..dff1f48d252d 100644 --- a/trunk/Documentation/ABI/testing/sysfs-bus-pci +++ b/trunk/Documentation/ABI/testing/sysfs-bus-pci @@ -210,3 +210,15 @@ Users: firmware assigned instance number of the PCI device that can help in understanding the firmware intended order of the PCI 
device. + +What: /sys/bus/pci/devices/.../d3cold_allowed +Date: July 2012 +Contact: Huang Ying +Description: + d3cold_allowed is a bit to control whether the corresponding PCI + device can be put into D3Cold state. If it is cleared, the + device will never be put into D3Cold state. If it is set, the + device may be put into D3Cold state if other requirements are + satisfied too. Reading this attribute will show the current + value of the d3cold_allowed bit. Writing this attribute will set + the value of the d3cold_allowed bit. diff --git a/trunk/Documentation/ABI/testing/sysfs-class-regulator b/trunk/Documentation/ABI/testing/sysfs-class-regulator index e091fa873792..bc578bc60628 100644 --- a/trunk/Documentation/ABI/testing/sysfs-class-regulator +++ b/trunk/Documentation/ABI/testing/sysfs-class-regulator @@ -349,3 +349,24 @@ Description: This will be one of the same strings reported by the "state" attribute. + +What: /sys/class/regulator/.../bypass +Date: September 2012 +KernelVersion: 3.7 +Contact: Mark Brown +Description: + Some regulator directories will contain a field called + bypass. This indicates if the device is in bypass mode. + + This will be one of the following strings: + + 'enabled' + 'disabled' + 'unknown' + + 'enabled' means the regulator is in bypass mode. + + 'disabled' means that the regulator is regulating. + + 'unknown' means software cannot determine the state, or + the reported state is invalid. diff --git a/trunk/Documentation/ABI/testing/sysfs-driver-wacom b/trunk/Documentation/ABI/testing/sysfs-driver-wacom index 8d55a83d6921..7fc781048b79 100644 --- a/trunk/Documentation/ABI/testing/sysfs-driver-wacom +++ b/trunk/Documentation/ABI/testing/sysfs-driver-wacom @@ -1,3 +1,16 @@ +What: /sys/class/hidraw/hidraw*/device/oled*_img +Date: June 2012 +Contact: linux-bluetooth@vger.kernel.org +Description: + The /sys/class/hidraw/hidraw*/device/oled*_img files control + OLED micro displays on the Intuos4 Wireless tablet. The accepted image + has to contain 256 bytes (64x32 px, 1 bit colour). The format + is the same as a PBM image 64x32px without header (64 bits per + horizontal line, 32 lines). An example of setting OLED No. 0: + dd bs=256 count=1 if=img_file of=[path to oled0_img]/oled0_img + The attribute is write only and no local copy of the image is + stored. + What: /sys/class/hidraw/hidraw*/device/speed Date: April 2010 Kernel Version: 2.6.35 diff --git a/trunk/Documentation/ABI/testing/sysfs-platform-ideapad-laptop b/trunk/Documentation/ABI/testing/sysfs-platform-ideapad-laptop index 814b01354c41..b31e782bd985 100644 --- a/trunk/Documentation/ABI/testing/sysfs-platform-ideapad-laptop +++ b/trunk/Documentation/ABI/testing/sysfs-platform-ideapad-laptop @@ -5,4 +5,15 @@ Contact: "Ike Panhc " Description: Control the power of camera module. 1 means on, 0 means off. +What: /sys/devices/platform/ideapad/fan_mode +Date: June 2012 +KernelVersion: 3.6 +Contact: "Maxim Mikityanskiy " +Description: + Change fan mode. + There are four available modes: + * 0 -> Super Silent Mode + * 1 -> Standard Mode + * 2 -> Dust Cleaning + * 4 -> Efficient Thermal Dissipation Mode diff --git a/trunk/Documentation/DocBook/filesystems.tmpl b/trunk/Documentation/DocBook/filesystems.tmpl index 3fca32c41927..25b58efd955d 100644 --- a/trunk/Documentation/DocBook/filesystems.tmpl +++ b/trunk/Documentation/DocBook/filesystems.tmpl @@ -224,8 +224,8 @@ all your transactions. -Then at umount time , in your put_super() (2.4) or write_super() (2.5) -you can then call journal_destroy() to clean up your in-core journal object.
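
For illustration only (this is not part of the patch), a minimal userspace C sketch of driving the new d3cold_allowed attribute documented above; the PCI address 0000:00:1d.0 is a made-up placeholder and must be replaced with a real device:

    /* Clear and read back d3cold_allowed for one PCI device (sketch). */
    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/bus/pci/devices/0000:00:1d.0/d3cold_allowed";
            char buf[8];
            FILE *f;

            /* Clear the bit: never allow this device into D3Cold. */
            f = fopen(path, "w");
            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("0\n", f);
            fclose(f);

            /* Read the current value back. */
            f = fopen(path, "r");
            if (!f) {
                    perror("fopen");
                    return 1;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("d3cold_allowed = %s", buf);
            fclose(f);
            return 0;
    }

The same fopen()/fputs() pattern applies to the regulator bypass and ideapad fan_mode attributes described above.
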
+Then at umount time , in your put_super() you can then call journal_destroy() +to clean up your in-core journal object. diff --git a/trunk/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml b/trunk/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml index 720395127904..701138f1209d 100644 --- a/trunk/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml +++ b/trunk/Documentation/DocBook/media/v4l/vidioc-g-tuner.xml @@ -125,7 +125,7 @@ the structure refers to a radio tuner the V4L2_TUNER_CAP_NORM flags can't be used. If multiple frequency bands are supported, then capability is the union of all -capability> fields of each &v4l2-frequency-band;. +capability fields of each &v4l2-frequency-band;. diff --git a/trunk/Documentation/RCU/checklist.txt b/trunk/Documentation/RCU/checklist.txt index fc103d7a0474..cdb20d41a44a 100644 --- a/trunk/Documentation/RCU/checklist.txt +++ b/trunk/Documentation/RCU/checklist.txt @@ -310,6 +310,12 @@ over a rather long period of time, but improvements are always welcome! code under the influence of preempt_disable(), you instead need to use synchronize_irq() or synchronize_sched(). + This same limitation also applies to synchronize_rcu_bh() + and synchronize_srcu(), as well as to the asynchronous and + expedited forms of the three primitives, namely call_rcu(), + call_rcu_bh(), call_srcu(), synchronize_rcu_expedited(), + synchronize_rcu_bh_expedited(), and synchronize_srcu_expedited(). + 12. Any lock acquired by an RCU callback must be acquired elsewhere with softirq disabled, e.g., via spin_lock_irqsave(), spin_lock_bh(), etc. Failing to disable irq on a given diff --git a/trunk/Documentation/RCU/stallwarn.txt b/trunk/Documentation/RCU/stallwarn.txt index 523364e4e1f1..1927151b386b 100644 --- a/trunk/Documentation/RCU/stallwarn.txt +++ b/trunk/Documentation/RCU/stallwarn.txt @@ -99,7 +99,7 @@ In kernels with CONFIG_RCU_FAST_NO_HZ, even more information is printed: INFO: rcu_preempt detected stall on CPU - 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 drain=0 . timer=-1 + 0: (64628 ticks this GP) idle=dd5/3fffffffffffffff/0 drain=0 . timer not pending (t=65000 jiffies) The "(64628 ticks this GP)" indicates that this CPU has taken more @@ -116,13 +116,13 @@ number between the two "/"s is the value of the nesting, which will be a small positive number if in the idle loop and a very large positive number (as shown above) otherwise. -For CONFIG_RCU_FAST_NO_HZ kernels, the "drain=0" indicates that the -CPU is not in the process of trying to force itself into dyntick-idle -state, the "." indicates that the CPU has not given up forcing RCU -into dyntick-idle mode (it would be "H" otherwise), and the "timer=-1" -indicates that the CPU has not recented forced RCU into dyntick-idle -mode (it would otherwise indicate the number of microseconds remaining -in this forced state). +For CONFIG_RCU_FAST_NO_HZ kernels, the "drain=0" indicates that the CPU is +not in the process of trying to force itself into dyntick-idle state, the +"." indicates that the CPU has not given up forcing RCU into dyntick-idle +mode (it would be "H" otherwise), and the "timer not pending" indicates +that the CPU has not recently forced RCU into dyntick-idle mode (it +would otherwise indicate the number of microseconds remaining in this +forced state). 
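
The RCU checklist addition above is easy to get wrong in practice, so here is an illustrative kernel-style sketch (not part of this patch; struct foo and the function names are invented for the example) of why readers that rely on preempt_disable() must be paired with synchronize_sched() on the update side rather than synchronize_rcu():

    #include <linux/rcupdate.h>
    #include <linux/preempt.h>
    #include <linux/slab.h>

    struct foo {
            int val;
    };

    static struct foo __rcu *global_foo;

    /* Reader side: protected only by disabled preemption. */
    static int read_foo(void)
    {
            struct foo *p;
            int val = -1;

            preempt_disable();
            p = rcu_dereference_sched(global_foo);
            if (p)
                    val = p->val;
            preempt_enable();
            return val;
    }

    /* Update side: synchronize_sched() waits for all such readers. */
    static void update_foo(struct foo *new)
    {
            struct foo *old = rcu_dereference_protected(global_foo, 1);

            rcu_assign_pointer(global_foo, new);
            synchronize_sched();    /* synchronize_rcu() would not be enough here */
            kfree(old);
    }
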
Multiple Warnings From One Stall diff --git a/trunk/Documentation/RCU/trace.txt b/trunk/Documentation/RCU/trace.txt index f6f15ce39903..672d19083252 100644 --- a/trunk/Documentation/RCU/trace.txt +++ b/trunk/Documentation/RCU/trace.txt @@ -333,23 +333,23 @@ o Each element of the form "1/1 0:127 ^0" represents one struct The output of "cat rcu/rcu_pending" looks as follows: rcu_sched: - 0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nf=6445 nn=146741 - 1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nf=5912 nn=155792 - 2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nf=1201 nn=136629 - 3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nf=207 nn=137723 - 4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nf=3529 nn=123110 - 5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nf=201 nn=137456 - 6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nf=4202 nn=120834 - 7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nf=41 nn=144888 + 0 np=255892 qsp=53936 rpq=85 cbr=0 cng=14417 gpc=10033 gps=24320 nn=146741 + 1 np=261224 qsp=54638 rpq=33 cbr=0 cng=25723 gpc=16310 gps=2849 nn=155792 + 2 np=237496 qsp=49664 rpq=23 cbr=0 cng=2762 gpc=45478 gps=1762 nn=136629 + 3 np=236249 qsp=48766 rpq=98 cbr=0 cng=286 gpc=48049 gps=1218 nn=137723 + 4 np=221310 qsp=46850 rpq=7 cbr=0 cng=26 gpc=43161 gps=4634 nn=123110 + 5 np=237332 qsp=48449 rpq=9 cbr=0 cng=54 gpc=47920 gps=3252 nn=137456 + 6 np=219995 qsp=46718 rpq=12 cbr=0 cng=50 gpc=42098 gps=6093 nn=120834 + 7 np=249893 qsp=49390 rpq=42 cbr=0 cng=72 gpc=38400 gps=17102 nn=144888 rcu_bh: - 0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nf=2 nn=145314 - 1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nf=3 nn=143180 - 2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nf=0 nn=117936 - 3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nf=0 nn=134863 - 4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nf=0 nn=110671 - 5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nf=0 nn=133235 - 6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nf=2 nn=110921 - 7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nf=0 nn=118542 + 0 np=146741 qsp=1419 rpq=6 cbr=0 cng=6 gpc=0 gps=0 nn=145314 + 1 np=155792 qsp=12597 rpq=3 cbr=0 cng=0 gpc=4 gps=8 nn=143180 + 2 np=136629 qsp=18680 rpq=1 cbr=0 cng=0 gpc=7 gps=6 nn=117936 + 3 np=137723 qsp=2843 rpq=0 cbr=0 cng=0 gpc=10 gps=7 nn=134863 + 4 np=123110 qsp=12433 rpq=0 cbr=0 cng=0 gpc=4 gps=2 nn=110671 + 5 np=137456 qsp=4210 rpq=1 cbr=0 cng=0 gpc=6 gps=5 nn=133235 + 6 np=120834 qsp=9902 rpq=2 cbr=0 cng=0 gpc=6 gps=3 nn=110921 + 7 np=144888 qsp=26336 rpq=0 cbr=0 cng=0 gpc=8 gps=2 nn=118542 As always, this is once again split into "rcu_sched" and "rcu_bh" portions, with CONFIG_TREE_PREEMPT_RCU kernels having an additional @@ -377,17 +377,6 @@ o "gpc" is the number of times that an old grace period had o "gps" is the number of times that a new grace period had started, but this CPU was not yet aware of it. -o "nf" is the number of times that this CPU suspected that the - current grace period had run for too long, and thus needed to - be forced. - - Please note that "forcing" consists of sending resched IPIs - to holdout CPUs. If that CPU really still is in an old RCU - read-side critical section, then we really do have to wait for it. - The assumption behing "forcing" is that the CPU is not still in - an old RCU read-side critical section, but has not yet responded - for some other reason. 
- o "nn" is the number of times that this CPU needed nothing. Alert readers will note that the rcu "nn" number for a given CPU very closely matches the rcu_bh "np" number for that same CPU. This diff --git a/trunk/Documentation/RCU/whatisRCU.txt b/trunk/Documentation/RCU/whatisRCU.txt index 69ee188515e7..bf0f6de2aa00 100644 --- a/trunk/Documentation/RCU/whatisRCU.txt +++ b/trunk/Documentation/RCU/whatisRCU.txt @@ -873,7 +873,7 @@ d. Do you need to treat NMI handlers, hardirq handlers, and code segments with preemption disabled (whether via preempt_disable(), local_irq_save(), local_bh_disable(), or some other mechanism) as if they were explicit RCU readers? - If so, you need RCU-sched. + If so, RCU-sched is the only choice that will work for you. e. Do you need RCU grace periods to complete even in the face of softirq monopolization of one or more of the CPUs? For @@ -884,7 +884,12 @@ f. Is your workload too update-intensive for normal use of RCU, but inappropriate for other synchronization mechanisms? If so, consider SLAB_DESTROY_BY_RCU. But please be careful! -g. Otherwise, use RCU. +g. Do you need read-side critical sections that are respected + even though they are in the middle of the idle loop, during + user-mode execution, or on an offlined CPU? If so, SRCU is the + only choice that will work for you. + +h. Otherwise, use RCU. Of course, this all assumes that you have determined that RCU is in fact the right tool for your job. diff --git a/trunk/Documentation/accounting/getdelays.c b/trunk/Documentation/accounting/getdelays.c index f6318f6d7baf..6f706aca2049 100644 --- a/trunk/Documentation/accounting/getdelays.c +++ b/trunk/Documentation/accounting/getdelays.c @@ -98,10 +98,9 @@ static int create_nl_socket(int protocol) if (rcvbufsz) if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbufsz, sizeof(rcvbufsz)) < 0) { - fprintf(stderr, "Unable to set socket rcv buf size " - "to %d\n", + fprintf(stderr, "Unable to set socket rcv buf size to %d\n", rcvbufsz); - return -1; + goto error; } memset(&local, 0, sizeof(local)); diff --git a/trunk/Documentation/block/00-INDEX b/trunk/Documentation/block/00-INDEX index d111e3b23db0..d18ecd827c40 100644 --- a/trunk/Documentation/block/00-INDEX +++ b/trunk/Documentation/block/00-INDEX @@ -3,15 +3,21 @@ biodoc.txt - Notes on the Generic Block Layer Rewrite in Linux 2.5 capability.txt - - Generic Block Device Capability (/sys/block//capability) + - Generic Block Device Capability (/sys/block//capability) +cfq-iosched.txt + - CFQ IO scheduler tunables +data-integrity.txt + - Block data integrity deadline-iosched.txt - Deadline IO scheduler tunables ioprio.txt - Block io priorities (in CFQ scheduler) +queue-sysfs.txt + - Queue's sysfs entries request.txt - The members of struct request (in include/linux/blkdev.h) stat.txt - - Block layer statistics in /sys/block//stat + - Block layer statistics in /sys/block//stat switching-sched.txt - Switching I/O schedulers at runtime writeback_cache_control.txt diff --git a/trunk/Documentation/block/cfq-iosched.txt b/trunk/Documentation/block/cfq-iosched.txt index 6d670f570451..d89b4fe724d7 100644 --- a/trunk/Documentation/block/cfq-iosched.txt +++ b/trunk/Documentation/block/cfq-iosched.txt @@ -1,3 +1,14 @@ +CFQ (Complete Fairness Queueing) +=============================== + +The main aim of CFQ scheduler is to provide a fair allocation of the disk +I/O bandwidth for all the processes which requests an I/O operation. 
+ +CFQ maintains the per process queue for the processes which request I/O +operation(syncronous requests). In case of asynchronous requests, all the +requests from all the processes are batched together according to their +process's I/O priority. + CFQ ioscheduler tunables ======================== @@ -25,6 +36,72 @@ there are multiple spindles behind single LUN (Host based hardware RAID controller or for storage arrays), setting slice_idle=0 might end up in better throughput and acceptable latencies. +back_seek_max +------------- +This specifies, given in Kbytes, the maximum "distance" for backward seeking. +The distance is the amount of space from the current head location to the +sectors that are backward in terms of distance. + +This parameter allows the scheduler to anticipate requests in the "backward" +direction and consider them as being the "next" if they are within this +distance from the current head location. + +back_seek_penalty +----------------- +This parameter is used to compute the cost of backward seeking. If the +backward distance of request is just 1/back_seek_penalty from a "front" +request, then the seeking cost of two requests is considered equivalent. + +So scheduler will not bias toward one or the other request (otherwise scheduler +will bias toward front request). Default value of back_seek_penalty is 2. + +fifo_expire_async +----------------- +This parameter is used to set the timeout of asynchronous requests. Default +value of this is 248ms. + +fifo_expire_sync +---------------- +This parameter is used to set the timeout of synchronous requests. Default +value of this is 124ms. In case to favor synchronous requests over asynchronous +one, this value should be decreased relative to fifo_expire_async. + +slice_async +----------- +This parameter is same as of slice_sync but for asynchronous queue. The +default value is 40ms. + +slice_async_rq +-------------- +This parameter is used to limit the dispatching of asynchronous request to +device request queue in queue's slice time. The maximum number of request that +are allowed to be dispatched also depends upon the io priority. Default value +for this is 2. + +slice_sync +---------- +When a queue is selected for execution, the queues IO requests are only +executed for a certain amount of time(time_slice) before switching to another +queue. This parameter is used to calculate the time slice of synchronous +queue. + +time_slice is computed using the below equation:- +time_slice = slice_sync + (slice_sync/5 * (4 - prio)). To increase the +time_slice of synchronous queue, increase the value of slice_sync. Default +value is 100ms. + +quantum +------- +This specifies the number of request dispatched to the device queue. In a +queue's time slice, a request will not be dispatched if the number of request +in the device exceeds this parameter. This parameter is used for synchronous +request. + +In case of storage with several disk, this setting can limit the parallel +processing of request. Therefore, increasing the value can imporve the +performace although this can cause the latency of some I/O to increase due +to more number of requests. + CFQ IOPS Mode for group scheduling =================================== Basic CFQ design is to provide priority based time slices. 
Higher priority diff --git a/trunk/Documentation/block/queue-sysfs.txt b/trunk/Documentation/block/queue-sysfs.txt index 6518a55273e7..e54ac1d53403 100644 --- a/trunk/Documentation/block/queue-sysfs.txt +++ b/trunk/Documentation/block/queue-sysfs.txt @@ -9,20 +9,71 @@ These files are the ones found in the /sys/block/xxx/queue/ directory. Files denoted with a RO postfix are readonly and the RW postfix means read-write. +add_random (RW) +---------------- +This file allows one to turn off the disk entropy contribution. The default +value of this file is '1' (on). + +discard_granularity (RO) +----------------------- +This shows the size of internal allocation of the device in bytes, if +reported by the device. A value of '0' means the device does not support +the discard functionality. + +discard_max_bytes (RO) +---------------------- +Devices that support discard functionality may have internal limits on +the number of bytes that can be trimmed or unmapped in a single operation. +The discard_max_bytes parameter is set by the device driver to the maximum +number of bytes that can be discarded in a single operation. Discard +requests issued to the device must not exceed this limit. A discard_max_bytes +value of 0 means that the device does not support discard functionality. + +discard_zeroes_data (RO) +------------------------ +When read, this file will show if the discarded blocks are zeroed by the +device or not. If its value is '1' the blocks are zeroed, otherwise not. + hw_sector_size (RO) ------------------- This is the hardware sector size of the device, in bytes. +iostats (RW) +------------- +This file is used to control (on/off) the iostats accounting of the +disk. + +logical_block_size (RO) +----------------------- +This is the logical block size of the device, in bytes. + max_hw_sectors_kb (RO) ---------------------- This is the maximum number of kilobytes supported in a single data transfer. +max_integrity_segments (RO) +--------------------------- +When read, this file shows the max limit of integrity segments as +set by the block layer which a hardware controller can handle. + max_sectors_kb (RW) ------------------- This is the maximum number of kilobytes that the block layer will allow for a filesystem request. Must be smaller than or equal to the maximum size allowed by the hardware. +max_segments (RO) +----------------- +Maximum number of segments of the device. + +max_segment_size (RO) +--------------------- +Maximum segment size of the device. + +minimum_io_size (RO) +-------------------- +This is the smallest preferred io size reported by the device. + +nomerges (RW) ------------- This enables the user to disable the lookup logic involved with IO @@ -45,11 +96,24 @@ per-block-cgroup request pool. IOW, if there are N block cgroups, each request queue may have upto N request pools, each independently regulated by nr_requests. +optimal_io_size (RO) +-------------------- +This is the optimal io size reported by the device. + +physical_block_size (RO) +------------------------ +This is the physical block size of the device, in bytes. + read_ahead_kb (RW) ------------------ Maximum number of kilobytes to read-ahead for filesystems on this block device. +rotational (RW) +--------------- +This file is used to state whether the device is of rotational type or +non-rotational type.
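
For completeness, a small userspace sketch (again not part of the patch) that dumps a few of the queue attributes documented above; "sda" is only an example device name and would need to be replaced:

    #include <stdio.h>

    static void show(const char *attr)
    {
            char path[256], buf[64];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", attr);
            f = fopen(path, "r");
            if (!f)
                    return;
            if (fgets(buf, sizeof(buf), f))
                    printf("%-20s %s", attr, buf);
            fclose(f);
    }

    int main(void)
    {
            const char *attrs[] = {
                    "rotational", "add_random", "logical_block_size",
                    "physical_block_size", "discard_max_bytes", "read_ahead_kb",
            };
            unsigned int i;

            for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++)
                    show(attrs[i]);
            return 0;
    }
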
+ rq_affinity (RW) ---------------- If this option is '1', the block layer will migrate request completions to the diff --git a/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt index 70cd49b1caa8..1dd622546d06 100644 --- a/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt +++ b/trunk/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt @@ -10,8 +10,8 @@ Required properties: - compatible : Should be "fsl,-esdhc" Optional properties: -- fsl,cd-internal : Indicate to use controller internal card detection -- fsl,wp-internal : Indicate to use controller internal write protection +- fsl,cd-controller : Indicate to use controller internal card detection +- fsl,wp-controller : Indicate to use controller internal write protection Examples: @@ -19,8 +19,8 @@ esdhc@70004000 { compatible = "fsl,imx51-esdhc"; reg = <0x70004000 0x4000>; interrupts = <1>; - fsl,cd-internal; - fsl,wp-internal; + fsl,cd-controller; + fsl,wp-controller; }; esdhc@70008000 { diff --git a/trunk/Documentation/devicetree/bindings/regulator/regulator.txt b/trunk/Documentation/devicetree/bindings/regulator/regulator.txt index 66ece3f87bbc..ecfc6ccd67ef 100644 --- a/trunk/Documentation/devicetree/bindings/regulator/regulator.txt +++ b/trunk/Documentation/devicetree/bindings/regulator/regulator.txt @@ -11,10 +11,13 @@ Optional properties: - regulator-boot-on: bootloader/firmware enabled regulator - -supply: phandle to the parent supply/regulator node - regulator-ramp-delay: ramp delay for regulator(in uV/uS) + +Deprecated properties: - regulator-compatible: If a regulator chip contains multiple regulators, and if the chip's binding contains a child node that describes each regulator, then this property indicates which regulator - this child node is intended to configure. + this child node is intended to configure. If this property is missing, + the node's name will be used instead. 
Example: diff --git a/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt b/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt index 0487e9675ba0..d316fb895daf 100644 --- a/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt +++ b/trunk/Documentation/devicetree/bindings/regulator/tps65217.txt @@ -22,66 +22,49 @@ Example: compatible = "ti,tps65217"; regulators { - #address-cells = <1>; - #size-cells = <0>; - - dcdc1_reg: regulator@0 { - reg = <0>; - regulator-compatible = "dcdc1"; + dcdc1_reg: dcdc1 { regulator-min-microvolt = <900000>; regulator-max-microvolt = <1800000>; regulator-boot-on; regulator-always-on; }; - dcdc2_reg: regulator@1 { - reg = <1>; - regulator-compatible = "dcdc2"; + dcdc2_reg: dcdc2 { regulator-min-microvolt = <900000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; - dcdc3_reg: regulator@2 { - reg = <2>; - regulator-compatible = "dcdc3"; + dcdc3_reg: dcc3 { regulator-min-microvolt = <900000>; regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; - ldo1_reg: regulator@3 { - reg = <3>; - regulator-compatible = "ldo1"; + ldo1_reg: ldo1 { regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; - ldo2_reg: regulator@4 { - reg = <4>; - regulator-compatible = "ldo2"; + ldo2_reg: ldo2 { regulator-min-microvolt = <900000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; - ldo3_reg: regulator@5 { - reg = <5>; - regulator-compatible = "ldo3"; + ldo3_reg: ldo3 { regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; - ldo4_reg: regulator@6 { - reg = <6>; - regulator-compatible = "ldo4"; + ldo4_reg: ldo4 { regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-boot-on; diff --git a/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt b/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt index d156e1b5db12..07b9ef6e49d5 100644 --- a/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt +++ b/trunk/Documentation/devicetree/bindings/regulator/tps6586x.txt @@ -6,12 +6,16 @@ Required properties: - interrupts: the interrupt outputs of the controller - #gpio-cells: number of cells to describe a GPIO - gpio-controller: mark the device as a GPIO controller -- regulators: list of regulators provided by this controller, must have - property "regulator-compatible" to match their hardware counterparts: - sm[0-2], ldo[0-9] and ldo_rtc -- sm0-supply: The input supply for the SM0. -- sm1-supply: The input supply for the SM1. -- sm2-supply: The input supply for the SM2. +- regulators: A node that houses a sub-node for each regulator within the + device. Each sub-node is identified using the node's name (or the deprecated + regulator-compatible property if present), with valid values listed below. + The content of each sub-node is defined by the standard binding for + regulators; see regulator.txt. + sys, sm[0-2], ldo[0-9] and ldo_rtc +- sys-supply: The input supply for SYS. +- vin-sm0-supply: The input supply for the SM0. +- vin-sm1-supply: The input supply for the SM1. +- vin-sm2-supply: The input supply for the SM2. - vinldo01-supply: The input supply for the LDO1 and LDO2 - vinldo23-supply: The input supply for the LDO2 and LDO3 - vinldo4-supply: The input supply for the LDO4 @@ -20,6 +24,9 @@ Required properties: Each regulator is defined using the standard binding for regulators. 
+Note: LDO5 and LDO_RTC are supplied internally by the SYS regulator, and the + driver takes care of setting up the proper parent-child relationship. + Example: pmu: tps6586x@34 { @@ -30,9 +37,10 @@ Example: #gpio-cells = <2>; gpio-controller; - sm0-supply = <&some_reg>; - sm1-supply = <&some_reg>; - sm2-supply = <&some_reg>; + sys-supply = <&some_reg>; + vin-sm0-supply = <&some_reg>; + vin-sm1-supply = <&some_reg>; + vin-sm2-supply = <&some_reg>; vinldo01-supply = <...>; vinldo23-supply = <...>; vinldo4-supply = <...>; @@ -40,103 +48,80 @@ Example: vinldo9-supply = <...>; regulators { - #address-cells = <1>; - #size-cells = <0>; + sys_reg: sys { + regulator-name = "vdd_sys"; + regulator-boot-on; + regulator-always-on; + }; - sm0_reg: regulator@0 { - reg = <0>; - regulator-compatible = "sm0"; + sm0_reg: sm0 { regulator-min-microvolt = < 725000>; regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; - sm1_reg: regulator@1 { - reg = <1>; - regulator-compatible = "sm1"; + sm1_reg: sm1 { regulator-min-microvolt = < 725000>; regulator-max-microvolt = <1500000>; regulator-boot-on; regulator-always-on; }; - sm2_reg: regulator@2 { - reg = <2>; - regulator-compatible = "sm2"; + sm2_reg: sm2 { regulator-min-microvolt = <3000000>; regulator-max-microvolt = <4550000>; regulator-boot-on; regulator-always-on; }; - ldo0_reg: regulator@3 { - reg = <3>; - regulator-compatible = "ldo0"; + ldo0_reg: ldo0 { regulator-name = "PCIE CLK"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; - ldo1_reg: regulator@4 { - reg = <4>; - regulator-compatible = "ldo1"; + ldo1_reg: ldo1 { regulator-min-microvolt = < 725000>; regulator-max-microvolt = <1500000>; }; - ldo2_reg: regulator@5 { - reg = <5>; - regulator-compatible = "ldo2"; + ldo2_reg: ldo2 { regulator-min-microvolt = < 725000>; regulator-max-microvolt = <1500000>; }; - ldo3_reg: regulator@6 { - reg = <6>; - regulator-compatible = "ldo3"; + ldo3_reg: ldo3 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo4_reg: regulator@7 { - reg = <7>; - regulator-compatible = "ldo4"; + ldo4_reg: ldo4 { regulator-min-microvolt = <1700000>; regulator-max-microvolt = <2475000>; }; - ldo5_reg: regulator@8 { - reg = <8>; - regulator-compatible = "ldo5"; + ldo5_reg: ldo5 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo6_reg: regulator@9 { - reg = <9>; - regulator-compatible = "ldo6"; + ldo6_reg: ldo6 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo7_reg: regulator@10 { - reg = <10>; - regulator-compatible = "ldo7"; + ldo7_reg: ldo7 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo8_reg: regulator@11 { - reg = <11>; - regulator-compatible = "ldo8"; + ldo8_reg: ldo8 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; - ldo9_reg: regulator@12 { - reg = <12>; - regulator-compatible = "ldo9"; + ldo9_reg: ldo9 { regulator-min-microvolt = <1250000>; regulator-max-microvolt = <3300000>; }; diff --git a/trunk/Documentation/dontdiff b/trunk/Documentation/dontdiff index 39462cf35cd4..74c25c8d8884 100644 --- a/trunk/Documentation/dontdiff +++ b/trunk/Documentation/dontdiff @@ -162,7 +162,6 @@ mach-types.h machtypes.h map map_hugetlb -maui_boot.h media mconf miboot* diff --git a/trunk/Documentation/feature-removal-schedule.txt b/trunk/Documentation/feature-removal-schedule.txt index afaff312bf41..f4d8c7105fcd 100644 --- a/trunk/Documentation/feature-removal-schedule.txt +++
b/trunk/Documentation/feature-removal-schedule.txt @@ -579,7 +579,7 @@ Why: KVM tracepoints provide mostly equivalent information in a much more ---------------------------- What: at91-mci driver ("CONFIG_MMC_AT91") -When: 3.7 +When: 3.8 Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support was added to atmel-mci as a first step to support more chips. Then at91-mci was kept only for old IP versions (on at91rm9200 and diff --git a/trunk/Documentation/filesystems/Locking b/trunk/Documentation/filesystems/Locking index 0f103e39b4f6..e540a24e5d06 100644 --- a/trunk/Documentation/filesystems/Locking +++ b/trunk/Documentation/filesystems/Locking @@ -114,7 +114,6 @@ prototypes: int (*drop_inode) (struct inode *); void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); - void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); @@ -136,7 +135,6 @@ write_inode: drop_inode: !!!inode->i_lock!!! evict_inode: put_super: write -write_super: read sync_fs: read freeze_fs: write unfreeze_fs: write diff --git a/trunk/Documentation/filesystems/porting b/trunk/Documentation/filesystems/porting index 2bef2b3843d1..0742feebc6e2 100644 --- a/trunk/Documentation/filesystems/porting +++ b/trunk/Documentation/filesystems/porting @@ -94,9 +94,8 @@ protected. --- [mandatory] -BKL is also moved from around sb operations. ->write_super() Is now called -without BKL held. BKL should have been shifted into individual fs sb_op -functions. If you don't need it, remove it. +BKL is also moved from around sb operations. BKL should have been shifted into +individual fs sb_op functions. If you don't need it, remove it. --- [informational] diff --git a/trunk/Documentation/filesystems/vfat.txt b/trunk/Documentation/filesystems/vfat.txt index ead764b2728f..de1e6c4dccff 100644 --- a/trunk/Documentation/filesystems/vfat.txt +++ b/trunk/Documentation/filesystems/vfat.txt @@ -137,6 +137,17 @@ errors=panic|continue|remount-ro without doing anything or remount the partition in read-only mode (default behavior). +discard -- If set, issues discard/TRIM commands to the block + device when blocks are freed. This is useful for SSD devices + and sparse/thinly-provisioned LUNs. + +nfs -- This option maintains an index (cache) of directory + inodes by i_logstart which is used by the nfs-related code to + improve look-ups. + + Enable this only if you want to export the FAT filesystem + over NFS. + <bool>: 0,1,yes,no,true,false TODO diff --git a/trunk/Documentation/filesystems/vfs.txt b/trunk/Documentation/filesystems/vfs.txt index 065aa2dc0835..2ee133e030c3 100644 --- a/trunk/Documentation/filesystems/vfs.txt +++ b/trunk/Documentation/filesystems/vfs.txt @@ -216,7 +216,6 @@ struct super_operations { void (*drop_inode) (struct inode *); void (*delete_inode) (struct inode *); void (*put_super) (struct super_block *); - void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); @@ -273,9 +272,6 @@ or bottom half). put_super: called when the VFS wishes to free the superblock (i.e. unmount). This is called with the superblock lock held - write_super: called when the VFS superblock needs to be written to - disc. This method is optional - sync_fs: called when VFS is writing out all dirty data associated with a superblock.
The second parameter indicates whether the method should wait until the write out has been completed. Optional. diff --git a/trunk/Documentation/i2c/busses/i2c-i801 b/trunk/Documentation/i2c/busses/i2c-i801 index 615142da4ef6..157416e78cc4 100644 --- a/trunk/Documentation/i2c/busses/i2c-i801 +++ b/trunk/Documentation/i2c/busses/i2c-i801 @@ -21,6 +21,7 @@ Supported adapters: * Intel DH89xxCC (PCH) * Intel Panther Point (PCH) * Intel Lynx Point (PCH) + * Intel Lynx Point-LP (PCH) Datasheets: Publicly available at the Intel website On Intel Patsburg and later chipsets, both the normal host SMBus controller diff --git a/trunk/Documentation/ia64/aliasing-test.c b/trunk/Documentation/ia64/aliasing-test.c index 5caa2af33207..62a190d45f38 100644 --- a/trunk/Documentation/ia64/aliasing-test.c +++ b/trunk/Documentation/ia64/aliasing-test.c @@ -132,6 +132,7 @@ static int read_rom(char *path) rc = write(fd, "1", 2); if (rc <= 0) { + close(fd); perror("write"); return -1; } diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index ad7e2e5088c1..df551dfa8e52 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -1833,6 +1833,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted. and restore using xsave. The kernel will fallback to enabling legacy floating-point and sse state. + eagerfpu= [X86] + on enable eager fpu restore + off disable eager fpu restore + auto selects the default scheme, which automatically + enables eagerfpu restore for xsaveopt. + nohlt [BUGS=ARM,SH] Tells the kernel that the sleep(SH) or wfi(ARM) instruction doesn't work correctly and not to use it. This is also useful when using JTAG debugger. @@ -2385,6 +2391,17 @@ bytes respectively. Such letter suffixes can also be entirely omitted. rcutree.rcu_cpu_stall_timeout= [KNL,BOOT] Set timeout for RCU CPU stall warning messages. + rcutree.jiffies_till_first_fqs= [KNL,BOOT] + Set delay from grace-period initialization to + first attempt to force quiescent states. + Units are jiffies, minimum value is zero, + and maximum value is HZ. + + rcutree.jiffies_till_next_fqs= [KNL,BOOT] + Set delay between subsequent attempts to force + quiescent states. Units are jiffies, minimum + value is one, and maximum value is HZ. + rcutorture.fqs_duration= [KNL,BOOT] Set duration of force_quiescent_state bursts. @@ -2638,9 +2655,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted. smart2= [HW] Format: [,[,...,]] - smp-alt-once [X86-32,SMP] On a hotplug CPU system, only - attempt to substitute SMP alternatives once at boot. - smsc-ircc2.nopnp [HW] Don't use PNP to discover SMC devices smsc-ircc2.ircc_cfg= [HW] Device configuration I/O port smsc-ircc2.ircc_sir= [HW] SIR base I/O port diff --git a/trunk/Documentation/laptops/laptop-mode.txt b/trunk/Documentation/laptops/laptop-mode.txt index 0bf25eebce94..4ebbfc3f1c6e 100644 --- a/trunk/Documentation/laptops/laptop-mode.txt +++ b/trunk/Documentation/laptops/laptop-mode.txt @@ -262,9 +262,9 @@ MINIMUM_BATTERY_MINUTES=10 # # Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been -# exceeded, the kernel will wake pdflush which will then reduce the amount -# of dirty memory to dirty_background_ratio. Set this nice and low, so once -# some writeout has commenced, we do a lot of it. +# exceeded, the kernel will wake flusher threads which will then reduce the +# amount of dirty memory to dirty_background_ratio. 
Set this nice and low, +# so once some writeout has commenced, we do a lot of it. # #DIRTY_BACKGROUND_RATIO=5 @@ -384,9 +384,9 @@ CPU_MAXFREQ=${CPU_MAXFREQ:-'slowest'} # # Allowed dirty background ratio, in percent. Once DIRTY_RATIO has been -# exceeded, the kernel will wake pdflush which will then reduce the amount -# of dirty memory to dirty_background_ratio. Set this nice and low, so once -# some writeout has commenced, we do a lot of it. +# exceeded, the kernel will wake flusher threads which will then reduce the +# amount of dirty memory to dirty_background_ratio. Set this nice and low, +# so once some writeout has commenced, we do a lot of it. # DIRTY_BACKGROUND_RATIO=${DIRTY_BACKGROUND_RATIO:-'5'} diff --git a/trunk/Documentation/networking/netconsole.txt b/trunk/Documentation/networking/netconsole.txt index 8d022073e3ef..2e9e0ae2cd45 100644 --- a/trunk/Documentation/networking/netconsole.txt +++ b/trunk/Documentation/networking/netconsole.txt @@ -51,8 +51,23 @@ Built-in netconsole starts immediately after the TCP stack is initialized and attempts to bring up the supplied dev at the supplied address. -The remote host can run either 'netcat -u -l -p ', -'nc -l -u ' or syslogd. +The remote host has several options to receive the kernel messages, +for example: + +1) syslogd + +2) netcat + + On distributions using a BSD-based netcat version (e.g. Fedora, + openSUSE and Ubuntu) the listening port must be specified without + the -p switch: + + 'nc -u -l -p ' / 'nc -u -l ' or + 'netcat -u -l -p ' / 'netcat -u -l ' + +3) socat + + 'socat udp-recv: -' Dynamic reconfiguration: ======================== diff --git a/trunk/Documentation/pinctrl.txt b/trunk/Documentation/pinctrl.txt index e40f4b4e1977..1479aca23744 100644 --- a/trunk/Documentation/pinctrl.txt +++ b/trunk/Documentation/pinctrl.txt @@ -840,9 +840,9 @@ static unsigned long i2c_pin_configs[] = { static struct pinctrl_map __initdata mapping[] = { PIN_MAP_MUX_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", "i2c0"), - PIN_MAP_MUX_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), - PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), - PIN_MAP_MUX_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs), + PIN_MAP_CONFIGS_GROUP("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0", i2c_grp_configs), + PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0scl", i2c_pin_configs), + PIN_MAP_CONFIGS_PIN("foo-i2c.0", PINCTRL_STATE_DEFAULT, "pinctrl-foo", "i2c0sda", i2c_pin_configs), }; Finally, some devices expect the mapping table to contain certain specific diff --git a/trunk/Documentation/power/swsusp.txt b/trunk/Documentation/power/swsusp.txt index 92341b84250d..0b4b63e7e9b6 100644 --- a/trunk/Documentation/power/swsusp.txt +++ b/trunk/Documentation/power/swsusp.txt @@ -53,7 +53,7 @@ before suspend (it is limited to 500 MB by default). 
Article about goals and implementation of Software Suspend for Linux ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Author: G‚ábor Kuti +Author: Gábor Kuti Last revised: 2003-10-20 by Pavel Machek Idea and goals to achieve diff --git a/trunk/Documentation/scheduler/sched-arch.txt b/trunk/Documentation/scheduler/sched-arch.txt index 28aa1075e291..b1b8587b86f0 100644 --- a/trunk/Documentation/scheduler/sched-arch.txt +++ b/trunk/Documentation/scheduler/sched-arch.txt @@ -17,16 +17,6 @@ you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file Unlocked context switches introduce only a very minor performance penalty to the core scheduler implementation in the CONFIG_SMP case. -2. Interrupt status -By default, the switch_to arch function is called with interrupts -disabled. Interrupts may be enabled over the call if it is likely to -introduce a significant interrupt latency by adding the line -`#define __ARCH_WANT_INTERRUPTS_ON_CTXSW` in the same place as for -unlocked context switches. This define also implies -`__ARCH_WANT_UNLOCKED_CTXSW`. See arch/arm/include/asm/system.h for an -example. - - CPU idle ======== Your cpu_idle routines need to obey the following rules: diff --git a/trunk/Documentation/security/Yama.txt b/trunk/Documentation/security/Yama.txt index e369de2d48cd..dd908cf64ecf 100644 --- a/trunk/Documentation/security/Yama.txt +++ b/trunk/Documentation/security/Yama.txt @@ -46,14 +46,13 @@ restrictions, it can call prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, ...) so that any otherwise allowed process (even those in external pid namespaces) may attach. -These restrictions do not change how ptrace via PTRACE_TRACEME operates. - -The sysctl settings are: +The sysctl settings (writable only with CAP_SYS_PTRACE) are: 0 - classic ptrace permissions: a process can PTRACE_ATTACH to any other process running under the same uid, as long as it is dumpable (i.e. did not transition uids, start privileged, or have called - prctl(PR_SET_DUMPABLE...) already). + prctl(PR_SET_DUMPABLE...) already). Similarly, PTRACE_TRACEME is + unchanged. 1 - restricted ptrace: a process must have a predefined relationship with the inferior it wants to call PTRACE_ATTACH on. By default, @@ -61,12 +60,13 @@ The sysctl settings are: classic criteria is also met. To change the relationship, an inferior can call prctl(PR_SET_PTRACER, debugger, ...) to declare an allowed debugger PID to call PTRACE_ATTACH on the inferior. + Using PTRACE_TRACEME is unchanged. 2 - admin-only attach: only processes with CAP_SYS_PTRACE may use ptrace - with PTRACE_ATTACH. + with PTRACE_ATTACH, or through children calling PTRACE_TRACEME. -3 - no attach: no processes may use ptrace with PTRACE_ATTACH. Once set, - this sysctl cannot be changed to a lower value. +3 - no attach: no processes may use ptrace with PTRACE_ATTACH nor via + PTRACE_TRACEME. Once set, this sysctl value cannot be changed. The original children-only logic was based on the restrictions in grsecurity. diff --git a/trunk/Documentation/sysctl/vm.txt b/trunk/Documentation/sysctl/vm.txt index dcc2a94ae34e..078701fdbd4d 100644 --- a/trunk/Documentation/sysctl/vm.txt +++ b/trunk/Documentation/sysctl/vm.txt @@ -76,8 +76,8 @@ huge pages although processes will also directly compact memory as required. dirty_background_bytes -Contains the amount of dirty memory at which the pdflush background writeback -daemon will start writeback. +Contains the amount of dirty memory at which the background kernel +flusher threads will start writeback. 
Note: dirty_background_bytes is the counterpart of dirty_background_ratio. Only one of them may be specified at a time. When one sysctl is written it is @@ -89,7 +89,7 @@ other appears as 0 when read. dirty_background_ratio Contains, as a percentage of total system memory, the number of pages at which -the pdflush background writeback daemon will start writing out dirty data. +the background kernel flusher threads will start writing out dirty data. ============================================================== @@ -112,9 +112,9 @@ retained. dirty_expire_centisecs This tunable is used to define when dirty data is old enough to be eligible -for writeout by the pdflush daemons. It is expressed in 100'ths of a second. -Data which has been dirty in-memory for longer than this interval will be -written out next time a pdflush daemon wakes up. +for writeout by the kernel flusher threads. It is expressed in 100'ths +of a second. Data which has been dirty in-memory for longer than this +interval will be written out next time a flusher thread wakes up. ============================================================== @@ -128,7 +128,7 @@ data. dirty_writeback_centisecs -The pdflush writeback daemons will periodically wake up and write `old' data +The kernel flusher threads will periodically wake up and write `old' data out to disk. This tunable expresses the interval between those wakeups, in 100'ths of a second. diff --git a/trunk/Documentation/trace/kprobetrace.txt b/trunk/Documentation/trace/kprobetrace.txt index d0d0bb9e3e25..d68ea5fc812b 100644 --- a/trunk/Documentation/trace/kprobetrace.txt +++ b/trunk/Documentation/trace/kprobetrace.txt @@ -12,7 +12,7 @@ kprobes can probe (this means, all functions body except for __kprobes functions). Unlike the Tracepoint based event, this can be added and removed dynamically, on the fly. -To enable this feature, build your kernel with CONFIG_KPROBE_TRACING=y. +To enable this feature, build your kernel with CONFIG_KPROBE_EVENT=y. Similar to the events tracer, this doesn't need to be activated via current_tracer. Instead of that, add probe points via diff --git a/trunk/Documentation/vfio.txt b/trunk/Documentation/vfio.txt index 0cb6685c8029..8eda3635a17d 100644 --- a/trunk/Documentation/vfio.txt +++ b/trunk/Documentation/vfio.txt @@ -133,7 +133,7 @@ character devices for this group: $ lspci -n -s 0000:06:0d.0 06:0d.0 0401: 1102:0002 (rev 08) # echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind -# echo 1102 0002 > /sys/bus/pci/drivers/vfio/new_id +# echo 1102 0002 > /sys/bus/pci/drivers/vfio-pci/new_id Now we need to look at what other devices are in the group to free it for use by VFIO: diff --git a/trunk/Documentation/vm/hugetlbpage.txt b/trunk/Documentation/vm/hugetlbpage.txt index f8551b3879f8..4ac359b7aa17 100644 --- a/trunk/Documentation/vm/hugetlbpage.txt +++ b/trunk/Documentation/vm/hugetlbpage.txt @@ -299,11 +299,17 @@ map_hugetlb.c. 
******************************************************************* /* - * hugepage-shm: see Documentation/vm/hugepage-shm.c + * map_hugetlb: see tools/testing/selftests/vm/map_hugetlb.c */ ******************************************************************* /* - * hugepage-mmap: see Documentation/vm/hugepage-mmap.c + * hugepage-shm: see tools/testing/selftests/vm/hugepage-shm.c + */ + +******************************************************************* + +/* + * hugepage-mmap: see tools/testing/selftests/vm/hugepage-mmap.c */ diff --git a/trunk/Documentation/w1/slaves/w1_therm b/trunk/Documentation/w1/slaves/w1_therm index 0403aaaba878..874a8ca93feb 100644 --- a/trunk/Documentation/w1/slaves/w1_therm +++ b/trunk/Documentation/w1/slaves/w1_therm @@ -3,6 +3,7 @@ Kernel driver w1_therm Supported chips: * Maxim ds18*20 based temperature sensors. + * Maxim ds1825 based temperature sensors. Author: Evgeniy Polyakov @@ -15,6 +16,7 @@ supported family codes: W1_THERM_DS18S20 0x10 W1_THERM_DS1822 0x22 W1_THERM_DS18B20 0x28 +W1_THERM_DS1825 0x3B Support is provided through the sysfs w1_slave file. Each open and read sequence will initiate a temperature conversion then provide two diff --git a/trunk/Documentation/watchdog/src/watchdog-test.c b/trunk/Documentation/watchdog/src/watchdog-test.c index 73ff5cc93e05..3da822967ee0 100644 --- a/trunk/Documentation/watchdog/src/watchdog-test.c +++ b/trunk/Documentation/watchdog/src/watchdog-test.c @@ -31,7 +31,7 @@ static void keep_alive(void) * or "-e" to enable the card. */ -void term(int sig) +static void term(int sig) { close(fd); fprintf(stderr, "Stopping watchdog ticks...\n"); diff --git a/trunk/Documentation/x86/x86_64/boot-options.txt b/trunk/Documentation/x86/x86_64/boot-options.txt index c54b4f503e2a..de38429beb71 100644 --- a/trunk/Documentation/x86/x86_64/boot-options.txt +++ b/trunk/Documentation/x86/x86_64/boot-options.txt @@ -50,6 +50,13 @@ Machine check monarchtimeout: Sets the time in us to wait for other CPUs on machine checks. 0 to disable. + mce=bios_cmci_threshold + Don't overwrite the bios-set CMCI threshold. This boot option + prevents Linux from overwriting the CMCI threshold set by the + bios. Without this option, Linux always sets the CMCI + threshold to 1. Enabling this may make memory predictive failure + analysis less effective if the bios sets thresholds for memory + errors since we will not see details for all errors. 
nomce (for compatibility with i386): same as mce=off diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 94b823f71e94..9362f54bccb8 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -827,24 +827,24 @@ F: arch/arm/mach-pxa/colibri-pxa270-income.c ARM/INTEL IOP32X ARM ARCHITECTURE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP33X ARM ARCHITECTURE -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IOP13XX ARM ARCHITECTURE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained ARM/INTEL IQ81342EX MACHINE SUPPORT M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -869,7 +869,7 @@ F: drivers/pcmcia/pxa2xx_stargate2.c ARM/INTEL XSC3 (MANZANO) ARM CORE M: Lennert Buytenhek -M: Dan Williams +M: Dan Williams L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -925,14 +925,14 @@ S: Maintained ARM/NOMADIK ARCHITECTURE M: Alessandro Rubini -M: Linus Walleij +M: Linus Walleij M: STEricsson L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-nomadik/ F: arch/arm/plat-nomadik/ F: drivers/i2c/busses/i2c-nomadik.c -T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-nomadik.git ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT M: Nelson Castillo @@ -1146,7 +1146,7 @@ F: drivers/usb/host/ehci-w90x900.c F: drivers/video/nuc900fb.c ARM/U300 MACHINE SUPPORT -M: Linus Walleij +M: Linus Walleij L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Supported F: arch/arm/mach-u300/ @@ -1161,15 +1161,20 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/Ux500 ARM ARCHITECTURE M: Srinidhi Kasagar -M: Linus Walleij +M: Linus Walleij L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: arch/arm/mach-ux500/ +F: drivers/clocksource/clksrc-dbx500-prcmu.c F: drivers/dma/ste_dma40* +F: drivers/hwspinlock/u8500_hsem.c F: drivers/mfd/abx500* F: drivers/mfd/ab8500* -F: drivers/mfd/stmpe* +F: drivers/mfd/dbx500* +F: drivers/mfd/db8500* +F: drivers/pinctrl/pinctrl-nomadik* F: drivers/rtc/rtc-ab8500.c +F: drivers/rtc/rtc-pl031.c T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git ARM/VFP SUPPORT @@ -1227,9 +1232,9 @@ S: Maintained F: drivers/hwmon/asb100.c ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API -M: Dan Williams +M: Dan Williams W: http://sourceforge.net/projects/xscaleiop -S: Supported +S: Maintained F: Documentation/crypto/async-tx-api.txt F: crypto/async_tx/ F: drivers/dma/ @@ -2212,7 +2217,7 @@ S: Maintained F: drivers/scsi/tmscsim.* DC395x SCSI driver -M: Oliver Neukum +M: Oliver Neukum M: Ali Akcaagac M: Jamie Lenehan W: http://twibble.org/dist/dc395x/ @@ -2359,7 +2364,7 @@ T: git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git DMA GENERIC OFFLOAD ENGINE SUBSYSTEM M: Vinod Koul -M: Dan Williams +M: Dan Williams S: Supported F: drivers/dma/ F: include/linux/dma* @@ -3094,7 +3099,7 @@ F: include/linux/gigaset_dev.h GPIO SUBSYSTEM M: Grant Likely -M: Linus Walleij +M: Linus Walleij S: Maintained T: git 
git://git.secretlab.ca/git/linux-2.6.git F: Documentation/gpio.txt @@ -3383,7 +3388,7 @@ M: "Wolfram Sang (embedded platforms)" L: linux-i2c@vger.kernel.org W: http://i2c.wiki.kernel.org/ T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/ -T: git git://git.fluff.org/bjdooks/linux.git +T: git git://git.pengutronix.de/git/wsa/linux.git S: Maintained F: Documentation/i2c/ F: drivers/i2c/ @@ -3547,12 +3552,12 @@ K: \b(ABS|SYN)_MT_ INTEL C600 SERIES SAS CONTROLLER DRIVER M: Intel SCU Linux support -M: Dan Williams +M: Lukasz Dorau +M: Maciej Patelczyk M: Dave Jiang -M: Ed Nadolski L: linux-scsi@vger.kernel.org -T: git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git -S: Maintained +T: git git://git.code.sf.net/p/intel-sas/isci +S: Supported F: drivers/scsi/isci/ F: firmware/isci/ @@ -3590,8 +3595,8 @@ F: arch/x86/kernel/microcode_core.c F: arch/x86/kernel/microcode_intel.c INTEL I/OAT DMA DRIVER -M: Dan Williams -S: Supported +M: Dan Williams +S: Maintained F: drivers/dma/ioat* INTEL IOMMU (VT-d) @@ -3603,8 +3608,8 @@ F: drivers/iommu/intel-iommu.c F: include/linux/intel-iommu.h INTEL IOP-ADMA DMA DRIVER -M: Dan Williams -S: Maintained +M: Dan Williams +S: Odd fixes F: drivers/dma/iop-adma.c INTEL IXP4XX QMGR, NPE, ETHERNET and HSS SUPPORT @@ -3662,11 +3667,12 @@ F: Documentation/networking/README.ipw2200 F: drivers/net/wireless/ipw2x00/ INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT) -M: Joseph Cihula +M: Richard L Maliszewski +M: Gang Wei M: Shane Wang L: tboot-devel@lists.sourceforge.net W: http://tboot.sourceforge.net -T: Mercurial http://www.bughost.org/repos.hg/tboot.hg +T: hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot S: Supported F: Documentation/intel_txt.txt F: include/linux/tboot.h @@ -4533,7 +4539,7 @@ S: Supported F: arch/microblaze/ MICROTEK X6 SCANNER -M: Oliver Neukum +M: Oliver Neukum S: Maintained F: drivers/usb/image/microtek.* @@ -5316,6 +5322,12 @@ L: linux-mtd@lists.infradead.org S: Maintained F: drivers/mtd/devices/phram.c +PICOLCD HID DRIVER +M: Bruno Prémont +L: linux-input@vger.kernel.org +S: Maintained +F: drivers/hid/hid-picolcd* + PICOXCELL SUPPORT M: Jamie Iles L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -5329,14 +5341,15 @@ PIN CONTROL SUBSYSTEM M: Linus Walleij S: Maintained F: drivers/pinctrl/ +F: include/linux/pinctrl/ PIN CONTROLLER - ST SPEAR -M: Viresh Kumar +M: Viresh Kumar L: spear-devel@list.st.com L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) W: http://www.st.com/spear S: Maintained -F: driver/pinctrl/spear/ +F: drivers/pinctrl/spear/ PKTCDVD DRIVER M: Peter Osterlund @@ -5538,6 +5551,8 @@ F: Documentation/devicetree/bindings/pwm/ F: include/linux/pwm.h F: include/linux/of_pwm.h F: drivers/pwm/ +F: drivers/video/backlight/pwm_bl.c +F: include/linux/pwm_backlight.h PXA2xx/PXA3xx SUPPORT M: Eric Miao @@ -7071,7 +7086,7 @@ F: include/linux/mtd/ubi.h F: include/mtd/ubi-user.h USB ACM DRIVER -M: Oliver Neukum +M: Oliver Neukum L: linux-usb@vger.kernel.org S: Maintained F: Documentation/usb/acm.txt @@ -7092,7 +7107,7 @@ S: Supported F: drivers/block/ub.c USB CDC ETHERNET DRIVER -M: Oliver Neukum +M: Oliver Neukum L: linux-usb@vger.kernel.org S: Maintained F: drivers/net/usb/cdc_*.c @@ -7165,7 +7180,7 @@ F: drivers/usb/host/isp116x* F: include/linux/usb/isp116x.h USB KAWASAKI LSI DRIVER -M: Oliver Neukum +M: Oliver Neukum L: linux-usb@vger.kernel.org S: Maintained F: drivers/usb/serial/kl5kusb105.* @@ -7283,6 +7298,12 @@ W: http://www.connecttech.com S: 
Supported F: drivers/usb/serial/whiteheat* +USB SMSC75XX ETHERNET DRIVER +M: Steve Glendinning +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/usb/smsc75xx.* + USB SMSC95XX ETHERNET DRIVER M: Steve Glendinning L: netdev@vger.kernel.org @@ -7665,23 +7686,28 @@ S: Supported F: Documentation/hwmon/wm83?? F: arch/arm/mach-s3c64xx/mach-crag6410* F: drivers/clk/clk-wm83*.c +F: drivers/extcon/extcon-arizona.c F: drivers/leds/leds-wm83*.c F: drivers/gpio/gpio-*wm*.c +F: drivers/gpio/gpio-arizona.c F: drivers/hwmon/wm83??-hwmon.c F: drivers/input/misc/wm831x-on.c F: drivers/input/touchscreen/wm831x-ts.c F: drivers/input/touchscreen/wm97*.c -F: drivers/mfd/wm8*.c +F: drivers/mfd/arizona* +F: drivers/mfd/wm*.c F: drivers/power/wm83*.c F: drivers/rtc/rtc-wm83*.c F: drivers/regulator/wm8*.c F: drivers/video/backlight/wm83*_bl.c F: drivers/watchdog/wm83*_wdt.c +F: include/linux/mfd/arizona/ F: include/linux/mfd/wm831x/ F: include/linux/mfd/wm8350/ F: include/linux/mfd/wm8400* F: include/linux/wm97xx.h F: include/sound/wm????.h +F: sound/soc/codecs/arizona.? F: sound/soc/codecs/wm* WORKQUEUE diff --git a/trunk/Makefile b/trunk/Makefile index ddf5be952e45..846dd7607854 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 6 SUBLEVEL = 0 -EXTRAVERSION = -rc1 -NAME = Saber-toothed Squirrel +EXTRAVERSION = +NAME = Terrified Chipmunk # *DOCUMENTATION* # To see a list of typical targets execute "make help" @@ -609,7 +609,11 @@ KBUILD_CFLAGS += $(call cc-option, -femit-struct-debug-baseonly) endif ifdef CONFIG_FUNCTION_TRACER -KBUILD_CFLAGS += -pg +ifdef CONFIG_HAVE_FENTRY +CC_USING_FENTRY := $(call cc-option, -mfentry -DCC_USING_FENTRY) +endif +KBUILD_CFLAGS += -pg $(CC_USING_FENTRY) +KBUILD_AFLAGS += $(CC_USING_FENTRY) ifdef CONFIG_DYNAMIC_FTRACE ifdef CONFIG_HAVE_C_RECORDMCOUNT BUILD_C_RECORDMCOUNT := y diff --git a/trunk/arch/Kconfig b/trunk/arch/Kconfig index 72f2fa189cc5..a62965d057f6 100644 --- a/trunk/arch/Kconfig +++ b/trunk/arch/Kconfig @@ -222,6 +222,19 @@ config HAVE_PERF_EVENTS_NMI subsystem. Also has support for calculating CPU cycle events to determine how many clock cycles in a given period. +config HAVE_PERF_REGS + bool + help + Support selective register dumps for perf events. This includes + bit-mapping of each registers and a unique architecture id. + +config HAVE_PERF_USER_STACK_DUMP + bool + help + Support user stack dumps for perf event samples. This needs + access to the user stack pointer which is not unified across + architectures. + config HAVE_ARCH_JUMP_LABEL bool @@ -281,4 +294,23 @@ config SECCOMP_FILTER See Documentation/prctl/seccomp_filter.txt for details. +config HAVE_RCU_USER_QS + bool + help + Provide kernel entry/exit hooks necessary for userspace + RCU extended quiescent state. Syscalls need to be wrapped inside + rcu_user_exit()-rcu_user_enter() through the slow path using + TIF_NOHZ flag. Exceptions handlers must be wrapped as well. Irqs + are already protected inside rcu_irq_enter/rcu_irq_exit() but + preemption or signal handling on irq exit still need to be protected. + +config HAVE_VIRT_CPU_ACCOUNTING + bool + +config HAVE_IRQ_TIME_ACCOUNTING + bool + help + Archs need to ensure they use a high enough resolution clock to + support irq time accounting and then call enable_sched_clock_irqtime(). 
+ source "kernel/gcov/Kconfig" diff --git a/trunk/arch/alpha/Kconfig b/trunk/arch/alpha/Kconfig index d5b9b5e645cc..9944dedee5b1 100644 --- a/trunk/arch/alpha/Kconfig +++ b/trunk/arch/alpha/Kconfig @@ -18,6 +18,8 @@ config ALPHA select ARCH_HAVE_NMI_SAFE_CMPXCHG select GENERIC_SMP_IDLE_THREAD select GENERIC_CMOS_UPDATE + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/trunk/arch/alpha/include/asm/atomic.h b/trunk/arch/alpha/include/asm/atomic.h index 3bb7ffeae3bc..c2cbe4fc391c 100644 --- a/trunk/arch/alpha/include/asm/atomic.h +++ b/trunk/arch/alpha/include/asm/atomic.h @@ -14,8 +14,8 @@ */ -#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) -#define ATOMIC64_INIT(i) ( (atomic64_t) { (i) } ) +#define ATOMIC_INIT(i) { (i) } +#define ATOMIC64_INIT(i) { (i) } #define atomic_read(v) (*(volatile int *)&(v)->counter) #define atomic64_read(v) (*(volatile long *)&(v)->counter) diff --git a/trunk/arch/alpha/include/asm/fpu.h b/trunk/arch/alpha/include/asm/fpu.h index db00f7885faa..e477bcd5b94a 100644 --- a/trunk/arch/alpha/include/asm/fpu.h +++ b/trunk/arch/alpha/include/asm/fpu.h @@ -1,7 +1,9 @@ #ifndef __ASM_ALPHA_FPU_H #define __ASM_ALPHA_FPU_H +#ifdef __KERNEL__ #include +#endif /* * Alpha floating-point control register defines: diff --git a/trunk/arch/alpha/include/asm/ptrace.h b/trunk/arch/alpha/include/asm/ptrace.h index fd698a174f26..b87755a19554 100644 --- a/trunk/arch/alpha/include/asm/ptrace.h +++ b/trunk/arch/alpha/include/asm/ptrace.h @@ -76,7 +76,10 @@ struct switch_stack { #define task_pt_regs(task) \ ((struct pt_regs *) (task_stack_page(task) + 2*PAGE_SIZE) - 1) -#define force_successful_syscall_return() (task_pt_regs(current)->r0 = 0) +#define current_pt_regs() \ + ((struct pt_regs *) ((char *)current_thread_info() + 2*PAGE_SIZE) - 1) + +#define force_successful_syscall_return() (current_pt_regs()->r0 = 0) #endif diff --git a/trunk/arch/alpha/include/asm/socket.h b/trunk/arch/alpha/include/asm/socket.h index dcb221a4b5be..7d2f75be932e 100644 --- a/trunk/arch/alpha/include/asm/socket.h +++ b/trunk/arch/alpha/include/asm/socket.h @@ -76,9 +76,11 @@ /* Instruct lower device to use last 4-bytes of skb data as FCS */ #define SO_NOFCS 43 +#ifdef __KERNEL__ /* O_NONBLOCK clashes with the bits used for socket types. Therefore we * have to define SOCK_NONBLOCK to a different value here. */ #define SOCK_NONBLOCK 0x40000000 +#endif /* __KERNEL__ */ #endif /* _ASM_SOCKET_H */ diff --git a/trunk/arch/alpha/include/asm/uaccess.h b/trunk/arch/alpha/include/asm/uaccess.h index b49ec2f8d6e3..766fdfde2b7a 100644 --- a/trunk/arch/alpha/include/asm/uaccess.h +++ b/trunk/arch/alpha/include/asm/uaccess.h @@ -433,36 +433,12 @@ clear_user(void __user *to, long len) #undef __module_address #undef __module_call -/* Returns: -EFAULT if exception before terminator, N if the entire - buffer filled, else strlen. */ +#define user_addr_max() \ + (segment_eq(get_fs(), USER_DS) ? 
TASK_SIZE : ~0UL) -extern long __strncpy_from_user(char *__to, const char __user *__from, long __to_len); - -extern inline long -strncpy_from_user(char *to, const char __user *from, long n) -{ - long ret = -EFAULT; - if (__access_ok((unsigned long)from, 0, get_fs())) - ret = __strncpy_from_user(to, from, n); - return ret; -} - -/* Returns: 0 if bad, string length+1 (memory size) of string if ok */ -extern long __strlen_user(const char __user *); - -extern inline long strlen_user(const char __user *str) -{ - return access_ok(VERIFY_READ,str,0) ? __strlen_user(str) : 0; -} - -/* Returns: 0 if exception before NUL or reaching the supplied limit (N), - * a value greater than N if the limit would be exceeded, else strlen. */ -extern long __strnlen_user(const char __user *, long); - -extern inline long strnlen_user(const char __user *str, long n) -{ - return access_ok(VERIFY_READ,str,0) ? __strnlen_user(str, n) : 0; -} +extern long strncpy_from_user(char *dest, const char __user *src, long count); +extern __must_check long strlen_user(const char __user *str); +extern __must_check long strnlen_user(const char __user *str, long n); /* * About the exception table: diff --git a/trunk/arch/alpha/include/asm/unistd.h b/trunk/arch/alpha/include/asm/unistd.h index 633b23b0664a..a31a78eac9b9 100644 --- a/trunk/arch/alpha/include/asm/unistd.h +++ b/trunk/arch/alpha/include/asm/unistd.h @@ -465,10 +465,12 @@ #define __NR_setns 501 #define __NR_accept4 502 #define __NR_sendmmsg 503 +#define __NR_process_vm_readv 504 +#define __NR_process_vm_writev 505 #ifdef __KERNEL__ -#define NR_SYSCALLS 504 +#define NR_SYSCALLS 506 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/trunk/arch/alpha/include/asm/word-at-a-time.h b/trunk/arch/alpha/include/asm/word-at-a-time.h new file mode 100644 index 000000000000..6b340d0f1521 --- /dev/null +++ b/trunk/arch/alpha/include/asm/word-at-a-time.h @@ -0,0 +1,55 @@ +#ifndef _ASM_WORD_AT_A_TIME_H +#define _ASM_WORD_AT_A_TIME_H + +#include + +/* + * word-at-a-time interface for Alpha. + */ + +/* + * We do not use the word_at_a_time struct on Alpha, but it needs to be + * implemented to humour the generic code. 
+ */ +struct word_at_a_time { + const unsigned long unused; +}; + +#define WORD_AT_A_TIME_CONSTANTS { 0 } + +/* Return nonzero if val has a zero */ +static inline unsigned long has_zero(unsigned long val, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long zero_locations = __kernel_cmpbge(0, val); + *bits = zero_locations; + return zero_locations; +} + +static inline unsigned long prep_zero_mask(unsigned long val, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +#define create_zero_mask(bits) (bits) + +static inline unsigned long find_zero(unsigned long bits) +{ +#if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) + /* Simple if have CIX instructions */ + return __kernel_cttz(bits); +#else + unsigned long t1, t2, t3; + /* Retain lowest set bit only */ + bits &= -bits; + /* Binary search for lowest set bit */ + t1 = bits & 0xf0; + t2 = bits & 0xcc; + t3 = bits & 0xaa; + if (t1) t1 = 4; + if (t2) t2 = 2; + if (t3) t3 = 1; + return t1 + t2 + t3; +#endif +} + +#endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/trunk/arch/alpha/kernel/alpha_ksyms.c b/trunk/arch/alpha/kernel/alpha_ksyms.c index d96e742d4dc2..15fa821d09cd 100644 --- a/trunk/arch/alpha/kernel/alpha_ksyms.c +++ b/trunk/arch/alpha/kernel/alpha_ksyms.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(alpha_write_fp_reg_s); /* entry.S */ EXPORT_SYMBOL(kernel_thread); -EXPORT_SYMBOL(kernel_execve); /* Networking helper routines. */ EXPORT_SYMBOL(csum_tcpudp_magic); @@ -74,8 +73,6 @@ EXPORT_SYMBOL(alpha_fp_emul); */ EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(__do_clear_user); -EXPORT_SYMBOL(__strncpy_from_user); -EXPORT_SYMBOL(__strnlen_user); /* * SMP-specific symbols. diff --git a/trunk/arch/alpha/kernel/entry.S b/trunk/arch/alpha/kernel/entry.S index 6d159cee5f2f..ec0da0567ab5 100644 --- a/trunk/arch/alpha/kernel/entry.S +++ b/trunk/arch/alpha/kernel/entry.S @@ -663,58 +663,6 @@ kernel_thread: br ret_to_kernel .end kernel_thread -/* - * kernel_execve(path, argv, envp) - */ - .align 4 - .globl kernel_execve - .ent kernel_execve -kernel_execve: - /* We can be called from a module. */ - ldgp $gp, 0($27) - lda $sp, -(32+SIZEOF_PT_REGS+8)($sp) - .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0 - stq $26, 0($sp) - stq $16, 8($sp) - stq $17, 16($sp) - stq $18, 24($sp) - .prologue 1 - - lda $16, 32($sp) - lda $17, 0 - lda $18, SIZEOF_PT_REGS - bsr $26, memset !samegp - - /* Avoid the HAE being gratuitously wrong, which would cause us - to do the whole turn off interrupts thing and restore it. */ - ldq $2, alpha_mv+HAE_CACHE - stq $2, 152+32($sp) - - ldq $16, 8($sp) - ldq $17, 16($sp) - ldq $18, 24($sp) - lda $19, 32($sp) - bsr $26, do_execve !samegp - - ldq $26, 0($sp) - bne $0, 1f /* error! */ - - /* Move the temporary pt_regs struct from its current location - to the top of the kernel stack frame. See copy_thread for - details for a normal process. */ - lda $16, 0x4000 - SIZEOF_PT_REGS($8) - lda $17, 32($sp) - lda $18, SIZEOF_PT_REGS - bsr $26, memmove !samegp - - /* Take that over as our new stack frame and visit userland! */ - lda $sp, 0x4000 - SIZEOF_PT_REGS($8) - br $31, ret_from_sys_call - -1: lda $sp, 32+SIZEOF_PT_REGS+8($sp) - ret -.end kernel_execve - /* * Special system calls. 
Most of these are special in that they either @@ -796,115 +744,6 @@ sys_rt_sigreturn: br ret_from_sys_call .end sys_rt_sigreturn - .align 4 - .globl sys_sethae - .ent sys_sethae -sys_sethae: - .prologue 0 - stq $16, 152($sp) - ret -.end sys_sethae - - .align 4 - .globl osf_getpriority - .ent osf_getpriority -osf_getpriority: - lda $sp, -16($sp) - stq $26, 0($sp) - .prologue 0 - - jsr $26, sys_getpriority - - ldq $26, 0($sp) - blt $0, 1f - - /* Return value is the unbiased priority, i.e. 20 - prio. - This does result in negative return values, so signal - no error by writing into the R0 slot. */ - lda $1, 20 - stq $31, 16($sp) - subl $1, $0, $0 - unop - -1: lda $sp, 16($sp) - ret -.end osf_getpriority - - .align 4 - .globl sys_getxuid - .ent sys_getxuid -sys_getxuid: - .prologue 0 - ldq $2, TI_TASK($8) - ldq $3, TASK_CRED($2) - ldl $0, CRED_UID($3) - ldl $1, CRED_EUID($3) - stq $1, 80($sp) - ret -.end sys_getxuid - - .align 4 - .globl sys_getxgid - .ent sys_getxgid -sys_getxgid: - .prologue 0 - ldq $2, TI_TASK($8) - ldq $3, TASK_CRED($2) - ldl $0, CRED_GID($3) - ldl $1, CRED_EGID($3) - stq $1, 80($sp) - ret -.end sys_getxgid - - .align 4 - .globl sys_getxpid - .ent sys_getxpid -sys_getxpid: - .prologue 0 - ldq $2, TI_TASK($8) - - /* See linux/kernel/timer.c sys_getppid for discussion - about this loop. */ - ldq $3, TASK_GROUP_LEADER($2) - ldq $4, TASK_REAL_PARENT($3) - ldl $0, TASK_TGID($2) -1: ldl $1, TASK_TGID($4) -#ifdef CONFIG_SMP - mov $4, $5 - mb - ldq $3, TASK_GROUP_LEADER($2) - ldq $4, TASK_REAL_PARENT($3) - cmpeq $4, $5, $5 - beq $5, 1b -#endif - stq $1, 80($sp) - ret -.end sys_getxpid - - .align 4 - .globl sys_alpha_pipe - .ent sys_alpha_pipe -sys_alpha_pipe: - lda $sp, -16($sp) - stq $26, 0($sp) - .prologue 0 - - mov $31, $17 - lda $16, 8($sp) - jsr $26, do_pipe_flags - - ldq $26, 0($sp) - bne $0, 1f - - /* The return values are in $0 and $20. */ - ldl $1, 12($sp) - ldl $0, 8($sp) - - stq $1, 80+16($sp) -1: lda $sp, 16($sp) - ret -.end sys_alpha_pipe - .align 4 .globl sys_execve .ent sys_execve diff --git a/trunk/arch/alpha/kernel/osf_sys.c b/trunk/arch/alpha/kernel/osf_sys.c index 98a103621af6..bc1acdda7a5e 100644 --- a/trunk/arch/alpha/kernel/osf_sys.c +++ b/trunk/arch/alpha/kernel/osf_sys.c @@ -1404,3 +1404,52 @@ SYSCALL_DEFINE3(osf_writev, unsigned long, fd, } #endif + +SYSCALL_DEFINE2(osf_getpriority, int, which, int, who) +{ + int prio = sys_getpriority(which, who); + if (prio >= 0) { + /* Return value is the unbiased priority, i.e. 20 - prio. + This does result in negative return values, so signal + no error */ + force_successful_syscall_return(); + prio = 20 - prio; + } + return prio; +} + +SYSCALL_DEFINE0(getxuid) +{ + current_pt_regs()->r20 = sys_geteuid(); + return sys_getuid(); +} + +SYSCALL_DEFINE0(getxgid) +{ + current_pt_regs()->r20 = sys_getegid(); + return sys_getgid(); +} + +SYSCALL_DEFINE0(getxpid) +{ + current_pt_regs()->r20 = sys_getppid(); + return sys_getpid(); +} + +SYSCALL_DEFINE0(alpha_pipe) +{ + int fd[2]; + int res = do_pipe_flags(fd, 0); + if (!res) { + /* The return values are in $0 and $20. 
*/ + current_pt_regs()->r20 = fd[1]; + res = fd[0]; + } + return res; +} + +SYSCALL_DEFINE1(sethae, unsigned long, val) +{ + current_pt_regs()->hae = val; + return 0; +} diff --git a/trunk/arch/alpha/kernel/process.c b/trunk/arch/alpha/kernel/process.c index 153d3fce3e8e..83638aa096d5 100644 --- a/trunk/arch/alpha/kernel/process.c +++ b/trunk/arch/alpha/kernel/process.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -54,9 +55,12 @@ cpu_idle(void) /* FIXME -- EV6 and LCA45 know how to power down the CPU. */ + rcu_idle_enter(); while (!need_resched()) cpu_relax(); - schedule(); + + rcu_idle_exit(); + schedule_preempt_disabled(); } } @@ -455,3 +459,22 @@ get_wchan(struct task_struct *p) } return pc; } + +int kernel_execve(const char *path, const char *const argv[], const char *const envp[]) +{ + /* Avoid the HAE being gratuitously wrong, which would cause us + to do the whole turn off interrupts thing and restore it. */ + struct pt_regs regs = {.hae = alpha_mv.hae_cache}; + int err = do_execve(path, argv, envp, ®s); + if (!err) { + struct pt_regs *p = current_pt_regs(); + /* copy regs to normal position and off to userland we go... */ + *p = regs; + __asm__ __volatile__ ( + "mov %0, $sp;" + "br $31, ret_from_sys_call" + : : "r"(p)); + } + return err; +} +EXPORT_SYMBOL(kernel_execve); diff --git a/trunk/arch/alpha/kernel/smp.c b/trunk/arch/alpha/kernel/smp.c index 35ddc02bfa4a..a41ad90a97a6 100644 --- a/trunk/arch/alpha/kernel/smp.c +++ b/trunk/arch/alpha/kernel/smp.c @@ -166,6 +166,7 @@ smp_callin(void) DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n", cpuid, current, current->active_mm)); + preempt_disable(); /* Do nothing. */ cpu_idle(); } diff --git a/trunk/arch/alpha/kernel/systbls.S b/trunk/arch/alpha/kernel/systbls.S index 87835235f114..2ac6b45c3e00 100644 --- a/trunk/arch/alpha/kernel/systbls.S +++ b/trunk/arch/alpha/kernel/systbls.S @@ -111,7 +111,7 @@ sys_call_table: .quad sys_socket .quad sys_connect .quad sys_accept - .quad osf_getpriority /* 100 */ + .quad sys_osf_getpriority /* 100 */ .quad sys_send .quad sys_recv .quad sys_sigreturn @@ -522,6 +522,8 @@ sys_call_table: .quad sys_setns .quad sys_accept4 .quad sys_sendmmsg + .quad sys_process_vm_readv + .quad sys_process_vm_writev /* 505 */ .size sys_call_table, . - sys_call_table .type sys_call_table, @object diff --git a/trunk/arch/alpha/lib/Makefile b/trunk/arch/alpha/lib/Makefile index c0a83ab62b78..59660743237c 100644 --- a/trunk/arch/alpha/lib/Makefile +++ b/trunk/arch/alpha/lib/Makefile @@ -31,8 +31,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \ $(ev6-y)memchr.o \ $(ev6-y)copy_user.o \ $(ev6-y)clear_user.o \ - $(ev6-y)strncpy_from_user.o \ - $(ev67-y)strlen_user.o \ $(ev6-y)csum_ipv6_magic.o \ $(ev6-y)clear_page.o \ $(ev6-y)copy_page.o \ diff --git a/trunk/arch/alpha/lib/ev6-strncpy_from_user.S b/trunk/arch/alpha/lib/ev6-strncpy_from_user.S deleted file mode 100644 index d2e28178cacc..000000000000 --- a/trunk/arch/alpha/lib/ev6-strncpy_from_user.S +++ /dev/null @@ -1,424 +0,0 @@ -/* - * arch/alpha/lib/ev6-strncpy_from_user.S - * 21264 version contributed by Rick Gorton - * - * Just like strncpy except in the return value: - * - * -EFAULT if an exception occurs before the terminator is copied. - * N if the buffer filled. - * - * Otherwise the length of the string is returned. 
- * - * Much of the information about 21264 scheduling/coding comes from: - * Compiler Writer's Guide for the Alpha 21264 - * abbreviated as 'CWG' in other comments here - * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html - * Scheduling notation: - * E - either cluster - * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 - * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 - * A bunch of instructions got moved and temp registers were changed - * to aid in scheduling. Control flow was also re-arranged to eliminate - * branches, and to provide longer code sequences to enable better scheduling. - * A total rewrite (using byte load/stores for start & tail sequences) - * is desirable, but very difficult to do without a from-scratch rewrite. - * Save that for the future. - */ - - -#include -#include - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) \ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda $31, $exception-99b($0); \ - .previous - - - .set noat - .set noreorder - .text - - .globl __strncpy_from_user - .ent __strncpy_from_user - .frame $30, 0, $26 - .prologue 0 - - .align 4 -__strncpy_from_user: - and a0, 7, t3 # E : find dest misalignment - beq a2, $zerolength # U : - - /* Are source and destination co-aligned? */ - mov a0, v0 # E : save the string start - xor a0, a1, t4 # E : - EX( ldq_u t1, 0(a1) ) # L : Latency=3 load first quadword - ldq_u t0, 0(a0) # L : load first (partial) aligned dest quadword - - addq a2, t3, a2 # E : bias count by dest misalignment - subq a2, 1, a3 # E : - addq zero, 1, t10 # E : - and t4, 7, t4 # E : misalignment between the two - - and a3, 7, t6 # E : number of tail bytes - sll t10, t6, t10 # E : t10 = bitmask of last count byte - bne t4, $unaligned # U : - lda t2, -1 # E : build a mask against false zero - - /* - * We are co-aligned; take care of a partial first word. - * On entry to this basic block: - * t0 == the first destination word for masking back in - * t1 == the first source word. - */ - - srl a3, 3, a2 # E : a2 = loop counter = (count - 1)/8 - addq a1, 8, a1 # E : - mskqh t2, a1, t2 # U : detection in the src word - nop - - /* Create the 1st output word and detect 0's in the 1st input word. */ - mskqh t1, a1, t3 # U : - mskql t0, a1, t0 # U : assemble the first output word - ornot t1, t2, t2 # E : - nop - - cmpbge zero, t2, t8 # E : bits set iff null found - or t0, t3, t0 # E : - beq a2, $a_eoc # U : - bne t8, $a_eos # U : 2nd branch in a quad. Bad. - - /* On entry to this basic block: - * t0 == a source quad not containing a null. - * a0 - current aligned destination address - * a1 - current aligned source address - * a2 - count of quadwords to move. - * NOTE: Loop improvement - unrolling this is going to be - * a huge win, since we're going to stall otherwise. - * Fix this later. For _really_ large copies, look - * at using wh64 on a look-ahead basis. See the code - * in clear_user.S and copy_user.S. - * Presumably, since (a0) and (a1) do not overlap (by C definition) - * Lots of nops here: - * - Separate loads from stores - * - Keep it to 1 branch/quadpack so the branch predictor - * can train. - */ -$a_loop: - stq_u t0, 0(a0) # L : - addq a0, 8, a0 # E : - nop - subq a2, 1, a2 # E : - - EX( ldq_u t0, 0(a1) ) # L : - addq a1, 8, a1 # E : - cmpbge zero, t0, t8 # E : Stall 2 cycles on t0 - beq a2, $a_eoc # U : - - beq t8, $a_loop # U : - nop - nop - nop - - /* Take care of the final (partial) word store. 
At this point - * the end-of-count bit is set in t8 iff it applies. - * - * On entry to this basic block we have: - * t0 == the source word containing the null - * t8 == the cmpbge mask that found it. - */ -$a_eos: - negq t8, t12 # E : find low bit set - and t8, t12, t12 # E : - - /* We're doing a partial word store and so need to combine - our source and original destination words. */ - ldq_u t1, 0(a0) # L : - subq t12, 1, t6 # E : - - or t12, t6, t8 # E : - zapnot t0, t8, t0 # U : clear src bytes > null - zap t1, t8, t1 # U : clear dst bytes <= null - or t0, t1, t0 # E : - - stq_u t0, 0(a0) # L : - br $finish_up # L0 : - nop - nop - - /* Add the end-of-count bit to the eos detection bitmask. */ - .align 4 -$a_eoc: - or t10, t8, t8 - br $a_eos - nop - nop - - -/* The source and destination are not co-aligned. Align the destination - and cope. We have to be very careful about not reading too much and - causing a SEGV. */ - - .align 4 -$u_head: - /* We know just enough now to be able to assemble the first - full source word. We can still find a zero at the end of it - that prevents us from outputting the whole thing. - - On entry to this basic block: - t0 == the first dest word, unmasked - t1 == the shifted low bits of the first source word - t6 == bytemask that is -1 in dest word bytes */ - - EX( ldq_u t2, 8(a1) ) # L : load second src word - addq a1, 8, a1 # E : - mskql t0, a0, t0 # U : mask trailing garbage in dst - extqh t2, a1, t4 # U : - - or t1, t4, t1 # E : first aligned src word complete - mskqh t1, a0, t1 # U : mask leading garbage in src - or t0, t1, t0 # E : first output word complete - or t0, t6, t6 # E : mask original data for zero test - - cmpbge zero, t6, t8 # E : - beq a2, $u_eocfin # U : - bne t8, $u_final # U : bad news - 2nd branch in a quad - lda t6, -1 # E : mask out the bits we have - - mskql t6, a1, t6 # U : already seen - stq_u t0, 0(a0) # L : store first output word - or t6, t2, t2 # E : - cmpbge zero, t2, t8 # E : find nulls in second partial - - addq a0, 8, a0 # E : - subq a2, 1, a2 # E : - bne t8, $u_late_head_exit # U : - nop - - /* Finally, we've got all the stupid leading edge cases taken care - of and we can set up to enter the main loop. */ - - extql t2, a1, t1 # U : position hi-bits of lo word - EX( ldq_u t2, 8(a1) ) # L : read next high-order source word - addq a1, 8, a1 # E : - cmpbge zero, t2, t8 # E : - - beq a2, $u_eoc # U : - bne t8, $u_eos # U : - nop - nop - - /* Unaligned copy main loop. In order to avoid reading too much, - the loop is structured to detect zeros in aligned source words. - This has, unfortunately, effectively pulled half of a loop - iteration out into the head and half into the tail, but it does - prevent nastiness from accumulating in the very thing we want - to run as fast as possible. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word - - We further know that t2 does not contain a null terminator. 
*/ - - /* - * Extra nops here: - * separate load quads from store quads - * only one branch/quad to permit predictor training - */ - - .align 4 -$u_loop: - extqh t2, a1, t0 # U : extract high bits for current word - addq a1, 8, a1 # E : - extql t2, a1, t3 # U : extract low bits for next time - addq a0, 8, a0 # E : - - or t0, t1, t0 # E : current dst word now complete - EX( ldq_u t2, 0(a1) ) # L : load high word for next time - subq a2, 1, a2 # E : - nop - - stq_u t0, -8(a0) # L : save the current word - mov t3, t1 # E : - cmpbge zero, t2, t8 # E : test new word for eos - beq a2, $u_eoc # U : - - beq t8, $u_loop # U : - nop - nop - nop - - /* We've found a zero somewhere in the source word we just read. - If it resides in the lower half, we have one (probably partial) - word to write out, and if it resides in the upper half, we - have one full and one partial word left to write out. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word. */ - .align 4 -$u_eos: - extqh t2, a1, t0 # U : - or t0, t1, t0 # E : first (partial) source word complete - cmpbge zero, t0, t8 # E : is the null in this first bit? - nop - - bne t8, $u_final # U : - stq_u t0, 0(a0) # L : the null was in the high-order bits - addq a0, 8, a0 # E : - subq a2, 1, a2 # E : - - .align 4 -$u_late_head_exit: - extql t2, a1, t0 # U : - cmpbge zero, t0, t8 # E : - or t8, t10, t6 # E : - cmoveq a2, t6, t8 # E : - - /* Take care of a final (probably partial) result word. - On entry to this basic block: - t0 == assembled source word - t8 == cmpbge mask that found the null. */ - .align 4 -$u_final: - negq t8, t6 # E : isolate low bit set - and t6, t8, t12 # E : - ldq_u t1, 0(a0) # L : - subq t12, 1, t6 # E : - - or t6, t12, t8 # E : - zapnot t0, t8, t0 # U : kill source bytes > null - zap t1, t8, t1 # U : kill dest bytes <= null - or t0, t1, t0 # E : - - stq_u t0, 0(a0) # E : - br $finish_up # U : - nop - nop - - .align 4 -$u_eoc: # end-of-count - extqh t2, a1, t0 # U : - or t0, t1, t0 # E : - cmpbge zero, t0, t8 # E : - nop - - .align 4 -$u_eocfin: # end-of-count, final word - or t10, t8, t8 # E : - br $u_final # U : - nop - nop - - /* Unaligned copy entry point. */ - .align 4 -$unaligned: - - srl a3, 3, a2 # U : a2 = loop counter = (count - 1)/8 - and a0, 7, t4 # E : find dest misalignment - and a1, 7, t5 # E : find src misalignment - mov zero, t0 # E : - - /* Conditionally load the first destination word and a bytemask - with 0xff indicating that the destination byte is sacrosanct. */ - - mov zero, t6 # E : - beq t4, 1f # U : - ldq_u t0, 0(a0) # L : - lda t6, -1 # E : - - mskql t6, a0, t6 # E : - nop - nop - nop - - .align 4 -1: - subq a1, t4, a1 # E : sub dest misalignment from src addr - /* If source misalignment is larger than dest misalignment, we need - extra startup checks to avoid SEGV. */ - cmplt t4, t5, t12 # E : - extql t1, a1, t1 # U : shift src into place - lda t2, -1 # E : for creating masks later - - beq t12, $u_head # U : - mskqh t2, t5, t2 # U : begin src byte validity mask - cmpbge zero, t1, t8 # E : is there a zero? - nop - - extql t2, a1, t2 # U : - or t8, t10, t5 # E : test for end-of-count too - cmpbge zero, t2, t3 # E : - cmoveq a2, t5, t8 # E : Latency=2, extra map slot - - nop # E : goes with cmov - andnot t8, t3, t8 # E : - beq t8, $u_head # U : - nop - - /* At this point we've found a zero in the first partial word of - the source. We need to isolate the valid source data and mask - it into the original destination data. 
(Incidentally, we know - that we'll need at least one byte of that original dest word.) */ - - ldq_u t0, 0(a0) # L : - negq t8, t6 # E : build bitmask of bytes <= zero - mskqh t1, t4, t1 # U : - and t6, t8, t12 # E : - - subq t12, 1, t6 # E : - or t6, t12, t8 # E : - zapnot t2, t8, t2 # U : prepare source word; mirror changes - zapnot t1, t8, t1 # U : to source validity mask - - andnot t0, t2, t0 # E : zero place for source to reside - or t0, t1, t0 # E : and put it there - stq_u t0, 0(a0) # L : - nop - - .align 4 -$finish_up: - zapnot t0, t12, t4 # U : was last byte written null? - and t12, 0xf0, t3 # E : binary search for the address of the - cmovne t4, 1, t4 # E : Latency=2, extra map slot - nop # E : with cmovne - - and t12, 0xcc, t2 # E : last byte written - and t12, 0xaa, t1 # E : - cmovne t3, 4, t3 # E : Latency=2, extra map slot - nop # E : with cmovne - - bic a0, 7, t0 - cmovne t2, 2, t2 # E : Latency=2, extra map slot - nop # E : with cmovne - nop - - cmovne t1, 1, t1 # E : Latency=2, extra map slot - nop # E : with cmovne - addq t0, t3, t0 # E : - addq t1, t2, t1 # E : - - addq t0, t1, t0 # E : - addq t0, t4, t0 # add one if we filled the buffer - subq t0, v0, v0 # find string length - ret # L0 : - - .align 4 -$zerolength: - nop - nop - nop - clr v0 - -$exception: - nop - nop - nop - ret - - .end __strncpy_from_user diff --git a/trunk/arch/alpha/lib/ev67-strlen_user.S b/trunk/arch/alpha/lib/ev67-strlen_user.S deleted file mode 100644 index 57e0d77b81a6..000000000000 --- a/trunk/arch/alpha/lib/ev67-strlen_user.S +++ /dev/null @@ -1,107 +0,0 @@ -/* - * arch/alpha/lib/ev67-strlen_user.S - * 21264 version contributed by Rick Gorton - * - * Return the length of the string including the NULL terminator - * (strlen+1) or zero if an error occurred. - * - * In places where it is critical to limit the processing time, - * and the data is not trusted, strnlen_user() should be used. - * It will return a value greater than its second argument if - * that limit would be exceeded. This implementation is allowed - * to access memory beyond the limit, but will not cross a page - * boundary when doing so. - * - * Much of the information about 21264 scheduling/coding comes from: - * Compiler Writer's Guide for the Alpha 21264 - * abbreviated as 'CWG' in other comments here - * ftp.digital.com/pub/Digital/info/semiconductor/literature/dsc-library.html - * Scheduling notation: - * E - either cluster - * U - upper subcluster; U0 - subcluster U0; U1 - subcluster U1 - * L - lower subcluster; L0 - subcluster L0; L1 - subcluster L1 - * Try not to change the actual algorithm if possible for consistency. - */ - -#include - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) 
\ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda v0, $exception-99b(zero); \ - .previous - - - .set noreorder - .set noat - .text - - .globl __strlen_user - .ent __strlen_user - .frame sp, 0, ra - - .align 4 -__strlen_user: - ldah a1, 32767(zero) # do not use plain strlen_user() for strings - # that might be almost 2 GB long; you should - # be using strnlen_user() instead - nop - nop - nop - - .globl __strnlen_user - - .align 4 -__strnlen_user: - .prologue 0 - EX( ldq_u t0, 0(a0) ) # L : load first quadword (a0 may be misaligned) - lda t1, -1(zero) # E : - - insqh t1, a0, t1 # U : - andnot a0, 7, v0 # E : - or t1, t0, t0 # E : - subq a0, 1, a0 # E : get our +1 for the return - - cmpbge zero, t0, t1 # E : t1 <- bitmask: bit i == 1 <==> i-th byte == 0 - subq a1, 7, t2 # E : - subq a0, v0, t0 # E : - bne t1, $found # U : - - addq t2, t0, t2 # E : - addq a1, 1, a1 # E : - nop # E : - nop # E : - - .align 4 -$loop: ble t2, $limit # U : - EX( ldq t0, 8(v0) ) # L : - nop # E : - nop # E : - - cmpbge zero, t0, t1 # E : - subq t2, 8, t2 # E : - addq v0, 8, v0 # E : addr += 8 - beq t1, $loop # U : - -$found: cttz t1, t2 # U0 : - addq v0, t2, v0 # E : - subq v0, a0, v0 # E : - ret # L0 : - -$exception: - nop - nop - nop - ret - - .align 4 # currently redundant -$limit: - nop - nop - subq a1, t2, v0 - ret - - .end __strlen_user diff --git a/trunk/arch/alpha/lib/strlen_user.S b/trunk/arch/alpha/lib/strlen_user.S deleted file mode 100644 index 508a18e96479..000000000000 --- a/trunk/arch/alpha/lib/strlen_user.S +++ /dev/null @@ -1,91 +0,0 @@ -/* - * arch/alpha/lib/strlen_user.S - * - * Return the length of the string including the NUL terminator - * (strlen+1) or zero if an error occurred. - * - * In places where it is critical to limit the processing time, - * and the data is not trusted, strnlen_user() should be used. - * It will return a value greater than its second argument if - * that limit would be exceeded. This implementation is allowed - * to access memory beyond the limit, but will not cross a page - * boundary when doing so. - */ - -#include - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) 
\ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda v0, $exception-99b(zero); \ - .previous - - - .set noreorder - .set noat - .text - - .globl __strlen_user - .ent __strlen_user - .frame sp, 0, ra - - .align 3 -__strlen_user: - ldah a1, 32767(zero) # do not use plain strlen_user() for strings - # that might be almost 2 GB long; you should - # be using strnlen_user() instead - - .globl __strnlen_user - - .align 3 -__strnlen_user: - .prologue 0 - - EX( ldq_u t0, 0(a0) ) # load first quadword (a0 may be misaligned) - lda t1, -1(zero) - insqh t1, a0, t1 - andnot a0, 7, v0 - or t1, t0, t0 - subq a0, 1, a0 # get our +1 for the return - cmpbge zero, t0, t1 # t1 <- bitmask: bit i == 1 <==> i-th byte == 0 - subq a1, 7, t2 - subq a0, v0, t0 - bne t1, $found - - addq t2, t0, t2 - addq a1, 1, a1 - - .align 3 -$loop: ble t2, $limit - EX( ldq t0, 8(v0) ) - subq t2, 8, t2 - addq v0, 8, v0 # addr += 8 - cmpbge zero, t0, t1 - beq t1, $loop - -$found: negq t1, t2 # clear all but least set bit - and t1, t2, t1 - - and t1, 0xf0, t2 # binary search for that set bit - and t1, 0xcc, t3 - and t1, 0xaa, t4 - cmovne t2, 4, t2 - cmovne t3, 2, t3 - cmovne t4, 1, t4 - addq t2, t3, t2 - addq v0, t4, v0 - addq v0, t2, v0 - nop # dual issue next two on ev4 and ev5 - subq v0, a0, v0 -$exception: - ret - - .align 3 # currently redundant -$limit: - subq a1, t2, v0 - ret - - .end __strlen_user diff --git a/trunk/arch/alpha/lib/strncpy_from_user.S b/trunk/arch/alpha/lib/strncpy_from_user.S deleted file mode 100644 index 73ee21160ff7..000000000000 --- a/trunk/arch/alpha/lib/strncpy_from_user.S +++ /dev/null @@ -1,339 +0,0 @@ -/* - * arch/alpha/lib/strncpy_from_user.S - * Contributed by Richard Henderson (rth@tamu.edu) - * - * Just like strncpy except in the return value: - * - * -EFAULT if an exception occurs before the terminator is copied. - * N if the buffer filled. - * - * Otherwise the length of the string is returned. - */ - - -#include -#include - - -/* Allow an exception for an insn; exit if we get one. */ -#define EX(x,y...) \ - 99: x,##y; \ - .section __ex_table,"a"; \ - .long 99b - .; \ - lda $31, $exception-99b($0); \ - .previous - - - .set noat - .set noreorder - .text - - .globl __strncpy_from_user - .ent __strncpy_from_user - .frame $30, 0, $26 - .prologue 0 - - .align 3 -$aligned: - /* On entry to this basic block: - t0 == the first destination word for masking back in - t1 == the first source word. */ - - /* Create the 1st output word and detect 0's in the 1st input word. */ - lda t2, -1 # e1 : build a mask against false zero - mskqh t2, a1, t2 # e0 : detection in the src word - mskqh t1, a1, t3 # e0 : - ornot t1, t2, t2 # .. e1 : - mskql t0, a1, t0 # e0 : assemble the first output word - cmpbge zero, t2, t8 # .. e1 : bits set iff null found - or t0, t3, t0 # e0 : - beq a2, $a_eoc # .. e1 : - bne t8, $a_eos # .. e1 : - - /* On entry to this basic block: - t0 == a source word not containing a null. */ - -$a_loop: - stq_u t0, 0(a0) # e0 : - addq a0, 8, a0 # .. e1 : - EX( ldq_u t0, 0(a1) ) # e0 : - addq a1, 8, a1 # .. e1 : - subq a2, 1, a2 # e0 : - cmpbge zero, t0, t8 # .. e1 (stall) - beq a2, $a_eoc # e1 : - beq t8, $a_loop # e1 : - - /* Take care of the final (partial) word store. At this point - the end-of-count bit is set in t8 iff it applies. - - On entry to this basic block we have: - t0 == the source word containing the null - t8 == the cmpbge mask that found it. 
*/ - -$a_eos: - negq t8, t12 # e0 : find low bit set - and t8, t12, t12 # e1 (stall) - - /* For the sake of the cache, don't read a destination word - if we're not going to need it. */ - and t12, 0x80, t6 # e0 : - bne t6, 1f # .. e1 (zdb) - - /* We're doing a partial word store and so need to combine - our source and original destination words. */ - ldq_u t1, 0(a0) # e0 : - subq t12, 1, t6 # .. e1 : - or t12, t6, t8 # e0 : - unop # - zapnot t0, t8, t0 # e0 : clear src bytes > null - zap t1, t8, t1 # .. e1 : clear dst bytes <= null - or t0, t1, t0 # e1 : - -1: stq_u t0, 0(a0) - br $finish_up - - /* Add the end-of-count bit to the eos detection bitmask. */ -$a_eoc: - or t10, t8, t8 - br $a_eos - - /*** The Function Entry Point ***/ - .align 3 -__strncpy_from_user: - mov a0, v0 # save the string start - beq a2, $zerolength - - /* Are source and destination co-aligned? */ - xor a0, a1, t1 # e0 : - and a0, 7, t0 # .. e1 : find dest misalignment - and t1, 7, t1 # e0 : - addq a2, t0, a2 # .. e1 : bias count by dest misalignment - subq a2, 1, a2 # e0 : - and a2, 7, t2 # e1 : - srl a2, 3, a2 # e0 : a2 = loop counter = (count - 1)/8 - addq zero, 1, t10 # .. e1 : - sll t10, t2, t10 # e0 : t10 = bitmask of last count byte - bne t1, $unaligned # .. e1 : - - /* We are co-aligned; take care of a partial first word. */ - - EX( ldq_u t1, 0(a1) ) # e0 : load first src word - addq a1, 8, a1 # .. e1 : - - beq t0, $aligned # avoid loading dest word if not needed - ldq_u t0, 0(a0) # e0 : - br $aligned # .. e1 : - - -/* The source and destination are not co-aligned. Align the destination - and cope. We have to be very careful about not reading too much and - causing a SEGV. */ - - .align 3 -$u_head: - /* We know just enough now to be able to assemble the first - full source word. We can still find a zero at the end of it - that prevents us from outputting the whole thing. - - On entry to this basic block: - t0 == the first dest word, unmasked - t1 == the shifted low bits of the first source word - t6 == bytemask that is -1 in dest word bytes */ - - EX( ldq_u t2, 8(a1) ) # e0 : load second src word - addq a1, 8, a1 # .. e1 : - mskql t0, a0, t0 # e0 : mask trailing garbage in dst - extqh t2, a1, t4 # e0 : - or t1, t4, t1 # e1 : first aligned src word complete - mskqh t1, a0, t1 # e0 : mask leading garbage in src - or t0, t1, t0 # e0 : first output word complete - or t0, t6, t6 # e1 : mask original data for zero test - cmpbge zero, t6, t8 # e0 : - beq a2, $u_eocfin # .. e1 : - bne t8, $u_final # e1 : - - lda t6, -1 # e1 : mask out the bits we have - mskql t6, a1, t6 # e0 : already seen - stq_u t0, 0(a0) # e0 : store first output word - or t6, t2, t2 # .. e1 : - cmpbge zero, t2, t8 # e0 : find nulls in second partial - addq a0, 8, a0 # .. e1 : - subq a2, 1, a2 # e0 : - bne t8, $u_late_head_exit # .. e1 : - - /* Finally, we've got all the stupid leading edge cases taken care - of and we can set up to enter the main loop. */ - - extql t2, a1, t1 # e0 : position hi-bits of lo word - EX( ldq_u t2, 8(a1) ) # .. e1 : read next high-order source word - addq a1, 8, a1 # e0 : - cmpbge zero, t2, t8 # e1 (stall) - beq a2, $u_eoc # e1 : - bne t8, $u_eos # e1 : - - /* Unaligned copy main loop. In order to avoid reading too much, - the loop is structured to detect zeros in aligned source words. - This has, unfortunately, effectively pulled half of a loop - iteration out into the head and half into the tail, but it does - prevent nastiness from accumulating in the very thing we want - to run as fast as possible. 
- - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word - - We further know that t2 does not contain a null terminator. */ - - .align 3 -$u_loop: - extqh t2, a1, t0 # e0 : extract high bits for current word - addq a1, 8, a1 # .. e1 : - extql t2, a1, t3 # e0 : extract low bits for next time - addq a0, 8, a0 # .. e1 : - or t0, t1, t0 # e0 : current dst word now complete - EX( ldq_u t2, 0(a1) ) # .. e1 : load high word for next time - stq_u t0, -8(a0) # e0 : save the current word - mov t3, t1 # .. e1 : - subq a2, 1, a2 # e0 : - cmpbge zero, t2, t8 # .. e1 : test new word for eos - beq a2, $u_eoc # e1 : - beq t8, $u_loop # e1 : - - /* We've found a zero somewhere in the source word we just read. - If it resides in the lower half, we have one (probably partial) - word to write out, and if it resides in the upper half, we - have one full and one partial word left to write out. - - On entry to this basic block: - t1 == the shifted high-order bits from the previous source word - t2 == the unshifted current source word. */ -$u_eos: - extqh t2, a1, t0 # e0 : - or t0, t1, t0 # e1 : first (partial) source word complete - - cmpbge zero, t0, t8 # e0 : is the null in this first bit? - bne t8, $u_final # .. e1 (zdb) - - stq_u t0, 0(a0) # e0 : the null was in the high-order bits - addq a0, 8, a0 # .. e1 : - subq a2, 1, a2 # e1 : - -$u_late_head_exit: - extql t2, a1, t0 # .. e0 : - cmpbge zero, t0, t8 # e0 : - or t8, t10, t6 # e1 : - cmoveq a2, t6, t8 # e0 : - nop # .. e1 : - - /* Take care of a final (probably partial) result word. - On entry to this basic block: - t0 == assembled source word - t8 == cmpbge mask that found the null. */ -$u_final: - negq t8, t6 # e0 : isolate low bit set - and t6, t8, t12 # e1 : - - and t12, 0x80, t6 # e0 : avoid dest word load if we can - bne t6, 1f # .. e1 (zdb) - - ldq_u t1, 0(a0) # e0 : - subq t12, 1, t6 # .. e1 : - or t6, t12, t8 # e0 : - zapnot t0, t8, t0 # .. e1 : kill source bytes > null - zap t1, t8, t1 # e0 : kill dest bytes <= null - or t0, t1, t0 # e1 : - -1: stq_u t0, 0(a0) # e0 : - br $finish_up - -$u_eoc: # end-of-count - extqh t2, a1, t0 - or t0, t1, t0 - cmpbge zero, t0, t8 - -$u_eocfin: # end-of-count, final word - or t10, t8, t8 - br $u_final - - /* Unaligned copy entry point. */ - .align 3 -$unaligned: - - EX( ldq_u t1, 0(a1) ) # e0 : load first source word - - and a0, 7, t4 # .. e1 : find dest misalignment - and a1, 7, t5 # e0 : find src misalignment - - /* Conditionally load the first destination word and a bytemask - with 0xff indicating that the destination byte is sacrosanct. */ - - mov zero, t0 # .. e1 : - mov zero, t6 # e0 : - beq t4, 1f # .. e1 : - ldq_u t0, 0(a0) # e0 : - lda t6, -1 # .. e1 : - mskql t6, a0, t6 # e0 : -1: - subq a1, t4, a1 # .. e1 : sub dest misalignment from src addr - - /* If source misalignment is larger than dest misalignment, we need - extra startup checks to avoid SEGV. */ - - cmplt t4, t5, t12 # e1 : - extql t1, a1, t1 # .. e0 : shift src into place - lda t2, -1 # e0 : for creating masks later - beq t12, $u_head # e1 : - - mskqh t2, t5, t2 # e0 : begin src byte validity mask - cmpbge zero, t1, t8 # .. e1 : is there a zero? - extql t2, a1, t2 # e0 : - or t8, t10, t5 # .. e1 : test for end-of-count too - cmpbge zero, t2, t3 # e0 : - cmoveq a2, t5, t8 # .. e1 : - andnot t8, t3, t8 # e0 : - beq t8, $u_head # .. e1 (zdb) - - /* At this point we've found a zero in the first partial word of - the source. 
We need to isolate the valid source data and mask - it into the original destination data. (Incidentally, we know - that we'll need at least one byte of that original dest word.) */ - - ldq_u t0, 0(a0) # e0 : - negq t8, t6 # .. e1 : build bitmask of bytes <= zero - mskqh t1, t4, t1 # e0 : - and t6, t8, t12 # .. e1 : - subq t12, 1, t6 # e0 : - or t6, t12, t8 # e1 : - - zapnot t2, t8, t2 # e0 : prepare source word; mirror changes - zapnot t1, t8, t1 # .. e1 : to source validity mask - - andnot t0, t2, t0 # e0 : zero place for source to reside - or t0, t1, t0 # e1 : and put it there - stq_u t0, 0(a0) # e0 : - -$finish_up: - zapnot t0, t12, t4 # was last byte written null? - cmovne t4, 1, t4 - - and t12, 0xf0, t3 # binary search for the address of the - and t12, 0xcc, t2 # last byte written - and t12, 0xaa, t1 - bic a0, 7, t0 - cmovne t3, 4, t3 - cmovne t2, 2, t2 - cmovne t1, 1, t1 - addq t0, t3, t0 - addq t1, t2, t1 - addq t0, t1, t0 - addq t0, t4, t0 # add one if we filled the buffer - - subq t0, v0, v0 # find string length - ret - -$zerolength: - clr v0 -$exception: - ret - - .end __strncpy_from_user diff --git a/trunk/arch/alpha/mm/fault.c b/trunk/arch/alpha/mm/fault.c index 5eecab1a84ef..0c4132dd3507 100644 --- a/trunk/arch/alpha/mm/fault.c +++ b/trunk/arch/alpha/mm/fault.c @@ -89,6 +89,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr, const struct exception_table_entry *fixup; int fault, si_code = SEGV_MAPERR; siginfo_t info; + unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE | + (cause > 0 ? FAULT_FLAG_WRITE : 0)); /* As of EV6, a load into $31/$f31 is a prefetch, and never faults (or is suppressed by the PALcode). Support that for older CPUs @@ -114,6 +116,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr, goto vmalloc_fault; #endif +retry: down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) @@ -144,8 +147,11 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* If for any reason at all we couldn't handle the fault, make sure we exit gracefully rather than endlessly redo the fault. */ - fault = handle_mm_fault(mm, vma, address, cause > 0 ? FAULT_FLAG_WRITE : 0); - up_read(&mm->mmap_sem); + fault = handle_mm_fault(mm, vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -153,10 +159,26 @@ do_page_fault(unsigned long address, unsigned long mmcsr, goto do_sigbus; BUG(); } - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; + + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) + current->maj_flt++; + else + current->min_flt++; + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + + /* No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. + */ + + goto retry; + } + } + + up_read(&mm->mmap_sem); + return; /* Something tried to access memory that isn't in our memory map. @@ -186,12 +208,14 @@ do_page_fault(unsigned long address, unsigned long mmcsr, /* We ran out of memory, or some other thing happened to us that made us unable to handle the page fault gracefully. */ out_of_memory: + up_read(&mm->mmap_sem); if (!user_mode(regs)) goto no_context; pagefault_out_of_memory(); return; do_sigbus: + up_read(&mm->mmap_sem); /* Send a sigbus, regardless of whether we were in kernel or user mode. 
*/ info.si_signo = SIGBUS; diff --git a/trunk/arch/alpha/oprofile/common.c b/trunk/arch/alpha/oprofile/common.c index a0a5d27aa215..b8ce18f485d3 100644 --- a/trunk/arch/alpha/oprofile/common.c +++ b/trunk/arch/alpha/oprofile/common.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "op_impl.h" diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index e91c7cdc6fe5..2f88d8d97701 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -6,7 +6,7 @@ config ARM select HAVE_DMA_API_DEBUG select HAVE_IDE if PCI || ISA || PCMCIA select HAVE_DMA_ATTRS - select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) + select HAVE_DMA_CONTIGUOUS if MMU select HAVE_MEMBLOCK select RTC_LIB select SYS_SUPPORTS_APM_EMULATION @@ -38,7 +38,6 @@ config ARM select HARDIRQS_SW_RESEND select GENERIC_IRQ_PROBE select GENERIC_IRQ_SHOW - select GENERIC_IRQ_PROBE select ARCH_WANT_IPC_PARSE_VERSION select HARDIRQS_SW_RESEND select CPU_PM if (SUSPEND || CPU_IDLE) @@ -126,11 +125,6 @@ config TRACE_IRQFLAGS_SUPPORT bool default y -config GENERIC_LOCKBREAK - bool - default y - depends on SMP && PREEMPT - config RWSEM_GENERIC_SPINLOCK bool default y @@ -2150,6 +2144,7 @@ source "drivers/cpufreq/Kconfig" config CPU_FREQ_IMX tristate "CPUfreq driver for i.MX CPUs" depends on ARCH_MXC && CPU_FREQ + select CPU_FREQ_TABLE help This enables the CPUfreq driver for i.MX CPUs. diff --git a/trunk/arch/arm/Kconfig.debug b/trunk/arch/arm/Kconfig.debug index f15f82bf3a50..e968a52e4881 100644 --- a/trunk/arch/arm/Kconfig.debug +++ b/trunk/arch/arm/Kconfig.debug @@ -356,15 +356,15 @@ choice is nothing connected to read from the DCC. config DEBUG_SEMIHOSTING - bool "Kernel low-level debug output via semihosting I" + bool "Kernel low-level debug output via semihosting I/O" help Semihosting enables code running on an ARM target to use the I/O facilities on a host debugger/emulator through a - simple SVC calls. The host debugger or emulator must have + simple SVC call. The host debugger or emulator must have semihosting enabled for the special svc call to be trapped otherwise the kernel will crash. - This is known to work with OpenOCD, as wellas + This is known to work with OpenOCD, as well as ARM's Fast Models, or any other controlling environment that implements semihosting. 
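For reference, the semihosting mechanism that DEBUG_SEMIHOSTING relies on boils down to a single trap instruction that the host debugger intercepts. The sketch below is illustrative only and not part of this patch; it assumes ARM-state code, the published SYS_WRITE0 (0x04) semihosting operation, and a host debugger or emulator with semihosting enabled, and the helper name is made up for the example:

	/*
	 * Illustrative only, not part of this patch: send a NUL-terminated
	 * string to the host debugger via semihosting SYS_WRITE0 (0x04).
	 * Assumes ARM-state code; Thumb code would trap with "svc 0xab".
	 */
	static void semihosting_write0(const char *str)
	{
		register unsigned long op asm("r0") = 0x04;	/* SYS_WRITE0 */
		register const char *arg asm("r1") = str;	/* string to print */

		asm volatile("svc 0x123456"	/* ARM-state semihosting trap */
			     : "+r" (op)
			     : "r" (arg)
			     : "memory", "cc");
	}

The host performs the I/O and then resumes the target, which is why, as the help text above warns, the kernel crashes if nothing is attached to trap the SVC.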
diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile index 30eae87ead6d..a051dfbdd7db 100644 --- a/trunk/arch/arm/Makefile +++ b/trunk/arch/arm/Makefile @@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux zinstall uinstall install: vmlinux $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@ -%.dtb: +%.dtb: scripts $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ -dtbs: +dtbs: scripts $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@ # We use MRPROPER_FILES and CLEAN_FILES now diff --git a/trunk/arch/arm/boot/compressed/head.S b/trunk/arch/arm/boot/compressed/head.S index b8c64b80bafc..bc67cbff3944 100644 --- a/trunk/arch/arm/boot/compressed/head.S +++ b/trunk/arch/arm/boot/compressed/head.S @@ -653,16 +653,21 @@ __armv7_mmu_cache_on: mcrne p15, 0, r0, c8, c7, 0 @ flush I,D TLBs #endif mrc p15, 0, r0, c1, c0, 0 @ read control reg + bic r0, r0, #1 << 28 @ clear SCTLR.TRE orr r0, r0, #0x5000 @ I-cache enable, RR cache replacement orr r0, r0, #0x003c @ write buffer #ifdef CONFIG_MMU #ifdef CONFIG_CPU_ENDIAN_BE8 orr r0, r0, #1 << 25 @ big-endian page tables #endif + mrcne p15, 0, r6, c2, c0, 2 @ read ttb control reg orrne r0, r0, #1 @ MMU enabled movne r1, #0xfffffffd @ domain 0 = client + bic r6, r6, #1 << 31 @ 32-bit translation system + bic r6, r6, #3 << 0 @ use only ttbr0 mcrne p15, 0, r3, c2, c0, 0 @ load page table pointer mcrne p15, 0, r1, c3, c0, 0 @ load domain access control + mcrne p15, 0, r6, c2, c0, 2 @ load ttb control #endif mcr p15, 0, r0, c7, c5, 4 @ ISB mcr p15, 0, r0, c1, c0, 0 @ load control register diff --git a/trunk/arch/arm/boot/dts/am33xx.dtsi b/trunk/arch/arm/boot/dts/am33xx.dtsi index 59509c48d7e5..bd0cff3f808c 100644 --- a/trunk/arch/arm/boot/dts/am33xx.dtsi +++ b/trunk/arch/arm/boot/dts/am33xx.dtsi @@ -154,5 +154,10 @@ #size-cells = <0>; ti,hwmods = "i2c3"; }; + + wdt2: wdt@44e35000 { + compatible = "ti,omap3-wdt"; + ti,hwmods = "wd_timer2"; + }; }; }; diff --git a/trunk/arch/arm/boot/dts/at91sam9260.dtsi b/trunk/arch/arm/boot/dts/at91sam9260.dtsi index 66389c1c6f62..7c95f76398de 100644 --- a/trunk/arch/arm/boot/dts/at91sam9260.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9260.dtsi @@ -104,6 +104,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff600 { @@ -113,6 +114,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff800 { @@ -122,6 +124,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@fffff200 { diff --git a/trunk/arch/arm/boot/dts/at91sam9263.dtsi b/trunk/arch/arm/boot/dts/at91sam9263.dtsi index b460d6ce9eb5..195019b7ca0e 100644 --- a/trunk/arch/arm/boot/dts/at91sam9263.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9263.dtsi @@ -95,6 +95,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff400 { @@ -104,6 +105,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff600 { @@ -113,6 +115,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioD: gpio@fffff800 { @@ -122,6 +125,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioE: gpio@fffffa00 { @@ -131,6 +135,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@ffffee00 { diff --git a/trunk/arch/arm/boot/dts/at91sam9g25ek.dts b/trunk/arch/arm/boot/dts/at91sam9g25ek.dts 
index 7829a4d0cb22..96514c134e54 100644 --- a/trunk/arch/arm/boot/dts/at91sam9g25ek.dts +++ b/trunk/arch/arm/boot/dts/at91sam9g25ek.dts @@ -15,7 +15,7 @@ compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; chosen { - bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; + bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs"; }; ahb { diff --git a/trunk/arch/arm/boot/dts/at91sam9g45.dtsi b/trunk/arch/arm/boot/dts/at91sam9g45.dtsi index bafa8806fc17..63751b1e744b 100644 --- a/trunk/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9g45.dtsi @@ -113,6 +113,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff400 { @@ -122,6 +123,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff600 { @@ -131,6 +133,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioD: gpio@fffff800 { @@ -140,6 +143,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioE: gpio@fffffa00 { @@ -149,6 +153,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@ffffee00 { diff --git a/trunk/arch/arm/boot/dts/at91sam9n12.dtsi b/trunk/arch/arm/boot/dts/at91sam9n12.dtsi index bfac0dfc332c..ef9336ae9614 100644 --- a/trunk/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9n12.dtsi @@ -107,6 +107,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff600 { @@ -116,6 +117,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff800 { @@ -125,6 +127,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioD: gpio@fffffa00 { @@ -134,6 +137,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@fffff200 { diff --git a/trunk/arch/arm/boot/dts/at91sam9x5.dtsi b/trunk/arch/arm/boot/dts/at91sam9x5.dtsi index 4a18c393b136..8a387a8d61b7 100644 --- a/trunk/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9x5.dtsi @@ -115,6 +115,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioB: gpio@fffff600 { @@ -124,6 +125,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioC: gpio@fffff800 { @@ -133,6 +135,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; pioD: gpio@fffffa00 { @@ -142,6 +145,7 @@ #gpio-cells = <2>; gpio-controller; interrupt-controller; + #interrupt-cells = <2>; }; dbgu: serial@fffff200 { diff --git a/trunk/arch/arm/boot/dts/imx23.dtsi b/trunk/arch/arm/boot/dts/imx23.dtsi index a874dbfb5ae6..e6138310e5ce 100644 --- a/trunk/arch/arm/boot/dts/imx23.dtsi +++ b/trunk/arch/arm/boot/dts/imx23.dtsi @@ -51,11 +51,11 @@ dma-apbh@80004000 { compatible = "fsl,imx23-dma-apbh"; - reg = <0x80004000 2000>; + reg = <0x80004000 0x2000>; }; ecc@80008000 { - reg = <0x80008000 2000>; + reg = <0x80008000 0x2000>; status = "disabled"; }; @@ -63,7 +63,7 @@ compatible = "fsl,imx23-gpmi-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>; reg-names = "gpmi-nand", "bch"; interrupts = <13>, <56>; interrupt-names = 
"gpmi-dma", "bch"; @@ -72,14 +72,14 @@ }; ssp0: ssp@80010000 { - reg = <0x80010000 2000>; + reg = <0x80010000 0x2000>; interrupts = <15 14>; fsl,ssp-dma-channel = <1>; status = "disabled"; }; etm@80014000 { - reg = <0x80014000 2000>; + reg = <0x80014000 0x2000>; status = "disabled"; }; @@ -87,7 +87,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx23-pinctrl", "simple-bus"; - reg = <0x80018000 2000>; + reg = <0x80018000 0x2000>; gpio0: gpio@0 { compatible = "fsl,imx23-gpio", "fsl,mxs-gpio"; @@ -273,32 +273,32 @@ }; emi@80020000 { - reg = <0x80020000 2000>; + reg = <0x80020000 0x2000>; status = "disabled"; }; dma-apbx@80024000 { compatible = "fsl,imx23-dma-apbx"; - reg = <0x80024000 2000>; + reg = <0x80024000 0x2000>; }; dcp@80028000 { - reg = <0x80028000 2000>; + reg = <0x80028000 0x2000>; status = "disabled"; }; pxp@8002a000 { - reg = <0x8002a000 2000>; + reg = <0x8002a000 0x2000>; status = "disabled"; }; ocotp@8002c000 { - reg = <0x8002c000 2000>; + reg = <0x8002c000 0x2000>; status = "disabled"; }; axi-ahb@8002e000 { - reg = <0x8002e000 2000>; + reg = <0x8002e000 0x2000>; status = "disabled"; }; @@ -310,14 +310,14 @@ }; ssp1: ssp@80034000 { - reg = <0x80034000 2000>; + reg = <0x80034000 0x2000>; interrupts = <2 20>; fsl,ssp-dma-channel = <2>; status = "disabled"; }; tvenc@80038000 { - reg = <0x80038000 2000>; + reg = <0x80038000 0x2000>; status = "disabled"; }; }; @@ -330,37 +330,37 @@ ranges; clkctl@80040000 { - reg = <0x80040000 2000>; + reg = <0x80040000 0x2000>; status = "disabled"; }; saif0: saif@80042000 { - reg = <0x80042000 2000>; + reg = <0x80042000 0x2000>; status = "disabled"; }; power@80044000 { - reg = <0x80044000 2000>; + reg = <0x80044000 0x2000>; status = "disabled"; }; saif1: saif@80046000 { - reg = <0x80046000 2000>; + reg = <0x80046000 0x2000>; status = "disabled"; }; audio-out@80048000 { - reg = <0x80048000 2000>; + reg = <0x80048000 0x2000>; status = "disabled"; }; audio-in@8004c000 { - reg = <0x8004c000 2000>; + reg = <0x8004c000 0x2000>; status = "disabled"; }; lradc@80050000 { - reg = <0x80050000 2000>; + reg = <0x80050000 0x2000>; status = "disabled"; }; @@ -370,26 +370,26 @@ }; i2c@80058000 { - reg = <0x80058000 2000>; + reg = <0x80058000 0x2000>; status = "disabled"; }; rtc@8005c000 { compatible = "fsl,imx23-rtc", "fsl,stmp3xxx-rtc"; - reg = <0x8005c000 2000>; + reg = <0x8005c000 0x2000>; interrupts = <22>; }; pwm: pwm@80064000 { compatible = "fsl,imx23-pwm"; - reg = <0x80064000 2000>; + reg = <0x80064000 0x2000>; #pwm-cells = <2>; fsl,pwm-number = <5>; status = "disabled"; }; timrot@80068000 { - reg = <0x80068000 2000>; + reg = <0x80068000 0x2000>; status = "disabled"; }; @@ -429,7 +429,7 @@ ranges; usbctrl@80080000 { - reg = <0x80080000 0x10000>; + reg = <0x80080000 0x40000>; status = "disabled"; }; }; diff --git a/trunk/arch/arm/boot/dts/imx27-3ds.dts b/trunk/arch/arm/boot/dts/imx27-3ds.dts index d3f8296e19e0..0a8978a40ece 100644 --- a/trunk/arch/arm/boot/dts/imx27-3ds.dts +++ b/trunk/arch/arm/boot/dts/imx27-3ds.dts @@ -27,7 +27,7 @@ status = "okay"; }; - uart@1000a000 { + uart1: serial@1000a000 { fsl,uart-has-rtscts; status = "okay"; }; diff --git a/trunk/arch/arm/boot/dts/imx27.dtsi b/trunk/arch/arm/boot/dts/imx27.dtsi index 00bae3aad5ab..5303ab680a34 100644 --- a/trunk/arch/arm/boot/dts/imx27.dtsi +++ b/trunk/arch/arm/boot/dts/imx27.dtsi @@ -19,6 +19,12 @@ serial3 = &uart4; serial4 = &uart5; serial5 = &uart6; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; }; avic: 
avic-interrupt-controller@e0000000 { diff --git a/trunk/arch/arm/boot/dts/imx28.dtsi b/trunk/arch/arm/boot/dts/imx28.dtsi index 787efac68da8..3fa6d190fab4 100644 --- a/trunk/arch/arm/boot/dts/imx28.dtsi +++ b/trunk/arch/arm/boot/dts/imx28.dtsi @@ -57,18 +57,18 @@ }; hsadc@80002000 { - reg = <0x80002000 2000>; + reg = <0x80002000 0x2000>; interrupts = <13 87>; status = "disabled"; }; dma-apbh@80004000 { compatible = "fsl,imx28-dma-apbh"; - reg = <0x80004000 2000>; + reg = <0x80004000 0x2000>; }; perfmon@80006000 { - reg = <0x80006000 800>; + reg = <0x80006000 0x800>; interrupts = <27>; status = "disabled"; }; @@ -77,7 +77,7 @@ compatible = "fsl,imx28-gpmi-nand"; #address-cells = <1>; #size-cells = <1>; - reg = <0x8000c000 2000>, <0x8000a000 2000>; + reg = <0x8000c000 0x2000>, <0x8000a000 0x2000>; reg-names = "gpmi-nand", "bch"; interrupts = <88>, <41>; interrupt-names = "gpmi-dma", "bch"; @@ -86,28 +86,28 @@ }; ssp0: ssp@80010000 { - reg = <0x80010000 2000>; + reg = <0x80010000 0x2000>; interrupts = <96 82>; fsl,ssp-dma-channel = <0>; status = "disabled"; }; ssp1: ssp@80012000 { - reg = <0x80012000 2000>; + reg = <0x80012000 0x2000>; interrupts = <97 83>; fsl,ssp-dma-channel = <1>; status = "disabled"; }; ssp2: ssp@80014000 { - reg = <0x80014000 2000>; + reg = <0x80014000 0x2000>; interrupts = <98 84>; fsl,ssp-dma-channel = <2>; status = "disabled"; }; ssp3: ssp@80016000 { - reg = <0x80016000 2000>; + reg = <0x80016000 0x2000>; interrupts = <99 85>; fsl,ssp-dma-channel = <3>; status = "disabled"; @@ -117,7 +117,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-pinctrl", "simple-bus"; - reg = <0x80018000 2000>; + reg = <0x80018000 0x2000>; gpio0: gpio@0 { compatible = "fsl,imx28-gpio", "fsl,mxs-gpio"; @@ -510,96 +510,96 @@ }; digctl@8001c000 { - reg = <0x8001c000 2000>; + reg = <0x8001c000 0x2000>; interrupts = <89>; status = "disabled"; }; etm@80022000 { - reg = <0x80022000 2000>; + reg = <0x80022000 0x2000>; status = "disabled"; }; dma-apbx@80024000 { compatible = "fsl,imx28-dma-apbx"; - reg = <0x80024000 2000>; + reg = <0x80024000 0x2000>; }; dcp@80028000 { - reg = <0x80028000 2000>; + reg = <0x80028000 0x2000>; interrupts = <52 53 54>; status = "disabled"; }; pxp@8002a000 { - reg = <0x8002a000 2000>; + reg = <0x8002a000 0x2000>; interrupts = <39>; status = "disabled"; }; ocotp@8002c000 { - reg = <0x8002c000 2000>; + reg = <0x8002c000 0x2000>; status = "disabled"; }; axi-ahb@8002e000 { - reg = <0x8002e000 2000>; + reg = <0x8002e000 0x2000>; status = "disabled"; }; lcdif@80030000 { compatible = "fsl,imx28-lcdif"; - reg = <0x80030000 2000>; + reg = <0x80030000 0x2000>; interrupts = <38 86>; status = "disabled"; }; can0: can@80032000 { compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan"; - reg = <0x80032000 2000>; + reg = <0x80032000 0x2000>; interrupts = <8>; status = "disabled"; }; can1: can@80034000 { compatible = "fsl,imx28-flexcan", "fsl,p1010-flexcan"; - reg = <0x80034000 2000>; + reg = <0x80034000 0x2000>; interrupts = <9>; status = "disabled"; }; simdbg@8003c000 { - reg = <0x8003c000 200>; + reg = <0x8003c000 0x200>; status = "disabled"; }; simgpmisel@8003c200 { - reg = <0x8003c200 100>; + reg = <0x8003c200 0x100>; status = "disabled"; }; simsspsel@8003c300 { - reg = <0x8003c300 100>; + reg = <0x8003c300 0x100>; status = "disabled"; }; simmemsel@8003c400 { - reg = <0x8003c400 100>; + reg = <0x8003c400 0x100>; status = "disabled"; }; gpiomon@8003c500 { - reg = <0x8003c500 100>; + reg = <0x8003c500 0x100>; status = "disabled"; }; simenet@8003c700 { - reg = 
<0x8003c700 100>; + reg = <0x8003c700 0x100>; status = "disabled"; }; armjtag@8003c800 { - reg = <0x8003c800 100>; + reg = <0x8003c800 0x100>; status = "disabled"; }; }; @@ -612,45 +612,45 @@ ranges; clkctl@80040000 { - reg = <0x80040000 2000>; + reg = <0x80040000 0x2000>; status = "disabled"; }; saif0: saif@80042000 { compatible = "fsl,imx28-saif"; - reg = <0x80042000 2000>; + reg = <0x80042000 0x2000>; interrupts = <59 80>; fsl,saif-dma-channel = <4>; status = "disabled"; }; power@80044000 { - reg = <0x80044000 2000>; + reg = <0x80044000 0x2000>; status = "disabled"; }; saif1: saif@80046000 { compatible = "fsl,imx28-saif"; - reg = <0x80046000 2000>; + reg = <0x80046000 0x2000>; interrupts = <58 81>; fsl,saif-dma-channel = <5>; status = "disabled"; }; lradc@80050000 { - reg = <0x80050000 2000>; + reg = <0x80050000 0x2000>; status = "disabled"; }; spdif@80054000 { - reg = <0x80054000 2000>; + reg = <0x80054000 0x2000>; interrupts = <45 66>; status = "disabled"; }; rtc@80056000 { compatible = "fsl,imx28-rtc", "fsl,stmp3xxx-rtc"; - reg = <0x80056000 2000>; + reg = <0x80056000 0x2000>; interrupts = <29>; }; @@ -658,7 +658,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-i2c"; - reg = <0x80058000 2000>; + reg = <0x80058000 0x2000>; interrupts = <111 68>; clock-frequency = <100000>; status = "disabled"; @@ -668,7 +668,7 @@ #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx28-i2c"; - reg = <0x8005a000 2000>; + reg = <0x8005a000 0x2000>; interrupts = <110 69>; clock-frequency = <100000>; status = "disabled"; @@ -676,14 +676,14 @@ pwm: pwm@80064000 { compatible = "fsl,imx28-pwm", "fsl,imx23-pwm"; - reg = <0x80064000 2000>; + reg = <0x80064000 0x2000>; #pwm-cells = <2>; fsl,pwm-number = <8>; status = "disabled"; }; timrot@80068000 { - reg = <0x80068000 2000>; + reg = <0x80068000 0x2000>; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/imx51-babbage.dts b/trunk/arch/arm/boot/dts/imx51-babbage.dts index de065b5976e6..59d9789e5508 100644 --- a/trunk/arch/arm/boot/dts/imx51-babbage.dts +++ b/trunk/arch/arm/boot/dts/imx51-babbage.dts @@ -25,8 +25,8 @@ aips@70000000 { /* aips-1 */ spba@70000000 { esdhc@70004000 { /* ESDHC1 */ - fsl,cd-internal; - fsl,wp-internal; + fsl,cd-controller; + fsl,wp-controller; status = "okay"; }; @@ -53,7 +53,7 @@ spi-max-frequency = <6000000>; reg = <0>; interrupt-parent = <&gpio1>; - interrupts = <8>; + interrupts = <8 0x4>; regulators { sw1_reg: sw1 { diff --git a/trunk/arch/arm/boot/dts/imx51.dtsi b/trunk/arch/arm/boot/dts/imx51.dtsi index 53cbaa3d4f90..aba28dc87fc8 100644 --- a/trunk/arch/arm/boot/dts/imx51.dtsi +++ b/trunk/arch/arm/boot/dts/imx51.dtsi @@ -17,6 +17,10 @@ serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; }; tzic: tz-interrupt-controller@e0000000 { diff --git a/trunk/arch/arm/boot/dts/imx53-ard.dts b/trunk/arch/arm/boot/dts/imx53-ard.dts index 5b8eafcdbeec..da895e93a999 100644 --- a/trunk/arch/arm/boot/dts/imx53-ard.dts +++ b/trunk/arch/arm/boot/dts/imx53-ard.dts @@ -64,12 +64,32 @@ reg = <0xf4000000 0x2000000>; phy-mode = "mii"; interrupt-parent = <&gpio2>; - interrupts = <31>; + interrupts = <31 0x8>; reg-io-width = <4>; + /* + * VDD33A and VDDVARIO of LAN9220 are supplied by + * SW4_3V3 of LTC3589. Before the regulator driver + * for this PMIC is available, we use a fixed dummy + * 3V3 regulator to get LAN9220 driver probing work. 
+ */ + vdd33a-supply = <&reg_3p3v>; + vddvario-supply = <&reg_3p3v>; smsc,irq-push-pull; }; }; + regulators { + compatible = "simple-bus"; + + reg_3p3v: 3p3v { + compatible = "regulator-fixed"; + regulator-name = "3P3V"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + }; + }; + gpio-keys { compatible = "gpio-keys";
diff --git a/trunk/arch/arm/boot/dts/imx53.dtsi b/trunk/arch/arm/boot/dts/imx53.dtsi index fc79cdc4b4e6..cd37165edce5 100644 --- a/trunk/arch/arm/boot/dts/imx53.dtsi +++ b/trunk/arch/arm/boot/dts/imx53.dtsi @@ -19,6 +19,13 @@ serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; + gpio6 = &gpio7; }; tzic: tz-interrupt-controller@0fffc000 {
diff --git a/trunk/arch/arm/boot/dts/imx6q-sabrelite.dts b/trunk/arch/arm/boot/dts/imx6q-sabrelite.dts index d42e851ceb97..72f30f3e6171 100644 --- a/trunk/arch/arm/boot/dts/imx6q-sabrelite.dts +++ b/trunk/arch/arm/boot/dts/imx6q-sabrelite.dts @@ -53,6 +53,7 @@ fsl,pins = < 144 0x80000000 /* MX6Q_PAD_EIM_D22__GPIO_3_22 */ 121 0x80000000 /* MX6Q_PAD_EIM_D19__GPIO_3_19 */ + 953 0x80000000 /* MX6Q_PAD_GPIO_0__CCM_CLKO */ >; }; };
diff --git a/trunk/arch/arm/boot/dts/imx6q.dtsi b/trunk/arch/arm/boot/dts/imx6q.dtsi index 3d3c64b014e6..fd57079f71a9 100644 --- a/trunk/arch/arm/boot/dts/imx6q.dtsi +++ b/trunk/arch/arm/boot/dts/imx6q.dtsi @@ -19,6 +19,13 @@ serial2 = &uart3; serial3 = &uart4; serial4 = &uart5; + gpio0 = &gpio1; + gpio1 = &gpio2; + gpio2 = &gpio3; + gpio3 = &gpio4; + gpio4 = &gpio5; + gpio5 = &gpio6; + gpio6 = &gpio7; }; cpus {
diff --git a/trunk/arch/arm/boot/dts/kirkwood-iconnect.dts b/trunk/arch/arm/boot/dts/kirkwood-iconnect.dts index 52d947045106..f8ca6fa88192 100644 --- a/trunk/arch/arm/boot/dts/kirkwood-iconnect.dts +++ b/trunk/arch/arm/boot/dts/kirkwood-iconnect.dts @@ -41,9 +41,13 @@ }; power-blue { label = "power:blue"; - gpios = <&gpio1 11 0>; + gpios = <&gpio1 10 0>; linux,default-trigger = "timer"; }; + power-red { + label = "power:red"; + gpios = <&gpio1 11 0>; + }; usb1 { label = "usb1:blue"; gpios = <&gpio1 12 0>;
diff --git a/trunk/arch/arm/boot/dts/twl6030.dtsi b/trunk/arch/arm/boot/dts/twl6030.dtsi index 3b2f3510d7eb..d351b27d7213 100644 --- a/trunk/arch/arm/boot/dts/twl6030.dtsi +++ b/trunk/arch/arm/boot/dts/twl6030.dtsi @@ -66,6 +66,7 @@ vcxio: regulator@8 { compatible = "ti,twl6030-vcxio"; + regulator-always-on; }; vusb: regulator@9 { @@ -74,10 +75,12 @@ v1v8: regulator@10 { compatible = "ti,twl6030-v1v8"; + regulator-always-on; }; v2v1: regulator@11 { compatible = "ti,twl6030-v2v1"; + regulator-always-on; }; clk32kg: regulator@12 {
diff --git a/trunk/arch/arm/configs/armadillo800eva_defconfig b/trunk/arch/arm/configs/armadillo800eva_defconfig index 7d8718468e0d..90610c7030f7 100644 --- a/trunk/arch/arm/configs/armadillo800eva_defconfig +++ b/trunk/arch/arm/configs/armadillo800eva_defconfig @@ -33,7 +33,7 @@ CONFIG_AEABI=y CONFIG_FORCE_MAX_ZONEORDER=13 CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 -CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096" +CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw" CONFIG_CMDLINE_FORCE=y CONFIG_KEXEC=y CONFIG_VFP=y
diff --git a/trunk/arch/arm/configs/imx_v6_v7_defconfig b/trunk/arch/arm/configs/imx_v6_v7_defconfig index
f725b9637b33..3c9f32f9b6b4 100644 --- a/trunk/arch/arm/configs/imx_v6_v7_defconfig +++ b/trunk/arch/arm/configs/imx_v6_v7_defconfig @@ -192,6 +192,7 @@ CONFIG_RTC_DRV_MC13XXX=y CONFIG_RTC_DRV_MXC=y CONFIG_DMADEVICES=y CONFIG_IMX_SDMA=y +CONFIG_MXS_DMA=y CONFIG_COMMON_CLK_DEBUG=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_EXT2_FS=y diff --git a/trunk/arch/arm/configs/mxs_defconfig b/trunk/arch/arm/configs/mxs_defconfig index ccdb6357fb74..4edcfb4e4dee 100644 --- a/trunk/arch/arm/configs/mxs_defconfig +++ b/trunk/arch/arm/configs/mxs_defconfig @@ -34,7 +34,6 @@ CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_PREEMPT_VOLUNTARY=y CONFIG_AEABI=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 CONFIG_AUTO_ZRELADDR=y CONFIG_FPE_NWFPE=y CONFIG_NET=y diff --git a/trunk/arch/arm/configs/tct_hammer_defconfig b/trunk/arch/arm/configs/tct_hammer_defconfig index 1d24f8458bef..71277a1591ba 100644 --- a/trunk/arch/arm/configs/tct_hammer_defconfig +++ b/trunk/arch/arm/configs/tct_hammer_defconfig @@ -7,7 +7,7 @@ CONFIG_SYSFS_DEPRECATED_V2=y CONFIG_BLK_DEV_INITRD=y CONFIG_EXPERT=y # CONFIG_KALLSYMS is not set -# CONFIG_BUG is not set +# CONFIG_BUGVERBOSE is not set # CONFIG_ELF_CORE is not set # CONFIG_SHMEM is not set CONFIG_SLOB=y diff --git a/trunk/arch/arm/configs/u8500_defconfig b/trunk/arch/arm/configs/u8500_defconfig index 2d4f661d1cf6..da6845493caa 100644 --- a/trunk/arch/arm/configs/u8500_defconfig +++ b/trunk/arch/arm/configs/u8500_defconfig @@ -86,6 +86,7 @@ CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_LM3530=y CONFIG_LEDS_LP5521=y +CONFIG_LEDS_GPIO=y CONFIG_RTC_CLASS=y CONFIG_RTC_DRV_AB8500=y CONFIG_RTC_DRV_PL031=y diff --git a/trunk/arch/arm/include/asm/assembler.h b/trunk/arch/arm/include/asm/assembler.h index 03fb93621d0d..5c8b3bf4d825 100644 --- a/trunk/arch/arm/include/asm/assembler.h +++ b/trunk/arch/arm/include/asm/assembler.h @@ -320,4 +320,12 @@ .size \name , . - \name .endm + .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req +#ifndef CONFIG_CPU_USE_DOMAINS + adds \tmp, \addr, #\size - 1 + sbcccs \tmp, \tmp, \limit + bcs \bad +#endif + .endm + #endif /* __ASM_ASSEMBLER_H__ */ diff --git a/trunk/arch/arm/include/asm/dma-mapping.h b/trunk/arch/arm/include/asm/dma-mapping.h index 2ae842df4551..5c44dcb0987b 100644 --- a/trunk/arch/arm/include/asm/dma-mapping.h +++ b/trunk/arch/arm/include/asm/dma-mapping.h @@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size, return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs); } +/* + * This can be called during early boot to increase the size of the atomic + * coherent DMA pool above the default value of 256KiB. It must be called + * before postcore_initcall. + */ +extern void __init init_dma_coherent_pool_size(unsigned long size); + /* * This can be called during boot to increase the size of the consistent * DMA region above it's default value of 2MB. 
It must be called before the diff --git a/trunk/arch/arm/include/asm/memory.h b/trunk/arch/arm/include/asm/memory.h index e965f1b560f1..5f6ddcc56452 100644 --- a/trunk/arch/arm/include/asm/memory.h +++ b/trunk/arch/arm/include/asm/memory.h @@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x) #define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) #endif #endif +#endif /* __ASSEMBLY__ */ #ifndef PHYS_OFFSET #ifdef PLAT_PHYS_OFFSET @@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x) #endif #endif +#ifndef __ASSEMBLY__ + /* * PFNs are used to describe any physical page; this means * PFN 0 == physical address 0. diff --git a/trunk/arch/arm/include/asm/pgtable.h b/trunk/arch/arm/include/asm/pgtable.h index f66626d71e7d..41dc31f834c3 100644 --- a/trunk/arch/arm/include/asm/pgtable.h +++ b/trunk/arch/arm/include/asm/pgtable.h @@ -195,6 +195,18 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) #define pte_clear(mm,addr,ptep) set_pte_ext(ptep, __pte(0), 0) +#define pte_none(pte) (!pte_val(pte)) +#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) +#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) +#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) +#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) +#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) +#define pte_special(pte) (0) + +#define pte_present_user(pte) \ + ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ + (L_PTE_PRESENT | L_PTE_USER)) + #if __LINUX_ARM_ARCH__ < 6 static inline void __sync_icache_dcache(pte_t pteval) { @@ -206,25 +218,15 @@ extern void __sync_icache_dcache(pte_t pteval); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval) { - if (addr >= TASK_SIZE) - set_pte_ext(ptep, pteval, 0); - else { + unsigned long ext = 0; + + if (addr < TASK_SIZE && pte_present_user(pteval)) { __sync_icache_dcache(pteval); - set_pte_ext(ptep, pteval, PTE_EXT_NG); + ext |= PTE_EXT_NG; } -} -#define pte_none(pte) (!pte_val(pte)) -#define pte_present(pte) (pte_val(pte) & L_PTE_PRESENT) -#define pte_write(pte) (!(pte_val(pte) & L_PTE_RDONLY)) -#define pte_dirty(pte) (pte_val(pte) & L_PTE_DIRTY) -#define pte_young(pte) (pte_val(pte) & L_PTE_YOUNG) -#define pte_exec(pte) (!(pte_val(pte) & L_PTE_XN)) -#define pte_special(pte) (0) - -#define pte_present_user(pte) \ - ((pte_val(pte) & (L_PTE_PRESENT | L_PTE_USER)) == \ - (L_PTE_PRESENT | L_PTE_USER)) + set_pte_ext(ptep, pteval, ext); +} #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -251,13 +253,13 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) * * 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 - * <--------------- offset --------------------> <- type --> 0 0 0 + * <--------------- offset ----------------------> < type -> 0 0 0 * - * This gives us up to 63 swap files and 32GB per swap file. Note that + * This gives us up to 31 swap files and 64GB per swap file. Note that * the offset field is always non-zero. 
*/ #define __SWP_TYPE_SHIFT 3 -#define __SWP_TYPE_BITS 6 +#define __SWP_TYPE_BITS 5 #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1) #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) diff --git a/trunk/arch/arm/include/asm/sched_clock.h b/trunk/arch/arm/include/asm/sched_clock.h index e3f757263438..05b8e82ec9f5 100644 --- a/trunk/arch/arm/include/asm/sched_clock.h +++ b/trunk/arch/arm/include/asm/sched_clock.h @@ -10,5 +10,7 @@ extern void sched_clock_postinit(void); extern void setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate); +extern void setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, + unsigned long rate); #endif diff --git a/trunk/arch/arm/include/asm/tlb.h b/trunk/arch/arm/include/asm/tlb.h index 314d4664eae7..99a19512ee26 100644 --- a/trunk/arch/arm/include/asm/tlb.h +++ b/trunk/arch/arm/include/asm/tlb.h @@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, { pgtable_page_dtor(pte); +#ifdef CONFIG_ARM_LPAE + tlb_add_flush(tlb, addr); +#else /* * With the classic ARM MMU, a pte page has two corresponding pmd * entries, each covering 1MB. @@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, addr &= PMD_MASK; tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE); tlb_add_flush(tlb, addr + SZ_1M); +#endif tlb_remove_page(tlb, pte); } diff --git a/trunk/arch/arm/include/asm/uaccess.h b/trunk/arch/arm/include/asm/uaccess.h index 479a6352e0b5..77bd79f2ffdb 100644 --- a/trunk/arch/arm/include/asm/uaccess.h +++ b/trunk/arch/arm/include/asm/uaccess.h @@ -101,28 +101,39 @@ extern int __get_user_1(void *); extern int __get_user_2(void *); extern int __get_user_4(void *); -#define __get_user_x(__r2,__p,__e,__s,__i...) \ +#define __GUP_CLOBBER_1 "lr", "cc" +#ifdef CONFIG_CPU_USE_DOMAINS +#define __GUP_CLOBBER_2 "ip", "lr", "cc" +#else +#define __GUP_CLOBBER_2 "lr", "cc" +#endif +#define __GUP_CLOBBER_4 "lr", "cc" + +#define __get_user_x(__r2,__p,__e,__l,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%1", "r2") \ + __asmeq("%3", "r1") \ "bl __get_user_" #__s \ : "=&r" (__e), "=r" (__r2) \ - : "0" (__p) \ - : __i, "cc") + : "0" (__p), "r" (__l) \ + : __GUP_CLOBBER_##__s) -#define get_user(x,p) \ +#define __get_user_check(x,p) \ ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ register unsigned long __r2 asm("r2"); \ + register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __get_user_x(__r2, __p, __e, 1, "lr"); \ - break; \ + __get_user_x(__r2, __p, __e, __l, 1); \ + break; \ case 2: \ - __get_user_x(__r2, __p, __e, 2, "r3", "lr"); \ + __get_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __get_user_x(__r2, __p, __e, 4, "lr"); \ + __get_user_x(__r2, __p, __e, __l, 4); \ break; \ default: __e = __get_user_bad(); break; \ } \ @@ -130,42 +141,57 @@ extern int __get_user_4(void *); __e; \ }) +#define get_user(x,p) \ + ({ \ + might_fault(); \ + __get_user_check(x,p); \ + }) + extern int __put_user_1(void *, unsigned int); extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2,__p,__e,__s) \ +#define __put_user_x(__r2,__p,__e,__l,__s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%2", "r2") \ + __asmeq("%3", "r1") \ "bl __put_user_" #__s \ : "=&r" (__e) \ - : "0" (__p), "r" (__r2) \ + : "0" (__p), 
"r" (__r2), "r" (__l) \ : "ip", "lr", "cc") -#define put_user(x,p) \ +#define __put_user_check(x,p) \ ({ \ + unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __r2 asm("r2") = (x); \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ + register unsigned long __l asm("r1") = __limit; \ register int __e asm("r0"); \ switch (sizeof(*(__p))) { \ case 1: \ - __put_user_x(__r2, __p, __e, 1); \ + __put_user_x(__r2, __p, __e, __l, 1); \ break; \ case 2: \ - __put_user_x(__r2, __p, __e, 2); \ + __put_user_x(__r2, __p, __e, __l, 2); \ break; \ case 4: \ - __put_user_x(__r2, __p, __e, 4); \ + __put_user_x(__r2, __p, __e, __l, 4); \ break; \ case 8: \ - __put_user_x(__r2, __p, __e, 8); \ + __put_user_x(__r2, __p, __e, __l, 8); \ break; \ default: __e = __put_user_bad(); break; \ } \ __e; \ }) +#define put_user(x,p) \ + ({ \ + might_fault(); \ + __put_user_check(x,p); \ + }) + #else /* CONFIG_MMU */ /* @@ -219,6 +245,7 @@ do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ + might_fault(); \ switch (sizeof(*(ptr))) { \ case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ @@ -300,6 +327,7 @@ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ + might_fault(); \ switch (sizeof(*(ptr))) { \ case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ diff --git a/trunk/arch/arm/include/asm/unistd.h b/trunk/arch/arm/include/asm/unistd.h index 0cab47d4a83f..2fde5fd1acce 100644 --- a/trunk/arch/arm/include/asm/unistd.h +++ b/trunk/arch/arm/include/asm/unistd.h @@ -404,6 +404,7 @@ #define __NR_setns (__NR_SYSCALL_BASE+375) #define __NR_process_vm_readv (__NR_SYSCALL_BASE+376) #define __NR_process_vm_writev (__NR_SYSCALL_BASE+377) + /* 378 for kcmp */ /* * The following SWIs are ARM private. @@ -483,6 +484,7 @@ */ #define __IGNORE_fadvise64_64 #define __IGNORE_migrate_pages +#define __IGNORE_kcmp #endif /* __KERNEL__ */ #endif /* __ASM_ARM_UNISTD_H */ diff --git a/trunk/arch/arm/kernel/calls.S b/trunk/arch/arm/kernel/calls.S index 463ff4a0ec8a..e337879595e5 100644 --- a/trunk/arch/arm/kernel/calls.S +++ b/trunk/arch/arm/kernel/calls.S @@ -387,6 +387,7 @@ /* 375 */ CALL(sys_setns) CALL(sys_process_vm_readv) CALL(sys_process_vm_writev) + CALL(sys_ni_syscall) /* reserved for sys_kcmp */ #ifndef syscalls_counted .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls #define syscalls_counted diff --git a/trunk/arch/arm/kernel/hw_breakpoint.c b/trunk/arch/arm/kernel/hw_breakpoint.c index ba386bd94107..281bf3301241 100644 --- a/trunk/arch/arm/kernel/hw_breakpoint.c +++ b/trunk/arch/arm/kernel/hw_breakpoint.c @@ -159,6 +159,12 @@ static int debug_arch_supported(void) arch >= ARM_DEBUG_ARCH_V7_1; } +/* Can we determine the watchpoint access type from the fsr? */ +static int debug_exception_updates_fsr(void) +{ + return 0; +} + /* Determine number of WRP registers available. */ static int get_num_wrp_resources(void) { @@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) /* Aligned */ break; case 1: - /* Allow single byte watchpoint. */ - if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) - break; case 2: /* Allow halfword watchpoints and breakpoints. */ if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) break; + case 3: + /* Allow single byte watchpoint. 
*/ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + break; default: ret = -EINVAL; goto out; @@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) info->address &= ~alignment_mask; info->ctrl.len <<= offset; - /* - * Currently we rely on an overflow handler to take - * care of single-stepping the breakpoint when it fires. - * In the case of userspace breakpoints on a core with V7 debug, - * we can use the mismatch feature as a poor-man's hardware - * single-step, but this only works for per-task breakpoints. - */ - if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) || - !core_has_mismatch_brps() || !bp->hw.bp_target)) { - pr_warning("overflow handler required but none found\n"); - ret = -EINVAL; + if (!bp->overflow_handler) { + /* + * Mismatch breakpoints are required for single-stepping + * breakpoints. + */ + if (!core_has_mismatch_brps()) + return -EINVAL; + + /* We don't allow mismatch breakpoints in kernel space. */ + if (arch_check_bp_in_kernelspace(bp)) + return -EPERM; + + /* + * Per-cpu breakpoints are not supported by our stepping + * mechanism. + */ + if (!bp->hw.bp_target) + return -EINVAL; + + /* + * We only support specific access types if the fsr + * reports them. + */ + if (!debug_exception_updates_fsr() && + (info->ctrl.type == ARM_BREAKPOINT_LOAD || + info->ctrl.type == ARM_BREAKPOINT_STORE)) + return -EINVAL; } + out: return ret; } @@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr, goto unlock; /* Check that the access type matches. */ - access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W : - HW_BREAKPOINT_R; - if (!(access & hw_breakpoint_type(wp))) - goto unlock; + if (debug_exception_updates_fsr()) { + access = (fsr & ARM_FSR_ACCESS_MASK) ? + HW_BREAKPOINT_W : HW_BREAKPOINT_R; + if (!(access & hw_breakpoint_type(wp))) + goto unlock; + } /* We have a winner. */ info->trigger = addr; diff --git a/trunk/arch/arm/kernel/sched_clock.c b/trunk/arch/arm/kernel/sched_clock.c index 27d186abbc06..f4515393248d 100644 --- a/trunk/arch/arm/kernel/sched_clock.c +++ b/trunk/arch/arm/kernel/sched_clock.c @@ -21,6 +21,8 @@ struct clock_data { u32 epoch_cyc_copy; u32 mult; u32 shift; + bool suspended; + bool needs_suspend; }; static void sched_clock_poll(unsigned long wrap_ticks); @@ -49,6 +51,9 @@ static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask) u64 epoch_ns; u32 epoch_cyc; + if (cd.suspended) + return cd.epoch_ns; + /* * Load the epoch_cyc and epoch_ns atomically. 
We do this by * ensuring that we always write epoch_cyc, epoch_ns and @@ -98,6 +103,13 @@ static void sched_clock_poll(unsigned long wrap_ticks) update_sched_clock(); } +void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits, + unsigned long rate) +{ + setup_sched_clock(read, bits, rate); + cd.needs_suspend = true; +} + void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate) { unsigned long r, w; @@ -169,11 +181,23 @@ void __init sched_clock_postinit(void) static int sched_clock_suspend(void) { sched_clock_poll(sched_clock_timer.data); + if (cd.needs_suspend) + cd.suspended = true; return 0; } +static void sched_clock_resume(void) +{ + if (cd.needs_suspend) { + cd.epoch_cyc = read_sched_clock(); + cd.epoch_cyc_copy = cd.epoch_cyc; + cd.suspended = false; + } +} + static struct syscore_ops sched_clock_ops = { .suspend = sched_clock_suspend, + .resume = sched_clock_resume, }; static int __init sched_clock_syscore_init(void) diff --git a/trunk/arch/arm/kernel/smp_twd.c b/trunk/arch/arm/kernel/smp_twd.c index fef42b21cecb..e1f906989bb8 100644 --- a/trunk/arch/arm/kernel/smp_twd.c +++ b/trunk/arch/arm/kernel/smp_twd.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include #include @@ -96,7 +95,52 @@ static void twd_timer_stop(struct clock_event_device *clk) disable_percpu_irq(clk->irq); } -#ifdef CONFIG_CPU_FREQ +#ifdef CONFIG_COMMON_CLK + +/* + * Updates clockevent frequency when the cpu frequency changes. + * Called on the cpu that is changing frequency with interrupts disabled. + */ +static void twd_update_frequency(void *new_rate) +{ + twd_timer_rate = *((unsigned long *) new_rate); + + clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate); +} + +static int twd_rate_change(struct notifier_block *nb, + unsigned long flags, void *data) +{ + struct clk_notifier_data *cnd = data; + + /* + * The twd clock events must be reprogrammed to account for the new + * frequency. The timer is local to a cpu, so cross-call to the + * changing cpu. + */ + if (flags == POST_RATE_CHANGE) + smp_call_function(twd_update_frequency, + (void *)&cnd->new_rate, 1); + + return NOTIFY_OK; +} + +static struct notifier_block twd_clk_nb = { + .notifier_call = twd_rate_change, +}; + +static int twd_clk_init(void) +{ + if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk)) + return clk_notifier_register(twd_clk, &twd_clk_nb); + + return 0; +} +core_initcall(twd_clk_init); + +#elif defined (CONFIG_CPU_FREQ) + +#include /* * Updates clockevent frequency when the cpu frequency changes. 
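The smp_twd.c hunk above moves the local-timer rate handling onto the common clock framework: a clk notifier fires once a rate change has been committed, and the new rate is pushed to the per-cpu clockevent. The sketch below condenses that pattern; my_clk and my_evt are stand-ins for twd_clk and twd_evt, and the driver glue around them is omitted.

    /* Sketch: reprogram a per-cpu clockevent when its parent clk changes rate. */
    #include <linux/clk.h>
    #include <linux/clockchips.h>
    #include <linux/notifier.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    static struct clock_event_device * __percpu *my_evt;    /* stand-in for twd_evt */
    static struct clk *my_clk;                               /* stand-in for twd_clk */

    static void my_update_frequency(void *new_rate)
    {
            /* Runs with interrupts disabled on each called CPU;
             * update that CPU's local clockevent with the new rate. */
            clockevents_update_freq(*__this_cpu_ptr(my_evt),
                                    *(unsigned long *)new_rate);
    }

    static int my_rate_change(struct notifier_block *nb,
                              unsigned long flags, void *data)
    {
            struct clk_notifier_data *cnd = data;

            /* Only act once the new rate has actually been set. */
            if (flags == POST_RATE_CHANGE)
                    smp_call_function(my_update_frequency,
                                      (void *)&cnd->new_rate, 1);
            return NOTIFY_OK;
    }

    static struct notifier_block my_clk_nb = {
            .notifier_call = my_rate_change,
    };

    /* Registration, as twd_clk_init() does above:
     *         clk_notifier_register(my_clk, &my_clk_nb);
     */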
diff --git a/trunk/arch/arm/kernel/topology.c b/trunk/arch/arm/kernel/topology.c index 198b08456e90..26c12c6440fc 100644 --- a/trunk/arch/arm/kernel/topology.c +++ b/trunk/arch/arm/kernel/topology.c @@ -321,7 +321,7 @@ void store_cpu_topology(unsigned int cpuid) * init_cpu_topology is called at boot when only one cpu is running * which prevent simultaneous write access to cpu_topology array */ -void init_cpu_topology(void) +void __init init_cpu_topology(void) { unsigned int cpu; diff --git a/trunk/arch/arm/kernel/traps.c b/trunk/arch/arm/kernel/traps.c index f7945218b8c6..b0179b89a04c 100644 --- a/trunk/arch/arm/kernel/traps.c +++ b/trunk/arch/arm/kernel/traps.c @@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) #endif instr = *(u32 *) pc; } else if (thumb_mode(regs)) { - get_user(instr, (u16 __user *)pc); + if (get_user(instr, (u16 __user *)pc)) + goto die_sig; if (is_wide_instruction(instr)) { unsigned int instr2; - get_user(instr2, (u16 __user *)pc+1); + if (get_user(instr2, (u16 __user *)pc+1)) + goto die_sig; instr <<= 16; instr |= instr2; } - } else { - get_user(instr, (u32 __user *)pc); + } else if (get_user(instr, (u32 __user *)pc)) { + goto die_sig; } if (call_undef_hook(regs, instr) == 0) return; +die_sig: #ifdef CONFIG_DEBUG_USER if (user_debug & UDBG_UNDEFINED) { printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", diff --git a/trunk/arch/arm/lib/Makefile b/trunk/arch/arm/lib/Makefile index 2473fd1fd51c..af72969820b4 100644 --- a/trunk/arch/arm/lib/Makefile +++ b/trunk/arch/arm/lib/Makefile @@ -16,13 +16,30 @@ lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \ call_with_stack.o mmu-y := clear_user.o copy_page.o getuser.o putuser.o -mmu-y += copy_from_user.o copy_to_user.o + +# the code in uaccess.S is not preemption safe and +# probably faster on ARMv3 only +ifeq ($(CONFIG_PREEMPT),y) + mmu-y += copy_from_user.o copy_to_user.o +else +ifneq ($(CONFIG_CPU_32v3),y) + mmu-y += copy_from_user.o copy_to_user.o +else + mmu-y += uaccess.o +endif +endif # using lib_ here won't override already available weak symbols obj-$(CONFIG_UACCESS_WITH_MEMCPY) += uaccess_with_memcpy.o -lib-$(CONFIG_MMU) += $(mmu-y) -lib-y += io-readsw-armv4.o io-writesw-armv4.o +lib-$(CONFIG_MMU) += $(mmu-y) + +ifeq ($(CONFIG_CPU_32v3),y) + lib-y += io-readsw-armv3.o io-writesw-armv3.o +else + lib-y += io-readsw-armv4.o io-writesw-armv4.o +endif + lib-$(CONFIG_ARCH_RPC) += ecard.o io-acorn.o floppydma.o lib-$(CONFIG_ARCH_SHARK) += io-shark.o diff --git a/trunk/arch/arm/lib/delay.c b/trunk/arch/arm/lib/delay.c index d6dacc69254e..395d5fbb8fa2 100644 --- a/trunk/arch/arm/lib/delay.c +++ b/trunk/arch/arm/lib/delay.c @@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq) { pr_info("Switching to timer-based delay loop\n"); lpj_fine = freq / HZ; + loops_per_jiffy = lpj_fine; arm_delay_ops.delay = __timer_delay; arm_delay_ops.const_udelay = __timer_const_udelay; arm_delay_ops.udelay = __timer_udelay; diff --git a/trunk/arch/arm/lib/getuser.S b/trunk/arch/arm/lib/getuser.S index 11093a7c3e32..9b06bb41fca6 100644 --- a/trunk/arch/arm/lib/getuser.S +++ b/trunk/arch/arm/lib/getuser.S @@ -16,8 +16,9 @@ * __get_user_X * * Inputs: r0 contains the address + * r1 contains the address limit, which must be preserved * Outputs: r0 is the error code - * r2, r3 contains the zero-extended value + * r2 contains the zero-extended value * lr corrupted * * No other registers must be altered. 
(see @@ -27,33 +28,39 @@ * Note also that it is intended that __get_user_bad is not global. */ #include +#include #include #include ENTRY(__get_user_1) + check_uaccess r0, 1, r1, r2, __get_user_bad 1: TUSER(ldrb) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__get_user_1) ENTRY(__get_user_2) -#ifdef CONFIG_THUMB2_KERNEL -2: TUSER(ldrb) r2, [r0] -3: TUSER(ldrb) r3, [r0, #1] + check_uaccess r0, 2, r1, r2, __get_user_bad +#ifdef CONFIG_CPU_USE_DOMAINS +rb .req ip +2: ldrbt r2, [r0], #1 +3: ldrbt rb, [r0], #0 #else -2: TUSER(ldrb) r2, [r0], #1 -3: TUSER(ldrb) r3, [r0] +rb .req r0 +2: ldrb r2, [r0] +3: ldrb rb, [r0, #1] #endif #ifndef __ARMEB__ - orr r2, r2, r3, lsl #8 + orr r2, r2, rb, lsl #8 #else - orr r2, r3, r2, lsl #8 + orr r2, rb, r2, lsl #8 #endif mov r0, #0 mov pc, lr ENDPROC(__get_user_2) ENTRY(__get_user_4) + check_uaccess r0, 4, r1, r2, __get_user_bad 4: TUSER(ldr) r2, [r0] mov r0, #0 mov pc, lr diff --git a/trunk/arch/arm/lib/io-readsw-armv3.S b/trunk/arch/arm/lib/io-readsw-armv3.S new file mode 100644 index 000000000000..88487c8c4f23 --- /dev/null +++ b/trunk/arch/arm/lib/io-readsw-armv3.S @@ -0,0 +1,106 @@ +/* + * linux/arch/arm/lib/io-readsw-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include + +.Linsw_bad_alignment: + adr r0, .Linsw_bad_align_msg + mov r2, lr + b panic +.Linsw_bad_align_msg: + .asciz "insw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.Linsw_align: tst r1, #1 + bne .Linsw_bad_alignment + + ldr r3, [r0] + strb r3, [r1], #1 + mov r3, r3, lsr #8 + strb r3, [r1], #1 + + subs r2, r2, #1 + moveq pc, lr + +ENTRY(__raw_readsw) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + tst r1, #3 + bne .Linsw_align + +.Linsw_aligned: mov ip, #0xff + orr ip, ip, ip, lsl #8 + stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .Lno_insw_8 + +.Linsw_8_lp: ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + ldr r5, [r0] + and r5, r5, ip + ldr r6, [r0] + orr r5, r5, r6, lsl #16 + + ldr r6, [r0] + and r6, r6, ip + ldr lr, [r0] + orr r6, r6, lr, lsl #16 + + stmia r1!, {r3 - r6} + + subs r2, r2, #8 + bpl .Linsw_8_lp + + tst r2, #7 + ldmeqfd sp!, {r4, r5, r6, pc} + +.Lno_insw_8: tst r2, #4 + beq .Lno_insw_4 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + ldr r4, [r0] + and r4, r4, ip + ldr r5, [r0] + orr r4, r4, r5, lsl #16 + + stmia r1!, {r3, r4} + +.Lno_insw_4: tst r2, #2 + beq .Lno_insw_2 + + ldr r3, [r0] + and r3, r3, ip + ldr r4, [r0] + orr r3, r3, r4, lsl #16 + + str r3, [r1], #4 + +.Lno_insw_2: tst r2, #1 + ldrne r3, [r0] + strneb r3, [r1], #1 + movne r3, r3, lsr #8 + strneb r3, [r1] + + ldmfd sp!, {r4, r5, r6, pc} + + diff --git a/trunk/arch/arm/lib/io-writesw-armv3.S b/trunk/arch/arm/lib/io-writesw-armv3.S new file mode 100644 index 000000000000..49b800419e32 --- /dev/null +++ b/trunk/arch/arm/lib/io-writesw-armv3.S @@ -0,0 +1,126 @@ +/* + * linux/arch/arm/lib/io-writesw-armv3.S + * + * Copyright (C) 1995-2000 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include + +.Loutsw_bad_alignment: + adr r0, .Loutsw_bad_align_msg + mov r2, lr + b panic +.Loutsw_bad_align_msg: + .asciz "outsw: bad buffer alignment (0x%p, lr=0x%08lX)\n" + .align + +.Loutsw_align: tst r1, #1 + bne .Loutsw_bad_alignment + + add r1, r1, #2 + + ldr r3, [r1, #-4] + mov r3, r3, lsr #16 + orr r3, r3, r3, lsl #16 + str r3, [r0] + subs r2, r2, #1 + moveq pc, lr + +ENTRY(__raw_writesw) + teq r2, #0 @ do we have to check for the zero len? + moveq pc, lr + tst r1, #3 + bne .Loutsw_align + + stmfd sp!, {r4, r5, r6, lr} + + subs r2, r2, #8 + bmi .Lno_outsw_8 + +.Loutsw_8_lp: ldmia r1!, {r3, r4, r5, r6} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r5, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r5, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r6, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r6, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + subs r2, r2, #8 + bpl .Loutsw_8_lp + + tst r2, #7 + ldmeqfd sp!, {r4, r5, r6, pc} + +.Lno_outsw_8: tst r2, #4 + beq .Lno_outsw_4 + + ldmia r1!, {r3, r4} + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + + mov ip, r4, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r4, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.Lno_outsw_4: tst r2, #2 + beq .Lno_outsw_2 + + ldr r3, [r1], #4 + + mov ip, r3, lsl #16 + orr ip, ip, ip, lsr #16 + str ip, [r0] + + mov ip, r3, lsr #16 + orr ip, ip, ip, lsl #16 + str ip, [r0] + +.Lno_outsw_2: tst r2, #1 + + ldrne r3, [r1] + + movne ip, r3, lsl #16 + orrne ip, ip, ip, lsr #16 + strne ip, [r0] + + ldmfd sp!, {r4, r5, r6, pc} diff --git a/trunk/arch/arm/lib/putuser.S b/trunk/arch/arm/lib/putuser.S index 7db25990c589..3d73dcb959b0 100644 --- a/trunk/arch/arm/lib/putuser.S +++ b/trunk/arch/arm/lib/putuser.S @@ -16,6 +16,7 @@ * __put_user_X * * Inputs: r0 contains the address + * r1 contains the address limit, which must be preserved * r2, r3 contains the value * Outputs: r0 is the error code * lr corrupted @@ -27,16 +28,19 @@ * Note also that it is intended that __put_user_bad is not global. */ #include +#include #include #include ENTRY(__put_user_1) + check_uaccess r0, 1, r1, ip, __put_user_bad 1: TUSER(strb) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__put_user_1) ENTRY(__put_user_2) + check_uaccess r0, 2, r1, ip, __put_user_bad mov ip, r2, lsr #8 #ifdef CONFIG_THUMB2_KERNEL #ifndef __ARMEB__ @@ -60,12 +64,14 @@ ENTRY(__put_user_2) ENDPROC(__put_user_2) ENTRY(__put_user_4) + check_uaccess r0, 4, r1, ip, __put_user_bad 4: TUSER(str) r2, [r0] mov r0, #0 mov pc, lr ENDPROC(__put_user_4) ENTRY(__put_user_8) + check_uaccess r0, 8, r1, ip, __put_user_bad #ifdef CONFIG_THUMB2_KERNEL 5: TUSER(str) r2, [r0] 6: TUSER(str) r3, [r0, #4] diff --git a/trunk/arch/arm/lib/uaccess.S b/trunk/arch/arm/lib/uaccess.S new file mode 100644 index 000000000000..5c908b1cb8ed --- /dev/null +++ b/trunk/arch/arm/lib/uaccess.S @@ -0,0 +1,564 @@ +/* + * linux/arch/arm/lib/uaccess.S + * + * Copyright (C) 1995, 1996,1997,1998 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Routines to block copy data to/from user memory + * These are highly optimised both for the 4k page size + * and for various alignments. + */ +#include +#include +#include +#include + + .text + +#define PAGE_SHIFT 12 + +/* Prototype: int __copy_to_user(void *to, const char *from, size_t n) + * Purpose : copy a block to user memory from kernel memory + * Params : to - user memory + * : from - kernel memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. + */ + +.Lc2u_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 + ldrb r3, [r1], #1 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + sub r2, r2, ip + b .Lc2u_dest_aligned + +ENTRY(__copy_to_user) + stmfd sp!, {r2, r4 - r7, lr} + cmp r2, #4 + blt .Lc2u_not_enough + ands ip, r0, #3 + bne .Lc2u_dest_not_aligned +.Lc2u_dest_aligned: + + ands ip, r1, #3 + bne .Lc2u_src_not_aligned +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.Lc2u_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_0nowords + ldr r3, [r1], #4 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_0fupi +/* + * ip = max no. of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .Lc2u_0rem8lp + +.Lc2u_0cpy8lp: ldmia r1!, {r3 - r6} + stmia r0!, {r3 - r6} @ Shouldnt fault + ldmia r1!, {r3 - r6} + subs ip, ip, #32 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_0cpy8lp + +.Lc2u_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} + stmgeia r0!, {r3 - r6} @ Shouldnt fault + tst ip, #8 + ldmneia r1!, {r3 - r4} + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + ldrne r3, [r1], #4 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_0fupi +.Lc2u_0nowords: teq ip, #0 + beq .Lc2u_finished +.Lc2u_nowords: cmp ip, #2 + ldrb r3, [r1], #1 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #1 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_not_enough: + movs ip, r2 + bne .Lc2u_nowords +.Lc2u_finished: mov r0, #0 + ldmfd sp!, {r2, r4 - r7, pc} + +.Lc2u_src_not_aligned: + bic r1, r1, #3 + ldr r7, [r1], #4 + cmp ip, #2 + bgt .Lc2u_3fupi + beq .Lc2u_2fupi +.Lc2u_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_1nowords + mov r3, r7, pull #8 + ldr r7, [r1], #4 + orr r3, r3, r7, push #24 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_1rem8lp + +.Lc2u_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_1cpy8lp + +.Lc2u_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #8 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push 
#24 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_1fupi +.Lc2u_1nowords: mov r3, r7, get_byte_1 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + movge r3, r7, get_byte_2 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + movgt r3, r7, get_byte_3 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_2nowords + mov r3, r7, pull #16 + ldr r7, [r1], #4 + orr r3, r3, r7, push #16 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_2rem8lp + +.Lc2u_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_2cpy8lp + +.Lc2u_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #16 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #16 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_2fupi +.Lc2u_2nowords: mov r3, r7, get_byte_2 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + movge r3, r7, get_byte_3 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished + +.Lc2u_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lc2u_3nowords + mov r3, r7, pull #24 + ldr r7, [r1], #4 + orr r3, r3, r7, push #8 +USER( TUSER( str) r3, [r0], #4) @ May fault + mov ip, r0, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lc2u_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lc2u_3rem8lp + +.Lc2u_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} + subs ip, ip, #16 + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} @ Shouldnt fault + bpl .Lc2u_3cpy8lp + +.Lc2u_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} @ Shouldnt fault + tst ip, #4 + movne r3, r7, pull #24 + ldrne r7, [r1], #4 + orrne r3, r3, r7, push #8 + TUSER( strne) r3, [r0], #4 @ Shouldnt fault + ands ip, ip, #3 + beq .Lc2u_3fupi +.Lc2u_3nowords: mov r3, r7, get_byte_3 + teq ip, #0 + beq .Lc2u_finished + cmp ip, #2 +USER( TUSER( strb) r3, [r0], #1) @ May fault + ldrgeb r3, [r1], #1 +USER( TUSER( strgeb) r3, [r0], #1) @ May fault + ldrgtb r3, [r1], #0 +USER( TUSER( strgtb) r3, [r0], #1) @ May fault + b .Lc2u_finished +ENDPROC(__copy_to_user) + + .pushsection .fixup,"ax" + .align 0 +9001: ldmfd sp!, {r0, r4 - r7, pc} + .popsection + +/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n); + * Purpose : copy a block from user memory to kernel memory + * Params : to - kernel memory + * : from - user memory + * : n - number of bytes to copy + * Returns : Number of bytes NOT copied. 
+ */ +.Lcfu_dest_not_aligned: + rsb ip, ip, #4 + cmp ip, #2 +USER( TUSER( ldrb) r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + sub r2, r2, ip + b .Lcfu_dest_aligned + +ENTRY(__copy_from_user) + stmfd sp!, {r0, r2, r4 - r7, lr} + cmp r2, #4 + blt .Lcfu_not_enough + ands ip, r0, #3 + bne .Lcfu_dest_not_aligned +.Lcfu_dest_aligned: + ands ip, r1, #3 + bne .Lcfu_src_not_aligned + +/* + * Seeing as there has to be at least 8 bytes to copy, we can + * copy one word, and force a user-mode page fault... + */ + +.Lcfu_0fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_0nowords +USER( TUSER( ldr) r3, [r1], #4) + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT @ On each page, use a ld/st??t instruction + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_0fupi +/* + * ip = max no. of bytes to copy before needing another "strt" insn + */ + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #32 + blt .Lcfu_0rem8lp + +.Lcfu_0cpy8lp: ldmia r1!, {r3 - r6} @ Shouldnt fault + stmia r0!, {r3 - r6} + ldmia r1!, {r3 - r6} @ Shouldnt fault + subs ip, ip, #32 + stmia r0!, {r3 - r6} + bpl .Lcfu_0cpy8lp + +.Lcfu_0rem8lp: cmn ip, #16 + ldmgeia r1!, {r3 - r6} @ Shouldnt fault + stmgeia r0!, {r3 - r6} + tst ip, #8 + ldmneia r1!, {r3 - r4} @ Shouldnt fault + stmneia r0!, {r3 - r4} + tst ip, #4 + TUSER( ldrne) r3, [r1], #4 @ Shouldnt fault + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_0fupi +.Lcfu_0nowords: teq ip, #0 + beq .Lcfu_finished +.Lcfu_nowords: cmp ip, #2 +USER( TUSER( ldrb) r3, [r1], #1) @ May fault + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_not_enough: + movs ip, r2 + bne .Lcfu_nowords +.Lcfu_finished: mov r0, #0 + add sp, sp, #8 + ldmfd sp!, {r4 - r7, pc} + +.Lcfu_src_not_aligned: + bic r1, r1, #3 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + cmp ip, #2 + bgt .Lcfu_3fupi + beq .Lcfu_2fupi +.Lcfu_1fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_1nowords + mov r3, r7, pull #8 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, r3, r7, push #24 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_1fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_1rem8lp + +.Lcfu_1cpy8lp: mov r3, r7, pull #8 + ldmia r1!, {r4 - r7} @ Shouldnt fault + subs ip, ip, #16 + orr r3, r3, r4, push #24 + mov r4, r4, pull #8 + orr r4, r4, r5, push #24 + mov r5, r5, pull #8 + orr r5, r5, r6, push #24 + mov r6, r6, pull #8 + orr r6, r6, r7, push #24 + stmia r0!, {r3 - r6} + bpl .Lcfu_1cpy8lp + +.Lcfu_1rem8lp: tst ip, #8 + movne r3, r7, pull #8 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #24 + movne r4, r4, pull #8 + orrne r4, r4, r7, push #24 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #8 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #24 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_1fupi +.Lcfu_1nowords: mov r3, r7, get_byte_1 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge r3, r7, get_byte_2 + strgeb r3, [r0], #1 + movgt r3, r7, get_byte_3 + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_2fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_2nowords + mov r3, r7, pull #16 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, 
r3, r7, push #16 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_2fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_2rem8lp + + +.Lcfu_2cpy8lp: mov r3, r7, pull #16 + ldmia r1!, {r4 - r7} @ Shouldnt fault + subs ip, ip, #16 + orr r3, r3, r4, push #16 + mov r4, r4, pull #16 + orr r4, r4, r5, push #16 + mov r5, r5, pull #16 + orr r5, r5, r6, push #16 + mov r6, r6, pull #16 + orr r6, r6, r7, push #16 + stmia r0!, {r3 - r6} + bpl .Lcfu_2cpy8lp + +.Lcfu_2rem8lp: tst ip, #8 + movne r3, r7, pull #16 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #16 + movne r4, r4, pull #16 + orrne r4, r4, r7, push #16 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #16 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #16 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_2fupi +.Lcfu_2nowords: mov r3, r7, get_byte_2 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 + movge r3, r7, get_byte_3 + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #0) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished + +.Lcfu_3fupi: subs r2, r2, #4 + addmi ip, r2, #4 + bmi .Lcfu_3nowords + mov r3, r7, pull #24 +USER( TUSER( ldr) r7, [r1], #4) @ May fault + orr r3, r3, r7, push #8 + str r3, [r0], #4 + mov ip, r1, lsl #32 - PAGE_SHIFT + rsb ip, ip, #0 + movs ip, ip, lsr #32 - PAGE_SHIFT + beq .Lcfu_3fupi + cmp r2, ip + movlt ip, r2 + sub r2, r2, ip + subs ip, ip, #16 + blt .Lcfu_3rem8lp + +.Lcfu_3cpy8lp: mov r3, r7, pull #24 + ldmia r1!, {r4 - r7} @ Shouldnt fault + orr r3, r3, r4, push #8 + mov r4, r4, pull #24 + orr r4, r4, r5, push #8 + mov r5, r5, pull #24 + orr r5, r5, r6, push #8 + mov r6, r6, pull #24 + orr r6, r6, r7, push #8 + stmia r0!, {r3 - r6} + subs ip, ip, #16 + bpl .Lcfu_3cpy8lp + +.Lcfu_3rem8lp: tst ip, #8 + movne r3, r7, pull #24 + ldmneia r1!, {r4, r7} @ Shouldnt fault + orrne r3, r3, r4, push #8 + movne r4, r4, pull #24 + orrne r4, r4, r7, push #8 + stmneia r0!, {r3 - r4} + tst ip, #4 + movne r3, r7, pull #24 +USER( TUSER( ldrne) r7, [r1], #4) @ May fault + orrne r3, r3, r7, push #8 + strne r3, [r0], #4 + ands ip, ip, #3 + beq .Lcfu_3fupi +.Lcfu_3nowords: mov r3, r7, get_byte_3 + teq ip, #0 + beq .Lcfu_finished + cmp ip, #2 + strb r3, [r0], #1 +USER( TUSER( ldrgeb) r3, [r1], #1) @ May fault + strgeb r3, [r0], #1 +USER( TUSER( ldrgtb) r3, [r1], #1) @ May fault + strgtb r3, [r0], #1 + b .Lcfu_finished +ENDPROC(__copy_from_user) + + .pushsection .fixup,"ax" + .align 0 + /* + * We took an exception. r0 contains a pointer to + * the byte not copied. 
+ */ +9001: ldr r2, [sp], #4 @ void *to + sub r2, r0, r2 @ bytes copied + ldr r1, [sp], #4 @ unsigned long count + subs r4, r1, r2 @ bytes left to copy + movne r1, r4 + blne __memzero + mov r0, r4 + ldmfd sp!, {r4 - r7, pc} + .popsection + diff --git a/trunk/arch/arm/mach-at91/at91rm9200_time.c b/trunk/arch/arm/mach-at91/at91rm9200_time.c index 104ca40d8d18..aaa443b48c91 100644 --- a/trunk/arch/arm/mach-at91/at91rm9200_time.c +++ b/trunk/arch/arm/mach-at91/at91rm9200_time.c @@ -197,7 +197,7 @@ void __init at91rm9200_timer_init(void) at91_st_read(AT91_ST_SR); /* Make IRQs happen for the system timer */ - setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq); + setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq); /* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used * directly for the clocksource and all clockevents, after adjusting diff --git a/trunk/arch/arm/mach-at91/at91sam9260_devices.c b/trunk/arch/arm/mach-at91/at91sam9260_devices.c index 7b9c2ba396ed..bce572a530ef 100644 --- a/trunk/arch/arm/mach-at91/at91sam9260_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9260_devices.c @@ -726,6 +726,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, }, }; @@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9260_rtt_device.num_resources = 2; + at91sam9260_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9260_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9261_devices.c b/trunk/arch/arm/mach-at91/at91sam9261_devices.c index 8df5c1bdff92..bc2590d712d0 100644 --- a/trunk/arch/arm/mach-at91/at91sam9261_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9261_devices.c @@ -609,6 +609,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9261_rtt_device.num_resources = 2; + at91sam9261_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9261_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9263_devices.c b/trunk/arch/arm/mach-at91/at91sam9263_devices.c index eb6bbf86fb9f..9b6ca734f1a9 100644 --- a/trunk/arch/arm/mach-at91/at91sam9263_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9263_devices.c @@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed only for the chosen RTT: * GPBR will serve as the storage for RTC time offset */ - 
at91sam9263_rtt0_device.num_resources = 2; + at91sam9263_rtt0_device.num_resources = 3; at91sam9263_rtt1_device.num_resources = 1; pdev = &at91sam9263_rtt0_device; r = rtt0_resources; break; case 1: at91sam9263_rtt0_device.num_resources = 1; - at91sam9263_rtt1_device.num_resources = 2; + at91sam9263_rtt1_device.num_resources = 3; pdev = &at91sam9263_rtt1_device; r = rtt1_resources; break; @@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void) pdev->name = "rtc-at91sam9"; r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; r[1].end = r[1].start + 3; + r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c index 06073996a382..1b47319ca00b 100644 --- a/trunk/arch/arm/mach-at91/at91sam9g45_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9g45_devices.c @@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9g45_rtt_device.num_resources = 2; + at91sam9g45_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9G45_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/at91sam9rl_devices.c b/trunk/arch/arm/mach-at91/at91sam9rl_devices.c index f09fff932172..b3d365dadef5 100644 --- a/trunk/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/trunk/arch/arm/mach-at91/at91sam9rl_devices.c @@ -688,6 +688,8 @@ static struct resource rtt_resources[] = { .flags = IORESOURCE_MEM, }, { .flags = IORESOURCE_MEM, + }, { + .flags = IORESOURCE_IRQ, } }; @@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void) * The second resource is needed: * GPBR will serve as the storage for RTC time offset */ - at91sam9rl_rtt_device.num_resources = 2; + at91sam9rl_rtt_device.num_resources = 3; rtt_resources[1].start = AT91SAM9RL_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR; rtt_resources[1].end = rtt_resources[1].start + 3; + rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS; + rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS; } #else static void __init at91_add_device_rtt_rtc(void) diff --git a/trunk/arch/arm/mach-at91/clock.c b/trunk/arch/arm/mach-at91/clock.c index de2ec6b8fea7..188c82971ebd 100644 --- a/trunk/arch/arm/mach-at91/clock.c +++ b/trunk/arch/arm/mach-at91/clock.c @@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base); #define cpu_has_300M_plla() (cpu_is_at91sam9g10()) +#define cpu_has_240M_plla() (cpu_is_at91sam9261() \ + || cpu_is_at91sam9263() \ + || cpu_is_at91sam9rl()) + +#define cpu_has_210M_plla() (cpu_is_at91sam9260()) + #define cpu_has_pllb() (!(cpu_is_at91sam9rl() \ || cpu_is_at91sam9g45() \ || cpu_is_at91sam9x5() \ @@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock) } else if (cpu_has_800M_plla()) { if (plla.rate_hz > 800000000) pll_overclock = true; + } else if (cpu_has_240M_plla()) { + if (plla.rate_hz > 240000000) + pll_overclock = true; + } else if (cpu_has_210M_plla()) { + if (plla.rate_hz > 210000000) + 
pll_overclock = true; } else { if (plla.rate_hz > 209000000) pll_overclock = true; diff --git a/trunk/arch/arm/mach-davinci/board-neuros-osd2.c b/trunk/arch/arm/mach-davinci/board-neuros-osd2.c index 5de69f2fcca9..f6b9fc70161b 100644 --- a/trunk/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/trunk/arch/arm/mach-davinci/board-neuros-osd2.c @@ -162,38 +162,6 @@ static void __init davinci_ntosd2_map_io(void) dm644x_init(); } -/* - I2C initialization -*/ -static struct davinci_i2c_platform_data ntosd2_i2c_pdata = { - .bus_freq = 20 /* kHz */, - .bus_delay = 100 /* usec */, -}; - -static struct i2c_board_info __initdata ntosd2_i2c_info[] = { -}; - -static int ntosd2_init_i2c(void) -{ - int status; - - davinci_init_i2c(&ntosd2_i2c_pdata); - status = gpio_request(NTOSD2_MSP430_IRQ, ntosd2_i2c_info[0].type); - if (status == 0) { - status = gpio_direction_input(NTOSD2_MSP430_IRQ); - if (status == 0) { - status = gpio_to_irq(NTOSD2_MSP430_IRQ); - if (status > 0) { - ntosd2_i2c_info[0].irq = status; - i2c_register_board_info(1, - ntosd2_i2c_info, - ARRAY_SIZE(ntosd2_i2c_info)); - } - } - } - return status; -} - static struct davinci_mmc_config davinci_ntosd2_mmc_config = { .wires = 4, .version = MMC_CTLR_VERSION_1 @@ -218,7 +186,6 @@ static __init void davinci_ntosd2_init(void) { struct clk *aemif_clk; struct davinci_soc_info *soc_info = &davinci_soc_info; - int status; aemif_clk = clk_get(NULL, "aemif"); clk_enable(aemif_clk); @@ -242,12 +209,6 @@ static __init void davinci_ntosd2_init(void) platform_add_devices(davinci_ntosd2_devices, ARRAY_SIZE(davinci_ntosd2_devices)); - /* Initialize I2C interface specific for this board */ - status = ntosd2_init_i2c(); - if (status < 0) - pr_warning("davinci_ntosd2_init: msp430 irq setup failed:" - " %d\n", status); - davinci_serial_init(&uart_config); dm644x_init_asp(&dm644x_ntosd2_snd_data); diff --git a/trunk/arch/arm/mach-dove/common.c b/trunk/arch/arm/mach-dove/common.c index 4db5de54b6a7..6321567d8eaa 100644 --- a/trunk/arch/arm/mach-dove/common.c +++ b/trunk/arch/arm/mach-dove/common.c @@ -102,7 +102,8 @@ void __init dove_ehci1_init(void) void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge00_init(eth_data, DOVE_GE00_PHYS_BASE, - IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR); + IRQ_DOVE_GE00_SUM, IRQ_DOVE_GE00_ERR, + 1600); } /***************************************************************************** diff --git a/trunk/arch/arm/mach-exynos/mach-origen.c b/trunk/arch/arm/mach-exynos/mach-origen.c index 5ca80307d6d7..4e574c24581c 100644 --- a/trunk/arch/arm/mach-exynos/mach-origen.c +++ b/trunk/arch/arm/mach-exynos/mach-origen.c @@ -42,6 +42,7 @@ #include #include #include +#include #include #include @@ -734,6 +735,11 @@ static void __init origen_bt_setup(void) s3c_gpio_setpull(EXYNOS4_GPX2(2), S3C_GPIO_PULL_NONE); } +/* I2C module and id for HDMIPHY */ +static struct i2c_board_info hdmiphy_info = { + I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38), +}; + static void s5p_tv_setup(void) { /* Direct HPD to HDMI chip */ @@ -781,6 +787,7 @@ static void __init origen_machine_init(void) s5p_tv_setup(); s5p_i2c_hdmiphy_set_platdata(NULL); + s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0); #ifdef CONFIG_DRM_EXYNOS s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata; diff --git a/trunk/arch/arm/mach-exynos/mach-smdkv310.c b/trunk/arch/arm/mach-exynos/mach-smdkv310.c index 3cfa688d274a..73f2bce097e1 100644 --- a/trunk/arch/arm/mach-exynos/mach-smdkv310.c +++ b/trunk/arch/arm/mach-exynos/mach-smdkv310.c @@ -40,6 +40,7 @@ #include #include #include 
+#include #include #include @@ -354,6 +355,11 @@ static struct platform_pwm_backlight_data smdkv310_bl_data = { .pwm_period_ns = 1000, }; +/* I2C module and id for HDMIPHY */ +static struct i2c_board_info hdmiphy_info = { + I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38), +}; + static void s5p_tv_setup(void) { /* direct HPD to HDMI chip */ @@ -388,6 +394,7 @@ static void __init smdkv310_machine_init(void) s5p_tv_setup(); s5p_i2c_hdmiphy_set_platdata(NULL); + s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0); samsung_keypad_set_platdata(&smdkv310_keypad_data); diff --git a/trunk/arch/arm/mach-exynos/pm_domains.c b/trunk/arch/arm/mach-exynos/pm_domains.c index 373c3c00d24c..c0bc83a7663e 100644 --- a/trunk/arch/arm/mach-exynos/pm_domains.c +++ b/trunk/arch/arm/mach-exynos/pm_domains.c @@ -115,7 +115,7 @@ static __init int exynos_pm_dt_parse_domains(void) } #endif /* CONFIG_OF */ -static __init void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, +static __init __maybe_unused void exynos_pm_add_dev_to_genpd(struct platform_device *pdev, struct exynos_pm_domain *pd) { if (pdev->dev.bus) { diff --git a/trunk/arch/arm/mach-gemini/irq.c b/trunk/arch/arm/mach-gemini/irq.c index ca70e5fcc7ac..020852d3bdd8 100644 --- a/trunk/arch/arm/mach-gemini/irq.c +++ b/trunk/arch/arm/mach-gemini/irq.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #define IRQ_SOURCE(base_addr) (base_addr + 0x00) diff --git a/trunk/arch/arm/mach-imx/Makefile b/trunk/arch/arm/mach-imx/Makefile index 07f7c226e4cf..d004d37ad9d8 100644 --- a/trunk/arch/arm/mach-imx/Makefile +++ b/trunk/arch/arm/mach-imx/Makefile @@ -9,7 +9,8 @@ obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o -obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o +imx5-pm-$(CONFIG_PM) += pm-imx5.o +obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o $(imx5-pm-y) cpu_op-mx51.o obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \ clk-pfd.o clk-busy.o @@ -70,14 +71,13 @@ obj-$(CONFIG_DEBUG_LL) += lluart.o obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o obj-$(CONFIG_HAVE_IMX_MMDC) += mmdc.o obj-$(CONFIG_HAVE_IMX_SRC) += src.o -obj-$(CONFIG_CPU_V7) += head-v7.o -AFLAGS_head-v7.o :=-Wa,-march=armv7-a -obj-$(CONFIG_SMP) += platsmp.o +AFLAGS_headsmp.o :=-Wa,-march=armv7-a +obj-$(CONFIG_SMP) += headsmp.o platsmp.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o ifeq ($(CONFIG_PM),y) -obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o +obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o endif # i.MX5 based machines diff --git a/trunk/arch/arm/mach-imx/clk-imx25.c b/trunk/arch/arm/mach-imx/clk-imx25.c index fdd8cc87c9fe..d20d4795f4ea 100644 --- a/trunk/arch/arm/mach-imx/clk-imx25.c +++ b/trunk/arch/arm/mach-imx/clk-imx25.c @@ -222,10 +222,8 @@ int __init mx25_clocks_init(void) clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0"); clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0"); - clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0"); - clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0"); - clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1"); - clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1"); + clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0"); + 
clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1"); clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0"); clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0"); clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0"); @@ -243,6 +241,6 @@ int __init mx25_clocks_init(void) clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma"); clk_register_clkdev(clk[iim_ipg], "iim", NULL); - mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); + mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1); return 0; } diff --git a/trunk/arch/arm/mach-imx/clk-imx27.c b/trunk/arch/arm/mach-imx/clk-imx27.c index 7aa6313fb167..f69ca4680049 100644 --- a/trunk/arch/arm/mach-imx/clk-imx27.c +++ b/trunk/arch/arm/mach-imx/clk-imx27.c @@ -223,7 +223,7 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0"); clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0"); - clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0"); + clk_register_clkdev(clk[csi_ahb_gate], "ahb", "mx2-camera.0"); clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc"); @@ -250,8 +250,10 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1"); clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0"); clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad"); - clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma"); - clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma"); + clk_register_clkdev(clk[emma_ahb_gate], "emma-ahb", "mx2-camera.0"); + clk_register_clkdev(clk[emma_ipg_gate], "emma-ipg", "mx2-camera.0"); + clk_register_clkdev(clk[emma_ahb_gate], "ahb", "m2m-emmaprp.0"); + clk_register_clkdev(clk[emma_ipg_gate], "ipg", "m2m-emmaprp.0"); clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL); clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL); clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL); diff --git a/trunk/arch/arm/mach-imx/clk-imx31.c b/trunk/arch/arm/mach-imx/clk-imx31.c index 8e19e70f90f9..1253af2d9971 100644 --- a/trunk/arch/arm/mach-imx/clk-imx31.c +++ b/trunk/arch/arm/mach-imx/clk-imx31.c @@ -130,7 +130,7 @@ int __init mx31_clocks_init(unsigned long fref) clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0"); clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core"); clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); - clk_register_clkdev(clk[kpp_gate], "kpp", NULL); + clk_register_clkdev(clk[kpp_gate], NULL, "imx-keypad"); clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0"); clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0"); diff --git a/trunk/arch/arm/mach-imx/clk-imx35.c b/trunk/arch/arm/mach-imx/clk-imx35.c index c6422fb10bae..65fb8bcd86cb 100644 --- a/trunk/arch/arm/mach-imx/clk-imx35.c +++ b/trunk/arch/arm/mach-imx/clk-imx35.c @@ -230,10 +230,8 @@ int __init mx35_clocks_init() clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb"); clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1"); clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma"); - clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0"); - clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0"); - clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1"); - clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1"); + 
clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0"); + clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1"); /* i.mx35 has the i.mx21 type uart */ clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0"); clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0"); diff --git a/trunk/arch/arm/mach-imx/clk-imx51-imx53.c b/trunk/arch/arm/mach-imx/clk-imx51-imx53.c index f6086693ebd2..4bdcaa97bd98 100644 --- a/trunk/arch/arm/mach-imx/clk-imx51-imx53.c +++ b/trunk/arch/arm/mach-imx/clk-imx51-imx53.c @@ -303,6 +303,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil, clk_prepare_enable(clk[aips_tz2]); /* fec */ clk_prepare_enable(clk[spba]); clk_prepare_enable(clk[emi_fast_gate]); /* fec */ + clk_prepare_enable(clk[emi_slow_gate]); /* eim */ clk_prepare_enable(clk[tmax1]); clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */ clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */ diff --git a/trunk/arch/arm/mach-imx/clk-imx6q.c b/trunk/arch/arm/mach-imx/clk-imx6q.c index ea89520b6e22..4233d9e3531d 100644 --- a/trunk/arch/arm/mach-imx/clk-imx6q.c +++ b/trunk/arch/arm/mach-imx/clk-imx6q.c @@ -152,7 +152,7 @@ enum mx6q_clks { ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3, usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg, pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg, - ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, + ssi2_ipg, ssi3_ipg, rom, usbphy1, usbphy2, ldb_di0_div_3_5, ldb_di1_div_3_5, clk_max }; @@ -288,8 +288,10 @@ int __init mx6q_clocks_init(void) clk[gpu3d_shader] = imx_clk_divider("gpu3d_shader", "gpu3d_shader_sel", base + 0x18, 29, 3); clk[ipu1_podf] = imx_clk_divider("ipu1_podf", "ipu1_sel", base + 0x3c, 11, 3); clk[ipu2_podf] = imx_clk_divider("ipu2_podf", "ipu2_sel", base + 0x3c, 16, 3); - clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_sel", base + 0x20, 10, 1); - clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_sel", base + 0x20, 11, 1); + clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7); + clk[ldb_di0_podf] = imx_clk_divider("ldb_di0_podf", "ldb_di0_div_3_5", base + 0x20, 10, 1); + clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7); + clk[ldb_di1_podf] = imx_clk_divider("ldb_di1_podf", "ldb_di1_div_3_5", base + 0x20, 11, 1); clk[ipu1_di0_pre] = imx_clk_divider("ipu1_di0_pre", "ipu1_di0_pre_sel", base + 0x34, 3, 3); clk[ipu1_di1_pre] = imx_clk_divider("ipu1_di1_pre", "ipu1_di1_pre_sel", base + 0x34, 12, 3); clk[ipu2_di0_pre] = imx_clk_divider("ipu2_di0_pre", "ipu2_di0_pre_sel", base + 0x38, 3, 3); diff --git a/trunk/arch/arm/mach-imx/head-v7.S b/trunk/arch/arm/mach-imx/headsmp.S similarity index 100% rename from trunk/arch/arm/mach-imx/head-v7.S rename to trunk/arch/arm/mach-imx/headsmp.S diff --git a/trunk/arch/arm/mach-imx/hotplug.c b/trunk/arch/arm/mach-imx/hotplug.c index 20ed2d56c1af..f8f7437c83b8 100644 --- a/trunk/arch/arm/mach-imx/hotplug.c +++ b/trunk/arch/arm/mach-imx/hotplug.c @@ -42,22 +42,6 @@ static inline void cpu_enter_lowpower(void) : "cc"); } -static inline void cpu_leave_lowpower(void) -{ - unsigned int v; - - asm volatile( - "mrc p15, 0, %0, c1, c0, 0\n" - " orr %0, %0, %1\n" - " mcr p15, 0, %0, c1, c0, 0\n" - " mrc p15, 0, %0, c1, c0, 1\n" - " orr %0, %0, %2\n" - " mcr p15, 0, %0, c1, c0, 1\n" - : "=&r" (v) - : "Ir" (CR_C), "Ir" (0x40) - : "cc"); -} - /* * platform-specific code to shutdown a CPU * @@ -67,11 +51,10 @@ void platform_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); imx_enable_cpu(cpu, 
false); - cpu_do_idle(); - cpu_leave_lowpower(); - /* We should never return from idle */ - panic("cpu %d unexpectedly exit from shutdown\n", cpu); + /* spin here until hardware takes it down */ + while (1) + ; } int platform_cpu_disable(unsigned int cpu) diff --git a/trunk/arch/arm/mach-imx/mach-armadillo5x0.c b/trunk/arch/arm/mach-imx/mach-armadillo5x0.c index 2c6ab3273f9e..5985ed1b8c98 100644 --- a/trunk/arch/arm/mach-imx/mach-armadillo5x0.c +++ b/trunk/arch/arm/mach-imx/mach-armadillo5x0.c @@ -526,7 +526,8 @@ static void __init armadillo5x0_init(void) imx31_add_mxc_nand(&armadillo5x0_nand_board_info); /* set NAND page size to 2k if not configured via boot mode pins */ - __raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR); + __raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) | + (1 << 30), mx3_ccm_base + MXC_CCM_RCSR); /* RTC */ /* Get RTC IRQ and register the chip */ diff --git a/trunk/arch/arm/mach-imx/mach-imx6q.c b/trunk/arch/arm/mach-imx/mach-imx6q.c index 5ec0608f2a76..045b3f6a387d 100644 --- a/trunk/arch/arm/mach-imx/mach-imx6q.c +++ b/trunk/arch/arm/mach-imx/mach-imx6q.c @@ -71,7 +71,7 @@ void imx6q_restart(char mode, const char *cmd) /* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */ static int ksz9021rn_phy_fixup(struct phy_device *phydev) { - if (IS_ENABLED(CONFIG_PHYLIB)) { + if (IS_BUILTIN(CONFIG_PHYLIB)) { /* min rx data delay */ phy_write(phydev, 0x0b, 0x8105); phy_write(phydev, 0x0c, 0x0000); @@ -112,7 +112,7 @@ static void __init imx6q_sabrelite_cko1_setup(void) static void __init imx6q_sabrelite_init(void) { - if (IS_ENABLED(CONFIG_PHYLIB)) + if (IS_BUILTIN(CONFIG_PHYLIB)) phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK, ksz9021rn_phy_fixup); imx6q_sabrelite_cko1_setup(); diff --git a/trunk/arch/arm/mach-integrator/core.c b/trunk/arch/arm/mach-integrator/core.c index ebf680bebdf2..3fa6c51390da 100644 --- a/trunk/arch/arm/mach-integrator/core.c +++ b/trunk/arch/arm/mach-integrator/core.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-integrator/integrator_ap.c b/trunk/arch/arm/mach-integrator/integrator_ap.c index 7b1055c8e0b9..3b2267529f5e 100644 --- a/trunk/arch/arm/mach-integrator/integrator_ap.c +++ b/trunk/arch/arm/mach-integrator/integrator_ap.c @@ -456,7 +456,7 @@ static void __init ap_init_timer(void) clk = clk_get_sys("ap_timer", NULL); BUG_ON(IS_ERR(clk)); - clk_enable(clk); + clk_prepare_enable(clk); rate = clk_get_rate(clk); writel(0, TIMER0_VA_BASE + TIMER_CTRL); diff --git a/trunk/arch/arm/mach-kirkwood/Makefile.boot b/trunk/arch/arm/mach-kirkwood/Makefile.boot index 2a576abf409b..a13299d758e1 100644 --- a/trunk/arch/arm/mach-kirkwood/Makefile.boot +++ b/trunk/arch/arm/mach-kirkwood/Makefile.boot @@ -7,7 +7,8 @@ dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns320.dtb dtb-$(CONFIG_MACH_DLINK_KIRKWOOD_DT) += kirkwood-dns325.dtb dtb-$(CONFIG_MACH_ICONNECT_DT) += kirkwood-iconnect.dtb dtb-$(CONFIG_MACH_IB62X0_DT) += kirkwood-ib62x0.dtb -dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-qnap-ts219.dtb +dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6281.dtb +dtb-$(CONFIG_MACH_TS219_DT) += kirkwood-ts219-6282.dtb dtb-$(CONFIG_MACH_GOFLEXNET_DT) += kirkwood-goflexnet.dtb -dbt-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb -dbt-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb +dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lschlv2.dtb +dtb-$(CONFIG_MACH_LSXL_DT) += kirkwood-lsxhl.dtb diff --git a/trunk/arch/arm/mach-kirkwood/common.c b/trunk/arch/arm/mach-kirkwood/common.c 
index c4b64adcbfce..1201191d7f1b 100644 --- a/trunk/arch/arm/mach-kirkwood/common.c +++ b/trunk/arch/arm/mach-kirkwood/common.c @@ -301,7 +301,7 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge00_init(eth_data, GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, - IRQ_KIRKWOOD_GE00_ERR); + IRQ_KIRKWOOD_GE00_ERR, 1600); /* The interface forgets the MAC address assigned by u-boot if the clock is turned off, so claim the clk now. */ clk_prepare_enable(ge0); @@ -315,7 +315,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge01_init(eth_data, GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, - IRQ_KIRKWOOD_GE01_ERR); + IRQ_KIRKWOOD_GE01_ERR, 1600); clk_prepare_enable(ge1); } @@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void) void __init kirkwood_init_early(void) { orion_time_set_base(TIMER_VIRT_BASE); + + /* + * Some Kirkwood devices allocate their coherent buffers from atomic + * context. Increase size of atomic coherent pool to make sure such + * the allocations won't fail. + */ + init_dma_coherent_pool_size(SZ_1M); } int kirkwood_tclk; diff --git a/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c b/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c index d93359379598..be90b7d0e10b 100644 --- a/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c +++ b/trunk/arch/arm/mach-kirkwood/db88f6281-bp-setup.c @@ -10,6 +10,7 @@ #include #include +#include #include #include #include diff --git a/trunk/arch/arm/mach-mmp/sram.c b/trunk/arch/arm/mach-mmp/sram.c index 4304f9519372..7e8a5a2e1ec7 100644 --- a/trunk/arch/arm/mach-mmp/sram.c +++ b/trunk/arch/arm/mach-mmp/sram.c @@ -68,7 +68,7 @@ static int __devinit sram_probe(struct platform_device *pdev) struct resource *res; int ret = 0; - if (!pdata && !pdata->pool_name) + if (!pdata || !pdata->pool_name) return -ENODEV; info = kzalloc(sizeof(*info), GFP_KERNEL); diff --git a/trunk/arch/arm/mach-mv78xx0/addr-map.c b/trunk/arch/arm/mach-mv78xx0/addr-map.c index 62b53d710efd..a9bc84180d21 100644 --- a/trunk/arch/arm/mach-mv78xx0/addr-map.c +++ b/trunk/arch/arm/mach-mv78xx0/addr-map.c @@ -37,7 +37,7 @@ #define WIN0_OFF(n) (BRIDGE_VIRT_BASE + 0x0000 + ((n) << 4)) #define WIN8_OFF(n) (BRIDGE_VIRT_BASE + 0x0900 + (((n) - 8) << 4)) -static void __init __iomem *win_cfg_base(int win) +static void __init __iomem *win_cfg_base(const struct orion_addr_map_cfg *cfg, int win) { /* * Find the control register base address for this window. 
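A few hunks up, the mach-mmp/sram.c change turns "if (!pdata && !pdata->pool_name)" into "if (!pdata || !pdata->pool_name)". The original condition dereferences pdata exactly when it is NULL and never looks at pool_name when it is not, so the probe could oops instead of returning -ENODEV. A small user-space illustration of the corrected short-circuit logic; the names and return values are stand-ins for the driver's.

    #include <stdio.h>

    struct sram_platdata {
            const char *pool_name;
    };

    /* Reject missing platform data or an unnamed pool, as the fixed probe does. */
    static int sram_check(const struct sram_platdata *pdata)
    {
            if (!pdata || !pdata->pool_name)   /* '||' short-circuits before the deref */
                    return -1;                 /* stands in for -ENODEV */
            return 0;
    }

    int main(void)
    {
            struct sram_platdata unnamed = { .pool_name = NULL };
            struct sram_platdata named   = { .pool_name = "sram-pool" };

            printf("%d %d %d\n",
                   sram_check(NULL),       /* -1: NULL caught without dereferencing */
                   sram_check(&unnamed),   /* -1: missing pool name caught */
                   sram_check(&named));    /*  0: accepted */
            return 0;
    }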
diff --git a/trunk/arch/arm/mach-mv78xx0/common.c b/trunk/arch/arm/mach-mv78xx0/common.c
index b4c53b846c9c..3057f7d4329a 100644
--- a/trunk/arch/arm/mach-mv78xx0/common.c
+++ b/trunk/arch/arm/mach-mv78xx0/common.c
@@ -213,7 +213,8 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	orion_ge00_init(eth_data,
 			GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
-			IRQ_MV78XX0_GE_ERR);
+			IRQ_MV78XX0_GE_ERR,
+			MV643XX_TX_CSUM_DEFAULT_LIMIT);
 }
 
 
@@ -224,7 +225,8 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
 {
 	orion_ge01_init(eth_data,
 			GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
-			NO_IRQ);
+			NO_IRQ,
+			MV643XX_TX_CSUM_DEFAULT_LIMIT);
 }
 
 
diff --git a/trunk/arch/arm/mach-mxs/Kconfig b/trunk/arch/arm/mach-mxs/Kconfig
index ccdf83b17cf1..9a8bbda195b2 100644
--- a/trunk/arch/arm/mach-mxs/Kconfig
+++ b/trunk/arch/arm/mach-mxs/Kconfig
@@ -2,9 +2,6 @@ if ARCH_MXS
 
 source "arch/arm/mach-mxs/devices/Kconfig"
 
-config MXS_OCOTP
-	bool
-
 config SOC_IMX23
 	bool
 	select ARM_AMBA
@@ -66,7 +63,6 @@ config MACH_MX28EVK
 	select MXS_HAVE_PLATFORM_MXS_SAIF
 	select MXS_HAVE_PLATFORM_MXS_I2C
 	select MXS_HAVE_PLATFORM_RTC_STMP3XXX
-	select MXS_OCOTP
 	help
 	  Include support for MX28EVK platform. This includes specific
 	  configurations for the board and its peripherals.
@@ -94,7 +90,6 @@ config MODULE_M28
 	select MXS_HAVE_PLATFORM_MXS_I2C
 	select MXS_HAVE_PLATFORM_MXS_MMC
 	select MXS_HAVE_PLATFORM_MXSFB
-	select MXS_OCOTP
 
 config MODULE_APX4
 	bool
@@ -106,7 +101,6 @@ config MODULE_APX4
 	select MXS_HAVE_PLATFORM_MXS_I2C
 	select MXS_HAVE_PLATFORM_MXS_MMC
 	select MXS_HAVE_PLATFORM_MXS_SAIF
-	select MXS_OCOTP
 
 config MACH_TX28
 	bool "Ka-Ro TX28 module"
diff --git a/trunk/arch/arm/mach-mxs/Makefile b/trunk/arch/arm/mach-mxs/Makefile
index e41590ccb437..fed3695a1339 100644
--- a/trunk/arch/arm/mach-mxs/Makefile
+++ b/trunk/arch/arm/mach-mxs/Makefile
@@ -1,7 +1,6 @@
 # Common support
-obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
+obj-y := devices.o icoll.o iomux.o ocotp.o system.o timer.o mm.o
 
-obj-$(CONFIG_MXS_OCOTP) += ocotp.o
 obj-$(CONFIG_PM) += pm.o
 
 obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
diff --git a/trunk/arch/arm/mach-mxs/mach-mxs.c b/trunk/arch/arm/mach-mxs/mach-mxs.c
index 8dabfe81d07c..ff886e01a0b0 100644
--- a/trunk/arch/arm/mach-mxs/mach-mxs.c
+++ b/trunk/arch/arm/mach-mxs/mach-mxs.c
@@ -261,7 +261,7 @@ static void __init apx4devkit_init(void)
 	enable_clk_enet_out();
 
 	if (IS_BUILTIN(CONFIG_PHYLIB))
-		phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
+		phy_register_fixup_for_uid(PHY_ID_KSZ8051, MICREL_PHY_ID_MASK,
 					   apx4devkit_phy_fixup);
 
 	mxsfb_pdata.mode_list = apx4devkit_video_modes;
diff --git a/trunk/arch/arm/mach-omap2/Kconfig b/trunk/arch/arm/mach-omap2/Kconfig
index dd2db025f778..346fd26f3aa6 100644
--- a/trunk/arch/arm/mach-omap2/Kconfig
+++ b/trunk/arch/arm/mach-omap2/Kconfig
@@ -62,13 +62,14 @@ config ARCH_OMAP4
 	select PM_OPP if PM
 	select USB_ARCH_HAS_EHCI if USB_SUPPORT
 	select ARM_CPU_SUSPEND if PM
-	select ARCH_NEEDS_CPU_IDLE_COUPLED
+	select ARCH_NEEDS_CPU_IDLE_COUPLED if SMP
 
 config SOC_OMAP5
 	bool "TI OMAP5"
 	select CPU_V7
 	select ARM_GIC
 	select HAVE_SMP
+	select ARM_CPU_SUSPEND if PM
 
 comment "OMAP Core Type"
 	depends on ARCH_OMAP2
@@ -231,10 +232,11 @@ config MACH_OMAP3_PANDORA
 	select OMAP_PACKAGE_CBB
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
 	bool "OMAP3 Touch Book"
 	depends on ARCH_OMAP3
 	default y
+	select OMAP_PACKAGE_CBB
 
 config MACH_OMAP_3430SDP
 	bool "OMAP 3430 SDP board"
diff --git a/trunk/arch/arm/mach-omap2/Makefile b/trunk/arch/arm/mach-omap2/Makefile
index f6a24b3f9c4f..34c2c7f59f0a 100644
--- a/trunk/arch/arm/mach-omap2/Makefile
+++ b/trunk/arch/arm/mach-omap2/Makefile
@@ -255,7 +255,7 @@ obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
 obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
 obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
 obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
-obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o
+obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o
 obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o
 obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o
diff --git a/trunk/arch/arm/mach-omap2/board-igep0020.c b/trunk/arch/arm/mach-omap2/board-igep0020.c
index 74915295482e..28214483aaba 100644
--- a/trunk/arch/arm/mach-omap2/board-igep0020.c
+++ b/trunk/arch/arm/mach-omap2/board-igep0020.c
@@ -554,6 +554,8 @@ static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = {
 
 #ifdef CONFIG_OMAP_MUX
 static struct omap_board_mux board_mux[] __initdata = {
+	/* SMSC9221 LAN Controller ETH IRQ (GPIO_176) */
+	OMAP3_MUX(MCSPI1_CS2, OMAP_MUX_MODE4 | OMAP_PIN_INPUT),
 	{ .reg_offset = OMAP_MUX_TERMINATOR },
 };
 #endif
diff --git a/trunk/arch/arm/mach-omap2/board-omap3evm.c b/trunk/arch/arm/mach-omap2/board-omap3evm.c
index ef230a0eb5eb..0d362e9f9cb9 100644
--- a/trunk/arch/arm/mach-omap2/board-omap3evm.c
+++ b/trunk/arch/arm/mach-omap2/board-omap3evm.c
@@ -58,6 +58,7 @@
 #include "hsmmc.h"
 #include "common-board-devices.h"
 
+#define OMAP3_EVM_TS_GPIO 175
 #define OMAP3_EVM_EHCI_VBUS 22
 #define OMAP3_EVM_EHCI_SELECT 61
diff --git a/trunk/arch/arm/mach-omap2/clock33xx_data.c b/trunk/arch/arm/mach-omap2/clock33xx_data.c
index 25bbcc7ca4dc..ae27de8899a6 100644
--- a/trunk/arch/arm/mach-omap2/clock33xx_data.c
+++ b/trunk/arch/arm/mach-omap2/clock33xx_data.c
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
 	CLK(NULL, "mmu_fck", &mmu_fck, CK_AM33XX),
 	CLK(NULL, "smartreflex0_fck", &smartreflex0_fck, CK_AM33XX),
 	CLK(NULL, "smartreflex1_fck", &smartreflex1_fck, CK_AM33XX),
-	CLK(NULL, "gpt1_fck", &timer1_fck, CK_AM33XX),
-	CLK(NULL, "gpt2_fck", &timer2_fck, CK_AM33XX),
-	CLK(NULL, "gpt3_fck", &timer3_fck, CK_AM33XX),
-	CLK(NULL, "gpt4_fck", &timer4_fck, CK_AM33XX),
-	CLK(NULL, "gpt5_fck", &timer5_fck, CK_AM33XX),
-	CLK(NULL, "gpt6_fck", &timer6_fck, CK_AM33XX),
-	CLK(NULL, "gpt7_fck", &timer7_fck, CK_AM33XX),
+	CLK(NULL, "timer1_fck", &timer1_fck, CK_AM33XX),
+	CLK(NULL, "timer2_fck", &timer2_fck, CK_AM33XX),
+	CLK(NULL, "timer3_fck", &timer3_fck, CK_AM33XX),
+	CLK(NULL, "timer4_fck", &timer4_fck, CK_AM33XX),
+	CLK(NULL, "timer5_fck", &timer5_fck, CK_AM33XX),
+	CLK(NULL, "timer6_fck", &timer6_fck, CK_AM33XX),
+	CLK(NULL, "timer7_fck", &timer7_fck, CK_AM33XX),
 	CLK(NULL, "usbotg_fck", &usbotg_fck, CK_AM33XX),
 	CLK(NULL, "ieee5000_fck", &ieee5000_fck, CK_AM33XX),
 	CLK(NULL, "wdt1_fck", &wdt1_fck, CK_AM33XX),
diff --git a/trunk/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c b/trunk/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
index a0d68dbecfa3..f99e65cfb862 100644
--- a/trunk/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
+++ b/trunk/arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
 		_clkdm_del_autodeps(clkdm);
 }
 
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_add_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+			omap3_clkdm_wakeup(clkdm);
+	}
+
+	return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_del_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+			omap3_clkdm_sleep(clkdm);
+	}
+
+	return 0;
+}
+
 struct clkdm_ops omap2_clkdm_operations = {
 	.clkdm_add_wkdep = omap2_clkdm_add_wkdep,
 	.clkdm_del_wkdep = omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
 	.clkdm_wakeup = omap3_clkdm_wakeup,
 	.clkdm_allow_idle = omap3_clkdm_allow_idle,
 	.clkdm_deny_idle = omap3_clkdm_deny_idle,
-	.clkdm_clk_enable = omap2_clkdm_clk_enable,
-	.clkdm_clk_disable = omap2_clkdm_clk_disable,
+	.clkdm_clk_enable = omap3xxx_clkdm_clk_enable,
+	.clkdm_clk_disable = omap3xxx_clkdm_clk_disable,
 };
diff --git a/trunk/arch/arm/mach-omap2/cm-regbits-34xx.h b/trunk/arch/arm/mach-omap2/cm-regbits-34xx.h
index 766338fe4d34..975f6bda0e0b 100644
--- a/trunk/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/trunk/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -67,6 +67,7 @@
 #define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0)
 
 /* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT 0
 #define OMAP3430_ST_IVA2_MASK (1 << 0)
 
 /* CM_IDLEST_PLL_IVA2 */
diff --git a/trunk/arch/arm/mach-omap2/common-board-devices.c b/trunk/arch/arm/mach-omap2/common-board-devices.c
index 14734746457c..c1875862679f 100644
--- a/trunk/arch/arm/mach-omap2/common-board-devices.c
+++ b/trunk/arch/arm/mach-omap2/common-board-devices.c
@@ -35,16 +35,6 @@ static struct omap2_mcspi_device_config ads7846_mcspi_config = {
 	.turbo_mode = 0,
 };
 
-/*
- * ADS7846 driver maybe request a gpio according to the value
- * of pdata->get_pendown_state, but we have done this. So set
- * get_pendown_state to avoid twice gpio requesting.
- */ -static int omap3_get_pendown_state(void) -{ - return !gpio_get_value(OMAP3_EVM_TS_GPIO); -} - static struct ads7846_platform_data ads7846_config = { .x_max = 0x0fff, .y_max = 0x0fff, @@ -55,7 +45,6 @@ static struct ads7846_platform_data ads7846_config = { .debounce_rep = 1, .gpio_pendown = -EINVAL, .keep_vref_on = 1, - .get_pendown_state = &omap3_get_pendown_state, }; static struct spi_board_info ads7846_spi_board_info __initdata = { diff --git a/trunk/arch/arm/mach-omap2/common-board-devices.h b/trunk/arch/arm/mach-omap2/common-board-devices.h index 4c4ef6a6166b..a0b4a42836ab 100644 --- a/trunk/arch/arm/mach-omap2/common-board-devices.h +++ b/trunk/arch/arm/mach-omap2/common-board-devices.h @@ -4,7 +4,6 @@ #include "twl-common.h" #define NAND_BLOCK_SIZE SZ_128K -#define OMAP3_EVM_TS_GPIO 175 struct mtd_partition; struct ads7846_platform_data; diff --git a/trunk/arch/arm/mach-omap2/cpuidle44xx.c b/trunk/arch/arm/mach-omap2/cpuidle44xx.c index ee05e193fc61..288bee6cbb76 100644 --- a/trunk/arch/arm/mach-omap2/cpuidle44xx.c +++ b/trunk/arch/arm/mach-omap2/cpuidle44xx.c @@ -238,8 +238,9 @@ int __init omap4_idle_init(void) for_each_cpu(cpu_id, cpu_online_mask) { dev = &per_cpu(omap4_idle_dev, cpu_id); dev->cpu = cpu_id; +#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED dev->coupled_cpus = *cpu_online_mask; - +#endif cpuidle_register_driver(&omap4_idle_driver); if (cpuidle_register_device(dev)) { diff --git a/trunk/arch/arm/mach-omap2/mux.h b/trunk/arch/arm/mach-omap2/mux.h index 471e62a74a16..76f9b3c2f586 100644 --- a/trunk/arch/arm/mach-omap2/mux.h +++ b/trunk/arch/arm/mach-omap2/mux.h @@ -127,7 +127,6 @@ struct omap_mux_partition { * @gpio: GPIO number * @muxnames: available signal modes for a ball * @balls: available balls on the package - * @partition: mux partition */ struct omap_mux { u16 reg_offset; diff --git a/trunk/arch/arm/mach-omap2/omap-wakeupgen.c b/trunk/arch/arm/mach-omap2/omap-wakeupgen.c index 05fdebfaa195..330d4c6e746b 100644 --- a/trunk/arch/arm/mach-omap2/omap-wakeupgen.c +++ b/trunk/arch/arm/mach-omap2/omap-wakeupgen.c @@ -46,7 +46,7 @@ static void __iomem *wakeupgen_base; static void __iomem *sar_base; static DEFINE_SPINLOCK(wakeupgen_lock); -static unsigned int irq_target_cpu[NR_IRQS]; +static unsigned int irq_target_cpu[MAX_IRQS]; static unsigned int irq_banks = MAX_NR_REG_BANKS; static unsigned int max_irqs = MAX_IRQS; static unsigned int omap_secure_apis; diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.c b/trunk/arch/arm/mach-omap2/omap_hwmod.c index 6ca8e519968d..37afbd173c2c 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod.c @@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh) _enable_sysc(oh); } } else { + _omap4_disable_module(oh); _disable_clocks(oh); pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n", oh->name, r); diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index c9e38200216b..ce7e6068768f 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = { /* IVA2 (IVA2) */ static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = { - { .name = "logic", .rst_shift = 0 }, - { .name = "seq0", .rst_shift = 1 }, - { .name = "seq1", .rst_shift = 2 }, + { .name = "logic", .rst_shift = 0, .st_shift = 8 }, + { .name = "seq0", .rst_shift = 1, .st_shift = 9 }, + { .name = "seq1", .rst_shift = 2, .st_shift = 10 }, }; static struct 
omap_hwmod omap3xxx_iva_hwmod = { @@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = { .rst_lines = omap3xxx_iva_resets, .rst_lines_cnt = ARRAY_SIZE(omap3xxx_iva_resets), .main_clk = "iva2_ck", + .prcm = { + .omap2 = { + .module_offs = OMAP3430_IVA2_MOD, + .prcm_reg_id = 1, + .module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT, + .idlest_reg_id = 1, + .idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT, + } + }, }; /* timer class */ diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 242aee498ceb..afb60917a948 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -4210,7 +4210,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = { }; /* dsp -> sl2if */ -static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = { +static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = { .master = &omap44xx_dsp_hwmod, .slave = &omap44xx_sl2if_hwmod, .clk = "dpll_iva_m5x2_ck", @@ -4828,7 +4828,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = { }; /* iva -> sl2if */ -static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = { +static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = { .master = &omap44xx_iva_hwmod, .slave = &omap44xx_sl2if_hwmod, .clk = "dpll_iva_m5x2_ck", @@ -5362,7 +5362,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = { }; /* l3_main_2 -> sl2if */ -static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = { +static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = { .master = &omap44xx_l3_main_2_hwmod, .slave = &omap44xx_sl2if_hwmod, .clk = "l3_div_ck", @@ -6032,7 +6032,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_abe__dmic, &omap44xx_l4_abe__dmic_dma, &omap44xx_dsp__iva, - &omap44xx_dsp__sl2if, + /* &omap44xx_dsp__sl2if, */ &omap44xx_l4_cfg__dsp, &omap44xx_l3_main_2__dss, &omap44xx_l4_per__dss, @@ -6068,7 +6068,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_per__i2c4, &omap44xx_l3_main_2__ipu, &omap44xx_l3_main_2__iss, - &omap44xx_iva__sl2if, + /* &omap44xx_iva__sl2if, */ &omap44xx_l3_main_2__iva, &omap44xx_l4_wkup__kbd, &omap44xx_l4_cfg__mailbox, @@ -6099,7 +6099,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = { &omap44xx_l4_cfg__cm_core, &omap44xx_l4_wkup__prm, &omap44xx_l4_wkup__scrm, - &omap44xx_l3_main_2__sl2if, + /* &omap44xx_l3_main_2__sl2if, */ &omap44xx_l4_abe__slimbus1, &omap44xx_l4_abe__slimbus1_dma, &omap44xx_l4_per__slimbus2, diff --git a/trunk/arch/arm/mach-omap2/opp4xxx_data.c b/trunk/arch/arm/mach-omap2/opp4xxx_data.c index 2293ba27101b..c95415da23c2 100644 --- a/trunk/arch/arm/mach-omap2/opp4xxx_data.c +++ b/trunk/arch/arm/mach-omap2/opp4xxx_data.c @@ -94,7 +94,7 @@ int __init omap4_opp_init(void) { int r = -ENODEV; - if (!cpu_is_omap44xx()) + if (!cpu_is_omap443x()) return r; r = omap_init_opp_table(omap44xx_opp_def_list, diff --git a/trunk/arch/arm/mach-omap2/pm34xx.c b/trunk/arch/arm/mach-omap2/pm34xx.c index e4fc88c65dbd..05bd8f02723f 100644 --- a/trunk/arch/arm/mach-omap2/pm34xx.c +++ b/trunk/arch/arm/mach-omap2/pm34xx.c @@ -272,21 +272,16 @@ void omap_sram_idle(void) per_next_state = pwrdm_read_next_pwrst(per_pwrdm); core_next_state = pwrdm_read_next_pwrst(core_pwrdm); - if (mpu_next_state < PWRDM_POWER_ON) { - pwrdm_pre_transition(mpu_pwrdm); - pwrdm_pre_transition(neon_pwrdm); - } + pwrdm_pre_transition(NULL); /* PER */ if (per_next_state < 
PWRDM_POWER_ON) { - pwrdm_pre_transition(per_pwrdm); per_going_off = (per_next_state == PWRDM_POWER_OFF) ? 1 : 0; omap2_gpio_prepare_for_idle(per_going_off); } /* CORE */ if (core_next_state < PWRDM_POWER_ON) { - pwrdm_pre_transition(core_pwrdm); if (core_next_state == PWRDM_POWER_OFF) { omap3_core_save_context(); omap3_cm_save_context(); @@ -339,20 +334,14 @@ void omap_sram_idle(void) omap2_prm_clear_mod_reg_bits(OMAP3430_AUTO_OFF_MASK, OMAP3430_GR_MOD, OMAP3_PRM_VOLTCTRL_OFFSET); - pwrdm_post_transition(core_pwrdm); } omap3_intc_resume_idle(); + pwrdm_post_transition(NULL); + /* PER */ - if (per_next_state < PWRDM_POWER_ON) { + if (per_next_state < PWRDM_POWER_ON) omap2_gpio_resume_after_idle(); - pwrdm_post_transition(per_pwrdm); - } - - if (mpu_next_state < PWRDM_POWER_ON) { - pwrdm_post_transition(mpu_pwrdm); - pwrdm_post_transition(neon_pwrdm); - } } static void omap3_pm_idle(void) diff --git a/trunk/arch/arm/mach-omap2/sleep44xx.S b/trunk/arch/arm/mach-omap2/sleep44xx.S index 9f6b83d1b193..91e71d8f46f0 100644 --- a/trunk/arch/arm/mach-omap2/sleep44xx.S +++ b/trunk/arch/arm/mach-omap2/sleep44xx.S @@ -56,9 +56,13 @@ ppa_por_params: * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET. * It returns to the caller for CPU INACTIVE and ON power states or in case * CPU failed to transition to targeted OFF/DORMANT state. + * + * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save + * stack frame and it expects the caller to take care of it. Hence the entire + * stack frame is saved to avoid possible stack corruption. */ ENTRY(omap4_finish_suspend) - stmfd sp!, {lr} + stmfd sp!, {r4-r12, lr} cmp r0, #0x0 beq do_WFI @ No lowpower state, jump to WFI @@ -226,7 +230,7 @@ scu_gp_clear: skip_scu_gp_clear: isb dsb - ldmfd sp!, {pc} + ldmfd sp!, {r4-r12, pc} ENDPROC(omap4_finish_suspend) /* diff --git a/trunk/arch/arm/mach-omap2/timer.c b/trunk/arch/arm/mach-omap2/timer.c index 2ff6d41ec6c6..2ba4f57dda86 100644 --- a/trunk/arch/arm/mach-omap2/timer.c +++ b/trunk/arch/arm/mach-omap2/timer.c @@ -260,6 +260,7 @@ static u32 notrace dmtimer_read_sched_clock(void) return 0; } +#ifdef CONFIG_OMAP_32K_TIMER /* Setup free-running counter for clocksource */ static int __init omap2_sync32k_clocksource_init(void) { @@ -299,6 +300,12 @@ static int __init omap2_sync32k_clocksource_init(void) return ret; } +#else +static inline int omap2_sync32k_clocksource_init(void) +{ + return -ENODEV; +} +#endif static void __init omap2_gptimer_clocksource_init(int gptimer_id, const char *fck_source) diff --git a/trunk/arch/arm/mach-omap2/twl-common.c b/trunk/arch/arm/mach-omap2/twl-common.c index de47f170ba50..db5ff6642375 100644 --- a/trunk/arch/arm/mach-omap2/twl-common.c +++ b/trunk/arch/arm/mach-omap2/twl-common.c @@ -67,6 +67,7 @@ void __init omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, struct twl4030_platform_data *pmic_data) { + omap_mux_init_signal("sys_nirq", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE); strncpy(pmic_i2c_board_info.type, pmic_type, sizeof(pmic_i2c_board_info.type)); pmic_i2c_board_info.irq = pmic_irq; diff --git a/trunk/arch/arm/mach-orion5x/common.c b/trunk/arch/arm/mach-orion5x/common.c index 9148b229d0de..a6cd14ab1e4e 100644 --- a/trunk/arch/arm/mach-orion5x/common.c +++ b/trunk/arch/arm/mach-orion5x/common.c @@ -109,7 +109,8 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data) { orion_ge00_init(eth_data, ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM, - IRQ_ORION5X_ETH_ERR); + IRQ_ORION5X_ETH_ERR, + 
MV643XX_TX_CSUM_DEFAULT_LIMIT); } @@ -203,6 +204,13 @@ void __init orion5x_wdt_init(void) void __init orion5x_init_early(void) { orion_time_set_base(TIMER_VIRT_BASE); + + /* + * Some Orion5x devices allocate their coherent buffers from atomic + * context. Increase size of atomic coherent pool to make sure such + * the allocations won't fail. + */ + init_dma_coherent_pool_size(SZ_1M); } int orion5x_tclk; diff --git a/trunk/arch/arm/mach-pxa/raumfeld.c b/trunk/arch/arm/mach-pxa/raumfeld.c index 5905ed130e94..d89d87ae144c 100644 --- a/trunk/arch/arm/mach-pxa/raumfeld.c +++ b/trunk/arch/arm/mach-pxa/raumfeld.c @@ -953,12 +953,12 @@ static struct i2c_board_info raumfeld_connector_i2c_board_info __initdata = { static struct eeti_ts_platform_data eeti_ts_pdata = { .irq_active_high = 1, + .irq_gpio = GPIO_TOUCH_IRQ, }; static struct i2c_board_info raumfeld_controller_i2c_board_info __initdata = { .type = "eeti_ts", .addr = 0x0a, - .irq = PXA_GPIO_TO_IRQ(GPIO_TOUCH_IRQ), .platform_data = &eeti_ts_pdata, }; diff --git a/trunk/arch/arm/mach-s3c24xx/Kconfig b/trunk/arch/arm/mach-s3c24xx/Kconfig index e24961109b70..d56b0f7f2b20 100644 --- a/trunk/arch/arm/mach-s3c24xx/Kconfig +++ b/trunk/arch/arm/mach-s3c24xx/Kconfig @@ -483,7 +483,7 @@ config MACH_NEO1973_GTA02 select I2C select POWER_SUPPLY select MACH_NEO1973 - select S3C2410_PWM + select S3C24XX_PWM select S3C_DEV_USB_HOST help Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone @@ -493,7 +493,7 @@ config MACH_RX1950 select S3C24XX_DCLK select PM_H1940 if PM select I2C - select S3C2410_PWM + select S3C24XX_PWM select S3C_DEV_NAND select S3C2410_IOTIMING if S3C2440_CPUFREQ select S3C2440_XTAL_16934400 diff --git a/trunk/arch/arm/mach-s3c24xx/include/mach/dma.h b/trunk/arch/arm/mach-s3c24xx/include/mach/dma.h index 454831b66037..ee99fd56c043 100644 --- a/trunk/arch/arm/mach-s3c24xx/include/mach/dma.h +++ b/trunk/arch/arm/mach-s3c24xx/include/mach/dma.h @@ -24,7 +24,8 @@ */ enum dma_ch { - DMACH_XD0, + DMACH_DT_PROP = -1, /* not yet supported, do not use */ + DMACH_XD0 = 0, DMACH_XD1, DMACH_SDI, DMACH_SPI0, diff --git a/trunk/arch/arm/mach-sa1100/leds-hackkit.c b/trunk/arch/arm/mach-sa1100/leds-hackkit.c index 6a2352436e62..f8e47235babe 100644 --- a/trunk/arch/arm/mach-sa1100/leds-hackkit.c +++ b/trunk/arch/arm/mach-sa1100/leds-hackkit.c @@ -10,6 +10,7 @@ * as cpu led, the green one is used as timer led. */ #include +#include #include #include diff --git a/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c b/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c index cf10f92856dc..453a6e50db8b 100644 --- a/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/trunk/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = { }; /* GPIO KEY */ -#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 } +#define GPIO_KEY(c, g, d, ...) 
\ + { .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ } static struct gpio_keys_button gpio_buttons[] = { - GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW1"), - GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW2"), - GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW3"), - GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW4"), + GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1), + GPIO_KEY(KEY_BACK, GPIO_PORT100, "SW4"), + GPIO_KEY(KEY_MENU, GPIO_PORT97, "SW5"), + GPIO_KEY(KEY_HOME, GPIO_PORT98, "SW6"), }; static struct gpio_keys_platform_data gpio_key_info = { @@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = { &camera_device, &ceu0_device, &fsi_device, - &fsi_hdmi_device, &fsi_wm8978_device, + &fsi_hdmi_device, }; static void __init eva_clock_init(void) diff --git a/trunk/arch/arm/mach-shmobile/board-kzm9g.c b/trunk/arch/arm/mach-shmobile/board-kzm9g.c index 53b7ea92c32c..3b8a0171c3cb 100644 --- a/trunk/arch/arm/mach-shmobile/board-kzm9g.c +++ b/trunk/arch/arm/mach-shmobile/board-kzm9g.c @@ -346,11 +346,11 @@ static struct resource sh_mmcif_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .start = gic_spi(141), + .start = gic_spi(140), .flags = IORESOURCE_IRQ, }, [2] = { - .start = gic_spi(140), + .start = gic_spi(141), .flags = IORESOURCE_IRQ, }, }; diff --git a/trunk/arch/arm/mach-shmobile/board-mackerel.c b/trunk/arch/arm/mach-shmobile/board-mackerel.c index 7ea2b31e3199..c129542f6aed 100644 --- a/trunk/arch/arm/mach-shmobile/board-mackerel.c +++ b/trunk/arch/arm/mach-shmobile/board-mackerel.c @@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = { * - J30 "open" * - modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET * - add .get_vbus = usbhs_get_vbus in usbhs1_private + * - check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices. 
*/ #define IRQ8 evt2irq(0x0300) #define USB_PHY_MODE (1 << 4) @@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = { &nor_flash_device, &smc911x_device, &lcdc_device, - &usbhs1_device, &usbhs0_device, + &usbhs1_device, &leds_device, &fsi_device, &fsi_ak4643_device, diff --git a/trunk/arch/arm/mach-shmobile/board-marzen.c b/trunk/arch/arm/mach-shmobile/board-marzen.c index 3a528cf4366c..fcf5a47f4772 100644 --- a/trunk/arch/arm/mach-shmobile/board-marzen.c +++ b/trunk/arch/arm/mach-shmobile/board-marzen.c @@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = { static struct platform_device eth_device = { .name = "smsc911x", - .id = 0, + .id = -1, .dev = { .platform_data = &smsc911x_platdata, }, diff --git a/trunk/arch/arm/mach-shmobile/intc-sh73a0.c b/trunk/arch/arm/mach-shmobile/intc-sh73a0.c index ee447404c857..588555a67d9c 100644 --- a/trunk/arch/arm/mach-shmobile/intc-sh73a0.c +++ b/trunk/arch/arm/mach-shmobile/intc-sh73a0.c @@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on) return 0; /* always allow wakeup */ } -#define RELOC_BASE 0x1000 +#define RELOC_BASE 0x1200 -/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */ +/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */ #define INTCS_VECT_RELOC(n, vect) INTCS_VECT((n), (vect) + RELOC_BASE) INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000, diff --git a/trunk/arch/arm/mach-tegra/board-harmony-power.c b/trunk/arch/arm/mach-tegra/board-harmony-power.c index 8fd387bf31f0..94486e7e9dfd 100644 --- a/trunk/arch/arm/mach-tegra/board-harmony-power.c +++ b/trunk/arch/arm/mach-tegra/board-harmony-power.c @@ -51,7 +51,7 @@ static struct regulator_init_data ldo0_data = { .consumer_supplies = tps658621_ldo0_supply, }; -#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv) \ +#define HARMONY_REGULATOR_INIT(_id, _name, _supply, _minmv, _maxmv, _on)\ static struct regulator_init_data _id##_data = { \ .supply_regulator = _supply, \ .constraints = { \ @@ -63,21 +63,29 @@ static struct regulator_init_data ldo0_data = { .valid_ops_mask = (REGULATOR_CHANGE_MODE | \ REGULATOR_CHANGE_STATUS | \ REGULATOR_CHANGE_VOLTAGE), \ + .always_on = _on, \ }, \ } -HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500); -HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500); -HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550); -HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500); -HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500); -HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475); -HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", NULL, 1250, 3300); -HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300); -HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300); +static struct regulator_init_data sys_data = { + .supply_regulator = "vdd_5v0", + .constraints = { + .name = "vdd_sys", + }, +}; + +HARMONY_REGULATOR_INIT(sm0, "vdd_sm0", "vdd_sys", 725, 1500, 1); +HARMONY_REGULATOR_INIT(sm1, "vdd_sm1", "vdd_sys", 725, 1500, 1); +HARMONY_REGULATOR_INIT(sm2, "vdd_sm2", "vdd_sys", 3000, 4550, 1); +HARMONY_REGULATOR_INIT(ldo1, "vdd_ldo1", "vdd_sm2", 725, 1500, 1); +HARMONY_REGULATOR_INIT(ldo2, "vdd_ldo2", "vdd_sm2", 725, 1500, 0); +HARMONY_REGULATOR_INIT(ldo3, "vdd_ldo3", 
"vdd_sm2", 1250, 3300, 1); +HARMONY_REGULATOR_INIT(ldo4, "vdd_ldo4", "vdd_sm2", 1700, 2475, 1); +HARMONY_REGULATOR_INIT(ldo5, "vdd_ldo5", "vdd_sys", 1250, 3300, 1); +HARMONY_REGULATOR_INIT(ldo6, "vdd_ldo6", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo7, "vdd_ldo7", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo8, "vdd_ldo8", "vdd_sm2", 1250, 3300, 0); +HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300, 1); #define TPS_REG(_id, _data) \ { \ @@ -87,6 +95,7 @@ HARMONY_REGULATOR_INIT(ldo9, "vdd_ldo9", "vdd_sm2", 1250, 3300); } static struct tps6586x_subdev_info tps_devs[] = { + TPS_REG(SYS, &sys_data), TPS_REG(SM_0, &sm0_data), TPS_REG(SM_1, &sm1_data), TPS_REG(SM_2, &sm2_data), @@ -119,9 +128,10 @@ static struct i2c_board_info __initdata harmony_regulators[] = { int __init harmony_regulator_init(void) { + regulator_register_always_on(0, "vdd_5v0", + NULL, 0, 5000000); + if (machine_is_harmony()) { - regulator_register_always_on(0, "vdd_sys", - NULL, 0, 5000000); i2c_register_board_info(3, harmony_regulators, 1); } else { /* Harmony, booted using device tree */ struct device_node *np; diff --git a/trunk/arch/arm/mach-ux500/Kconfig b/trunk/arch/arm/mach-ux500/Kconfig index c013bbf79cac..53d3d46dec12 100644 --- a/trunk/arch/arm/mach-ux500/Kconfig +++ b/trunk/arch/arm/mach-ux500/Kconfig @@ -41,7 +41,6 @@ config MACH_HREFV60 config MACH_SNOWBALL bool "U8500 Snowball platform" select MACH_MOP500 - select LEDS_GPIO help Include support for the snowball development platform. diff --git a/trunk/arch/arm/mach-ux500/board-mop500-msp.c b/trunk/arch/arm/mach-ux500/board-mop500-msp.c index 996048038743..df15646036aa 100644 --- a/trunk/arch/arm/mach-ux500/board-mop500-msp.c +++ b/trunk/arch/arm/mach-ux500/board-mop500-msp.c @@ -191,9 +191,9 @@ static struct platform_device *db8500_add_msp_i2s(struct device *parent, return pdev; } -/* Platform device for ASoC U8500 machine */ -static struct platform_device snd_soc_u8500 = { - .name = "snd-soc-u8500", +/* Platform device for ASoC MOP500 machine */ +static struct platform_device snd_soc_mop500 = { + .name = "snd-soc-mop500", .id = 0, .dev = { .platform_data = NULL, @@ -227,8 +227,8 @@ int mop500_msp_init(struct device *parent) { struct platform_device *msp1; - pr_info("%s: Register platform-device 'snd-soc-u8500'.\n", __func__); - platform_device_register(&snd_soc_u8500); + pr_info("%s: Register platform-device 'snd-soc-mop500'.\n", __func__); + platform_device_register(&snd_soc_mop500); pr_info("Initialize MSP I2S-devices.\n"); db8500_add_msp_i2s(parent, 0, U8500_MSP0_BASE, IRQ_DB8500_MSP0, diff --git a/trunk/arch/arm/mach-ux500/board-mop500.c b/trunk/arch/arm/mach-ux500/board-mop500.c index 8674a890fd1c..a534d8880de1 100644 --- a/trunk/arch/arm/mach-ux500/board-mop500.c +++ b/trunk/arch/arm/mach-ux500/board-mop500.c @@ -797,6 +797,7 @@ static void __init u8500_init_machine(void) ARRAY_SIZE(mop500_platform_devs)); mop500_sdi_init(parent); + mop500_msp_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); i2c_register_board_info(2, mop500_i2c2_devices, @@ -804,6 +805,8 @@ static void __init u8500_init_machine(void) mop500_uib_init(); + } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { + mop500_msp_init(parent); } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { /* * The HREFv60 board removed a GPIO expander and routed @@ -815,6 +818,7 @@ static void __init u8500_init_machine(void) ARRAY_SIZE(mop500_platform_devs)); 
hrefv60_sdi_init(parent); + mop500_msp_init(parent); i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); i2c0_devs -= NUM_PRE_V60_I2C0_DEVICES; diff --git a/trunk/arch/arm/mm/context.c b/trunk/arch/arm/mm/context.c index 119bc52ab93e..4e07eec1270d 100644 --- a/trunk/arch/arm/mm/context.c +++ b/trunk/arch/arm/mm/context.c @@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd, pid = task_pid_nr(thread->task) << ASID_BITS; asm volatile( " mrc p15, 0, %0, c13, c0, 1\n" - " bfi %1, %0, #0, %2\n" - " mcr p15, 0, %1, c13, c0, 1\n" + " and %0, %0, %2\n" + " orr %0, %0, %1\n" + " mcr p15, 0, %0, c13, c0, 1\n" : "=r" (contextidr), "+r" (pid) - : "I" (ASID_BITS)); + : "I" (~ASID_MASK)); isb(); return NOTIFY_OK; diff --git a/trunk/arch/arm/mm/dma-mapping.c b/trunk/arch/arm/mm/dma-mapping.c index c2cdf6500f75..13f555d62491 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size) vunmap(cpu_addr); } +#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K + struct dma_pool { size_t size; spinlock_t lock; unsigned long *bitmap; unsigned long nr_pages; void *vaddr; - struct page *page; + struct page **pages; }; static struct dma_pool atomic_pool = { - .size = SZ_256K, + .size = DEFAULT_DMA_COHERENT_POOL_SIZE, }; static int __init early_coherent_pool(char *p) @@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p) } early_param("coherent_pool", early_coherent_pool); +void __init init_dma_coherent_pool_size(unsigned long size) +{ + /* + * Catch any attempt to set the pool size too late. + */ + BUG_ON(atomic_pool.vaddr); + + /* + * Set architecture specific coherent pool size only if + * it has not been changed by kernel command line parameter. + */ + if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE) + atomic_pool.size = size; +} + /* * Initialise the coherent pool for atomic allocations. 
*/ @@ -297,6 +314,7 @@ static int __init atomic_pool_init(void) unsigned long nr_pages = pool->size >> PAGE_SHIFT; unsigned long *bitmap; struct page *page; + struct page **pages; void *ptr; int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long); @@ -304,21 +322,33 @@ static int __init atomic_pool_init(void) if (!bitmap) goto no_bitmap; + pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL); + if (!pages) + goto no_pages; + if (IS_ENABLED(CONFIG_CMA)) ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page); else ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot, &page, NULL); if (ptr) { + int i; + + for (i = 0; i < nr_pages; i++) + pages[i] = page + i; + spin_lock_init(&pool->lock); pool->vaddr = ptr; - pool->page = page; + pool->pages = pages; pool->bitmap = bitmap; pool->nr_pages = nr_pages; pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n", (unsigned)pool->size / 1024); return 0; } + + kfree(pages); +no_pages: kfree(bitmap); no_bitmap: pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n", @@ -358,7 +388,7 @@ void __init dma_contiguous_remap(void) if (end > arm_lowmem_limit) end = arm_lowmem_limit; if (start >= end) - return; + continue; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); @@ -423,7 +453,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) unsigned int pageno; unsigned long flags; void *ptr = NULL; - size_t align; + unsigned long align_mask; if (!pool->vaddr) { WARN(1, "coherent pool not initialised!\n"); @@ -435,35 +465,53 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page) * small, so align them to their order in pages, minimum is a page * size. This helps reduce fragmentation of the DMA space. */ - align = PAGE_SIZE << get_order(size); + align_mask = (1 << get_order(size)) - 1; spin_lock_irqsave(&pool->lock, flags); pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages, - 0, count, (1 << align) - 1); + 0, count, align_mask); if (pageno < pool->nr_pages) { bitmap_set(pool->bitmap, pageno, count); ptr = pool->vaddr + PAGE_SIZE * pageno; - *ret_page = pool->page + pageno; + *ret_page = pool->pages[pageno]; + } else { + pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n" + "Please increase it with coherent_pool= kernel parameter!\n", + (unsigned)pool->size / 1024); } spin_unlock_irqrestore(&pool->lock, flags); return ptr; } +static bool __in_atomic_pool(void *start, size_t size) +{ + struct dma_pool *pool = &atomic_pool; + void *end = start + size; + void *pool_start = pool->vaddr; + void *pool_end = pool->vaddr + pool->size; + + if (start < pool_start || start >= pool_end) + return false; + + if (end <= pool_end) + return true; + + WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n", + start, end - 1, pool_start, pool_end - 1); + + return false; +} + static int __free_from_pool(void *start, size_t size) { struct dma_pool *pool = &atomic_pool; unsigned long pageno, count; unsigned long flags; - if (start < pool->vaddr || start > pool->vaddr + pool->size) + if (!__in_atomic_pool(start, size)) return 0; - if (start + size > pool->vaddr + pool->size) { - WARN(1, "freeing wrong coherent size from pool\n"); - return 0; - } - pageno = (start - pool->vaddr) >> PAGE_SHIFT; count = size >> PAGE_SHIFT; @@ -648,12 +696,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr, if (arch_is_coherent() || nommu()) { __dma_free_buffer(page, size); + } else if (__free_from_pool(cpu_addr, size)) { + return; 
} else if (!IS_ENABLED(CONFIG_CMA)) { __dma_free_remap(cpu_addr, size); __dma_free_buffer(page, size); } else { - if (__free_from_pool(cpu_addr, size)) - return; /* * Non-atomic allocations cannot be freed with IRQs disabled */ @@ -1090,10 +1138,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si return 0; } +static struct page **__atomic_get_pages(void *addr) +{ + struct dma_pool *pool = &atomic_pool; + struct page **pages = pool->pages; + int offs = (addr - pool->vaddr) >> PAGE_SHIFT; + + return pages + offs; +} + static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) { struct vm_struct *area; + if (__in_atomic_pool(cpu_addr, PAGE_SIZE)) + return __atomic_get_pages(cpu_addr); + if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) return cpu_addr; @@ -1103,6 +1163,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs) return NULL; } +static void *__iommu_alloc_atomic(struct device *dev, size_t size, + dma_addr_t *handle) +{ + struct page *page; + void *addr; + + addr = __alloc_from_pool(size, &page); + if (!addr) + return NULL; + + *handle = __iommu_create_mapping(dev, &page, size); + if (*handle == DMA_ERROR_CODE) + goto err_mapping; + + return addr; + +err_mapping: + __free_from_pool(addr, size); + return NULL; +} + +static void __iommu_free_atomic(struct device *dev, struct page **pages, + dma_addr_t handle, size_t size) +{ + __iommu_remove_mapping(dev, handle, size); + __free_from_pool(page_address(pages[0]), size); +} + static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs) { @@ -1113,6 +1201,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size, *handle = DMA_ERROR_CODE; size = PAGE_ALIGN(size); + if (gfp & GFP_ATOMIC) + return __iommu_alloc_atomic(dev, size, handle); + pages = __iommu_alloc_buffer(dev, size, gfp); if (!pages) return NULL; @@ -1179,6 +1270,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, return; } + if (__in_atomic_pool(cpu_addr, size)) { + __iommu_free_atomic(dev, pages, handle, size); + return; + } + if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { unmap_kernel_range((unsigned long)cpu_addr, size); vunmap(cpu_addr); diff --git a/trunk/arch/arm/mm/flush.c b/trunk/arch/arm/mm/flush.c index 77458548e031..40ca11ed6e5f 100644 --- a/trunk/arch/arm/mm/flush.c +++ b/trunk/arch/arm/mm/flush.c @@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval) struct page *page; struct address_space *mapping; - if (!pte_present_user(pteval)) - return; if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) /* only flush non-aliasing VIPT caches for exec mappings */ return; diff --git a/trunk/arch/arm/mm/mm.h b/trunk/arch/arm/mm/mm.h index 6776160618ef..a8ee92da3544 100644 --- a/trunk/arch/arm/mm/mm.h +++ b/trunk/arch/arm/mm/mm.h @@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page /* permanent static mappings from iotable_init() */ #define VM_ARM_STATIC_MAPPING 0x40000000 +/* empty mapping */ +#define VM_ARM_EMPTY_MAPPING 0x20000000 + /* mapping type (attributes) for permanent static mappings */ #define VM_ARM_MTYPE(mt) ((mt) << 20) #define VM_ARM_MTYPE_MASK (0x1f << 20) diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c index 4c2d0451e84a..c2fa21d0103e 100644 --- a/trunk/arch/arm/mm/mmu.c +++ b/trunk/arch/arm/mm/mmu.c @@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr) vm = 
early_alloc_aligned(sizeof(*vm), __alignof__(*vm)); vm->addr = (void *)addr; vm->size = SECTION_SIZE; - vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; + vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING; vm->caller = pmd_empty_section_gap; vm_area_add_early(vm); } @@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void) /* we're still single threaded hence no lock needed here */ for (vm = vmlist; vm; vm = vm->next) { - if (!(vm->flags & VM_ARM_STATIC_MAPPING)) + if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING))) continue; addr = (unsigned long)vm->addr; if (addr < next) @@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void) * Check whether this memory bank would partially overlap * the vmalloc area. */ - if (__va(bank->start + bank->size) > vmalloc_min || - __va(bank->start + bank->size) < __va(bank->start)) { + if (__va(bank->start + bank->size - 1) >= vmalloc_min || + __va(bank->start + bank->size - 1) <= __va(bank->start)) { unsigned long newsize = vmalloc_min - __va(bank->start); printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " "to -%.8llx (vmalloc region overlap).\n", diff --git a/trunk/arch/arm/mm/tlb-v7.S b/trunk/arch/arm/mm/tlb-v7.S index c2021139cb56..ea94765acf9a 100644 --- a/trunk/arch/arm/mm/tlb-v7.S +++ b/trunk/arch/arm/mm/tlb-v7.S @@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range) dsb mov r0, r0, lsr #PAGE_SHIFT @ align address mov r1, r1, lsr #PAGE_SHIFT -#ifdef CONFIG_ARM_ERRATA_720789 - mov r3, #0 -#else asid r3, r3 @ mask ASID +#ifdef CONFIG_ARM_ERRATA_720789 + ALT_SMP(W(mov) r3, #0 ) + ALT_UP(W(nop) ) #endif orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA mov r1, r1, lsl #PAGE_SHIFT diff --git a/trunk/arch/arm/plat-mxc/include/mach/mx25.h b/trunk/arch/arm/plat-mxc/include/mach/mx25.h index 627d94f1b010..ec466400a200 100644 --- a/trunk/arch/arm/plat-mxc/include/mach/mx25.h +++ b/trunk/arch/arm/plat-mxc/include/mach/mx25.h @@ -98,6 +98,7 @@ #define MX25_INT_UART1 (NR_IRQS_LEGACY + 45) #define MX25_INT_GPIO2 (NR_IRQS_LEGACY + 51) #define MX25_INT_GPIO1 (NR_IRQS_LEGACY + 52) +#define MX25_INT_GPT1 (NR_IRQS_LEGACY + 54) #define MX25_INT_FEC (NR_IRQS_LEGACY + 57) #define MX25_DMA_REQ_SSI2_RX1 22 diff --git a/trunk/arch/arm/plat-omap/dmtimer.c b/trunk/arch/arm/plat-omap/dmtimer.c index 626ad8cad7a9..938b50a33439 100644 --- a/trunk/arch/arm/plat-omap/dmtimer.c +++ b/trunk/arch/arm/plat-omap/dmtimer.c @@ -189,6 +189,7 @@ struct omap_dm_timer *omap_dm_timer_request(void) timer->reserved = 1; break; } + spin_unlock_irqrestore(&dm_timer_lock, flags); if (timer) { ret = omap_dm_timer_prepare(timer); @@ -197,7 +198,6 @@ struct omap_dm_timer *omap_dm_timer_request(void) timer = NULL; } } - spin_unlock_irqrestore(&dm_timer_lock, flags); if (!timer) pr_debug("%s: timer request failed!\n", __func__); @@ -220,6 +220,7 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id) break; } } + spin_unlock_irqrestore(&dm_timer_lock, flags); if (timer) { ret = omap_dm_timer_prepare(timer); @@ -228,7 +229,6 @@ struct omap_dm_timer *omap_dm_timer_request_specific(int id) timer = NULL; } } - spin_unlock_irqrestore(&dm_timer_lock, flags); if (!timer) pr_debug("%s: timer%d request failed!\n", __func__, id); @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(omap_dm_timer_enable); void omap_dm_timer_disable(struct omap_dm_timer *timer) { - pm_runtime_put(&timer->pdev->dev); + pm_runtime_put_sync(&timer->pdev->dev); } EXPORT_SYMBOL_GPL(omap_dm_timer_disable); diff --git a/trunk/arch/arm/plat-omap/include/plat/cpu.h b/trunk/arch/arm/plat-omap/include/plat/cpu.h index 
68b180edcfff..bb5d08a70dbc 100644 --- a/trunk/arch/arm/plat-omap/include/plat/cpu.h +++ b/trunk/arch/arm/plat-omap/include/plat/cpu.h @@ -372,7 +372,8 @@ IS_OMAP_TYPE(3430, 0x3430) #define cpu_class_is_omap1() (cpu_is_omap7xx() || cpu_is_omap15xx() || \ cpu_is_omap16xx()) #define cpu_class_is_omap2() (cpu_is_omap24xx() || cpu_is_omap34xx() || \ - cpu_is_omap44xx() || soc_is_omap54xx()) + cpu_is_omap44xx() || soc_is_omap54xx() || \ + soc_is_am33xx()) /* Various silicon revisions for omap2 */ #define OMAP242X_CLASS 0x24200024 diff --git a/trunk/arch/arm/plat-omap/include/plat/multi.h b/trunk/arch/arm/plat-omap/include/plat/multi.h index 045e320f1067..324d31b14852 100644 --- a/trunk/arch/arm/plat-omap/include/plat/multi.h +++ b/trunk/arch/arm/plat-omap/include/plat/multi.h @@ -108,4 +108,13 @@ # endif #endif +#ifdef CONFIG_SOC_AM33XX +# ifdef OMAP_NAME +# undef MULTI_OMAP2 +# define MULTI_OMAP2 +# else +# define OMAP_NAME am33xx +# endif +#endif + #endif /* __PLAT_OMAP_MULTI_H */ diff --git a/trunk/arch/arm/plat-omap/include/plat/uncompress.h b/trunk/arch/arm/plat-omap/include/plat/uncompress.h index b8d19a136781..7f7b112acccb 100644 --- a/trunk/arch/arm/plat-omap/include/plat/uncompress.h +++ b/trunk/arch/arm/plat-omap/include/plat/uncompress.h @@ -110,7 +110,7 @@ static inline void flush(void) _DEBUG_LL_ENTRY(mach, AM33XX_UART##p##_BASE, OMAP_PORT_SHIFT, \ AM33XXUART##p) -static inline void __arch_decomp_setup(unsigned long arch_id) +static inline void arch_decomp_setup(void) { int port = 0; @@ -198,8 +198,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id) } while (0); } -#define arch_decomp_setup() __arch_decomp_setup(arch_id) - /* * nothing to do */ diff --git a/trunk/arch/arm/plat-omap/sram.c b/trunk/arch/arm/plat-omap/sram.c index 766181cb5c95..024f3b08db29 100644 --- a/trunk/arch/arm/plat-omap/sram.c +++ b/trunk/arch/arm/plat-omap/sram.c @@ -68,6 +68,7 @@ static unsigned long omap_sram_start; static void __iomem *omap_sram_base; +static unsigned long omap_sram_skip; static unsigned long omap_sram_size; static void __iomem *omap_sram_ceil; @@ -106,6 +107,7 @@ static int is_sram_locked(void) */ static void __init omap_detect_sram(void) { + omap_sram_skip = SRAM_BOOTLOADER_SZ; if (cpu_class_is_omap2()) { if (is_sram_locked()) { if (cpu_is_omap34xx()) { @@ -113,6 +115,7 @@ static void __init omap_detect_sram(void) if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) || (omap_type() == OMAP2_DEVICE_TYPE_SEC)) { omap_sram_size = 0x7000; /* 28K */ + omap_sram_skip += SZ_16K; } else { omap_sram_size = 0x8000; /* 32K */ } @@ -175,8 +178,10 @@ static void __init omap_map_sram(void) return; #ifdef CONFIG_OMAP4_ERRATA_I688 + if (cpu_is_omap44xx()) { omap_sram_start += PAGE_SIZE; omap_sram_size -= SZ_16K; + } #endif if (cpu_is_omap34xx()) { /* @@ -203,8 +208,8 @@ static void __init omap_map_sram(void) * Looks like we need to preserve some bootloader code at the * beginning of SRAM for jumping to flash for reboot to work... 
*/ - memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0, - omap_sram_size - SRAM_BOOTLOADER_SZ); + memset_io(omap_sram_base + omap_sram_skip, 0, + omap_sram_size - omap_sram_skip); } /* @@ -218,7 +223,7 @@ void *omap_sram_push_address(unsigned long size) { unsigned long available, new_ceil = (unsigned long)omap_sram_ceil; - available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ); + available = omap_sram_ceil - (omap_sram_base + omap_sram_skip); if (size > available) { pr_err("Not enough space in SRAM\n"); diff --git a/trunk/arch/arm/plat-orion/common.c b/trunk/arch/arm/plat-orion/common.c index d245a87dc014..b8b747a9d360 100644 --- a/trunk/arch/arm/plat-orion/common.c +++ b/trunk/arch/arm/plat-orion/common.c @@ -291,10 +291,12 @@ static struct platform_device orion_ge00 = { void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq, - unsigned long irq_err) + unsigned long irq_err, + unsigned int tx_csum_limit) { fill_resources(&orion_ge00_shared, orion_ge00_shared_resources, mapbase + 0x2000, SZ_16K - 1, irq_err); + orion_ge00_shared_data.tx_csum_limit = tx_csum_limit; ge_complete(&orion_ge00_shared_data, orion_ge00_resources, irq, &orion_ge00_shared, eth_data, &orion_ge00); @@ -343,10 +345,12 @@ static struct platform_device orion_ge01 = { void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq, - unsigned long irq_err) + unsigned long irq_err, + unsigned int tx_csum_limit) { fill_resources(&orion_ge01_shared, orion_ge01_shared_resources, mapbase + 0x2000, SZ_16K - 1, irq_err); + orion_ge01_shared_data.tx_csum_limit = tx_csum_limit; ge_complete(&orion_ge01_shared_data, orion_ge01_resources, irq, &orion_ge01_shared, eth_data, &orion_ge01); diff --git a/trunk/arch/arm/plat-orion/include/plat/common.h b/trunk/arch/arm/plat-orion/include/plat/common.h index e00fdb213609..ae2377ef63e5 100644 --- a/trunk/arch/arm/plat-orion/include/plat/common.h +++ b/trunk/arch/arm/plat-orion/include/plat/common.h @@ -39,12 +39,14 @@ void __init orion_rtc_init(unsigned long mapbase, void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq, - unsigned long irq_err); + unsigned long irq_err, + unsigned int tx_csum_limit); void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, unsigned long irq, - unsigned long irq_err); + unsigned long irq_err, + unsigned int tx_csum_limit); void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data, unsigned long mapbase, diff --git a/trunk/arch/arm/plat-s3c24xx/dma.c b/trunk/arch/arm/plat-s3c24xx/dma.c index 28f898f75380..db98e7021f0d 100644 --- a/trunk/arch/arm/plat-s3c24xx/dma.c +++ b/trunk/arch/arm/plat-s3c24xx/dma.c @@ -430,7 +430,7 @@ s3c2410_dma_canload(struct s3c2410_dma_chan *chan) * when necessary. 
*/ -int s3c2410_dma_enqueue(unsigned int channel, void *id, +int s3c2410_dma_enqueue(enum dma_ch channel, void *id, dma_addr_t data, int size) { struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); diff --git a/trunk/arch/arm/plat-samsung/Kconfig b/trunk/arch/arm/plat-samsung/Kconfig index 7aca31c1df1f..9c3b90c3538e 100644 --- a/trunk/arch/arm/plat-samsung/Kconfig +++ b/trunk/arch/arm/plat-samsung/Kconfig @@ -403,7 +403,8 @@ config S5P_DEV_USB_EHCI config S3C24XX_PWM bool "PWM device support" - select HAVE_PWM + select PWM + select PWM_SAMSUNG help Support for exporting the PWM timer blocks via the pwm device system diff --git a/trunk/arch/arm/plat-samsung/clock.c b/trunk/arch/arm/plat-samsung/clock.c index 65c5eca475e7..d1116e2dfbea 100644 --- a/trunk/arch/arm/plat-samsung/clock.c +++ b/trunk/arch/arm/plat-samsung/clock.c @@ -144,6 +144,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate) int clk_set_rate(struct clk *clk, unsigned long rate) { + unsigned long flags; int ret; if (IS_ERR(clk)) @@ -159,9 +160,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate) if (clk->ops == NULL || clk->ops->set_rate == NULL) return -EINVAL; - spin_lock(&clocks_lock); + spin_lock_irqsave(&clocks_lock, flags); ret = (clk->ops->set_rate)(clk, rate); - spin_unlock(&clocks_lock); + spin_unlock_irqrestore(&clocks_lock, flags); return ret; } @@ -173,17 +174,18 @@ struct clk *clk_get_parent(struct clk *clk) int clk_set_parent(struct clk *clk, struct clk *parent) { + unsigned long flags; int ret = 0; if (IS_ERR(clk)) return -EINVAL; - spin_lock(&clocks_lock); + spin_lock_irqsave(&clocks_lock, flags); if (clk->ops && clk->ops->set_parent) ret = (clk->ops->set_parent)(clk, parent); - spin_unlock(&clocks_lock); + spin_unlock_irqrestore(&clocks_lock, flags); return ret; } diff --git a/trunk/arch/arm/plat-samsung/devs.c b/trunk/arch/arm/plat-samsung/devs.c index 74e31ce35538..fc49f3dabd76 100644 --- a/trunk/arch/arm/plat-samsung/devs.c +++ b/trunk/arch/arm/plat-samsung/devs.c @@ -32,6 +32,8 @@ #include #include +#include + #include #include #include @@ -748,7 +750,8 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd) if (!pd) { pd = &default_i2c_data; - if (soc_is_exynos4210()) + if (soc_is_exynos4210() || + soc_is_exynos4212() || soc_is_exynos4412()) pd->bus_num = 8; else if (soc_is_s5pv210()) pd->bus_num = 3; @@ -759,6 +762,30 @@ void __init s5p_i2c_hdmiphy_set_platdata(struct s3c2410_platform_i2c *pd) npd = s3c_set_platdata(pd, sizeof(struct s3c2410_platform_i2c), &s5p_device_i2c_hdmiphy); } + +struct s5p_hdmi_platform_data s5p_hdmi_def_platdata; + +void __init s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info, + struct i2c_board_info *mhl_info, int mhl_bus) +{ + struct s5p_hdmi_platform_data *pd = &s5p_hdmi_def_platdata; + + if (soc_is_exynos4210() || + soc_is_exynos4212() || soc_is_exynos4412()) + pd->hdmiphy_bus = 8; + else if (soc_is_s5pv210()) + pd->hdmiphy_bus = 3; + else + pd->hdmiphy_bus = 0; + + pd->hdmiphy_info = hdmiphy_info; + pd->mhl_info = mhl_info; + pd->mhl_bus = mhl_bus; + + s3c_set_platdata(pd, sizeof(struct s5p_hdmi_platform_data), + &s5p_device_hdmi); +} + #endif /* CONFIG_S5P_DEV_I2C_HDMIPHY */ /* I2S */ diff --git a/trunk/arch/arm/plat-samsung/include/plat/hdmi.h b/trunk/arch/arm/plat-samsung/include/plat/hdmi.h new file mode 100644 index 000000000000..331d046ac2c5 --- /dev/null +++ b/trunk/arch/arm/plat-samsung/include/plat/hdmi.h @@ -0,0 +1,16 @@ +/* + * Copyright (C) 2012 Samsung Electronics Co.Ltd + * + * This program is free 
software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#ifndef __PLAT_SAMSUNG_HDMI_H +#define __PLAT_SAMSUNG_HDMI_H __FILE__ + +extern void s5p_hdmi_set_platdata(struct i2c_board_info *hdmiphy_info, + struct i2c_board_info *mhl_info, int mhl_bus); + +#endif /* __PLAT_SAMSUNG_HDMI_H */ diff --git a/trunk/arch/arm/plat-samsung/pm.c b/trunk/arch/arm/plat-samsung/pm.c index 64ab65f0fdbc..15070284343e 100644 --- a/trunk/arch/arm/plat-samsung/pm.c +++ b/trunk/arch/arm/plat-samsung/pm.c @@ -74,7 +74,7 @@ unsigned char pm_uart_udivslot; #ifdef CONFIG_SAMSUNG_PM_DEBUG -struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; +static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save) { diff --git a/trunk/arch/arm/vfp/vfpmodule.c b/trunk/arch/arm/vfp/vfpmodule.c index fb849d044bde..c834b32af275 100644 --- a/trunk/arch/arm/vfp/vfpmodule.c +++ b/trunk/arch/arm/vfp/vfpmodule.c @@ -719,8 +719,10 @@ static int __init vfp_init(void) if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) elf_hwcap |= HWCAP_NEON; #endif +#ifdef CONFIG_VFPv3 if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) elf_hwcap |= HWCAP_VFPv4; +#endif } } return 0; diff --git a/trunk/arch/blackfin/Kconfig b/trunk/arch/blackfin/Kconfig index f34861920634..c7092e6057c5 100644 --- a/trunk/arch/blackfin/Kconfig +++ b/trunk/arch/blackfin/Kconfig @@ -38,6 +38,7 @@ config BLACKFIN select GENERIC_ATOMIC64 select GENERIC_IRQ_PROBE select IRQ_PER_CPU if SMP + select USE_GENERIC_SMP_HELPERS if SMP select HAVE_NMI_WATCHDOG if NMI_WATCHDOG select GENERIC_SMP_IDLE_THREAD select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS diff --git a/trunk/arch/blackfin/Makefile b/trunk/arch/blackfin/Makefile index d3d7e64ca96d..66cf00095b84 100644 --- a/trunk/arch/blackfin/Makefile +++ b/trunk/arch/blackfin/Makefile @@ -20,7 +20,6 @@ endif KBUILD_AFLAGS += $(call cc-option,-mno-fdpic) KBUILD_CFLAGS_MODULE += -mlong-calls LDFLAGS += -m elf32bfin -KALLSYMS += --symbol-prefix=_ KBUILD_DEFCONFIG := BF537-STAMP_defconfig diff --git a/trunk/arch/blackfin/include/asm/smp.h b/trunk/arch/blackfin/include/asm/smp.h index dc3d144b4bb5..9631598dcc5d 100644 --- a/trunk/arch/blackfin/include/asm/smp.h +++ b/trunk/arch/blackfin/include/asm/smp.h @@ -18,6 +18,8 @@ #define raw_smp_processor_id() blackfin_core_id() extern void bfin_relocate_coreb_l1_mem(void); +extern void arch_send_call_function_single_ipi(int cpu); +extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1) asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr); diff --git a/trunk/arch/blackfin/kernel/setup.c b/trunk/arch/blackfin/kernel/setup.c index ada8f0fc71e4..fb96e607adcf 100644 --- a/trunk/arch/blackfin/kernel/setup.c +++ b/trunk/arch/blackfin/kernel/setup.c @@ -52,7 +52,6 @@ EXPORT_SYMBOL(reserved_mem_dcache_on); #ifdef CONFIG_MTD_UCLINUX extern struct map_info uclinux_ram_map; unsigned long memory_mtd_end, memory_mtd_start, mtd_size; -unsigned long _ebss; EXPORT_SYMBOL(memory_mtd_end); EXPORT_SYMBOL(memory_mtd_start); EXPORT_SYMBOL(mtd_size); diff --git a/trunk/arch/blackfin/mach-common/smp.c b/trunk/arch/blackfin/mach-common/smp.c index 00bbe672b3b3..a40151306b77 100644 --- a/trunk/arch/blackfin/mach-common/smp.c +++ b/trunk/arch/blackfin/mach-common/smp.c @@ 
-48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS]; struct blackfin_initial_pda __cpuinitdata initial_pda_coreb; -#define BFIN_IPI_TIMER 0 -#define BFIN_IPI_RESCHEDULE 1 -#define BFIN_IPI_CALL_FUNC 2 -#define BFIN_IPI_CPU_STOP 3 +enum ipi_message_type { + BFIN_IPI_TIMER, + BFIN_IPI_RESCHEDULE, + BFIN_IPI_CALL_FUNC, + BFIN_IPI_CALL_FUNC_SINGLE, + BFIN_IPI_CPU_STOP, +}; struct blackfin_flush_data { unsigned long start; @@ -60,35 +63,20 @@ struct blackfin_flush_data { void *secondary_stack; - -struct smp_call_struct { - void (*func)(void *info); - void *info; - int wait; - cpumask_t *waitmask; -}; - static struct blackfin_flush_data smp_flush_data; static DEFINE_SPINLOCK(stop_lock); -struct ipi_message { - unsigned long type; - struct smp_call_struct call_struct; -}; - /* A magic number - stress test shows this is safe for common cases */ #define BFIN_IPI_MSGQ_LEN 5 /* Simple FIFO buffer, overflow leads to panic */ -struct ipi_message_queue { - spinlock_t lock; +struct ipi_data { unsigned long count; - unsigned long head; /* head of the queue */ - struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN]; + unsigned long bits; }; -static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); +static DEFINE_PER_CPU(struct ipi_data, bfin_ipi); static void ipi_cpu_stop(unsigned int cpu) { @@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info) blackfin_icache_flush_range(fdata->start, fdata->end); } -static void ipi_call_function(unsigned int cpu, struct ipi_message *msg) -{ - int wait; - void (*func)(void *info); - void *info; - func = msg->call_struct.func; - info = msg->call_struct.info; - wait = msg->call_struct.wait; - func(info); - if (wait) { -#ifdef __ARCH_SYNC_CORE_DCACHE - /* - * 'wait' usually means synchronization between CPUs. - * Invalidate D cache in case shared data was changed - * by func() to ensure cache coherence. - */ - resync_core_dcache(); -#endif - cpumask_clear_cpu(cpu, msg->call_struct.waitmask); - } -} - /* Use IRQ_SUPPLE_0 to request reschedule. 
* When returning from interrupt to user space, * there is chance to reschedule */ @@ -172,152 +138,95 @@ void ipi_timer(void) static irqreturn_t ipi_handler_int1(int irq, void *dev_instance) { - struct ipi_message *msg; - struct ipi_message_queue *msg_queue; + struct ipi_data *bfin_ipi_data; unsigned int cpu = smp_processor_id(); - unsigned long flags; + unsigned long pending; + unsigned long msg; platform_clear_ipi(cpu, IRQ_SUPPLE_1); - msg_queue = &__get_cpu_var(ipi_msg_queue); - - spin_lock_irqsave(&msg_queue->lock, flags); - - while (msg_queue->count) { - msg = &msg_queue->ipi_message[msg_queue->head]; - switch (msg->type) { - case BFIN_IPI_TIMER: - ipi_timer(); - break; - case BFIN_IPI_RESCHEDULE: - scheduler_ipi(); - break; - case BFIN_IPI_CALL_FUNC: - ipi_call_function(cpu, msg); - break; - case BFIN_IPI_CPU_STOP: - ipi_cpu_stop(cpu); - break; - default: - printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n", - cpu, msg->type); - break; - } - msg_queue->head++; - msg_queue->head %= BFIN_IPI_MSGQ_LEN; - msg_queue->count--; + bfin_ipi_data = &__get_cpu_var(bfin_ipi); + + while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) { + msg = 0; + do { + msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1); + switch (msg) { + case BFIN_IPI_TIMER: + ipi_timer(); + break; + case BFIN_IPI_RESCHEDULE: + scheduler_ipi(); + break; + case BFIN_IPI_CALL_FUNC: + generic_smp_call_function_interrupt(); + break; + + case BFIN_IPI_CALL_FUNC_SINGLE: + generic_smp_call_function_single_interrupt(); + break; + + case BFIN_IPI_CPU_STOP: + ipi_cpu_stop(cpu); + break; + } + } while (msg < BITS_PER_LONG); + + smp_mb(); } - spin_unlock_irqrestore(&msg_queue->lock, flags); return IRQ_HANDLED; } -static void ipi_queue_init(void) +static void bfin_ipi_init(void) { unsigned int cpu; - struct ipi_message_queue *msg_queue; + struct ipi_data *bfin_ipi_data; for_each_possible_cpu(cpu) { - msg_queue = &per_cpu(ipi_msg_queue, cpu); - spin_lock_init(&msg_queue->lock); - msg_queue->count = 0; - msg_queue->head = 0; + bfin_ipi_data = &per_cpu(bfin_ipi, cpu); + bfin_ipi_data->bits = 0; + bfin_ipi_data->count = 0; } } -static inline void smp_send_message(cpumask_t callmap, unsigned long type, - void (*func) (void *info), void *info, int wait) +void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg) { unsigned int cpu; - struct ipi_message_queue *msg_queue; - struct ipi_message *msg; - unsigned long flags, next_msg; - cpumask_t waitmask; /* waitmask is shared by all cpus */ - - cpumask_copy(&waitmask, &callmap); - for_each_cpu(cpu, &callmap) { - msg_queue = &per_cpu(ipi_msg_queue, cpu); - spin_lock_irqsave(&msg_queue->lock, flags); - if (msg_queue->count < BFIN_IPI_MSGQ_LEN) { - next_msg = (msg_queue->head + msg_queue->count) - % BFIN_IPI_MSGQ_LEN; - msg = &msg_queue->ipi_message[next_msg]; - msg->type = type; - if (type == BFIN_IPI_CALL_FUNC) { - msg->call_struct.func = func; - msg->call_struct.info = info; - msg->call_struct.wait = wait; - msg->call_struct.waitmask = &waitmask; - } - msg_queue->count++; - } else - panic("IPI message queue overflow\n"); - spin_unlock_irqrestore(&msg_queue->lock, flags); + struct ipi_data *bfin_ipi_data; + unsigned long flags; + + local_irq_save(flags); + + for_each_cpu(cpu, cpumask) { + bfin_ipi_data = &per_cpu(bfin_ipi, cpu); + smp_mb(); + set_bit(msg, &bfin_ipi_data->bits); + bfin_ipi_data->count++; platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1); } - if (wait) { - while (!cpumask_empty(&waitmask)) - blackfin_dcache_invalidate_range( - (unsigned long)(&waitmask), - (unsigned 
long)(&waitmask)); -#ifdef __ARCH_SYNC_CORE_DCACHE - /* - * Invalidate D cache in case shared data was changed by - * other processors to ensure cache coherence. - */ - resync_core_dcache(); -#endif - } + local_irq_restore(flags); } -int smp_call_function(void (*func)(void *info), void *info, int wait) +void arch_send_call_function_single_ipi(int cpu) { - cpumask_t callmap; - - preempt_disable(); - cpumask_copy(&callmap, cpu_online_mask); - cpumask_clear_cpu(smp_processor_id(), &callmap); - if (!cpumask_empty(&callmap)) - smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); - - preempt_enable(); - - return 0; + send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE); } -EXPORT_SYMBOL_GPL(smp_call_function); -int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, - int wait) +void arch_send_call_function_ipi_mask(const struct cpumask *mask) { - unsigned int cpu = cpuid; - cpumask_t callmap; - - if (cpu_is_offline(cpu)) - return 0; - cpumask_clear(&callmap); - cpumask_set_cpu(cpu, &callmap); - - smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait); - - return 0; + send_ipi(mask, BFIN_IPI_CALL_FUNC); } -EXPORT_SYMBOL_GPL(smp_call_function_single); void smp_send_reschedule(int cpu) { - cpumask_t callmap; - /* simply trigger an ipi */ - - cpumask_clear(&callmap); - cpumask_set_cpu(cpu, &callmap); - - smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0); + send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE); return; } void smp_send_msg(const struct cpumask *mask, unsigned long type) { - smp_send_message(*mask, type, NULL, NULL, 0); + send_ipi(mask, type); } void smp_timer_broadcast(const struct cpumask *mask) @@ -333,7 +242,7 @@ void smp_send_stop(void) cpumask_copy(&callmap, cpu_online_mask); cpumask_clear_cpu(smp_processor_id(), &callmap); if (!cpumask_empty(&callmap)) - smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0); + send_ipi(&callmap, BFIN_IPI_CPU_STOP); preempt_enable(); @@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void) void __init smp_prepare_cpus(unsigned int max_cpus) { platform_prepare_cpus(max_cpus); - ipi_queue_init(); + bfin_ipi_init(); platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0); platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1); } diff --git a/trunk/arch/c6x/Kconfig b/trunk/arch/c6x/Kconfig index 052f81a76239..983c859e40b7 100644 --- a/trunk/arch/c6x/Kconfig +++ b/trunk/arch/c6x/Kconfig @@ -6,6 +6,7 @@ config C6X def_bool y select CLKDEV_LOOKUP + select GENERIC_ATOMIC64 select GENERIC_IRQ_SHOW select HAVE_ARCH_TRACEHOOK select HAVE_DMA_API_DEBUG diff --git a/trunk/arch/c6x/include/asm/Kbuild b/trunk/arch/c6x/include/asm/Kbuild index 3af601e31e66..f08e89183cda 100644 --- a/trunk/arch/c6x/include/asm/Kbuild +++ b/trunk/arch/c6x/include/asm/Kbuild @@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm generic-y += atomic.h generic-y += auxvec.h +generic-y += barrier.h generic-y += bitsperlong.h generic-y += bugs.h generic-y += cputime.h diff --git a/trunk/arch/c6x/include/asm/barrier.h b/trunk/arch/c6x/include/asm/barrier.h deleted file mode 100644 index 538240e85909..000000000000 --- a/trunk/arch/c6x/include/asm/barrier.h +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Port on Texas Instruments TMS320C6x architecture - * - * Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated - * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published 
by the Free Software Foundation. - */ -#ifndef _ASM_C6X_BARRIER_H -#define _ASM_C6X_BARRIER_H - -#define nop() asm("NOP\n"); - -#define mb() barrier() -#define rmb() barrier() -#define wmb() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define smp_read_barrier_depends() do { } while (0) - -#endif /* _ASM_C6X_BARRIER_H */ diff --git a/trunk/arch/c6x/include/asm/cache.h b/trunk/arch/c6x/include/asm/cache.h index 6d521d96d941..09c5a0f5f4d1 100644 --- a/trunk/arch/c6x/include/asm/cache.h +++ b/trunk/arch/c6x/include/asm/cache.h @@ -1,7 +1,7 @@ /* * Port on Texas Instruments TMS320C6x architecture * - * Copyright (C) 2005, 2006, 2009, 2010 Texas Instruments Incorporated + * Copyright (C) 2005, 2006, 2009, 2010, 2012 Texas Instruments Incorporated * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com) * * This program is free software; you can redistribute it and/or modify @@ -16,9 +16,14 @@ /* * Cache line size */ -#define L1D_CACHE_BYTES 64 -#define L1P_CACHE_BYTES 32 -#define L2_CACHE_BYTES 128 +#define L1D_CACHE_SHIFT 6 +#define L1D_CACHE_BYTES (1 << L1D_CACHE_SHIFT) + +#define L1P_CACHE_SHIFT 5 +#define L1P_CACHE_BYTES (1 << L1P_CACHE_SHIFT) + +#define L2_CACHE_SHIFT 7 +#define L2_CACHE_BYTES (1 << L2_CACHE_SHIFT) /* * L2 used as cache @@ -29,7 +34,8 @@ * For practical reasons the L1_CACHE_BYTES defines should not be smaller than * the L2 line size */ -#define L1_CACHE_BYTES L2_CACHE_BYTES +#define L1_CACHE_SHIFT L2_CACHE_SHIFT +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) #define L2_CACHE_ALIGN_LOW(x) \ (((x) & ~(L2_CACHE_BYTES - 1))) diff --git a/trunk/arch/cris/kernel/process.c b/trunk/arch/cris/kernel/process.c index 66fd01728790..7f65be6f7f17 100644 --- a/trunk/arch/cris/kernel/process.c +++ b/trunk/arch/cris/kernel/process.c @@ -25,6 +25,7 @@ #include #include #include +#include //#define DEBUG @@ -74,6 +75,7 @@ void cpu_idle (void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) { void (*idle)(void); /* @@ -86,6 +88,7 @@ void cpu_idle (void) idle = default_idle; idle(); } + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/trunk/arch/frv/kernel/process.c b/trunk/arch/frv/kernel/process.c index ff95f50efea5..2eb7fa5bf9d8 100644 --- a/trunk/arch/frv/kernel/process.c +++ b/trunk/arch/frv/kernel/process.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -69,12 +70,14 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) { check_pgt_cache(); if (!frv_dma_inprogress && idle) idle(); } + rcu_idle_exit(); schedule_preempt_disabled(); } diff --git a/trunk/arch/h8300/kernel/process.c b/trunk/arch/h8300/kernel/process.c index 0e9c315be104..f153ed1a4c08 100644 --- a/trunk/arch/h8300/kernel/process.c +++ b/trunk/arch/h8300/kernel/process.c @@ -36,6 +36,7 @@ #include #include #include +#include #include #include @@ -78,8 +79,10 @@ void (*idle)(void) = default_idle; void cpu_idle(void) { while (1) { + rcu_idle_enter(); while (!need_resched()) idle(); + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/trunk/arch/ia64/Kconfig b/trunk/arch/ia64/Kconfig index 310cf5781fad..3c720ef6c32d 100644 --- a/trunk/arch/ia64/Kconfig +++ b/trunk/arch/ia64/Kconfig @@ -25,6 +25,7 @@ config IA64 select HAVE_GENERIC_HARDIRQS select HAVE_MEMBLOCK 
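A note on the Blackfin smp.c rework earlier in this patch: it replaces a spinlock-protected FIFO of IPI messages with a per-CPU bitmask of pending message types. Senders set a bit and kick the supplemental interrupt; the handler atomically swaps the word to zero and dispatches every bit that was set. Below is a minimal user-space model of that dispatch pattern, using C11 atomics in place of the kernel's xchg()/set_bit(); the enum values and handler bodies are illustrative stand-ins, not the kernel API, and the hardware "kick" step is omitted.

```c
#include <stdatomic.h>
#include <stdio.h>

enum ipi_type { IPI_TIMER, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_NR };

/* One pending-bits word per CPU; C11 atomics stand in for set_bit()/xchg(). */
static _Atomic unsigned long ipi_pending;

static void send_ipi(enum ipi_type msg)
{
    /* Sender: mark the message type pending (the kernel then raises the IRQ). */
    atomic_fetch_or(&ipi_pending, 1UL << msg);
}

static void ipi_handler(void)
{
    unsigned long pending;

    /* Handler: atomically take ownership of all pending bits, then dispatch. */
    while ((pending = atomic_exchange(&ipi_pending, 0)) != 0) {
        for (int msg = 0; msg < IPI_NR; msg++) {
            if (!(pending & (1UL << msg)))
                continue;
            switch (msg) {
            case IPI_TIMER:      puts("timer tick");    break;
            case IPI_RESCHEDULE: puts("reschedule");    break;
            case IPI_CALL_FUNC:  puts("call function"); break;
            case IPI_CPU_STOP:   puts("cpu stop");      break;
            }
        }
    }
}

int main(void)
{
    send_ipi(IPI_RESCHEDULE);
    send_ipi(IPI_CALL_FUNC);
    ipi_handler();   /* prints "reschedule" then "call function" */
    return 0;
}
```

Collapsing each message type to one bit lets the handler drain everything raised since the last exchange without taking a lock, which is why the fixed-length queue and its overflow panic disappear from the file.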
select HAVE_MEMBLOCK_NODE_MAP + select HAVE_VIRT_CPU_ACCOUNTING select ARCH_DISCARD_MEMBLOCK select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP @@ -340,17 +341,6 @@ config FORCE_MAX_ZONEORDER default "17" if HUGETLB_PAGE default "11" -config VIRT_CPU_ACCOUNTING - bool "Deterministic task and CPU time accounting" - default n - help - Select this option to enable more accurate task and CPU time - accounting. This is done by reading a CPU counter on each - kernel entry and exit and on transitions within the kernel - between system, softirq and hardirq state, so there is a - small performance impact. - If in doubt, say N here. - config SMP bool "Symmetric multi-processing support" select USE_GENERIC_SMP_HELPERS diff --git a/trunk/arch/ia64/configs/generic_defconfig b/trunk/arch/ia64/configs/generic_defconfig index 954d81e2e837..7913695b2fcb 100644 --- a/trunk/arch/ia64/configs/generic_defconfig +++ b/trunk/arch/ia64/configs/generic_defconfig @@ -234,5 +234,4 @@ CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_ANSI_CPRNG is not set CONFIG_CRC_T10DIF=y -CONFIG_MISC_DEVICES=y CONFIG_INTEL_IOMMU=y diff --git a/trunk/arch/ia64/configs/gensparse_defconfig b/trunk/arch/ia64/configs/gensparse_defconfig index 91c41ecfa6d9..f8e913365423 100644 --- a/trunk/arch/ia64/configs/gensparse_defconfig +++ b/trunk/arch/ia64/configs/gensparse_defconfig @@ -209,4 +209,3 @@ CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y CONFIG_CRYPTO_MD5=y -CONFIG_MISC_DEVICES=y diff --git a/trunk/arch/ia64/include/asm/switch_to.h b/trunk/arch/ia64/include/asm/switch_to.h index cb2412fcd17f..d38c7ea5eea5 100644 --- a/trunk/arch/ia64/include/asm/switch_to.h +++ b/trunk/arch/ia64/include/asm/switch_to.h @@ -30,13 +30,6 @@ extern struct task_struct *ia64_switch_to (void *next_task); extern void ia64_save_extra (struct task_struct *task); extern void ia64_load_extra (struct task_struct *task); -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct *next); -# define IA64_ACCOUNT_ON_SWITCH(p,n) ia64_account_on_switch(p,n) -#else -# define IA64_ACCOUNT_ON_SWITCH(p,n) -#endif - #ifdef CONFIG_PERFMON DECLARE_PER_CPU(unsigned long, pfm_syst_info); # define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1) @@ -49,7 +42,6 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct || PERFMON_IS_SYSWIDE()) #define __switch_to(prev,next,last) do { \ - IA64_ACCOUNT_ON_SWITCH(prev, next); \ if (IA64_HAS_EXTRA_STATE(prev)) \ ia64_save_extra(prev); \ if (IA64_HAS_EXTRA_STATE(next)) \ diff --git a/trunk/arch/ia64/kernel/acpi.c b/trunk/arch/ia64/kernel/acpi.c index 6f38b6120d96..440578850ae5 100644 --- a/trunk/arch/ia64/kernel/acpi.c +++ b/trunk/arch/ia64/kernel/acpi.c @@ -497,7 +497,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) srat_num_cpus++; } -void __init +int __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { unsigned long paddr, size; @@ -512,7 +512,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) /* Ignore disabled entries */ if (!(ma->flags & ACPI_SRAT_MEM_ENABLED)) - return; + return -1; /* record this node in proximity bitmap */ pxm_bit_set(pxm); @@ -531,6 +531,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) p->size = size; p->nid = pxm; num_node_memblks++; + return 0; } void __init acpi_numa_arch_fixup(void) diff --git a/trunk/arch/ia64/kernel/process.c b/trunk/arch/ia64/kernel/process.c index 
dd6fc1449741..3e316ec0b835 100644 --- a/trunk/arch/ia64/kernel/process.c +++ b/trunk/arch/ia64/kernel/process.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -279,6 +280,7 @@ cpu_idle (void) /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); if (can_do_pal_halt) { current_thread_info()->status &= ~TS_POLLING; /* @@ -309,6 +311,7 @@ cpu_idle (void) normal_xtp(); #endif } + rcu_idle_exit(); schedule_preempt_disabled(); check_pgt_cache(); if (cpu_is_offline(cpu)) diff --git a/trunk/arch/ia64/kernel/time.c b/trunk/arch/ia64/kernel/time.c index ecc904b33c5f..80ff9acc5edf 100644 --- a/trunk/arch/ia64/kernel/time.c +++ b/trunk/arch/ia64/kernel/time.c @@ -83,32 +83,36 @@ static struct clocksource *itc_clocksource; extern cputime_t cycle_to_cputime(u64 cyc); +static void vtime_account_user(struct task_struct *tsk) +{ + cputime_t delta_utime; + struct thread_info *ti = task_thread_info(tsk); + + if (ti->ac_utime) { + delta_utime = cycle_to_cputime(ti->ac_utime); + account_user_time(tsk, delta_utime, delta_utime); + ti->ac_utime = 0; + } +} + /* * Called from the context switch with interrupts disabled, to charge all * accumulated times to the current process, and to prepare accounting on * the next process. */ -void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next) +void vtime_task_switch(struct task_struct *prev) { struct thread_info *pi = task_thread_info(prev); - struct thread_info *ni = task_thread_info(next); - cputime_t delta_stime, delta_utime; - __u64 now; + struct thread_info *ni = task_thread_info(current); - now = ia64_get_itc(); - - delta_stime = cycle_to_cputime(pi->ac_stime + (now - pi->ac_stamp)); if (idle_task(smp_processor_id()) != prev) - account_system_time(prev, 0, delta_stime, delta_stime); + vtime_account_system(prev); else - account_idle_time(delta_stime); + vtime_account_idle(prev); - if (pi->ac_utime) { - delta_utime = cycle_to_cputime(pi->ac_utime); - account_user_time(prev, delta_utime, delta_utime); - } + vtime_account_user(prev); - pi->ac_stamp = ni->ac_stamp = now; + pi->ac_stamp = ni->ac_stamp; ni->ac_stime = ni->ac_utime = 0; } @@ -116,29 +120,32 @@ void ia64_account_on_switch(struct task_struct *prev, struct task_struct *next) * Account time for a transition between system, hard irq or soft irq state. * Note that this function is called with interrupts enabled. 
*/ -void account_system_vtime(struct task_struct *tsk) +static cputime_t vtime_delta(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); - unsigned long flags; cputime_t delta_stime; __u64 now; - local_irq_save(flags); - now = ia64_get_itc(); delta_stime = cycle_to_cputime(ti->ac_stime + (now - ti->ac_stamp)); - if (irq_count() || idle_task(smp_processor_id()) != tsk) - account_system_time(tsk, 0, delta_stime, delta_stime); - else - account_idle_time(delta_stime); ti->ac_stime = 0; - ti->ac_stamp = now; - local_irq_restore(flags); + return delta_stime; +} + +void vtime_account_system(struct task_struct *tsk) +{ + cputime_t delta = vtime_delta(tsk); + + account_system_time(tsk, 0, delta, delta); +} + +void vtime_account_idle(struct task_struct *tsk) +{ + account_idle_time(vtime_delta(tsk)); } -EXPORT_SYMBOL_GPL(account_system_vtime); /* * Called from the timer interrupt handler to charge accumulated user time @@ -146,14 +153,7 @@ EXPORT_SYMBOL_GPL(account_system_vtime); */ void account_process_tick(struct task_struct *p, int user_tick) { - struct thread_info *ti = task_thread_info(p); - cputime_t delta_utime; - - if (ti->ac_utime) { - delta_utime = cycle_to_cputime(ti->ac_utime); - account_user_time(p, delta_utime, delta_utime); - ti->ac_utime = 0; - } + vtime_account_user(p); } #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ diff --git a/trunk/arch/m32r/kernel/process.c b/trunk/arch/m32r/kernel/process.c index 3a4a32b27208..384e63f3a4c4 100644 --- a/trunk/arch/m32r/kernel/process.c +++ b/trunk/arch/m32r/kernel/process.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include @@ -82,6 +83,7 @@ void cpu_idle (void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) { void (*idle)(void) = pm_idle; @@ -90,6 +92,7 @@ void cpu_idle (void) idle(); } + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/trunk/arch/m68k/Kconfig b/trunk/arch/m68k/Kconfig index 0b0f8b8c4a26..b22df9410dce 100644 --- a/trunk/arch/m68k/Kconfig +++ b/trunk/arch/m68k/Kconfig @@ -5,6 +5,7 @@ config M68K select HAVE_AOUT if MMU select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW + select GENERIC_ATOMIC64 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS select GENERIC_CPU_DEVICES select GENERIC_STRNCPY_FROM_USER if MMU @@ -54,18 +55,6 @@ config ZONE_DMA bool default y -config CPU_HAS_NO_BITFIELDS - bool - -config CPU_HAS_NO_MULDIV64 - bool - -config CPU_HAS_ADDRESS_SPACES - bool - -config FPU - bool - config HZ int default 1000 if CLEOPATRA diff --git a/trunk/arch/m68k/Kconfig.cpu b/trunk/arch/m68k/Kconfig.cpu index 43a9f8f1b8eb..c4eb79edecec 100644 --- a/trunk/arch/m68k/Kconfig.cpu +++ b/trunk/arch/m68k/Kconfig.cpu @@ -28,6 +28,7 @@ config COLDFIRE select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 select GENERIC_CSUM + select HAVE_CLK endchoice @@ -37,6 +38,7 @@ config M68000 bool select CPU_HAS_NO_BITFIELDS select CPU_HAS_NO_MULDIV64 + select CPU_HAS_NO_UNALIGNED select GENERIC_CSUM help The Freescale (was Motorola) 68000 CPU is the first generation of @@ -48,6 +50,7 @@ config M68000 config MCPU32 bool select CPU_HAS_NO_BITFIELDS + select CPU_HAS_NO_UNALIGNED help The Freescale (was then Motorola) CPU32 is a CPU core that is based on the 68020 processor. 
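The ia64 time.c hunks above fold the old account_system_vtime() into a small vtime_delta() helper plus thin vtime_account_system()/vtime_account_idle() wrappers, so the cycle-to-cputime bookkeeping lives in one place and vtime_task_switch() only has to pick the right bucket. A rough sketch of that factoring with a fake cycle counter; the counter, the account_* stubs and the stamp handling are simplified stand-ins, not the ia64 code.

```c
#include <stdio.h>

/* Stand-ins for the per-thread accounting stamp and the ITC cycle counter. */
static unsigned long long ac_stamp;
static unsigned long long fake_itc;

static unsigned long long get_cycles(void) { return fake_itc; }

static void account_system_time(unsigned long long d) { printf("system += %llu\n", d); }
static void account_idle_time(unsigned long long d)   { printf("idle   += %llu\n", d); }

/* Common helper: cycles elapsed since the last accounting point. */
static unsigned long long vtime_delta(void)
{
    unsigned long long now = get_cycles();
    unsigned long long delta = now - ac_stamp;

    ac_stamp = now;
    return delta;
}

/* Thin wrappers: the only difference is which bucket the delta lands in. */
static void vtime_account_system(void) { account_system_time(vtime_delta()); }
static void vtime_account_idle(void)   { account_idle_time(vtime_delta()); }

int main(void)
{
    fake_itc = 1000; vtime_account_system();  /* system += 1000 */
    fake_itc = 1800; vtime_account_idle();    /* idle   += 800  */
    return 0;
}
```

With the delta computation centralized, the context-switch path reduces to "idle task? account idle, otherwise account system, then flush any pending user time", which is what the new vtime_task_switch() does.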
For the most part it is used in @@ -56,7 +59,6 @@ config MCPU32 config M68020 bool "68020 support" depends on MMU - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68020 @@ -67,7 +69,6 @@ config M68020 config M68030 bool "68030 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68030 @@ -77,7 +78,6 @@ config M68030 config M68040 bool "68040 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68LC040 @@ -88,7 +88,6 @@ config M68040 config M68060 bool "68060 support" depends on MMU && !MMU_SUN3 - select GENERIC_ATOMIC64 select CPU_HAS_ADDRESS_SPACES help If you anticipate running this kernel on a computer with a MC68060 @@ -376,6 +375,18 @@ config NODES_SHIFT default "3" depends on !SINGLE_MEMORY_CHUNK +config CPU_HAS_NO_BITFIELDS + bool + +config CPU_HAS_NO_MULDIV64 + bool + +config CPU_HAS_NO_UNALIGNED + bool + +config CPU_HAS_ADDRESS_SPACES + bool + config FPU bool diff --git a/trunk/arch/m68k/apollo/config.c b/trunk/arch/m68k/apollo/config.c index 0a30406b9442..f5565d6eeb8e 100644 --- a/trunk/arch/m68k/apollo/config.c +++ b/trunk/arch/m68k/apollo/config.c @@ -177,8 +177,8 @@ irqreturn_t dn_timer_int(int irq, void *dev_id) timer_handler(irq, dev_id); - x=*(volatile unsigned char *)(timer+3); - x=*(volatile unsigned char *)(timer+5); + x = *(volatile unsigned char *)(apollo_timer + 3); + x = *(volatile unsigned char *)(apollo_timer + 5); return IRQ_HANDLED; } @@ -186,17 +186,17 @@ irqreturn_t dn_timer_int(int irq, void *dev_id) void dn_sched_init(irq_handler_t timer_routine) { /* program timer 1 */ - *(volatile unsigned char *)(timer+3)=0x01; - *(volatile unsigned char *)(timer+1)=0x40; - *(volatile unsigned char *)(timer+5)=0x09; - *(volatile unsigned char *)(timer+7)=0xc4; + *(volatile unsigned char *)(apollo_timer + 3) = 0x01; + *(volatile unsigned char *)(apollo_timer + 1) = 0x40; + *(volatile unsigned char *)(apollo_timer + 5) = 0x09; + *(volatile unsigned char *)(apollo_timer + 7) = 0xc4; /* enable IRQ of PIC B */ *(volatile unsigned char *)(pica+1)&=(~8); #if 0 - printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); - printk("*(0x10803) %02x\n",*(volatile unsigned char *)(timer+0x3)); + printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3)); + printk("*(0x10803) %02x\n",*(volatile unsigned char *)(apollo_timer + 0x3)); #endif if (request_irq(IRQ_APOLLO, dn_timer_int, 0, "time", timer_routine)) diff --git a/trunk/arch/m68k/include/asm/Kbuild b/trunk/arch/m68k/include/asm/Kbuild index eafa2539a8ee..a74e5d95c384 100644 --- a/trunk/arch/m68k/include/asm/Kbuild +++ b/trunk/arch/m68k/include/asm/Kbuild @@ -1,4 +1,29 @@ include include/asm-generic/Kbuild.asm header-y += cachectl.h +generic-y += bitsperlong.h +generic-y += cputime.h +generic-y += device.h +generic-y += emergency-restart.h +generic-y += errno.h +generic-y += futex.h +generic-y += ioctl.h +generic-y += ipcbuf.h +generic-y += irq_regs.h +generic-y += kdebug.h +generic-y += kmap_types.h +generic-y += kvm_para.h +generic-y += local64.h +generic-y += local.h +generic-y += mman.h +generic-y += mutex.h +generic-y += percpu.h +generic-y += resource.h +generic-y += scatterlist.h +generic-y += sections.h +generic-y += siginfo.h +generic-y += statfs.h +generic-y += topology.h +generic-y += types.h generic-y += 
word-at-a-time.h +generic-y += xor.h diff --git a/trunk/arch/m68k/include/asm/MC68332.h b/trunk/arch/m68k/include/asm/MC68332.h deleted file mode 100644 index 6bb8f02685a2..000000000000 --- a/trunk/arch/m68k/include/asm/MC68332.h +++ /dev/null @@ -1,152 +0,0 @@ - -/* include/asm-m68knommu/MC68332.h: '332 control registers - * - * Copyright (C) 1998 Kenneth Albanowski , - * - */ - -#ifndef _MC68332_H_ -#define _MC68332_H_ - -#define BYTE_REF(addr) (*((volatile unsigned char*)addr)) -#define WORD_REF(addr) (*((volatile unsigned short*)addr)) - -#define PORTE_ADDR 0xfffa11 -#define PORTE BYTE_REF(PORTE_ADDR) -#define DDRE_ADDR 0xfffa15 -#define DDRE BYTE_REF(DDRE_ADDR) -#define PEPAR_ADDR 0xfffa17 -#define PEPAR BYTE_REF(PEPAR_ADDR) - -#define PORTF_ADDR 0xfffa19 -#define PORTF BYTE_REF(PORTF_ADDR) -#define DDRF_ADDR 0xfffa1d -#define DDRF BYTE_REF(DDRF_ADDR) -#define PFPAR_ADDR 0xfffa1f -#define PFPAR BYTE_REF(PFPAR_ADDR) - -#define PORTQS_ADDR 0xfffc15 -#define PORTQS BYTE_REF(PORTQS_ADDR) -#define DDRQS_ADDR 0xfffc17 -#define DDRQS BYTE_REF(DDRQS_ADDR) -#define PQSPAR_ADDR 0xfffc16 -#define PQSPAR BYTE_REF(PQSPAR_ADDR) - -#define CSPAR0_ADDR 0xFFFA44 -#define CSPAR0 WORD_REF(CSPAR0_ADDR) -#define CSPAR1_ADDR 0xFFFA46 -#define CSPAR1 WORD_REF(CSPAR1_ADDR) -#define CSARBT_ADDR 0xFFFA48 -#define CSARBT WORD_REF(CSARBT_ADDR) -#define CSOPBT_ADDR 0xFFFA4A -#define CSOPBT WORD_REF(CSOPBT_ADDR) -#define CSBAR0_ADDR 0xFFFA4C -#define CSBAR0 WORD_REF(CSBAR0_ADDR) -#define CSOR0_ADDR 0xFFFA4E -#define CSOR0 WORD_REF(CSOR0_ADDR) -#define CSBAR1_ADDR 0xFFFA50 -#define CSBAR1 WORD_REF(CSBAR1_ADDR) -#define CSOR1_ADDR 0xFFFA52 -#define CSOR1 WORD_REF(CSOR1_ADDR) -#define CSBAR2_ADDR 0xFFFA54 -#define CSBAR2 WORD_REF(CSBAR2_ADDR) -#define CSOR2_ADDR 0xFFFA56 -#define CSOR2 WORD_REF(CSOR2_ADDR) -#define CSBAR3_ADDR 0xFFFA58 -#define CSBAR3 WORD_REF(CSBAR3_ADDR) -#define CSOR3_ADDR 0xFFFA5A -#define CSOR3 WORD_REF(CSOR3_ADDR) -#define CSBAR4_ADDR 0xFFFA5C -#define CSBAR4 WORD_REF(CSBAR4_ADDR) -#define CSOR4_ADDR 0xFFFA5E -#define CSOR4 WORD_REF(CSOR4_ADDR) -#define CSBAR5_ADDR 0xFFFA60 -#define CSBAR5 WORD_REF(CSBAR5_ADDR) -#define CSOR5_ADDR 0xFFFA62 -#define CSOR5 WORD_REF(CSOR5_ADDR) -#define CSBAR6_ADDR 0xFFFA64 -#define CSBAR6 WORD_REF(CSBAR6_ADDR) -#define CSOR6_ADDR 0xFFFA66 -#define CSOR6 WORD_REF(CSOR6_ADDR) -#define CSBAR7_ADDR 0xFFFA68 -#define CSBAR7 WORD_REF(CSBAR7_ADDR) -#define CSOR7_ADDR 0xFFFA6A -#define CSOR7 WORD_REF(CSOR7_ADDR) -#define CSBAR8_ADDR 0xFFFA6C -#define CSBAR8 WORD_REF(CSBAR8_ADDR) -#define CSOR8_ADDR 0xFFFA6E -#define CSOR8 WORD_REF(CSOR8_ADDR) -#define CSBAR9_ADDR 0xFFFA70 -#define CSBAR9 WORD_REF(CSBAR9_ADDR) -#define CSOR9_ADDR 0xFFFA72 -#define CSOR9 WORD_REF(CSOR9_ADDR) -#define CSBAR10_ADDR 0xFFFA74 -#define CSBAR10 WORD_REF(CSBAR10_ADDR) -#define CSOR10_ADDR 0xFFFA76 -#define CSOR10 WORD_REF(CSOR10_ADDR) - -#define CSOR_MODE_ASYNC 0x0000 -#define CSOR_MODE_SYNC 0x8000 -#define CSOR_MODE_MASK 0x8000 -#define CSOR_BYTE_DISABLE 0x0000 -#define CSOR_BYTE_UPPER 0x4000 -#define CSOR_BYTE_LOWER 0x2000 -#define CSOR_BYTE_BOTH 0x6000 -#define CSOR_BYTE_MASK 0x6000 -#define CSOR_RW_RSVD 0x0000 -#define CSOR_RW_READ 0x0800 -#define CSOR_RW_WRITE 0x1000 -#define CSOR_RW_BOTH 0x1800 -#define CSOR_RW_MASK 0x1800 -#define CSOR_STROBE_DS 0x0400 -#define CSOR_STROBE_AS 0x0000 -#define CSOR_STROBE_MASK 0x0400 -#define CSOR_DSACK_WAIT(x) (wait << 6) -#define CSOR_DSACK_FTERM (14 << 6) -#define CSOR_DSACK_EXTERNAL (15 << 6) -#define CSOR_DSACK_MASK 0x03c0 -#define CSOR_SPACE_CPU 0x0000 
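The MC68332.h header removed above was essentially a table of memory-mapped registers built on two accessor macros, BYTE_REF() and WORD_REF(), which cast a fixed bus address to a volatile pointer and dereference it. For reference, a small sketch of that accessor style, pointed at a local buffer instead of real hardware so it can actually run; the register name and offset below are made up, not the real '332 map.

```c
#include <stdint.h>
#include <stdio.h>

/* Fake "register window"; on real hardware this is a fixed bus address. */
static volatile uint8_t regs[0x100];

#define REG_BASE        ((uintptr_t)regs)
#define BYTE_REF(addr)  (*(volatile uint8_t  *)(addr))
#define WORD_REF(addr)  (*(volatile uint16_t *)(addr))

/* Made-up register, modelled after the PORTE/DDRE style of the removed header. */
#define PORTX_ADDR  (REG_BASE + 0x11)
#define PORTX       BYTE_REF(PORTX_ADDR)

int main(void)
{
    PORTX = 0x40;                        /* write the "register"  */
    PORTX |= 0x01;                       /* read-modify-write     */
    printf("PORTX = 0x%02x\n", PORTX);   /* prints 0x41           */
    return 0;
}
```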
-#define CSOR_SPACE_USER 0x0010 -#define CSOR_SPACE_SU 0x0020 -#define CSOR_SPACE_BOTH 0x0030 -#define CSOR_SPACE_MASK 0x0030 -#define CSOR_IPL_ALL 0x0000 -#define CSOR_IPL_PRIORITY(x) (x << 1) -#define CSOR_IPL_MASK 0x000e -#define CSOR_AVEC_ON 0x0001 -#define CSOR_AVEC_OFF 0x0000 -#define CSOR_AVEC_MASK 0x0001 - -#define CSBAR_ADDR(x) ((addr >> 11) << 3) -#define CSBAR_ADDR_MASK 0xfff8 -#define CSBAR_BLKSIZE_2K 0x0000 -#define CSBAR_BLKSIZE_8K 0x0001 -#define CSBAR_BLKSIZE_16K 0x0002 -#define CSBAR_BLKSIZE_64K 0x0003 -#define CSBAR_BLKSIZE_128K 0x0004 -#define CSBAR_BLKSIZE_256K 0x0005 -#define CSBAR_BLKSIZE_512K 0x0006 -#define CSBAR_BLKSIZE_1M 0x0007 -#define CSBAR_BLKSIZE_MASK 0x0007 - -#define CSPAR_DISC 0 -#define CSPAR_ALT 1 -#define CSPAR_CS8 2 -#define CSPAR_CS16 3 -#define CSPAR_MASK 3 - -#define CSPAR0_CSBOOT(x) (x << 0) -#define CSPAR0_CS0(x) (x << 2) -#define CSPAR0_CS1(x) (x << 4) -#define CSPAR0_CS2(x) (x << 6) -#define CSPAR0_CS3(x) (x << 8) -#define CSPAR0_CS4(x) (x << 10) -#define CSPAR0_CS5(x) (x << 12) - -#define CSPAR1_CS6(x) (x << 0) -#define CSPAR1_CS7(x) (x << 2) -#define CSPAR1_CS8(x) (x << 4) -#define CSPAR1_CS9(x) (x << 6) -#define CSPAR1_CS10(x) (x << 8) - -#endif diff --git a/trunk/arch/m68k/include/asm/apollodma.h b/trunk/arch/m68k/include/asm/apollodma.h deleted file mode 100644 index 954adc851adb..000000000000 --- a/trunk/arch/m68k/include/asm/apollodma.h +++ /dev/null @@ -1,248 +0,0 @@ -/* - * linux/include/asm/dma.h: Defines for using and allocating dma channels. - * Written by Hennus Bergman, 1992. - * High DMA channel support & info by Hannu Savolainen - * and John Boyd, Nov. 1992. - */ - -#ifndef _ASM_APOLLO_DMA_H -#define _ASM_APOLLO_DMA_H - -#include /* need byte IO */ -#include /* And spinlocks */ -#include - - -#define dma_outb(val,addr) (*((volatile unsigned char *)(addr+IO_BASE)) = (val)) -#define dma_inb(addr) (*((volatile unsigned char *)(addr+IO_BASE))) - -/* - * NOTES about DMA transfers: - * - * controller 1: channels 0-3, byte operations, ports 00-1F - * controller 2: channels 4-7, word operations, ports C0-DF - * - * - ALL registers are 8 bits only, regardless of transfer size - * - channel 4 is not used - cascades 1 into 2. - * - channels 0-3 are byte - addresses/counts are for physical bytes - * - channels 5-7 are word - addresses/counts are for physical words - * - transfers must not cross physical 64K (0-3) or 128K (5-7) boundaries - * - transfer count loaded to registers is 1 less than actual count - * - controller 2 offsets are all even (2x offsets for controller 1) - * - page registers for 5-7 don't use data bit 0, represent 128K pages - * - page registers for 0-3 use bit 0, represent 64K pages - * - * DMA transfers are limited to the lower 16MB of _physical_ memory. - * Note that addresses loaded into registers must be _physical_ addresses, - * not logical addresses (which may differ if paging is active). - * - * Address mapping for channels 0-3: - * - * A23 ... A16 A15 ... A8 A7 ... A0 (Physical addresses) - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * | ... | | ... | | ... | - * P7 ... P0 A7 ... A0 A7 ... A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Address mapping for channels 5-7: - * - * A23 ... A17 A16 A15 ... A9 A8 A7 ... A1 A0 (Physical addresses) - * | ... | \ \ ... \ \ \ ... \ \ - * | ... | \ \ ... \ \ \ ... \ (not used) - * | ... | \ \ ... \ \ \ ... \ - * P7 ... P1 (0) A7 A6 ... A0 A7 A6 ... 
A0 - * | Page | Addr MSB | Addr LSB | (DMA registers) - * - * Again, channels 5-7 transfer _physical_ words (16 bits), so addresses - * and counts _must_ be word-aligned (the lowest address bit is _ignored_ at - * the hardware level, so odd-byte transfers aren't possible). - * - * Transfer count (_not # bytes_) is limited to 64K, represented as actual - * count - 1 : 64K => 0xFFFF, 1 => 0x0000. Thus, count is always 1 or more, - * and up to 128K bytes may be transferred on channels 5-7 in one operation. - * - */ - -#define MAX_DMA_CHANNELS 8 - -/* The maximum address that we can perform a DMA transfer to on this platform */#define MAX_DMA_ADDRESS (PAGE_OFFSET+0x1000000) - -/* 8237 DMA controllers */ -#define IO_DMA1_BASE 0x10C00 /* 8 bit slave DMA, channels 0..3 */ -#define IO_DMA2_BASE 0x10D00 /* 16 bit master DMA, ch 4(=slave input)..7 */ - -/* DMA controller registers */ -#define DMA1_CMD_REG (IO_DMA1_BASE+0x08) /* command register (w) */ -#define DMA1_STAT_REG (IO_DMA1_BASE+0x08) /* status register (r) */ -#define DMA1_REQ_REG (IO_DMA1_BASE+0x09) /* request register (w) */ -#define DMA1_MASK_REG (IO_DMA1_BASE+0x0A) /* single-channel mask (w) */ -#define DMA1_MODE_REG (IO_DMA1_BASE+0x0B) /* mode register (w) */ -#define DMA1_CLEAR_FF_REG (IO_DMA1_BASE+0x0C) /* clear pointer flip-flop (w) */ -#define DMA1_TEMP_REG (IO_DMA1_BASE+0x0D) /* Temporary Register (r) */ -#define DMA1_RESET_REG (IO_DMA1_BASE+0x0D) /* Master Clear (w) */ -#define DMA1_CLR_MASK_REG (IO_DMA1_BASE+0x0E) /* Clear Mask */ -#define DMA1_MASK_ALL_REG (IO_DMA1_BASE+0x0F) /* all-channels mask (w) */ - -#define DMA2_CMD_REG (IO_DMA2_BASE+0x10) /* command register (w) */ -#define DMA2_STAT_REG (IO_DMA2_BASE+0x10) /* status register (r) */ -#define DMA2_REQ_REG (IO_DMA2_BASE+0x12) /* request register (w) */ -#define DMA2_MASK_REG (IO_DMA2_BASE+0x14) /* single-channel mask (w) */ -#define DMA2_MODE_REG (IO_DMA2_BASE+0x16) /* mode register (w) */ -#define DMA2_CLEAR_FF_REG (IO_DMA2_BASE+0x18) /* clear pointer flip-flop (w) */ -#define DMA2_TEMP_REG (IO_DMA2_BASE+0x1A) /* Temporary Register (r) */ -#define DMA2_RESET_REG (IO_DMA2_BASE+0x1A) /* Master Clear (w) */ -#define DMA2_CLR_MASK_REG (IO_DMA2_BASE+0x1C) /* Clear Mask */ -#define DMA2_MASK_ALL_REG (IO_DMA2_BASE+0x1E) /* all-channels mask (w) */ - -#define DMA_ADDR_0 (IO_DMA1_BASE+0x00) /* DMA address registers */ -#define DMA_ADDR_1 (IO_DMA1_BASE+0x02) -#define DMA_ADDR_2 (IO_DMA1_BASE+0x04) -#define DMA_ADDR_3 (IO_DMA1_BASE+0x06) -#define DMA_ADDR_4 (IO_DMA2_BASE+0x00) -#define DMA_ADDR_5 (IO_DMA2_BASE+0x04) -#define DMA_ADDR_6 (IO_DMA2_BASE+0x08) -#define DMA_ADDR_7 (IO_DMA2_BASE+0x0C) - -#define DMA_CNT_0 (IO_DMA1_BASE+0x01) /* DMA count registers */ -#define DMA_CNT_1 (IO_DMA1_BASE+0x03) -#define DMA_CNT_2 (IO_DMA1_BASE+0x05) -#define DMA_CNT_3 (IO_DMA1_BASE+0x07) -#define DMA_CNT_4 (IO_DMA2_BASE+0x02) -#define DMA_CNT_5 (IO_DMA2_BASE+0x06) -#define DMA_CNT_6 (IO_DMA2_BASE+0x0A) -#define DMA_CNT_7 (IO_DMA2_BASE+0x0E) - -#define DMA_MODE_READ 0x44 /* I/O to memory, no autoinit, increment, single mode */ -#define DMA_MODE_WRITE 0x48 /* memory to I/O, no autoinit, increment, single mode */ -#define DMA_MODE_CASCADE 0xC0 /* pass thru DREQ->HRQ, DACK<-HLDA only */ - -#define DMA_AUTOINIT 0x10 - -#define DMA_8BIT 0 -#define DMA_16BIT 1 -#define DMA_BUSMASTER 2 - -extern spinlock_t dma_spin_lock; - -static __inline__ unsigned long claim_dma_lock(void) -{ - unsigned long flags; - spin_lock_irqsave(&dma_spin_lock, flags); - return flags; -} - -static __inline__ void 
release_dma_lock(unsigned long flags) -{ - spin_unlock_irqrestore(&dma_spin_lock, flags); -} - -/* enable/disable a specific DMA channel */ -static __inline__ void enable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr, DMA1_MASK_REG); - else - dma_outb(dmanr & 3, DMA2_MASK_REG); -} - -static __inline__ void disable_dma(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(dmanr | 4, DMA1_MASK_REG); - else - dma_outb((dmanr & 3) | 4, DMA2_MASK_REG); -} - -/* Clear the 'DMA Pointer Flip Flop'. - * Write 0 for LSB/MSB, 1 for MSB/LSB access. - * Use this once to initialize the FF to a known state. - * After that, keep track of it. :-) - * --- In order to do that, the DMA routines below should --- - * --- only be used while holding the DMA lock ! --- - */ -static __inline__ void clear_dma_ff(unsigned int dmanr) -{ - if (dmanr<=3) - dma_outb(0, DMA1_CLEAR_FF_REG); - else - dma_outb(0, DMA2_CLEAR_FF_REG); -} - -/* set mode (above) for a specific DMA channel */ -static __inline__ void set_dma_mode(unsigned int dmanr, char mode) -{ - if (dmanr<=3) - dma_outb(mode | dmanr, DMA1_MODE_REG); - else - dma_outb(mode | (dmanr&3), DMA2_MODE_REG); -} - -/* Set transfer address & page bits for specific DMA channel. - * Assumes dma flipflop is clear. - */ -static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a) -{ - if (dmanr <= 3) { - dma_outb( a & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - dma_outb( (a>>8) & 0xff, ((dmanr&3)<<1) + IO_DMA1_BASE ); - } else { - dma_outb( (a>>1) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - dma_outb( (a>>9) & 0xff, ((dmanr&3)<<2) + IO_DMA2_BASE ); - } -} - - -/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for - * a specific DMA channel. - * You must ensure the parameters are valid. - * NOTE: from a manual: "the number of transfers is one more - * than the initial word count"! This is taken into account. - * Assumes dma flip-flop is clear. - * NOTE 2: "count" represents _bytes_ and must be even for channels 5-7. - */ -static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count) -{ - count--; - if (dmanr <= 3) { - dma_outb( count & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - dma_outb( (count>>8) & 0xff, ((dmanr&3)<<1) + 1 + IO_DMA1_BASE ); - } else { - dma_outb( (count>>1) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - dma_outb( (count>>9) & 0xff, ((dmanr&3)<<2) + 2 + IO_DMA2_BASE ); - } -} - - -/* Get DMA residue count. After a DMA transfer, this - * should return zero. Reading this while a DMA transfer is - * still in progress will return unpredictable results. - * If called before the channel has been used, it may return 1. - * Otherwise, it returns the number of _bytes_ left to transfer. - * - * Assumes DMA flip-flop is clear. - */ -static __inline__ int get_dma_residue(unsigned int dmanr) -{ - unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE - : ((dmanr&3)<<2) + 2 + IO_DMA2_BASE; - - /* using short to get 16-bit wrap around */ - unsigned short count; - - count = 1 + dma_inb(io_port); - count += dma_inb(io_port) << 8; - - return (dmanr<=3)? 
count : (count<<1); -} - - -/* These are in kernel/dma.c: */ -extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */ -extern void free_dma(unsigned int dmanr); /* release it again */ - -/* These are in arch/m68k/apollo/dma.c: */ -extern unsigned short dma_map_page(unsigned long phys_addr,int count,int type); -extern void dma_unmap_page(unsigned short dma_addr); - -#endif /* _ASM_APOLLO_DMA_H */ diff --git a/trunk/arch/m68k/include/asm/apollohw.h b/trunk/arch/m68k/include/asm/apollohw.h index a1373b9aa281..635ef4f89010 100644 --- a/trunk/arch/m68k/include/asm/apollohw.h +++ b/trunk/arch/m68k/include/asm/apollohw.h @@ -98,7 +98,7 @@ extern u_long timer_physaddr; #define cpuctrl (*(volatile unsigned int *)(IO_BASE + cpuctrl_physaddr)) #define pica (IO_BASE + pica_physaddr) #define picb (IO_BASE + picb_physaddr) -#define timer (IO_BASE + timer_physaddr) +#define apollo_timer (IO_BASE + timer_physaddr) #define addr_xlat_map ((unsigned short *)(IO_BASE + 0x17000)) #define isaIO2mem(x) (((((x) & 0x3f8) << 7) | (((x) & 0xfc00) >> 6) | ((x) & 0x7)) + 0x40000 + IO_BASE) diff --git a/trunk/arch/m68k/include/asm/bitsperlong.h b/trunk/arch/m68k/include/asm/bitsperlong.h deleted file mode 100644 index 6dc0bb0c13b2..000000000000 --- a/trunk/arch/m68k/include/asm/bitsperlong.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/cputime.h b/trunk/arch/m68k/include/asm/cputime.h deleted file mode 100644 index c79c5e892305..000000000000 --- a/trunk/arch/m68k/include/asm/cputime.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __M68K_CPUTIME_H -#define __M68K_CPUTIME_H - -#include - -#endif /* __M68K_CPUTIME_H */ diff --git a/trunk/arch/m68k/include/asm/delay.h b/trunk/arch/m68k/include/asm/delay.h index 9c09becfd4c9..12d8fe4f1d30 100644 --- a/trunk/arch/m68k/include/asm/delay.h +++ b/trunk/arch/m68k/include/asm/delay.h @@ -43,7 +43,7 @@ static inline void __delay(unsigned long loops) extern void __bad_udelay(void); -#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) +#ifdef CONFIG_CPU_HAS_NO_MULDIV64 /* * The simpler m68k and ColdFire processors do not have a 32*32->64 * multiply instruction. So we need to handle them a little differently. 
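The CONFIG_CPU_HAS_NO_MULDIV64 switch introduced here (in delay.h, and further down in muldi3.c) exists because 68000/ColdFire-class cores only have a 32x32->32 multiply. The usual workaround, and roughly what muldi3's helper does, is to build the 64-bit product from 16-bit partial products. A hedged sketch of that decomposition:

```c
#include <inttypes.h>
#include <stdio.h>

/*
 * 32x32 -> 64 multiply built from 16-bit halves, the trick used when the
 * CPU cannot produce a 64-bit product directly:
 *     a*b = (ah*bh << 32) + ((ah*bl + al*bh) << 16) + al*bl
 * The uint64_t arithmetic below is only for clarity; the real helpers keep
 * the high and low 32-bit halves in separate registers and propagate the
 * carries by hand.
 */
static uint64_t mul32x32(uint32_t a, uint32_t b)
{
    uint32_t ah = a >> 16, al = a & 0xffff;
    uint32_t bh = b >> 16, bl = b & 0xffff;

    uint64_t lo  = (uint64_t)al * bl;
    uint64_t mid = (uint64_t)ah * bl + (uint64_t)al * bh;
    uint64_t hi  = (uint64_t)ah * bh;

    return lo + (mid << 16) + (hi << 32);
}

int main(void)
{
    uint32_t a = 0xdeadbeef, b = 0x12345678;

    printf("composed : %016" PRIx64 "\n", mul32x32(a, b));
    printf("reference: %016" PRIx64 "\n", (uint64_t)a * b);
    return 0;
}
```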
diff --git a/trunk/arch/m68k/include/asm/device.h b/trunk/arch/m68k/include/asm/device.h deleted file mode 100644 index d8f9872b0e2d..000000000000 --- a/trunk/arch/m68k/include/asm/device.h +++ /dev/null @@ -1,7 +0,0 @@ -/* - * Arch specific extensions to struct device - * - * This file is released under the GPLv2 - */ -#include - diff --git a/trunk/arch/m68k/include/asm/emergency-restart.h b/trunk/arch/m68k/include/asm/emergency-restart.h deleted file mode 100644 index 108d8c48e42e..000000000000 --- a/trunk/arch/m68k/include/asm/emergency-restart.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_EMERGENCY_RESTART_H -#define _ASM_EMERGENCY_RESTART_H - -#include - -#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/trunk/arch/m68k/include/asm/errno.h b/trunk/arch/m68k/include/asm/errno.h deleted file mode 100644 index 0d4e188d6ef6..000000000000 --- a/trunk/arch/m68k/include/asm/errno.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_ERRNO_H -#define _M68K_ERRNO_H - -#include - -#endif /* _M68K_ERRNO_H */ diff --git a/trunk/arch/m68k/include/asm/futex.h b/trunk/arch/m68k/include/asm/futex.h deleted file mode 100644 index 6a332a9f099c..000000000000 --- a/trunk/arch/m68k/include/asm/futex.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_FUTEX_H -#define _ASM_FUTEX_H - -#include - -#endif diff --git a/trunk/arch/m68k/include/asm/ioctl.h b/trunk/arch/m68k/include/asm/ioctl.h deleted file mode 100644 index b279fe06dfe5..000000000000 --- a/trunk/arch/m68k/include/asm/ioctl.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/ipcbuf.h b/trunk/arch/m68k/include/asm/ipcbuf.h deleted file mode 100644 index 84c7e51cb6d0..000000000000 --- a/trunk/arch/m68k/include/asm/ipcbuf.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/irq_regs.h b/trunk/arch/m68k/include/asm/irq_regs.h deleted file mode 100644 index 3dd9c0b70270..000000000000 --- a/trunk/arch/m68k/include/asm/irq_regs.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/kdebug.h b/trunk/arch/m68k/include/asm/kdebug.h deleted file mode 100644 index 6ece1b037665..000000000000 --- a/trunk/arch/m68k/include/asm/kdebug.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/kmap_types.h b/trunk/arch/m68k/include/asm/kmap_types.h deleted file mode 100644 index 3413cc1390ec..000000000000 --- a/trunk/arch/m68k/include/asm/kmap_types.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_M68K_KMAP_TYPES_H -#define __ASM_M68K_KMAP_TYPES_H - -#include - -#endif /* __ASM_M68K_KMAP_TYPES_H */ diff --git a/trunk/arch/m68k/include/asm/kvm_para.h b/trunk/arch/m68k/include/asm/kvm_para.h deleted file mode 100644 index 14fab8f0b957..000000000000 --- a/trunk/arch/m68k/include/asm/kvm_para.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/local.h b/trunk/arch/m68k/include/asm/local.h deleted file mode 100644 index 6c259263e1f0..000000000000 --- a/trunk/arch/m68k/include/asm/local.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_M68K_LOCAL_H -#define _ASM_M68K_LOCAL_H - -#include - -#endif /* _ASM_M68K_LOCAL_H */ diff --git a/trunk/arch/m68k/include/asm/local64.h b/trunk/arch/m68k/include/asm/local64.h deleted file mode 100644 index 36c93b5cc239..000000000000 --- a/trunk/arch/m68k/include/asm/local64.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/mac_mouse.h b/trunk/arch/m68k/include/asm/mac_mouse.h deleted file mode 100644 index 39a5c292eaee..000000000000 --- a/trunk/arch/m68k/include/asm/mac_mouse.h +++ /dev/null 
@@ -1,23 +0,0 @@ -#ifndef _ASM_MAC_MOUSE_H -#define _ASM_MAC_MOUSE_H - -/* - * linux/include/asm-m68k/mac_mouse.h - * header file for Macintosh ADB mouse driver - * 27-10-97 Michael Schmitz - * copied from: - * header file for Atari Mouse driver - * by Robert de Vries (robert@and.nl) on 19Jul93 - */ - -struct mouse_status { - char buttons; - short dx; - short dy; - int ready; - int active; - wait_queue_head_t wait; - struct fasync_struct *fasyncptr; -}; - -#endif diff --git a/trunk/arch/m68k/include/asm/mcfmbus.h b/trunk/arch/m68k/include/asm/mcfmbus.h deleted file mode 100644 index 319899c47a2c..000000000000 --- a/trunk/arch/m68k/include/asm/mcfmbus.h +++ /dev/null @@ -1,77 +0,0 @@ -/****************************************************************************/ - -/* - * mcfmbus.h -- Coldfire MBUS support defines. - * - * (C) Copyright 1999, Martin Floeer (mfloeer@axcent.de) - */ - -/****************************************************************************/ - - -#ifndef mcfmbus_h -#define mcfmbus_h - - -#define MCFMBUS_BASE 0x280 -#define MCFMBUS_IRQ_VECTOR 0x19 -#define MCFMBUS_IRQ 0x1 -#define MCFMBUS_CLK 0x3f -#define MCFMBUS_IRQ_LEVEL 0x07 /*IRQ Level 1*/ -#define MCFMBUS_ADDRESS 0x01 - - -/* -* Define the 5307 MBUS register set addresses -*/ - -#define MCFMBUS_MADR 0x00 -#define MCFMBUS_MFDR 0x04 -#define MCFMBUS_MBCR 0x08 -#define MCFMBUS_MBSR 0x0C -#define MCFMBUS_MBDR 0x10 - - -#define MCFMBUS_MADR_ADDR(a) (((a)&0x7F)<<0x01) /*Slave Address*/ - -#define MCFMBUS_MFDR_MBC(a) ((a)&0x3F) /*M-Bus Clock*/ - -/* -* Define bit flags in Control Register -*/ - -#define MCFMBUS_MBCR_MEN (0x80) /* M-Bus Enable */ -#define MCFMBUS_MBCR_MIEN (0x40) /* M-Bus Interrupt Enable */ -#define MCFMBUS_MBCR_MSTA (0x20) /* Master/Slave Mode Select Bit */ -#define MCFMBUS_MBCR_MTX (0x10) /* Transmit/Rcv Mode Select Bit */ -#define MCFMBUS_MBCR_TXAK (0x08) /* Transmit Acknowledge Enable */ -#define MCFMBUS_MBCR_RSTA (0x04) /* Repeat Start */ - -/* -* Define bit flags in Status Register -*/ - -#define MCFMBUS_MBSR_MCF (0x80) /* Data Transfer Complete */ -#define MCFMBUS_MBSR_MAAS (0x40) /* Addressed as a Slave */ -#define MCFMBUS_MBSR_MBB (0x20) /* Bus Busy */ -#define MCFMBUS_MBSR_MAL (0x10) /* Arbitration Lost */ -#define MCFMBUS_MBSR_SRW (0x04) /* Slave Transmit */ -#define MCFMBUS_MBSR_MIF (0x02) /* M-Bus Interrupt */ -#define MCFMBUS_MBSR_RXAK (0x01) /* No Acknowledge Received */ - -/* -* Define bit flags in DATA I/O Register -*/ - -#define MCFMBUS_MBDR_READ (0x01) /* 1=read 0=write MBUS */ - -#define MBUSIOCSCLOCK 1 -#define MBUSIOCGCLOCK 2 -#define MBUSIOCSADDR 3 -#define MBUSIOCGADDR 4 -#define MBUSIOCSSLADDR 5 -#define MBUSIOCGSLADDR 6 -#define MBUSIOCSSUBADDR 7 -#define MBUSIOCGSUBADDR 8 - -#endif diff --git a/trunk/arch/m68k/include/asm/mman.h b/trunk/arch/m68k/include/asm/mman.h deleted file mode 100644 index 8eebf89f5ab1..000000000000 --- a/trunk/arch/m68k/include/asm/mman.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/include/asm/mutex.h b/trunk/arch/m68k/include/asm/mutex.h deleted file mode 100644 index 458c1f7fbc18..000000000000 --- a/trunk/arch/m68k/include/asm/mutex.h +++ /dev/null @@ -1,9 +0,0 @@ -/* - * Pull in the generic implementation for the mutex fastpath. - * - * TODO: implement optimized primitives instead, or leave the generic - * implementation in place, or pick the atomic_xchg() based generic - * implementation. 
(see asm-generic/mutex-xchg.h for details) - */ - -#include diff --git a/trunk/arch/m68k/include/asm/percpu.h b/trunk/arch/m68k/include/asm/percpu.h deleted file mode 100644 index 0859d048faf5..000000000000 --- a/trunk/arch/m68k/include/asm/percpu.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __ASM_M68K_PERCPU_H -#define __ASM_M68K_PERCPU_H - -#include - -#endif /* __ASM_M68K_PERCPU_H */ diff --git a/trunk/arch/m68k/include/asm/resource.h b/trunk/arch/m68k/include/asm/resource.h deleted file mode 100644 index e7d35019f337..000000000000 --- a/trunk/arch/m68k/include/asm/resource.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_RESOURCE_H -#define _M68K_RESOURCE_H - -#include - -#endif /* _M68K_RESOURCE_H */ diff --git a/trunk/arch/m68k/include/asm/sbus.h b/trunk/arch/m68k/include/asm/sbus.h deleted file mode 100644 index bfe3ba147f2e..000000000000 --- a/trunk/arch/m68k/include/asm/sbus.h +++ /dev/null @@ -1,45 +0,0 @@ -/* - * some sbus structures and macros to make usage of sbus drivers possible - */ - -#ifndef __M68K_SBUS_H -#define __M68K_SBUS_H - -struct sbus_dev { - struct { - unsigned int which_io; - unsigned int phys_addr; - } reg_addrs[1]; -}; - -/* sbus IO functions stolen from include/asm-sparc/io.h for the serial driver */ -/* No SBUS on the Sun3, kludge -- sam */ - -static inline void _sbus_writeb(unsigned char val, unsigned long addr) -{ - *(volatile unsigned char *)addr = val; -} - -static inline unsigned char _sbus_readb(unsigned long addr) -{ - return *(volatile unsigned char *)addr; -} - -static inline void _sbus_writel(unsigned long val, unsigned long addr) -{ - *(volatile unsigned long *)addr = val; - -} - -extern inline unsigned long _sbus_readl(unsigned long addr) -{ - return *(volatile unsigned long *)addr; -} - - -#define sbus_readb(a) _sbus_readb((unsigned long)a) -#define sbus_writeb(v, a) _sbus_writeb(v, (unsigned long)a) -#define sbus_readl(a) _sbus_readl((unsigned long)a) -#define sbus_writel(v, a) _sbus_writel(v, (unsigned long)a) - -#endif diff --git a/trunk/arch/m68k/include/asm/scatterlist.h b/trunk/arch/m68k/include/asm/scatterlist.h deleted file mode 100644 index 312505452a1e..000000000000 --- a/trunk/arch/m68k/include/asm/scatterlist.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SCATTERLIST_H -#define _M68K_SCATTERLIST_H - -#include - -#endif /* !(_M68K_SCATTERLIST_H) */ diff --git a/trunk/arch/m68k/include/asm/sections.h b/trunk/arch/m68k/include/asm/sections.h deleted file mode 100644 index 5277e52715ec..000000000000 --- a/trunk/arch/m68k/include/asm/sections.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef _ASM_M68K_SECTIONS_H -#define _ASM_M68K_SECTIONS_H - -#include - -extern char _sbss[], _ebss[]; - -#endif /* _ASM_M68K_SECTIONS_H */ diff --git a/trunk/arch/m68k/include/asm/shm.h b/trunk/arch/m68k/include/asm/shm.h deleted file mode 100644 index fa56ec84a126..000000000000 --- a/trunk/arch/m68k/include/asm/shm.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef _M68K_SHM_H -#define _M68K_SHM_H - - -/* format of page table entries that correspond to shared memory pages - currently out in swap space (see also mm/swap.c): - bits 0-1 (PAGE_PRESENT) is = 0 - bits 8..2 (SWP_TYPE) are = SHM_SWP_TYPE - bits 31..9 are used like this: - bits 15..9 (SHM_ID) the id of the shared memory segment - bits 30..16 (SHM_IDX) the index of the page within the shared memory segment - (actually only bits 25..16 get used since SHMMAX is so low) - bit 31 (SHM_READ_ONLY) flag whether the page belongs to a read-only attach -*/ -/* on the m68k both bits 0 and 1 must be zero */ -/* format on the sun3 is 
similar, but bits 30, 31 are set to zero and all - others are reduced by 2. --m */ - -#ifndef CONFIG_SUN3 -#define SHM_ID_SHIFT 9 -#else -#define SHM_ID_SHIFT 7 -#endif -#define _SHM_ID_BITS 7 -#define SHM_ID_MASK ((1<<_SHM_ID_BITS)-1) - -#define SHM_IDX_SHIFT (SHM_ID_SHIFT+_SHM_ID_BITS) -#define _SHM_IDX_BITS 15 -#define SHM_IDX_MASK ((1<<_SHM_IDX_BITS)-1) - -#endif /* _M68K_SHM_H */ diff --git a/trunk/arch/m68k/include/asm/siginfo.h b/trunk/arch/m68k/include/asm/siginfo.h deleted file mode 100644 index 851d3d784b53..000000000000 --- a/trunk/arch/m68k/include/asm/siginfo.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_SIGINFO_H -#define _M68K_SIGINFO_H - -#include - -#endif diff --git a/trunk/arch/m68k/include/asm/statfs.h b/trunk/arch/m68k/include/asm/statfs.h deleted file mode 100644 index 08d93f14e061..000000000000 --- a/trunk/arch/m68k/include/asm/statfs.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _M68K_STATFS_H -#define _M68K_STATFS_H - -#include - -#endif /* _M68K_STATFS_H */ diff --git a/trunk/arch/m68k/include/asm/topology.h b/trunk/arch/m68k/include/asm/topology.h deleted file mode 100644 index ca173e9f26ff..000000000000 --- a/trunk/arch/m68k/include/asm/topology.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef _ASM_M68K_TOPOLOGY_H -#define _ASM_M68K_TOPOLOGY_H - -#include - -#endif /* _ASM_M68K_TOPOLOGY_H */ diff --git a/trunk/arch/m68k/include/asm/types.h b/trunk/arch/m68k/include/asm/types.h deleted file mode 100644 index 89705adcbd52..000000000000 --- a/trunk/arch/m68k/include/asm/types.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _M68K_TYPES_H -#define _M68K_TYPES_H - -/* - * This file is never included by application software unless - * explicitly requested (e.g., via linux/types.h) in which case the - * application is Linux specific so (user-) name space pollution is - * not a major issue. However, for interoperability, libraries still - * need to be careful to avoid a name clashes. - */ -#include - -/* - * These aren't exported outside the kernel to avoid name space clashes - */ -#ifdef __KERNEL__ - -#define BITS_PER_LONG 32 - -#endif /* __KERNEL__ */ - -#endif /* _M68K_TYPES_H */ diff --git a/trunk/arch/m68k/include/asm/unaligned.h b/trunk/arch/m68k/include/asm/unaligned.h index f4043ae63db1..2b3ca0bf7a0d 100644 --- a/trunk/arch/m68k/include/asm/unaligned.h +++ b/trunk/arch/m68k/include/asm/unaligned.h @@ -2,7 +2,7 @@ #define _ASM_M68K_UNALIGNED_H -#if defined(CONFIG_COLDFIRE) || defined(CONFIG_M68000) +#ifdef CONFIG_CPU_HAS_NO_UNALIGNED #include #include #include @@ -12,7 +12,7 @@ #else /* - * The m68k can do unaligned accesses itself. + * The m68k can do unaligned accesses itself. 
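The unaligned.h change just above keys the choice between byte-wise access and a plain load off the new CPU_HAS_NO_UNALIGNED symbol instead of listing CPU families. The portable way to express the byte-wise path in C is a memcpy() through a temporary, which the compiler lowers to whatever the CPU allows; a small sketch, with the helper written out here rather than taken from the generic headers:

```c
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

/* Safe on every CPU: the compiler emits byte loads where an unaligned
 * word load would fault (68000, ColdFire) and a single load elsewhere. */
static uint32_t get_unaligned_u32(const void *p)
{
    uint32_t v;

    memcpy(&v, p, sizeof(v));
    return v;
}

int main(void)
{
    unsigned char buf[8] = { 0, 0x11, 0x22, 0x33, 0x44, 0, 0, 0 };

    /* buf + 1 is deliberately misaligned for a 4-byte access;
     * the printed value depends on host endianness. */
    printf("0x%08" PRIx32 "\n", get_unaligned_u32(buf + 1));
    return 0;
}
```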
*/ #include #include diff --git a/trunk/arch/m68k/include/asm/xor.h b/trunk/arch/m68k/include/asm/xor.h deleted file mode 100644 index c82eb12a5b18..000000000000 --- a/trunk/arch/m68k/include/asm/xor.h +++ /dev/null @@ -1 +0,0 @@ -#include diff --git a/trunk/arch/m68k/kernel/process.c b/trunk/arch/m68k/kernel/process.c index c488e3cfab53..ac2892e49c7c 100644 --- a/trunk/arch/m68k/kernel/process.c +++ b/trunk/arch/m68k/kernel/process.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -75,8 +76,10 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) idle(); + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/trunk/arch/m68k/kernel/setup_no.c b/trunk/arch/m68k/kernel/setup_no.c index 7dc186b7a85f..71fb29938dba 100644 --- a/trunk/arch/m68k/kernel/setup_no.c +++ b/trunk/arch/m68k/kernel/setup_no.c @@ -218,13 +218,10 @@ void __init setup_arch(char **cmdline_p) printk(KERN_INFO "Motorola M5235EVB support (C)2005 Syn-tech Systems, Inc. (Jate Sujjavanich)\n"); #endif - pr_debug("KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x " - "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, - (int) &_sdata, (int) &_edata, - (int) &_sbss, (int) &_ebss); - pr_debug("MEMORY -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x\n ", - (int) &_ebss, (int) memory_start, - (int) memory_start, (int) memory_end); + pr_debug("KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p BSS=0x%p-0x%p\n", + _stext, _etext, _sdata, _edata, __bss_start, __bss_stop); + pr_debug("MEMORY -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx\n ", + __bss_stop, memory_start, memory_start, memory_end); /* Keep a copy of command line */ *cmdline_p = &command_line[0]; diff --git a/trunk/arch/m68k/kernel/sys_m68k.c b/trunk/arch/m68k/kernel/sys_m68k.c index 8623f8dc16f8..9a5932ec3689 100644 --- a/trunk/arch/m68k/kernel/sys_m68k.c +++ b/trunk/arch/m68k/kernel/sys_m68k.c @@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5, goto bad_access; } - mem_value = *mem; + /* + * No need to check for EFAULT; we know that the page is + * present and writable. + */ + __get_user(mem_value, mem); if (mem_value == oldval) - *mem = newval; + __put_user(newval, mem); pte_unmap_unlock(pte, ptl); up_read(&mm->mmap_sem); diff --git a/trunk/arch/m68k/kernel/vmlinux-nommu.lds b/trunk/arch/m68k/kernel/vmlinux-nommu.lds index 40e02d9c38b4..06a763f49fd3 100644 --- a/trunk/arch/m68k/kernel/vmlinux-nommu.lds +++ b/trunk/arch/m68k/kernel/vmlinux-nommu.lds @@ -78,9 +78,7 @@ SECTIONS { __init_end = .; } - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _end = .; diff --git a/trunk/arch/m68k/kernel/vmlinux-std.lds b/trunk/arch/m68k/kernel/vmlinux-std.lds index 63407c836826..d0993594f558 100644 --- a/trunk/arch/m68k/kernel/vmlinux-std.lds +++ b/trunk/arch/m68k/kernel/vmlinux-std.lds @@ -31,9 +31,7 @@ SECTIONS RW_DATA_SECTION(16, PAGE_SIZE, THREAD_SIZE) - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _edata = .; /* End of data section */ diff --git a/trunk/arch/m68k/kernel/vmlinux-sun3.lds b/trunk/arch/m68k/kernel/vmlinux-sun3.lds index ad0f46d64c0b..8080469ee6c1 100644 --- a/trunk/arch/m68k/kernel/vmlinux-sun3.lds +++ b/trunk/arch/m68k/kernel/vmlinux-sun3.lds @@ -44,9 +44,7 @@ __init_begin = .; . = ALIGN(PAGE_SIZE); __init_end = .; - _sbss = .; BSS_SECTION(0, 0, 0) - _ebss = .; _end = . 
; diff --git a/trunk/arch/m68k/lib/muldi3.c b/trunk/arch/m68k/lib/muldi3.c index 79e928a525d0..ee5f0b1b5c5d 100644 --- a/trunk/arch/m68k/lib/muldi3.c +++ b/trunk/arch/m68k/lib/muldi3.c @@ -19,7 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to the Free Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ -#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE) +#ifdef CONFIG_CPU_HAS_NO_MULDIV64 #define SI_TYPE_SIZE 32 #define __BITS4 (SI_TYPE_SIZE / 4) diff --git a/trunk/arch/m68k/mm/init_mm.c b/trunk/arch/m68k/mm/init_mm.c index f77f258dce3a..282f9de68966 100644 --- a/trunk/arch/m68k/mm/init_mm.c +++ b/trunk/arch/m68k/mm/init_mm.c @@ -104,7 +104,7 @@ void __init print_memmap(void) MLK_ROUNDUP(__init_begin, __init_end), MLK_ROUNDUP(_stext, _etext), MLK_ROUNDUP(_sdata, _edata), - MLK_ROUNDUP(_sbss, _ebss)); + MLK_ROUNDUP(__bss_start, __bss_stop)); } void __init mem_init(void) diff --git a/trunk/arch/m68k/mm/init_no.c b/trunk/arch/m68k/mm/init_no.c index 345ec0d83e3d..688e3664aea0 100644 --- a/trunk/arch/m68k/mm/init_no.c +++ b/trunk/arch/m68k/mm/init_no.c @@ -91,7 +91,7 @@ void __init mem_init(void) totalram_pages = free_all_bootmem(); codek = (_etext - _stext) >> 10; - datak = (_ebss - _sdata) >> 10; + datak = (__bss_stop - _sdata) >> 10; initk = (__init_begin - __init_end) >> 10; tmp = nr_free_pages() << PAGE_SHIFT; diff --git a/trunk/arch/m68k/platform/68328/head-de2.S b/trunk/arch/m68k/platform/68328/head-de2.S index f632fdcb93e9..537d3245b539 100644 --- a/trunk/arch/m68k/platform/68328/head-de2.S +++ b/trunk/arch/m68k/platform/68328/head-de2.S @@ -60,8 +60,8 @@ _start: * Move ROM filesystem above bss :-) */ - moveal #_sbss, %a0 /* romfs at the start of bss */ - moveal #_ebss, %a1 /* Set up destination */ + moveal #__bss_start, %a0 /* romfs at the start of bss */ + moveal #__bss_stop, %a1 /* Set up destination */ movel %a0, %a2 /* Copy of bss start */ movel 8(%a0), %d1 /* Get size of ROMFS */ @@ -84,8 +84,8 @@ _start: * Initialize BSS segment to 0 */ - lea _sbss, %a0 - lea _ebss, %a1 + lea __bss_start, %a0 + lea __bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ 2: cmpal %a0, %a1 diff --git a/trunk/arch/m68k/platform/68328/head-pilot.S b/trunk/arch/m68k/platform/68328/head-pilot.S index 2ebfd6420818..45a9dad29e3d 100644 --- a/trunk/arch/m68k/platform/68328/head-pilot.S +++ b/trunk/arch/m68k/platform/68328/head-pilot.S @@ -110,7 +110,7 @@ L0: movel #CONFIG_VECTORBASE, %d7 addl #16, %d7 moveal %d7, %a0 - moveal #_ebss, %a1 + moveal #__bss_stop, %a1 lea %a1@(512), %a2 DBG_PUTC('C') @@ -138,8 +138,8 @@ LD1: DBG_PUTC('E') - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -150,7 +150,7 @@ L1: DBG_PUTC('F') /* Copy command line from end of bss to command line */ - moveal #_ebss, %a0 + moveal #__bss_stop, %a0 moveal #command_line, %a1 lea %a1@(512), %a2 @@ -165,7 +165,7 @@ L3: movel #_sdata, %d0 movel %d0, _rambase - movel #_ebss, %d0 + movel #__bss_stop, %d0 movel %d0, _ramstart movel %a4, %d0 diff --git a/trunk/arch/m68k/platform/68328/head-ram.S b/trunk/arch/m68k/platform/68328/head-ram.S index 7f1aeeacb219..5189ef926098 100644 --- a/trunk/arch/m68k/platform/68328/head-ram.S +++ b/trunk/arch/m68k/platform/68328/head-ram.S @@ -76,8 +76,8 @@ pclp3: beq pclp3 #endif /* DEBUG */ moveal #0x007ffff0, %ssp - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 >= %a1 */ L1: diff --git 
a/trunk/arch/m68k/platform/68328/head-rom.S b/trunk/arch/m68k/platform/68328/head-rom.S index a5ff96d0295f..3dff98ba2e97 100644 --- a/trunk/arch/m68k/platform/68328/head-rom.S +++ b/trunk/arch/m68k/platform/68328/head-rom.S @@ -59,8 +59,8 @@ _stext: movew #0x2700,%sr cmpal %a1, %a2 bhi 1b - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ 1: @@ -70,7 +70,7 @@ _stext: movew #0x2700,%sr movel #_sdata, %d0 movel %d0, _rambase - movel #_ebss, %d0 + movel #__bss_stop, %d0 movel %d0, _ramstart movel #RAMEND-CONFIG_MEMORY_RESERVE*0x100000, %d0 movel %d0, _ramend diff --git a/trunk/arch/m68k/platform/68360/head-ram.S b/trunk/arch/m68k/platform/68360/head-ram.S index 8eb94fb6b971..acd213170d80 100644 --- a/trunk/arch/m68k/platform/68360/head-ram.S +++ b/trunk/arch/m68k/platform/68360/head-ram.S @@ -219,8 +219,8 @@ LD1: cmp.l #_edata, %a1 blt LD1 - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -234,7 +234,7 @@ load_quicc: store_ram_size: /* Set ram size information */ move.l #_sdata, _rambase - move.l #_ebss, _ramstart + move.l #__bss_stop, _ramstart move.l #RAMEND, %d0 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ move.l %d0, _ramend /* Different from RAMEND.*/ diff --git a/trunk/arch/m68k/platform/68360/head-rom.S b/trunk/arch/m68k/platform/68360/head-rom.S index 97510e55b802..dfc756d99886 100644 --- a/trunk/arch/m68k/platform/68360/head-rom.S +++ b/trunk/arch/m68k/platform/68360/head-rom.S @@ -13,7 +13,7 @@ */ .global _stext -.global _sbss +.global __bss_start .global _start .global _rambase @@ -229,8 +229,8 @@ LD1: cmp.l #_edata, %a1 blt LD1 - moveal #_sbss, %a0 - moveal #_ebss, %a1 + moveal #__bss_start, %a0 + moveal #__bss_stop, %a1 /* Copy 0 to %a0 until %a0 == %a1 */ L1: @@ -244,7 +244,7 @@ load_quicc: store_ram_size: /* Set ram size information */ move.l #_sdata, _rambase - move.l #_ebss, _ramstart + move.l #__bss_stop, _ramstart move.l #RAMEND, %d0 sub.l #0x1000, %d0 /* Reserve 4K for stack space.*/ move.l %d0, _ramend /* Different from RAMEND.*/ diff --git a/trunk/arch/m68k/platform/coldfire/clk.c b/trunk/arch/m68k/platform/coldfire/clk.c index 75f9ee967ea7..9cd13b4ce42b 100644 --- a/trunk/arch/m68k/platform/coldfire/clk.c +++ b/trunk/arch/m68k/platform/coldfire/clk.c @@ -146,9 +146,3 @@ struct clk_ops clk_ops1 = { }; #endif /* MCFPM_PPMCR1 */ #endif /* MCFPM_PPMCR0 */ - -struct clk *devm_clk_get(struct device *dev, const char *id) -{ - return NULL; -} -EXPORT_SYMBOL(devm_clk_get); diff --git a/trunk/arch/m68k/platform/coldfire/head.S b/trunk/arch/m68k/platform/coldfire/head.S index 4e0c9eb3bd1f..b88f5716f357 100644 --- a/trunk/arch/m68k/platform/coldfire/head.S +++ b/trunk/arch/m68k/platform/coldfire/head.S @@ -230,8 +230,8 @@ _vstart: /* * Move ROM filesystem above bss :-) */ - lea _sbss,%a0 /* get start of bss */ - lea _ebss,%a1 /* set up destination */ + lea __bss_start,%a0 /* get start of bss */ + lea __bss_stop,%a1 /* set up destination */ movel %a0,%a2 /* copy of bss start */ movel 8(%a0),%d0 /* get size of ROMFS */ @@ -249,7 +249,7 @@ _copy_romfs: bne _copy_romfs #else /* CONFIG_ROMFS_FS */ - lea _ebss,%a1 + lea __bss_stop,%a1 movel %a1,_ramstart #endif /* CONFIG_ROMFS_FS */ @@ -257,8 +257,8 @@ _copy_romfs: /* * Zero out the bss region. 
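Most of the m68k assembly hunks above do the same two things with the newly standardized __bss_start/__bss_stop symbols: zero the region between them and record __bss_stop as the start of free RAM. In C, early startup code typically does the equivalent as below; this is a sketch only, with the linker-script symbols faked by a local buffer so it runs as an ordinary program.

```c
#include <stdio.h>
#include <string.h>

/* In the kernel these are linker-script symbols:
 *     extern char __bss_start[], __bss_stop[];
 * They are modelled with a plain buffer here so the sketch is self-contained. */
static char fake_bss[64];
static char *bss_start = fake_bss;
static char *bss_stop  = fake_bss + sizeof(fake_bss);

static void clear_bss(void)
{
    /* C equivalent of the "store 0 until %a0 reaches %a1" assembly loops. */
    memset(bss_start, 0, (size_t)(bss_stop - bss_start));
}

int main(void)
{
    memset(fake_bss, 0xaa, sizeof(fake_bss));              /* pretend stale junk */
    clear_bss();
    printf("fake_bss[0] after clear: %d\n", fake_bss[0]);  /* prints 0 */
    return 0;
}
```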
*/ - lea _sbss,%a0 /* get start of bss */ - lea _ebss,%a1 /* get end of bss */ + lea __bss_start,%a0 /* get start of bss */ + lea __bss_stop,%a1 /* get end of bss */ clrl %d0 /* set value */ _clear_bss: movel %d0,(%a0)+ /* clear each word */ diff --git a/trunk/arch/m68k/sun3/prom/init.c b/trunk/arch/m68k/sun3/prom/init.c index d8e6349336b4..eeba067d565f 100644 --- a/trunk/arch/m68k/sun3/prom/init.c +++ b/trunk/arch/m68k/sun3/prom/init.c @@ -22,57 +22,13 @@ int prom_root_node; struct linux_nodeops *prom_nodeops; /* You must call prom_init() before you attempt to use any of the - * routines in the prom library. It returns 0 on success, 1 on - * failure. It gets passed the pointer to the PROM vector. + * routines in the prom library. + * It gets passed the pointer to the PROM vector. */ -extern void prom_meminit(void); -extern void prom_ranges_init(void); - void __init prom_init(struct linux_romvec *rp) { romvec = rp; -#ifndef CONFIG_SUN3 - switch(romvec->pv_romvers) { - case 0: - prom_vers = PROM_V0; - break; - case 2: - prom_vers = PROM_V2; - break; - case 3: - prom_vers = PROM_V3; - break; - case 4: - prom_vers = PROM_P1275; - prom_printf("PROMLIB: Sun IEEE Prom not supported yet\n"); - prom_halt(); - break; - default: - prom_printf("PROMLIB: Bad PROM version %d\n", - romvec->pv_romvers); - prom_halt(); - break; - }; - - prom_rev = romvec->pv_plugin_revision; - prom_prev = romvec->pv_printrev; - prom_nodeops = romvec->pv_nodeops; - - prom_root_node = prom_getsibling(0); - if((prom_root_node == 0) || (prom_root_node == -1)) - prom_halt(); - - if((((unsigned long) prom_nodeops) == 0) || - (((unsigned long) prom_nodeops) == -1)) - prom_halt(); - - prom_meminit(); - - prom_ranges_init(); -#endif -// printk("PROMLIB: Sun Boot Prom Version %d Revision %d\n", -// romvec->pv_romvers, prom_rev); /* Initialization successful. */ return; diff --git a/trunk/arch/microblaze/include/asm/sections.h b/trunk/arch/microblaze/include/asm/sections.h index 4487e150b455..c07ed5d2a820 100644 --- a/trunk/arch/microblaze/include/asm/sections.h +++ b/trunk/arch/microblaze/include/asm/sections.h @@ -18,10 +18,6 @@ extern char _ssbss[], _esbss[]; extern unsigned long __ivt_start[], __ivt_end[]; extern char _etext[], _stext[]; -# ifdef CONFIG_MTD_UCLINUX -extern char *_ebss; -# endif - extern u32 _fdt_start[], _fdt_end[]; # endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/microblaze/kernel/microblaze_ksyms.c b/trunk/arch/microblaze/kernel/microblaze_ksyms.c index bb4907c828dc..2b25bcf05c00 100644 --- a/trunk/arch/microblaze/kernel/microblaze_ksyms.c +++ b/trunk/arch/microblaze/kernel/microblaze_ksyms.c @@ -21,9 +21,6 @@ #include #include -extern char *_ebss; -EXPORT_SYMBOL_GPL(_ebss); - #ifdef CONFIG_FUNCTION_TRACER extern void _mcount(void); EXPORT_SYMBOL(_mcount); diff --git a/trunk/arch/microblaze/kernel/setup.c b/trunk/arch/microblaze/kernel/setup.c index 16d8dfd9094b..4da971d4392f 100644 --- a/trunk/arch/microblaze/kernel/setup.c +++ b/trunk/arch/microblaze/kernel/setup.c @@ -121,7 +121,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, /* Move ROMFS out of BSS before clearing it */ if (romfs_size > 0) { - memmove(&_ebss, (int *)romfs_base, romfs_size); + memmove(&__bss_stop, (int *)romfs_base, romfs_size); klimit += romfs_size; } #endif @@ -165,7 +165,7 @@ void __init machine_early_init(const char *cmdline, unsigned int ram, BUG_ON(romfs_size < 0); /* What else can we do? 
*/ printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n", - romfs_size, romfs_base, (unsigned)&_ebss); + romfs_size, romfs_base, (unsigned)&__bss_stop); printk("New klimit: 0x%08x\n", (unsigned)klimit); #endif diff --git a/trunk/arch/microblaze/kernel/vmlinux.lds.S b/trunk/arch/microblaze/kernel/vmlinux.lds.S index 109e9d86ade4..936d01a689d7 100644 --- a/trunk/arch/microblaze/kernel/vmlinux.lds.S +++ b/trunk/arch/microblaze/kernel/vmlinux.lds.S @@ -131,7 +131,6 @@ SECTIONS { *(COMMON) . = ALIGN (4) ; __bss_stop = . ; - _ebss = . ; } . = ALIGN(PAGE_SIZE); _end = .; diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig index 331d574df99c..faf65286574e 100644 --- a/trunk/arch/mips/Kconfig +++ b/trunk/arch/mips/Kconfig @@ -89,6 +89,7 @@ config ATH79 select CEVT_R4K select CSRC_R4K select DMA_NONCOHERENT + select HAVE_CLK select IRQ_CPU select MIPS_MACHINE select SYS_HAS_CPU_MIPS32_R2 diff --git a/trunk/arch/mips/alchemy/board-mtx1.c b/trunk/arch/mips/alchemy/board-mtx1.c index 99969484c475..a124c251c0c9 100644 --- a/trunk/arch/mips/alchemy/board-mtx1.c +++ b/trunk/arch/mips/alchemy/board-mtx1.c @@ -228,6 +228,8 @@ static int mtx1_pci_idsel(unsigned int devsel, int assert) * adapter on the mtx-1 "singleboard" variant. It triggers a custom * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals. */ + udelay(1); + if (assert && devsel != 0) /* Suppress signal to Cardbus */ alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */ diff --git a/trunk/arch/mips/ath79/dev-usb.c b/trunk/arch/mips/ath79/dev-usb.c index 36e9570e7bc4..b2a2311ec85b 100644 --- a/trunk/arch/mips/ath79/dev-usb.c +++ b/trunk/arch/mips/ath79/dev-usb.c @@ -145,6 +145,8 @@ static void __init ar7240_usb_setup(void) ath79_ohci_resources[0].start = AR7240_OHCI_BASE; ath79_ohci_resources[0].end = AR7240_OHCI_BASE + AR7240_OHCI_SIZE - 1; + ath79_ohci_resources[1].start = ATH79_CPU_IRQ_USB; + ath79_ohci_resources[1].end = ATH79_CPU_IRQ_USB; platform_device_register(&ath79_ohci_device); } diff --git a/trunk/arch/mips/ath79/gpio.c b/trunk/arch/mips/ath79/gpio.c index 29054f211832..48fe762d2526 100644 --- a/trunk/arch/mips/ath79/gpio.c +++ b/trunk/arch/mips/ath79/gpio.c @@ -188,8 +188,10 @@ void __init ath79_gpio_init(void) if (soc_is_ar71xx()) ath79_gpio_count = AR71XX_GPIO_COUNT; - else if (soc_is_ar724x()) - ath79_gpio_count = AR724X_GPIO_COUNT; + else if (soc_is_ar7240()) + ath79_gpio_count = AR7240_GPIO_COUNT; + else if (soc_is_ar7241() || soc_is_ar7242()) + ath79_gpio_count = AR7241_GPIO_COUNT; else if (soc_is_ar913x()) ath79_gpio_count = AR913X_GPIO_COUNT; else if (soc_is_ar933x()) diff --git a/trunk/arch/mips/bcm63xx/dev-spi.c b/trunk/arch/mips/bcm63xx/dev-spi.c index e39f73048d4f..f1c9c3e2f678 100644 --- a/trunk/arch/mips/bcm63xx/dev-spi.c +++ b/trunk/arch/mips/bcm63xx/dev-spi.c @@ -106,11 +106,15 @@ int __init bcm63xx_spi_register(void) if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { spi_resources[0].end += BCM_6338_RSET_SPI_SIZE - 1; spi_pdata.fifo_size = SPI_6338_MSG_DATA_SIZE; + spi_pdata.msg_type_shift = SPI_6338_MSG_TYPE_SHIFT; + spi_pdata.msg_ctl_width = SPI_6338_MSG_CTL_WIDTH; } if (BCMCPU_IS_6358() || BCMCPU_IS_6368()) { spi_resources[0].end += BCM_6358_RSET_SPI_SIZE - 1; spi_pdata.fifo_size = SPI_6358_MSG_DATA_SIZE; + spi_pdata.msg_type_shift = SPI_6358_MSG_TYPE_SHIFT; + spi_pdata.msg_ctl_width = SPI_6358_MSG_CTL_WIDTH; } bcm63xx_spi_regs_init(); diff --git a/trunk/arch/mips/cavium-octeon/octeon-irq.c b/trunk/arch/mips/cavium-octeon/octeon-irq.c index 7fb1f222b8a5..274cd4fad30c 100644 --- 
a/trunk/arch/mips/cavium-octeon/octeon-irq.c +++ b/trunk/arch/mips/cavium-octeon/octeon-irq.c @@ -61,6 +61,12 @@ static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, octeon_irq_ciu_to_irq[line][bit] = irq; } +static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, + int irq, int line, int bit) +{ + irq_domain_associate(domain, irq, line << 6 | bit); +} + static int octeon_coreid_for_cpu(int cpu) { #ifdef CONFIG_SMP @@ -183,19 +189,9 @@ static void __init octeon_irq_init_core(void) mutex_init(&cd->core_irq_mutex); irq = OCTEON_IRQ_SW0 + i; - switch (irq) { - case OCTEON_IRQ_TIMER: - case OCTEON_IRQ_SW0: - case OCTEON_IRQ_SW1: - case OCTEON_IRQ_5: - case OCTEON_IRQ_PERF: - irq_set_chip_data(irq, cd); - irq_set_chip_and_handler(irq, &octeon_irq_chip_core, - handle_percpu_irq); - break; - default: - break; - } + irq_set_chip_data(irq, cd); + irq_set_chip_and_handler(irq, &octeon_irq_chip_core, + handle_percpu_irq); } } @@ -890,7 +886,6 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d, unsigned int type; unsigned int pin; unsigned int trigger; - struct octeon_irq_gpio_domain_data *gpiod; if (d->of_node != node) return -EINVAL; @@ -925,8 +920,7 @@ static int octeon_irq_gpio_xlat(struct irq_domain *d, break; } *out_type = type; - gpiod = d->host_data; - *out_hwirq = gpiod->base_hwirq + pin; + *out_hwirq = pin; return 0; } @@ -996,19 +990,21 @@ static int octeon_irq_ciu_map(struct irq_domain *d, static int octeon_irq_gpio_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { - unsigned int line = hw >> 6; - unsigned int bit = hw & 63; + struct octeon_irq_gpio_domain_data *gpiod = d->host_data; + unsigned int line, bit; if (!octeon_irq_virq_in_range(virq)) return -EINVAL; + hw += gpiod->base_hwirq; + line = hw >> 6; + bit = hw & 63; if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; octeon_irq_set_ciu_mapping(virq, line, bit, octeon_irq_gpio_chip, octeon_irq_handle_gpio); - return 0; } @@ -1149,6 +1145,7 @@ static void __init octeon_irq_init_ciu(void) struct irq_chip *chip_wd; struct device_node *gpio_node; struct device_node *ciu_node; + struct irq_domain *ciu_domain = NULL; octeon_irq_init_ciu_percpu(); octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; @@ -1177,31 +1174,6 @@ static void __init octeon_irq_init_ciu(void) /* Mips internal */ octeon_irq_init_core(); - /* CIU_0 */ - for (i = 0; i < 16; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WORKQ0, 0, i + 0, chip, handle_level_irq); - - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); - - for (i = 0; i < 4; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_INT0, 0, i + 36, chip, handle_level_irq); - for (i = 0; i < 4; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_PCI_MSI0, 0, i + 40, chip, handle_level_irq); - - octeon_irq_set_ciu_mapping(OCTEON_IRQ_RML, 0, 46, chip, handle_level_irq); - for (i = 0; i < 4; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_TIMER0, 0, i + 52, chip, handle_edge_irq); - - octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB0, 0, 56, chip, handle_level_irq); - octeon_irq_set_ciu_mapping(OCTEON_IRQ_BOOTDMA, 0, 63, chip, handle_level_irq); - - /* CIU_1 */ - for (i = 0; i < 16; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); - - octeon_irq_set_ciu_mapping(OCTEON_IRQ_USB1, 1, 17, chip, handle_level_irq); - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); if 
(gpio_node) { struct octeon_irq_gpio_domain_data *gpiod; @@ -1219,10 +1191,35 @@ static void __init octeon_irq_init_ciu(void) ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); if (ciu_node) { - irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); + ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); of_node_put(ciu_node); } else - pr_warn("Cannot find device node for cavium,octeon-3860-ciu.\n"); + panic("Cannot find device node for cavium,octeon-3860-ciu."); + + /* CIU_0 */ + for (i = 0; i < 16; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); + + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, chip_mbox, handle_percpu_irq); + octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, chip_mbox, handle_percpu_irq); + + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); + + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); + for (i = 0; i < 4; i++) + octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); + + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_BOOTDMA, 0, 63); + + /* CIU_1 */ + for (i = 0; i < 16; i++) + octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, chip_wd, handle_level_irq); + + octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); diff --git a/trunk/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/trunk/arch/mips/include/asm/mach-ath79/ar71xx_regs.h index 1caa78ad06d5..dde504477fac 100644 --- a/trunk/arch/mips/include/asm/mach-ath79/ar71xx_regs.h +++ b/trunk/arch/mips/include/asm/mach-ath79/ar71xx_regs.h @@ -393,7 +393,8 @@ #define AR71XX_GPIO_REG_FUNC 0x28 #define AR71XX_GPIO_COUNT 16 -#define AR724X_GPIO_COUNT 18 +#define AR7240_GPIO_COUNT 18 +#define AR7241_GPIO_COUNT 20 #define AR913X_GPIO_COUNT 22 #define AR933X_GPIO_COUNT 30 #define AR934X_GPIO_COUNT 23 diff --git a/trunk/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h index 4476fa03bf36..6ddae926bf79 100644 --- a/trunk/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h +++ b/trunk/arch/mips/include/asm/mach-ath79/cpu-feature-overrides.h @@ -42,7 +42,6 @@ #define cpu_has_mips64r1 0 #define cpu_has_mips64r2 0 -#define cpu_has_dsp 0 #define cpu_has_mipsmt 0 #define cpu_has_64bits 0 diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h index 7d98dbe5d4b5..c9bae1362606 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_spi.h @@ -9,6 +9,8 @@ int __init bcm63xx_spi_register(void); struct bcm63xx_spi_pdata { unsigned int fifo_size; + unsigned int msg_type_shift; + unsigned int msg_ctl_width; int bus_num; int num_chipselect; u32 speed_hz; diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h index 4ccc2a748aff..61f2a2a5099d 100644 --- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -1054,7 +1054,8 @@ #define SPI_6338_FILL_BYTE 0x07 #define SPI_6338_MSG_TAIL 0x09 #define SPI_6338_RX_TAIL 0x0b -#define 
SPI_6338_MSG_CTL 0x40 +#define SPI_6338_MSG_CTL 0x40 /* 8-bits register */ +#define SPI_6338_MSG_CTL_WIDTH 8 #define SPI_6338_MSG_DATA 0x41 #define SPI_6338_MSG_DATA_SIZE 0x3f #define SPI_6338_RX_DATA 0x80 @@ -1070,7 +1071,8 @@ #define SPI_6348_FILL_BYTE 0x07 #define SPI_6348_MSG_TAIL 0x09 #define SPI_6348_RX_TAIL 0x0b -#define SPI_6348_MSG_CTL 0x40 +#define SPI_6348_MSG_CTL 0x40 /* 8-bits register */ +#define SPI_6348_MSG_CTL_WIDTH 8 #define SPI_6348_MSG_DATA 0x41 #define SPI_6348_MSG_DATA_SIZE 0x3f #define SPI_6348_RX_DATA 0x80 @@ -1078,6 +1080,7 @@ /* BCM 6358 SPI core */ #define SPI_6358_MSG_CTL 0x00 /* 16-bits register */ +#define SPI_6358_MSG_CTL_WIDTH 16 #define SPI_6358_MSG_DATA 0x02 #define SPI_6358_MSG_DATA_SIZE 0x21e #define SPI_6358_RX_DATA 0x400 @@ -1094,6 +1097,7 @@ /* BCM 6358 SPI core */ #define SPI_6368_MSG_CTL 0x00 /* 16-bits register */ +#define SPI_6368_MSG_CTL_WIDTH 16 #define SPI_6368_MSG_DATA 0x02 #define SPI_6368_MSG_DATA_SIZE 0x21e #define SPI_6368_RX_DATA 0x400 @@ -1115,7 +1119,10 @@ #define SPI_HD_W 0x01 #define SPI_HD_R 0x02 #define SPI_BYTE_CNT_SHIFT 0 -#define SPI_MSG_TYPE_SHIFT 14 +#define SPI_6338_MSG_TYPE_SHIFT 6 +#define SPI_6348_MSG_TYPE_SHIFT 6 +#define SPI_6358_MSG_TYPE_SHIFT 14 +#define SPI_6368_MSG_TYPE_SHIFT 14 /* Command */ #define SPI_CMD_NOOP 0x00 diff --git a/trunk/arch/mips/include/asm/mach-cavium-octeon/irq.h b/trunk/arch/mips/include/asm/mach-cavium-octeon/irq.h index 418992042f6f..c22a3078bf11 100644 --- a/trunk/arch/mips/include/asm/mach-cavium-octeon/irq.h +++ b/trunk/arch/mips/include/asm/mach-cavium-octeon/irq.h @@ -21,14 +21,10 @@ enum octeon_irq { OCTEON_IRQ_TIMER, /* sources in CIU_INTX_EN0 */ OCTEON_IRQ_WORKQ0, - OCTEON_IRQ_GPIO0 = OCTEON_IRQ_WORKQ0 + 16, - OCTEON_IRQ_WDOG0 = OCTEON_IRQ_GPIO0 + 16, + OCTEON_IRQ_WDOG0 = OCTEON_IRQ_WORKQ0 + 16, OCTEON_IRQ_WDOG15 = OCTEON_IRQ_WDOG0 + 15, OCTEON_IRQ_MBOX0 = OCTEON_IRQ_WDOG0 + 16, OCTEON_IRQ_MBOX1, - OCTEON_IRQ_UART0, - OCTEON_IRQ_UART1, - OCTEON_IRQ_UART2, OCTEON_IRQ_PCI_INT0, OCTEON_IRQ_PCI_INT1, OCTEON_IRQ_PCI_INT2, @@ -38,8 +34,6 @@ enum octeon_irq { OCTEON_IRQ_PCI_MSI2, OCTEON_IRQ_PCI_MSI3, - OCTEON_IRQ_TWSI, - OCTEON_IRQ_TWSI2, OCTEON_IRQ_RML, OCTEON_IRQ_TIMER0, OCTEON_IRQ_TIMER1, @@ -47,8 +41,6 @@ enum octeon_irq { OCTEON_IRQ_TIMER3, OCTEON_IRQ_USB0, OCTEON_IRQ_USB1, - OCTEON_IRQ_MII0, - OCTEON_IRQ_MII1, OCTEON_IRQ_BOOTDMA, #ifndef CONFIG_PCI_MSI OCTEON_IRQ_LAST = 127 diff --git a/trunk/arch/mips/include/asm/module.h b/trunk/arch/mips/include/asm/module.h index 7531ecd654d6..dca8bce8c7ab 100644 --- a/trunk/arch/mips/include/asm/module.h +++ b/trunk/arch/mips/include/asm/module.h @@ -10,6 +10,7 @@ struct mod_arch_specific { struct list_head dbe_list; const struct exception_table_entry *dbe_start; const struct exception_table_entry *dbe_end; + struct mips_hi16 *r_mips_hi16_list; }; typedef uint8_t Elf64_Byte; /* Type for a 8-bit quantity. 
*/ diff --git a/trunk/arch/mips/include/asm/r4k-timer.h b/trunk/arch/mips/include/asm/r4k-timer.h index a37d12b3b61c..afe9e0e03fe9 100644 --- a/trunk/arch/mips/include/asm/r4k-timer.h +++ b/trunk/arch/mips/include/asm/r4k-timer.h @@ -12,16 +12,16 @@ #ifdef CONFIG_SYNC_R4K -extern void synchronise_count_master(void); -extern void synchronise_count_slave(void); +extern void synchronise_count_master(int cpu); +extern void synchronise_count_slave(int cpu); #else -static inline void synchronise_count_master(void) +static inline void synchronise_count_master(int cpu) { } -static inline void synchronise_count_slave(void) +static inline void synchronise_count_slave(int cpu) { } diff --git a/trunk/arch/mips/kernel/module.c b/trunk/arch/mips/kernel/module.c index a5066b1c3de3..4f8c3cba8c0c 100644 --- a/trunk/arch/mips/kernel/module.c +++ b/trunk/arch/mips/kernel/module.c @@ -39,8 +39,6 @@ struct mips_hi16 { Elf_Addr value; }; -static struct mips_hi16 *mips_hi16_list; - static LIST_HEAD(dbe_list); static DEFINE_SPINLOCK(dbe_lock); @@ -128,8 +126,8 @@ static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) n->addr = (Elf_Addr *)location; n->value = v; - n->next = mips_hi16_list; - mips_hi16_list = n; + n->next = me->arch.r_mips_hi16_list; + me->arch.r_mips_hi16_list = n; return 0; } @@ -142,18 +140,28 @@ static int apply_r_mips_hi16_rela(struct module *me, u32 *location, Elf_Addr v) return 0; } +static void free_relocation_chain(struct mips_hi16 *l) +{ + struct mips_hi16 *next; + + while (l) { + next = l->next; + kfree(l); + l = next; + } +} + static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) { unsigned long insnlo = *location; + struct mips_hi16 *l; Elf_Addr val, vallo; /* Sign extend the addend we extract from the lo insn. */ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; - if (mips_hi16_list != NULL) { - struct mips_hi16 *l; - - l = mips_hi16_list; + if (me->arch.r_mips_hi16_list != NULL) { + l = me->arch.r_mips_hi16_list; while (l != NULL) { struct mips_hi16 *next; unsigned long insn; @@ -188,7 +196,7 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) l = next; } - mips_hi16_list = NULL; + me->arch.r_mips_hi16_list = NULL; } /* @@ -201,6 +209,9 @@ static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) return 0; out_danger: + free_relocation_chain(l); + me->arch.r_mips_hi16_list = NULL; + pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); return -ENOEXEC; @@ -273,6 +284,7 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); + me->arch.r_mips_hi16_list = NULL; for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr @@ -296,6 +308,19 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, return res; } + /* + * Normally the hi16 list should be deallocated at this point. A + * malformed binary however could contain a series of R_MIPS_HI16 + * relocations not followed by a R_MIPS_LO16 relocation. In that + * case, free up the list and return an error. 
+ */ + if (me->arch.r_mips_hi16_list) { + free_relocation_chain(me->arch.r_mips_hi16_list); + me->arch.r_mips_hi16_list = NULL; + + return -ENOEXEC; + } + return 0; } diff --git a/trunk/arch/mips/kernel/smp-cmp.c b/trunk/arch/mips/kernel/smp-cmp.c index e7e03ecf5495..afc379ca3753 100644 --- a/trunk/arch/mips/kernel/smp-cmp.c +++ b/trunk/arch/mips/kernel/smp-cmp.c @@ -102,7 +102,7 @@ static void cmp_init_secondary(void) c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE; #endif #ifdef CONFIG_MIPS_MT_SMTC - c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC; + c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT; #endif } diff --git a/trunk/arch/mips/kernel/smp.c b/trunk/arch/mips/kernel/smp.c index 31637d8c8738..9005bf9fb859 100644 --- a/trunk/arch/mips/kernel/smp.c +++ b/trunk/arch/mips/kernel/smp.c @@ -130,7 +130,7 @@ asmlinkage __cpuinit void start_secondary(void) cpu_set(cpu, cpu_callin_map); - synchronise_count_slave(); + synchronise_count_slave(cpu); /* * irq will be enabled in ->smp_finish(), enabling it too early @@ -173,7 +173,6 @@ void smp_send_stop(void) void __init smp_cpus_done(unsigned int max_cpus) { mp_ops->cpus_done(); - synchronise_count_master(); } /* called from main before smp_init() */ @@ -206,6 +205,7 @@ int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle) while (!cpu_isset(cpu, cpu_callin_map)) udelay(100); + synchronise_count_master(cpu); return 0; } diff --git a/trunk/arch/mips/kernel/sync-r4k.c b/trunk/arch/mips/kernel/sync-r4k.c index 842d55e411fd..7f1eca3858de 100644 --- a/trunk/arch/mips/kernel/sync-r4k.c +++ b/trunk/arch/mips/kernel/sync-r4k.c @@ -28,12 +28,11 @@ static atomic_t __cpuinitdata count_reference = ATOMIC_INIT(0); #define COUNTON 100 #define NR_LOOPS 5 -void __cpuinit synchronise_count_master(void) +void __cpuinit synchronise_count_master(int cpu) { int i; unsigned long flags; unsigned int initcount; - int nslaves; #ifdef CONFIG_MIPS_MT_SMTC /* @@ -43,8 +42,7 @@ void __cpuinit synchronise_count_master(void) return; #endif - printk(KERN_INFO "Synchronize counters across %u CPUs: ", - num_online_cpus()); + printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); local_irq_save(flags); @@ -52,7 +50,7 @@ void __cpuinit synchronise_count_master(void) * Notify the slaves that it's time to start */ atomic_set(&count_reference, read_c0_count()); - atomic_set(&count_start_flag, 1); + atomic_set(&count_start_flag, cpu); smp_wmb(); /* Count will be initialised to current timer for all CPU's */ @@ -69,10 +67,9 @@ void __cpuinit synchronise_count_master(void) * two CPUs. 
*/ - nslaves = num_online_cpus()-1; for (i = 0; i < NR_LOOPS; i++) { - /* slaves loop on '!= ncpus' */ - while (atomic_read(&count_count_start) != nslaves) + /* slaves loop on '!= 2' */ + while (atomic_read(&count_count_start) != 1) mb(); atomic_set(&count_count_stop, 0); smp_wmb(); @@ -89,7 +86,7 @@ void __cpuinit synchronise_count_master(void) /* * Wait for all slaves to leave the synchronization point: */ - while (atomic_read(&count_count_stop) != nslaves) + while (atomic_read(&count_count_stop) != 1) mb(); atomic_set(&count_count_start, 0); smp_wmb(); @@ -97,6 +94,7 @@ void __cpuinit synchronise_count_master(void) } /* Arrange for an interrupt in a short while */ write_c0_compare(read_c0_count() + COUNTON); + atomic_set(&count_start_flag, 0); local_irq_restore(flags); @@ -108,11 +106,10 @@ void __cpuinit synchronise_count_master(void) printk("done.\n"); } -void __cpuinit synchronise_count_slave(void) +void __cpuinit synchronise_count_slave(int cpu) { int i; unsigned int initcount; - int ncpus; #ifdef CONFIG_MIPS_MT_SMTC /* @@ -127,16 +124,15 @@ void __cpuinit synchronise_count_slave(void) * so we first wait for the master to say everyone is ready */ - while (!atomic_read(&count_start_flag)) + while (atomic_read(&count_start_flag) != cpu) mb(); /* Count will be initialised to next expire for all CPU's */ initcount = atomic_read(&count_reference); - ncpus = num_online_cpus(); for (i = 0; i < NR_LOOPS; i++) { atomic_inc(&count_count_start); - while (atomic_read(&count_count_start) != ncpus) + while (atomic_read(&count_count_start) != 2) mb(); /* @@ -146,7 +142,7 @@ void __cpuinit synchronise_count_slave(void) write_c0_count(initcount); atomic_inc(&count_count_stop); - while (atomic_read(&count_count_stop) != ncpus) + while (atomic_read(&count_count_stop) != 2) mb(); } /* Arrange for an interrupt in a short while */ diff --git a/trunk/arch/mips/mm/gup.c b/trunk/arch/mips/mm/gup.c index 33aadbcf170b..dcfd573871c1 100644 --- a/trunk/arch/mips/mm/gup.c +++ b/trunk/arch/mips/mm/gup.c @@ -152,6 +152,8 @@ static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end, do { VM_BUG_ON(compound_head(page) != head); pages[*nr] = page; + if (PageTail(page)) + get_huge_page_tail(page); (*nr)++; page++; refs++; diff --git a/trunk/arch/mips/mti-malta/malta-int.c b/trunk/arch/mips/mti-malta/malta-int.c index 7b13a4caeea4..fea823f18479 100644 --- a/trunk/arch/mips/mti-malta/malta-int.c +++ b/trunk/arch/mips/mti-malta/malta-int.c @@ -273,16 +273,19 @@ asmlinkage void plat_irq_dispatch(void) unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; int irq; + if (unlikely(!pending)) { + spurious_interrupt(); + return; + } + irq = irq_ffs(pending); if (irq == MIPSCPU_INT_I8259A) malta_hw0_irqdispatch(); else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()])) malta_ipi_irqdispatch(); - else if (irq >= 0) - do_IRQ(MIPS_CPU_IRQ_BASE + irq); else - spurious_interrupt(); + do_IRQ(MIPS_CPU_IRQ_BASE + irq); } #ifdef CONFIG_MIPS_MT_SMP diff --git a/trunk/arch/mips/mti-malta/malta-pci.c b/trunk/arch/mips/mti-malta/malta-pci.c index 284dea54faf5..2147cb34e705 100644 --- a/trunk/arch/mips/mti-malta/malta-pci.c +++ b/trunk/arch/mips/mti-malta/malta-pci.c @@ -252,16 +252,3 @@ void __init mips_pcibios_init(void) register_pci_controller(controller); } - -/* Enable PCI 2.1 compatibility in PIIX4 */ -static void __devinit quirk_dlcsetup(struct pci_dev *dev) -{ - u8 odlc, ndlc; - (void) pci_read_config_byte(dev, 0x82, &odlc); - /* Enable passive releases and delayed transaction */ - ndlc = odlc 
| 7; - (void) pci_write_config_byte(dev, 0x82, ndlc); -} - -DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_0, - quirk_dlcsetup); diff --git a/trunk/arch/mips/mti-malta/malta-platform.c b/trunk/arch/mips/mti-malta/malta-platform.c index 4c35301720e7..80562b81f0f2 100644 --- a/trunk/arch/mips/mti-malta/malta-platform.c +++ b/trunk/arch/mips/mti-malta/malta-platform.c @@ -138,11 +138,6 @@ static int __init malta_add_devices(void) if (err) return err; - /* - * Set RTC to BCD mode to support current alarm code. - */ - CMOS_WRITE(CMOS_READ(RTC_CONTROL) & ~RTC_DM_BINARY, RTC_CONTROL); - return 0; } diff --git a/trunk/arch/mips/pci/pci-ar724x.c b/trunk/arch/mips/pci/pci-ar724x.c index 414a7459858d..86d77a666458 100644 --- a/trunk/arch/mips/pci/pci-ar724x.c +++ b/trunk/arch/mips/pci/pci-ar724x.c @@ -23,9 +23,12 @@ #define AR724X_PCI_MEM_BASE 0x10000000 #define AR724X_PCI_MEM_SIZE 0x08000000 +#define AR724X_PCI_REG_RESET 0x18 #define AR724X_PCI_REG_INT_STATUS 0x4c #define AR724X_PCI_REG_INT_MASK 0x50 +#define AR724X_PCI_RESET_LINK_UP BIT(0) + #define AR724X_PCI_INT_DEV0 BIT(14) #define AR724X_PCI_IRQ_COUNT 1 @@ -38,6 +41,15 @@ static void __iomem *ar724x_pci_ctrl_base; static u32 ar724x_pci_bar0_value; static bool ar724x_pci_bar0_is_cached; +static bool ar724x_pci_link_up; + +static inline bool ar724x_pci_check_link(void) +{ + u32 reset; + + reset = __raw_readl(ar724x_pci_ctrl_base + AR724X_PCI_REG_RESET); + return reset & AR724X_PCI_RESET_LINK_UP; +} static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, uint32_t *value) @@ -46,6 +58,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where, void __iomem *base; u32 data; + if (!ar724x_pci_link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn) return PCIBIOS_DEVICE_NOT_FOUND; @@ -96,6 +111,9 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where, u32 data; int s; + if (!ar724x_pci_link_up) + return PCIBIOS_DEVICE_NOT_FOUND; + if (devfn) return PCIBIOS_DEVICE_NOT_FOUND; @@ -280,6 +298,10 @@ int __init ar724x_pcibios_init(int irq) if (ar724x_pci_ctrl_base == NULL) goto err_unmap_devcfg; + ar724x_pci_link_up = ar724x_pci_check_link(); + if (!ar724x_pci_link_up) + pr_warn("ar724x: PCIe link is down\n"); + ar724x_pci_irq_init(irq); register_pci_controller(&ar724x_pci_controller); diff --git a/trunk/arch/mn10300/kernel/process.c b/trunk/arch/mn10300/kernel/process.c index 7dab0cd36466..e9cceba193b6 100644 --- a/trunk/arch/mn10300/kernel/process.c +++ b/trunk/arch/mn10300/kernel/process.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -107,6 +108,7 @@ void cpu_idle(void) { /* endless idle loop with no priority at all */ for (;;) { + rcu_idle_enter(); while (!need_resched()) { void (*idle)(void); @@ -121,6 +123,7 @@ void cpu_idle(void) } idle(); } + rcu_idle_exit(); schedule_preempt_disabled(); } diff --git a/trunk/arch/parisc/include/asm/atomic.h b/trunk/arch/parisc/include/asm/atomic.h index 6c6defc24619..af9cf30ed474 100644 --- a/trunk/arch/parisc/include/asm/atomic.h +++ b/trunk/arch/parisc/include/asm/atomic.h @@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u) #define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0) -#define ATOMIC_INIT(i) ((atomic_t) { (i) }) +#define ATOMIC_INIT(i) { (i) } #define smp_mb__before_atomic_dec() smp_mb() #define smp_mb__after_atomic_dec() smp_mb() @@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int 
a, int u) #ifdef CONFIG_64BIT -#define ATOMIC64_INIT(i) ((atomic64_t) { (i) }) +#define ATOMIC64_INIT(i) { (i) } static __inline__ s64 __atomic64_add_return(s64 i, atomic64_t *v) diff --git a/trunk/arch/parisc/kernel/process.c b/trunk/arch/parisc/kernel/process.c index d4b94b395c16..8c6b6b6561f0 100644 --- a/trunk/arch/parisc/kernel/process.c +++ b/trunk/arch/parisc/kernel/process.c @@ -48,6 +48,7 @@ #include #include #include +#include #include #include @@ -69,8 +70,10 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) barrier(); + rcu_idle_exit(); schedule_preempt_disabled(); check_pgt_cache(); } @@ -309,7 +312,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp, cregs->ksp = (unsigned long)stack + (pregs->gr[21] & (THREAD_SIZE - 1)); cregs->gr[30] = usp; - if (p->personality == PER_HPUX) { + if (personality(p->personality) == PER_HPUX) { #ifdef CONFIG_HPUX cregs->kpc = (unsigned long) &hpux_child_return; #else diff --git a/trunk/arch/parisc/kernel/sys_parisc.c b/trunk/arch/parisc/kernel/sys_parisc.c index c9b932260f47..7426e40699bd 100644 --- a/trunk/arch/parisc/kernel/sys_parisc.c +++ b/trunk/arch/parisc/kernel/sys_parisc.c @@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality) long err; if (personality(current->personality) == PER_LINUX32 - && personality == PER_LINUX) - personality = PER_LINUX32; + && personality(personality) == PER_LINUX) + personality = (personality & ~PER_MASK) | PER_LINUX32; err = sys_personality(personality); - if (err == PER_LINUX32) - err = PER_LINUX; + if (personality(err) == PER_LINUX32) + err = (err & ~PER_MASK) | PER_LINUX; return err; } diff --git a/trunk/arch/powerpc/boot/.gitignore b/trunk/arch/powerpc/boot/.gitignore index 1c1aadc8c48f..c32ae5ce9fff 100644 --- a/trunk/arch/powerpc/boot/.gitignore +++ b/trunk/arch/powerpc/boot/.gitignore @@ -1,10 +1,6 @@ addnote empty.c hack-coff -infblock.c -infblock.h -infcodes.c -infcodes.h inffast.c inffast.h inffixed.h diff --git a/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi index 8d35d2c1f694..4f9c9f682ecf 100644 --- a/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi +++ b/trunk/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi @@ -345,6 +345,13 @@ /include/ "qoriq-duart-1.dtsi" /include/ "qoriq-gpio-0.dtsi" /include/ "qoriq-usb2-mph-0.dtsi" + usb@210000 { + compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph"; + port0; + }; /include/ "qoriq-usb2-dr-0.dtsi" + usb@211000 { + compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr"; + }; /include/ "qoriq-sec4.0-0.dtsi" }; diff --git a/trunk/arch/powerpc/configs/85xx/p1023rds_defconfig b/trunk/arch/powerpc/configs/85xx/p1023rds_defconfig index f4337bacd0e7..26e541c4662b 100644 --- a/trunk/arch/powerpc/configs/85xx/p1023rds_defconfig +++ b/trunk/arch/powerpc/configs/85xx/p1023rds_defconfig @@ -6,28 +6,27 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_KALLSYMS_ALL=y -CONFIG_KALLSYMS_EXTRA_PASS=y CONFIG_EMBEDDED=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y CONFIG_P1023_RDS=y 
CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_CPM2=y -CONFIG_GPIO_MPC8XXX=y CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y @@ -63,11 +62,11 @@ CONFIG_INET_ESP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y @@ -80,15 +79,14 @@ CONFIG_SATA_FSL=y CONFIG_SATA_SIL24=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y +CONFIG_FS_ENET=y +CONFIG_FSL_PQ_MDIO=y +CONFIG_E1000E=y CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_CICADA_PHY=y CONFIG_VITESSE_PHY=y CONFIG_FIXED_PHY=y -CONFIG_NET_ETHERNET=y -CONFIG_FS_ENET=y -CONFIG_E1000E=y -CONFIG_FSL_PQ_MDIO=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -98,16 +96,15 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y CONFIG_SERIAL_QE=m -CONFIG_HW_RANDOM=y CONFIG_NVRAM=y CONFIG_I2C=y CONFIG_I2C_CPM=m CONFIG_I2C_MPC=y +CONFIG_GPIO_MPC8XXX=y # CONFIG_HWMON is not set CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_SOUND=y @@ -123,7 +120,6 @@ CONFIG_DMADEVICES=y CONFIG_FSL_DMA=y # CONFIG_NET_DMA is not set CONFIG_STAGING=y -# CONFIG_STAGING_EXCLUDE_BUILD is not set CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set @@ -150,22 +146,15 @@ CONFIG_QNX4FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_CRC_T10DIF=y CONFIG_FRAME_WARN=8092 CONFIG_DEBUG_FS=y -CONFIG_DEBUG_KERNEL=y CONFIG_DETECT_HUNG_TASK=y # CONFIG_DEBUG_BUGVERBOSE is not set CONFIG_DEBUG_INFO=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y diff --git a/trunk/arch/powerpc/configs/corenet32_smp_defconfig b/trunk/arch/powerpc/configs/corenet32_smp_defconfig index cbb98c1234fd..8b3d57c1ebe8 100644 --- a/trunk/arch/powerpc/configs/corenet32_smp_defconfig +++ b/trunk/arch/powerpc/configs/corenet32_smp_defconfig @@ -6,8 +6,8 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y -CONFIG_RCU_TRACE=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -21,23 +21,22 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y CONFIG_P2041_RDB=y CONFIG_P3041_DS=y CONFIG_P4080_DS=y CONFIG_P5020_DS=y CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set CONFIG_BINFMT_MISC=m CONFIG_KEXEC=y CONFIG_IRQ_ALL_CPUS=y CONFIG_FORCE_MAX_ZONEORDER=13 -CONFIG_FSL_LBC=y CONFIG_PCI=y CONFIG_PCIEPORTBUS=y -CONFIG_PCI_MSI=y # CONFIG_PCIEASPM is not set +CONFIG_PCI_MSI=y CONFIG_RAPIDIO=y CONFIG_FSL_RIO=y CONFIG_NET=y @@ -70,6 +69,7 @@ CONFIG_INET_IPCOMP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y @@ -77,17 +77,14 @@ CONFIG_MTD_BLOCK=y 
CONFIG_MTD_CFI=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_PHYSMAP_OF=y +CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_FSL_IFC=y CONFIG_MTD_NAND_FSL_ELBC=y -CONFIG_MTD_M25P80=y +CONFIG_MTD_NAND_FSL_IFC=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -115,11 +112,9 @@ CONFIG_SERIO_LIBPS2=y CONFIG_PPC_EPAPR_HV_BYTECHAN=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y -CONFIG_HW_RANDOM=y CONFIG_NVRAM=y CONFIG_I2C=y CONFIG_I2C_CHARDEV=y @@ -132,7 +127,6 @@ CONFIG_SPI_FSL_ESPI=y CONFIG_VIDEO_OUTPUT_CONTROL=y CONFIG_USB_HID=m CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_FSL=y @@ -142,8 +136,6 @@ CONFIG_USB_OHCI_HCD_PPC_OF_LE=y CONFIG_USB_STORAGE=y CONFIG_MMC=y CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_OF=y -CONFIG_MMC_SDHCI_OF_ESDHC=y CONFIG_EDAC=y CONFIG_EDAC_MM_EDAC=y CONFIG_EDAC_MPC85XX=y @@ -170,19 +162,16 @@ CONFIG_HUGETLBFS=y CONFIG_JFFS2_FS=y CONFIG_CRAMFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=m -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_UTF8=m CONFIG_MAGIC_SYSRQ=y CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_RCU_TRACE=y CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=y diff --git a/trunk/arch/powerpc/configs/corenet64_smp_defconfig b/trunk/arch/powerpc/configs/corenet64_smp_defconfig index dd89de8b0b7f..0516e22ca3de 100644 --- a/trunk/arch/powerpc/configs/corenet64_smp_defconfig +++ b/trunk/arch/powerpc/configs/corenet64_smp_defconfig @@ -56,6 +56,7 @@ CONFIG_INET_ESP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y diff --git a/trunk/arch/powerpc/configs/g5_defconfig b/trunk/arch/powerpc/configs/g5_defconfig index 15130066e5e2..07b7f2af2dca 100644 --- a/trunk/arch/powerpc/configs/g5_defconfig +++ b/trunk/arch/powerpc/configs/g5_defconfig @@ -1,8 +1,10 @@ +CONFIG_PPC64=y +CONFIG_ALTIVEC=y +CONFIG_SMP=y +CONFIG_NR_CPUS=4 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y @@ -13,15 +15,16 @@ CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y CONFIG_MODVERSIONS=y CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y -CONFIG_SMP=y -CONFIG_NR_CPUS=4 -CONFIG_KEXEC=y -# CONFIG_RELOCATABLE is not set +# CONFIG_PPC_PSERIES is not set CONFIG_CPU_FREQ=y CONFIG_CPU_FREQ_GOV_POWERSAVE=y CONFIG_CPU_FREQ_GOV_USERSPACE=y +CONFIG_CPU_FREQ_PMAC64=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y +CONFIG_KEXEC=y +CONFIG_IRQ_ALL_CPUS=y +# CONFIG_MIGRATION is not set CONFIG_PCI_MSI=y CONFIG_NET=y CONFIG_PACKET=y @@ -49,6 +52,7 @@ CONFIG_NF_CT_NETLINK=m CONFIG_NF_CONNTRACK_IPV4=m CONFIG_IP_NF_QUEUE=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=m CONFIG_BLK_DEV_RAM=y @@ -56,6 +60,8 @@ CONFIG_BLK_DEV_RAM_SIZE=65536 CONFIG_CDROM_PKTCDVD=m CONFIG_IDE=y CONFIG_BLK_DEV_IDECD=y +CONFIG_BLK_DEV_IDE_PMAC=y +CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y CONFIG_BLK_DEV_SR=y @@ -79,24 +85,33 @@ 
CONFIG_DM_CRYPT=m CONFIG_DM_SNAPSHOT=m CONFIG_DM_MIRROR=m CONFIG_DM_ZERO=m -CONFIG_MACINTOSH_DRIVERS=y +CONFIG_IEEE1394=y +CONFIG_IEEE1394_OHCI1394=y +CONFIG_IEEE1394_SBP2=m +CONFIG_IEEE1394_ETH1394=m +CONFIG_IEEE1394_RAWIO=y +CONFIG_IEEE1394_VIDEO1394=m +CONFIG_IEEE1394_DV1394=m +CONFIG_ADB_PMU=y +CONFIG_PMAC_SMU=y CONFIG_MAC_EMUMOUSEBTN=y +CONFIG_THERM_PM72=y +CONFIG_WINDFARM=y +CONFIG_WINDFARM_PM81=y +CONFIG_WINDFARM_PM91=y +CONFIG_WINDFARM_PM112=y +CONFIG_WINDFARM_PM121=y CONFIG_NETDEVICES=y -CONFIG_BONDING=m CONFIG_DUMMY=m -CONFIG_MII=y +CONFIG_BONDING=m CONFIG_TUN=m +CONFIG_NET_ETHERNET=y +CONFIG_MII=y +CONFIG_SUNGEM=y CONFIG_ACENIC=m CONFIG_ACENIC_OMIT_TIGON_I=y -CONFIG_TIGON3=y CONFIG_E1000=y -CONFIG_SUNGEM=y -CONFIG_PPP=m -CONFIG_PPP_BSDCOMP=m -CONFIG_PPP_DEFLATE=m -CONFIG_PPPOE=m -CONFIG_PPP_ASYNC=m -CONFIG_PPP_SYNC_TTY=m +CONFIG_TIGON3=y CONFIG_USB_CATC=m CONFIG_USB_KAWETH=m CONFIG_USB_PEGASUS=m @@ -106,24 +121,36 @@ CONFIG_USB_USBNET=m # CONFIG_USB_NET_NET1080 is not set # CONFIG_USB_NET_CDC_SUBSET is not set # CONFIG_USB_NET_ZAURUS is not set +CONFIG_PPP=m +CONFIG_PPP_ASYNC=m +CONFIG_PPP_SYNC_TTY=m +CONFIG_PPP_DEFLATE=m +CONFIG_PPP_BSDCOMP=m +CONFIG_PPPOE=m # CONFIG_INPUT_MOUSEDEV_PSAUX is not set CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=y +# CONFIG_KEYBOARD_ATKBD is not set # CONFIG_MOUSE_PS2 is not set +# CONFIG_SERIO_I8042 is not set # CONFIG_SERIO_SERPORT is not set -CONFIG_VT_HW_CONSOLE_BINDING=y # CONFIG_HW_RANDOM is not set CONFIG_GEN_RTC=y CONFIG_RAW_DRIVER=y CONFIG_I2C_CHARDEV=y # CONFIG_HWMON is not set -CONFIG_AGP=y -CONFIG_DRM=y -CONFIG_DRM_NOUVEAU=y +CONFIG_AGP=m +CONFIG_AGP_UNINORTH=m CONFIG_VIDEO_OUTPUT_CONTROL=m +CONFIG_FB=y CONFIG_FIRMWARE_EDID=y CONFIG_FB_TILEBLITTING=y +CONFIG_FB_OF=y +CONFIG_FB_NVIDIA=y +CONFIG_FB_NVIDIA_I2C=y CONFIG_FB_RADEON=y +# CONFIG_VGA_CONSOLE is not set +CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_LOGO=y CONFIG_SOUND=m CONFIG_SND=m @@ -131,7 +158,15 @@ CONFIG_SND_SEQUENCER=m CONFIG_SND_MIXER_OSS=m CONFIG_SND_PCM_OSS=m CONFIG_SND_SEQUENCER_OSS=y +CONFIG_SND_POWERMAC=m +CONFIG_SND_AOA=m +CONFIG_SND_AOA_FABRIC_LAYOUT=m +CONFIG_SND_AOA_ONYX=m +CONFIG_SND_AOA_TAS=m +CONFIG_SND_AOA_TOONIE=m CONFIG_SND_USB_AUDIO=m +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y CONFIG_HID_GYRATION=y CONFIG_LOGITECH_FF=y CONFIG_HID_PANTHERLORD=y @@ -139,12 +174,13 @@ CONFIG_HID_PETALYNX=y CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y CONFIG_USB=y +CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_HCD_PPC_OF is not set CONFIG_USB_OHCI_HCD=y +CONFIG_USB_OHCI_HCD_PPC_OF_BE=y CONFIG_USB_ACM=m CONFIG_USB_PRINTER=y CONFIG_USB_STORAGE=y @@ -208,6 +244,8 @@ CONFIG_REISERFS_FS_POSIX_ACL=y CONFIG_REISERFS_FS_SECURITY=y CONFIG_XFS_FS=m CONFIG_XFS_POSIX_ACL=y +CONFIG_INOTIFY=y +CONFIG_AUTOFS_FS=m CONFIG_ISO9660_FS=y CONFIG_JOLIET=y CONFIG_ZISOFS=y @@ -221,12 +259,14 @@ CONFIG_HFS_FS=m CONFIG_HFSPLUS_FS=m CONFIG_CRAMFS=y CONFIG_NFS_FS=y +CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_NFSD=y CONFIG_NFSD_V3_ACL=y CONFIG_NFSD_V4=y CONFIG_CIFS=m +CONFIG_PARTITION_ADVANCED=y CONFIG_NLS_CODEPAGE_437=y CONFIG_NLS_CODEPAGE_1250=y CONFIG_NLS_CODEPAGE_1251=y @@ -234,23 +274,29 @@ CONFIG_NLS_ASCII=y CONFIG_NLS_ISO8859_1=y CONFIG_NLS_ISO8859_15=y CONFIG_NLS_UTF8=y +CONFIG_CRC_T10DIF=y +CONFIG_LIBCRC32C=m CONFIG_MAGIC_SYSRQ=y -# CONFIG_UNUSED_SYMBOLS is not set CONFIG_DEBUG_FS=y CONFIG_DEBUG_KERNEL=y CONFIG_DEBUG_MUTEXES=y +# CONFIG_RCU_CPU_STALL_DETECTOR is not set CONFIG_LATENCYTOP=y 
-CONFIG_STRICT_DEVMEM=y +CONFIG_SYSCTL_SYSCALL_CHECK=y +CONFIG_BOOTX_TEXT=y CONFIG_CRYPTO_NULL=m CONFIG_CRYPTO_TEST=m +CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MD4=m CONFIG_CRYPTO_MICHAEL_MIC=m CONFIG_CRYPTO_SHA256=m CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_WP512=m CONFIG_CRYPTO_AES=m CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_ARC4=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAST5=m CONFIG_CRYPTO_CAST6=m @@ -260,6 +306,3 @@ CONFIG_CRYPTO_TEA=m CONFIG_CRYPTO_TWOFISH=m # CONFIG_CRYPTO_ANSI_CPRNG is not set # CONFIG_CRYPTO_HW is not set -# CONFIG_VIRTUALIZATION is not set -CONFIG_CRC_T10DIF=y -CONFIG_LIBCRC32C=m diff --git a/trunk/arch/powerpc/configs/mpc83xx_defconfig b/trunk/arch/powerpc/configs/mpc83xx_defconfig index 5aac9a8bc53b..9352e4430c3b 100644 --- a/trunk/arch/powerpc/configs/mpc83xx_defconfig +++ b/trunk/arch/powerpc/configs/mpc83xx_defconfig @@ -2,12 +2,12 @@ CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_BLK_DEV_INITRD=y -# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_EXPERT=y CONFIG_SLAB=y CONFIG_MODULES=y CONFIG_MODULE_UNLOAD=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y # CONFIG_PPC_CHRP is not set # CONFIG_PPC_PMAC is not set CONFIG_PPC_83xx=y @@ -25,7 +25,6 @@ CONFIG_ASP834x=y CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_MATH_EMULATION=y -CONFIG_SPARSE_IRQ=y CONFIG_PCI=y CONFIG_NET=y CONFIG_PACKET=y @@ -42,10 +41,9 @@ CONFIG_INET_ESP=y # CONFIG_INET_LRO is not set # CONFIG_IPV6 is not set CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_OF_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y @@ -64,15 +62,14 @@ CONFIG_ATA=y CONFIG_SATA_FSL=y CONFIG_SATA_SIL=y CONFIG_NETDEVICES=y +CONFIG_MII=y +CONFIG_UCC_GETH=y +CONFIG_GIANFAR=y CONFIG_MARVELL_PHY=y CONFIG_DAVICOM_PHY=y CONFIG_VITESSE_PHY=y CONFIG_ICPLUS_PHY=y CONFIG_FIXED_PHY=y -CONFIG_NET_ETHERNET=y -CONFIG_MII=y -CONFIG_GIANFAR=y -CONFIG_UCC_GETH=y CONFIG_INPUT_FF_MEMLESS=m # CONFIG_INPUT_MOUSEDEV is not set # CONFIG_INPUT_KEYBOARD is not set @@ -112,17 +109,12 @@ CONFIG_RTC_DRV_DS1374=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_INOTIFY=y CONFIG_PROC_KCORE=y CONFIG_TMPFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y -CONFIG_PARTITION_ADVANCED=y CONFIG_CRC_T10DIF=y -# CONFIG_RCU_CPU_STALL_DETECTOR is not set -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_CRYPTO_ECB=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA256=y diff --git a/trunk/arch/powerpc/configs/mpc85xx_defconfig b/trunk/arch/powerpc/configs/mpc85xx_defconfig index 03ee911c4577..8b5bda27d248 100644 --- a/trunk/arch/powerpc/configs/mpc85xx_defconfig +++ b/trunk/arch/powerpc/configs/mpc85xx_defconfig @@ -5,7 +5,9 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -17,6 +19,8 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y CONFIG_MPC8540_ADS=y CONFIG_MPC8560_ADS=y CONFIG_MPC85xx_CDS=y @@ -40,8 +44,6 @@ CONFIG_SBC8548=y CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y CONFIG_FORCE_MAX_ZONEORDER=12 @@ -74,36 +76,25 @@ CONFIG_INET_ESP=y CONFIG_IPV6=y 
CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y CONFIG_FTL=y -CONFIG_MTD_GEN_PROBE=y -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_UTIL=y CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_OF_PARTS=y +CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_FSL_ELBC=y CONFIG_MTD_NAND_FSL_IFC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_M25P80=y CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y @@ -115,6 +106,7 @@ CONFIG_ATA=y CONFIG_SATA_AHCI=y CONFIG_SATA_FSL=y CONFIG_PATA_ALI=y +CONFIG_PATA_VIA=y CONFIG_NETDEVICES=y CONFIG_DUMMY=y CONFIG_FS_ENET=y @@ -134,7 +126,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y @@ -183,7 +174,6 @@ CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_FSL=y @@ -229,18 +219,13 @@ CONFIG_QNX4FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_CRC_T10DIF=y CONFIG_DEBUG_FS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y diff --git a/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig b/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig index fdfa84dc908f..b0974e7e98ae 100644 --- a/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/trunk/arch/powerpc/configs/mpc85xx_smp_defconfig @@ -7,7 +7,9 @@ CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y CONFIG_BSD_PROCESS_ACCT=y CONFIG_AUDIT=y -CONFIG_SPARSE_IRQ=y +CONFIG_IRQ_DOMAIN_DEBUG=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 @@ -19,6 +21,8 @@ CONFIG_MODULE_UNLOAD=y CONFIG_MODULE_FORCE_UNLOAD=y CONFIG_MODVERSIONS=y # CONFIG_BLK_DEV_BSG is not set +CONFIG_PARTITION_ADVANCED=y +CONFIG_MAC_PARTITION=y CONFIG_MPC8540_ADS=y CONFIG_MPC8560_ADS=y CONFIG_MPC85xx_CDS=y @@ -42,8 +46,6 @@ CONFIG_SBC8548=y CONFIG_QUICC_ENGINE=y CONFIG_QE_GPIO=y CONFIG_HIGHMEM=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_BINFMT_MISC=m CONFIG_MATH_EMULATION=y CONFIG_IRQ_ALL_CPUS=y @@ -77,36 +79,25 @@ CONFIG_INET_ESP=y CONFIG_IPV6=y CONFIG_IP_SCTP=m CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_DEVTMPFS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y CONFIG_FTL=y -CONFIG_MTD_GEN_PROBE=y -CONFIG_MTD_MAP_BANK_WIDTH_1=y -CONFIG_MTD_MAP_BANK_WIDTH_2=y -CONFIG_MTD_MAP_BANK_WIDTH_4=y -CONFIG_MTD_CFI_I1=y -CONFIG_MTD_CFI_I2=y +CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_UTIL=y CONFIG_MTD_PHYSMAP_OF=y -CONFIG_MTD_PARTITIONS=y -CONFIG_MTD_OF_PARTS=y +CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_FSL_ELBC=y CONFIG_MTD_NAND_FSL_IFC=y -CONFIG_MTD_NAND_IDS=y -CONFIG_MTD_NAND_ECC=y -CONFIG_MTD_M25P80=y 
CONFIG_PROC_DEVICETREE=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_SIZE=131072 -CONFIG_MISC_DEVICES=y CONFIG_EEPROM_LEGACY=y CONFIG_BLK_DEV_SD=y CONFIG_CHR_DEV_ST=y @@ -137,7 +128,6 @@ CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_NR_UARTS=2 CONFIG_SERIAL_8250_RUNTIME_UARTS=2 -CONFIG_SERIAL_8250_EXTENDED=y CONFIG_SERIAL_8250_MANY_PORTS=y CONFIG_SERIAL_8250_DETECT_IRQ=y CONFIG_SERIAL_8250_RSA=y @@ -186,7 +176,6 @@ CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_USB=y -CONFIG_USB_DEVICEFS=y CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y CONFIG_USB_EHCI_FSL=y @@ -232,18 +221,13 @@ CONFIG_QNX4FS_FS=m CONFIG_SYSV_FS=m CONFIG_UFS_FS=m CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y CONFIG_NFSD=y -CONFIG_PARTITION_ADVANCED=y -CONFIG_MAC_PARTITION=y CONFIG_CRC_T10DIF=y CONFIG_DEBUG_FS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_DEBUG_INFO=y -CONFIG_SYSCTL_SYSCALL_CHECK=y -CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y diff --git a/trunk/arch/powerpc/include/asm/cputable.h b/trunk/arch/powerpc/include/asm/cputable.h index 50d82c8a037f..b3c083de17ad 100644 --- a/trunk/arch/powerpc/include/asm/cputable.h +++ b/trunk/arch/powerpc/include/asm/cputable.h @@ -553,9 +553,7 @@ static inline int cpu_has_feature(unsigned long feature) & feature); } -#ifdef CONFIG_HAVE_HW_BREAKPOINT #define HBP_NUM 1 -#endif /* CONFIG_HAVE_HW_BREAKPOINT */ #endif /* !__ASSEMBLY__ */ diff --git a/trunk/arch/powerpc/include/asm/kvm_host.h b/trunk/arch/powerpc/include/asm/kvm_host.h index 50ea12fd7bf5..a8bf5c673a3c 100644 --- a/trunk/arch/powerpc/include/asm/kvm_host.h +++ b/trunk/arch/powerpc/include/asm/kvm_host.h @@ -33,6 +33,7 @@ #include #include #include +#include #define KVM_MAX_VCPUS NR_CPUS #define KVM_MAX_VCORES NR_CPUS diff --git a/trunk/arch/powerpc/include/asm/kvm_ppc.h b/trunk/arch/powerpc/include/asm/kvm_ppc.h index 0124937a23b9..e006f0bdea95 100644 --- a/trunk/arch/powerpc/include/asm/kvm_ppc.h +++ b/trunk/arch/powerpc/include/asm/kvm_ppc.h @@ -219,4 +219,16 @@ void kvmppc_claim_lpid(long lpid); void kvmppc_free_lpid(long lpid); void kvmppc_init_lpid(unsigned long nr_lpids); +static inline void kvmppc_mmu_flush_icache(pfn_t pfn) +{ + /* Clear i-cache for new pages */ + struct page *page; + page = pfn_to_page(pfn); + if (!test_bit(PG_arch_1, &page->flags)) { + flush_dcache_icache_page(page); + set_bit(PG_arch_1, &page->flags); + } +} + + #endif /* __POWERPC_KVM_PPC_H__ */ diff --git a/trunk/arch/powerpc/include/asm/mpic_msgr.h b/trunk/arch/powerpc/include/asm/mpic_msgr.h index 326d33ca55cd..d4f471fb1031 100644 --- a/trunk/arch/powerpc/include/asm/mpic_msgr.h +++ b/trunk/arch/powerpc/include/asm/mpic_msgr.h @@ -14,6 +14,7 @@ #include #include #include +#include struct mpic_msgr { u32 __iomem *base; diff --git a/trunk/arch/powerpc/include/asm/processor.h b/trunk/arch/powerpc/include/asm/processor.h index 53b6dfa83344..54b73a28c205 100644 --- a/trunk/arch/powerpc/include/asm/processor.h +++ b/trunk/arch/powerpc/include/asm/processor.h @@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable; enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; extern int powersave_nap; /* set if nap mode can be used in idle loop */ +extern void power7_nap(void); #ifdef CONFIG_PSERIES_IDLE extern void update_smt_snooze_delay(int snooze); diff --git a/trunk/arch/powerpc/include/asm/time.h b/trunk/arch/powerpc/include/asm/time.h index 3b4b4a8da922..c1f267694acb 100644 --- 
a/trunk/arch/powerpc/include/asm/time.h +++ b/trunk/arch/powerpc/include/asm/time.h @@ -197,12 +197,6 @@ struct cpu_usage { DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); -#if defined(CONFIG_VIRT_CPU_ACCOUNTING) -#define account_process_vtime(tsk) account_process_tick(tsk, 0) -#else -#define account_process_vtime(tsk) do { } while (0) -#endif - extern void secondary_cpu_time_init(void); DECLARE_PER_CPU(u64, decrementers_next_tb); diff --git a/trunk/arch/powerpc/kernel/asm-offsets.c b/trunk/arch/powerpc/kernel/asm-offsets.c index 85b05c463fae..e8995727b1c1 100644 --- a/trunk/arch/powerpc/kernel/asm-offsets.c +++ b/trunk/arch/powerpc/kernel/asm-offsets.c @@ -76,6 +76,7 @@ int main(void) DEFINE(SIGSEGV, SIGSEGV); DEFINE(NMI_MASK, NMI_MASK); DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); + DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit)); #else DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); #endif /* CONFIG_PPC64 */ diff --git a/trunk/arch/powerpc/kernel/dbell.c b/trunk/arch/powerpc/kernel/dbell.c index 5b25c8060fd6..a892680668d8 100644 --- a/trunk/arch/powerpc/kernel/dbell.c +++ b/trunk/arch/powerpc/kernel/dbell.c @@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void) void doorbell_cause_ipi(int cpu, unsigned long data) { + /* Order previous accesses vs. msgsnd, which is treated as a store */ + mb(); ppc_msgsnd(PPC_DBELL, 0, data); } diff --git a/trunk/arch/powerpc/kernel/dma-iommu.c b/trunk/arch/powerpc/kernel/dma-iommu.c index 2d7bb8ced136..e4897523de41 100644 --- a/trunk/arch/powerpc/kernel/dma-iommu.c +++ b/trunk/arch/powerpc/kernel/dma-iommu.c @@ -83,11 +83,10 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask) return 0; } - if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { - dev_info(dev, "Warning: IOMMU window too big for device mask\n"); - dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", - mask, (tbl->it_offset + tbl->it_size) << - IOMMU_PAGE_SHIFT); + if (tbl->it_offset > (mask >> IOMMU_PAGE_SHIFT)) { + dev_info(dev, "Warning: IOMMU offset too big for device mask\n"); + dev_info(dev, "mask: 0x%08llx, table offset: 0x%08lx\n", + mask, tbl->it_offset << IOMMU_PAGE_SHIFT); return 0; } else return 1; diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S index 4b01a25e29ef..b40e0b4815b3 100644 --- a/trunk/arch/powerpc/kernel/entry_64.S +++ b/trunk/arch/powerpc/kernel/entry_64.S @@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork) li r3,0 b syscall_exit + .section ".toc","aw" +DSCR_DEFAULT: + .tc dscr_default[TC],dscr_default + + .section ".text" + /* * This routine switches between two different tasks. The process * state of one is saved on its kernel stack. 
Then the state @@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) mr r1,r8 /* start using new stack pointer */ std r7,PACAKSAVE(r13) - ld r6,_CCR(r1) - mtcrf 0xFF,r6 - #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION ld r0,THREAD_VRSAVE(r4) @@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ #ifdef CONFIG_PPC64 BEGIN_FTR_SECTION + lwz r6,THREAD_DSCR_INHERIT(r4) + ld r7,DSCR_DEFAULT@toc(2) ld r0,THREAD_DSCR(r4) - cmpd r0,r25 - beq 1f + cmpwi r6,0 + bne 1f + ld r0,0(r7) +1: cmpd r0,r25 + beq 2f mtspr SPRN_DSCR,r0 -1: +2: END_FTR_SECTION_IFSET(CPU_FTR_DSCR) #endif + ld r6,_CCR(r1) + mtcrf 0xFF,r6 + /* r3-r13 are destroyed -- Cort */ REST_8GPRS(14, r1) REST_10GPRS(22, r1) diff --git a/trunk/arch/powerpc/kernel/exceptions-64s.S b/trunk/arch/powerpc/kernel/exceptions-64s.S index e894515e77bb..39aa97d3ff88 100644 --- a/trunk/arch/powerpc/kernel/exceptions-64s.S +++ b/trunk/arch/powerpc/kernel/exceptions-64s.S @@ -186,7 +186,7 @@ hardware_interrupt_hv: KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800) MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) - MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer) + STD_EXCEPTION_HV(0x980, 0x982, hdecrementer) STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00) @@ -486,6 +486,7 @@ machine_check_common: STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) + STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) diff --git a/trunk/arch/powerpc/kernel/hw_breakpoint.c b/trunk/arch/powerpc/kernel/hw_breakpoint.c index f3a82dde61db..956a4c496de9 100644 --- a/trunk/arch/powerpc/kernel/hw_breakpoint.c +++ b/trunk/arch/powerpc/kernel/hw_breakpoint.c @@ -253,7 +253,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args) /* Do not emulate user-space instructions, instead single-step them */ if (user_mode(regs)) { - bp->ctx->task->thread.last_hit_ubp = bp; + current->thread.last_hit_ubp = bp; regs->msr |= MSR_SE; goto out; } diff --git a/trunk/arch/powerpc/kernel/idle_power7.S b/trunk/arch/powerpc/kernel/idle_power7.S index 7140d838339e..e11863f4e595 100644 --- a/trunk/arch/powerpc/kernel/idle_power7.S +++ b/trunk/arch/powerpc/kernel/idle_power7.S @@ -28,7 +28,9 @@ _GLOBAL(power7_idle) lwz r4,ADDROFF(powersave_nap)(r3) cmpwi 0,r4,0 beqlr + /* fall through */ +_GLOBAL(power7_nap) /* NAP is a state loss, we create a regs frame on the * stack, fill it up with the state we care about and * stick a pointer to it in PACAR1. We really only diff --git a/trunk/arch/powerpc/kernel/kgdb.c b/trunk/arch/powerpc/kernel/kgdb.c index 782bd0a3c2f0..c470a40b29f5 100644 --- a/trunk/arch/powerpc/kernel/kgdb.c +++ b/trunk/arch/powerpc/kernel/kgdb.c @@ -25,6 +25,7 @@ #include #include #include +#include /* * This table contains the mapping between PowerPC hardware trap types, and @@ -101,6 +102,21 @@ static int computeSignal(unsigned int tt) return SIGHUP; /* default for things we don't know about */ } +/** + * + * kgdb_skipexception - Bail out of KGDB when we've been triggered. + * @exception: Exception vector number + * @regs: Current &struct pt_regs. + * + * On some architectures we need to skip a breakpoint exception when + * it occurs after a breakpoint has been removed. 
+ * + */ +int kgdb_skipexception(int exception, struct pt_regs *regs) +{ + return kgdb_isremovedbreak(regs->nip); +} + static int kgdb_call_nmi_hook(struct pt_regs *regs) { kgdb_nmicallback(raw_smp_processor_id(), regs); @@ -138,6 +154,8 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) static int kgdb_singlestep(struct pt_regs *regs) { struct thread_info *thread_info, *exception_thread_info; + struct thread_info *backup_current_thread_info = \ + (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); if (user_mode(regs)) return 0; @@ -155,13 +173,17 @@ static int kgdb_singlestep(struct pt_regs *regs) thread_info = (struct thread_info *)(regs->gpr[1] & ~(THREAD_SIZE-1)); exception_thread_info = current_thread_info(); - if (thread_info != exception_thread_info) + if (thread_info != exception_thread_info) { + /* Save the original current_thread_info. */ + memcpy(backup_current_thread_info, exception_thread_info, sizeof *thread_info); memcpy(exception_thread_info, thread_info, sizeof *thread_info); + } kgdb_handle_exception(0, SIGTRAP, 0, regs); if (thread_info != exception_thread_info) - memcpy(thread_info, exception_thread_info, sizeof *thread_info); + /* Restore current_thread_info lastly. */ + memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); return 1; } @@ -410,7 +432,6 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code, #else linux_regs->msr |= MSR_SE; #endif - kgdb_single_step = 1; atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c index 710f400476de..e9cb51f5f801 100644 --- a/trunk/arch/powerpc/kernel/process.c +++ b/trunk/arch/powerpc/kernel/process.c @@ -514,9 +514,6 @@ struct task_struct *__switch_to(struct task_struct *prev, local_irq_save(flags); - account_system_vtime(current); - account_process_vtime(current); - /* * We can't take a PMU exception inside _switch() since there is a * window where the kernel stack SLB and the kernel stack are out @@ -802,16 +799,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, #endif /* CONFIG_PPC_STD_MMU_64 */ #ifdef CONFIG_PPC64 if (cpu_has_feature(CPU_FTR_DSCR)) { - if (current->thread.dscr_inherit) { - p->thread.dscr_inherit = 1; - p->thread.dscr = current->thread.dscr; - } else if (0 != dscr_default) { - p->thread.dscr_inherit = 1; - p->thread.dscr = dscr_default; - } else { - p->thread.dscr_inherit = 0; - p->thread.dscr = 0; - } + p->thread.dscr_inherit = current->thread.dscr_inherit; + p->thread.dscr = current->thread.dscr; } #endif diff --git a/trunk/arch/powerpc/kernel/smp.c b/trunk/arch/powerpc/kernel/smp.c index 0321007086f7..8d4214afc21d 100644 --- a/trunk/arch/powerpc/kernel/smp.c +++ b/trunk/arch/powerpc/kernel/smp.c @@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg) struct cpu_messages *info = &per_cpu(ipi_message, cpu); char *message = (char *)&info->messages; + /* + * Order previous accesses before accesses in the IPI handler. + */ + smp_mb(); message[msg] = 1; - mb(); + /* + * cause_ipi functions are required to include a full barrier + * before doing whatever causes the IPI. 
+ */ smp_ops->cause_ipi(cpu, info->data); } @@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void) mb(); /* order any irq clear */ do { - all = xchg_local(&info->messages, 0); + all = xchg(&info->messages, 0); #ifdef __BIG_ENDIAN if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION))) diff --git a/trunk/arch/powerpc/kernel/syscalls.c b/trunk/arch/powerpc/kernel/syscalls.c index f2496f2faecc..4e3cc47f26b9 100644 --- a/trunk/arch/powerpc/kernel/syscalls.c +++ b/trunk/arch/powerpc/kernel/syscalls.c @@ -107,11 +107,11 @@ long ppc64_personality(unsigned long personality) long ret; if (personality(current->personality) == PER_LINUX32 - && personality == PER_LINUX) - personality = PER_LINUX32; + && personality(personality) == PER_LINUX) + personality = (personality & ~PER_MASK) | PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret = (ret & ~PER_MASK) | PER_LINUX; return ret; } #endif diff --git a/trunk/arch/powerpc/kernel/sysfs.c b/trunk/arch/powerpc/kernel/sysfs.c index 3529446c2abd..8302af649219 100644 --- a/trunk/arch/powerpc/kernel/sysfs.c +++ b/trunk/arch/powerpc/kernel/sysfs.c @@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev, return sprintf(buf, "%lx\n", dscr_default); } +static void update_dscr(void *dummy) +{ + if (!current->thread.dscr_inherit) { + current->thread.dscr = dscr_default; + mtspr(SPRN_DSCR, dscr_default); + } +} + static ssize_t __used store_dscr_default(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev, return -EINVAL; dscr_default = val; + on_each_cpu(update_dscr, NULL, 1); + return count; } diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c index be171ee73bf8..eaa9d0e6abca 100644 --- a/trunk/arch/powerpc/kernel/time.c +++ b/trunk/arch/powerpc/kernel/time.c @@ -291,13 +291,12 @@ static inline u64 calculate_stolen_time(u64 stop_tb) * Account time for a transition between system, hard irq * or soft irq state. */ -void account_system_vtime(struct task_struct *tsk) +static u64 vtime_delta(struct task_struct *tsk, + u64 *sys_scaled, u64 *stolen) { - u64 now, nowscaled, delta, deltascaled; - unsigned long flags; - u64 stolen, udelta, sys_scaled, user_scaled; + u64 now, nowscaled, deltascaled; + u64 udelta, delta, user_scaled; - local_irq_save(flags); now = mftb(); nowscaled = read_spurr(now); get_paca()->system_time += now - get_paca()->starttime; @@ -305,7 +304,7 @@ void account_system_vtime(struct task_struct *tsk) deltascaled = nowscaled - get_paca()->startspurr; get_paca()->startspurr = nowscaled; - stolen = calculate_stolen_time(now); + *stolen = calculate_stolen_time(now); delta = get_paca()->system_time; get_paca()->system_time = 0; @@ -322,35 +321,45 @@ void account_system_vtime(struct task_struct *tsk) * the user ticks get saved up in paca->user_time_scaled to be * used by account_process_tick. 
*/ - sys_scaled = delta; + *sys_scaled = delta; user_scaled = udelta; if (deltascaled != delta + udelta) { if (udelta) { - sys_scaled = deltascaled * delta / (delta + udelta); - user_scaled = deltascaled - sys_scaled; + *sys_scaled = deltascaled * delta / (delta + udelta); + user_scaled = deltascaled - *sys_scaled; } else { - sys_scaled = deltascaled; + *sys_scaled = deltascaled; } } get_paca()->user_time_scaled += user_scaled; - if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { - account_system_time(tsk, 0, delta, sys_scaled); - if (stolen) - account_steal_time(stolen); - } else { - account_idle_time(delta + stolen); - } - local_irq_restore(flags); + return delta; +} + +void vtime_account_system(struct task_struct *tsk) +{ + u64 delta, sys_scaled, stolen; + + delta = vtime_delta(tsk, &sys_scaled, &stolen); + account_system_time(tsk, 0, delta, sys_scaled); + if (stolen) + account_steal_time(stolen); +} + +void vtime_account_idle(struct task_struct *tsk) +{ + u64 delta, sys_scaled, stolen; + + delta = vtime_delta(tsk, &sys_scaled, &stolen); + account_idle_time(delta + stolen); } -EXPORT_SYMBOL_GPL(account_system_vtime); /* * Transfer the user and system times accumulated in the paca * by the exception entry and exit code to the generic process * user and system time records. * Must be called with interrupts disabled. - * Assumes that account_system_vtime() has been called recently + * Assumes that vtime_account() has been called recently * (i.e. since the last entry from usermode) so that * get_paca()->user_time_scaled is up to date. */ @@ -366,6 +375,12 @@ void account_process_tick(struct task_struct *tsk, int user_tick) account_user_time(tsk, utime, utimescaled); } +void vtime_task_switch(struct task_struct *prev) +{ + vtime_account(prev); + account_process_tick(prev, 0); +} + #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ #define calc_cputime_factors() #endif @@ -535,6 +550,15 @@ void timer_interrupt(struct pt_regs * regs) trace_timer_interrupt_exit(regs); } +/* + * Hypervisor decrementer interrupts shouldn't occur but are sometimes + * left pending on exit from a KVM guest. We don't need to do anything + * to clear them, as they are edge-triggered. 
+ */ +void hdec_interrupt(struct pt_regs *regs) +{ +} + #ifdef CONFIG_SUSPEND static void generic_suspend_disable_irqs(void) { diff --git a/trunk/arch/powerpc/kernel/traps.c b/trunk/arch/powerpc/kernel/traps.c index 158972341a2d..ae0843fa7a61 100644 --- a/trunk/arch/powerpc/kernel/traps.c +++ b/trunk/arch/powerpc/kernel/traps.c @@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs) cpu_has_feature(CPU_FTR_DSCR)) { PPC_WARN_EMULATED(mtdscr, regs); rd = (instword >> 21) & 0x1f; - mtspr(SPRN_DSCR, regs->gpr[rd]); + current->thread.dscr = regs->gpr[rd]; current->thread.dscr_inherit = 1; + mtspr(SPRN_DSCR, current->thread.dscr); return 0; } #endif diff --git a/trunk/arch/powerpc/kvm/book3s_32_mmu_host.c b/trunk/arch/powerpc/kvm/book3s_32_mmu_host.c index f922c29bb234..837f13e7b6bf 100644 --- a/trunk/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/trunk/arch/powerpc/kvm/book3s_32_mmu_host.c @@ -211,6 +211,9 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) pteg1 |= PP_RWRX; } + if (orig_pte->may_execute) + kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); + local_irq_disable(); if (pteg[rr]) { diff --git a/trunk/arch/powerpc/kvm/book3s_64_mmu_host.c b/trunk/arch/powerpc/kvm/book3s_64_mmu_host.c index 10fc8ec9d2a8..0688b6b39585 100644 --- a/trunk/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/trunk/arch/powerpc/kvm/book3s_64_mmu_host.c @@ -126,6 +126,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) if (!orig_pte->may_execute) rflags |= HPTE_R_N; + else + kvmppc_mmu_flush_icache(hpaddr >> PAGE_SHIFT); hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M); diff --git a/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 5a84c8d3d040..44b72feaff7d 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/trunk/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -1421,13 +1421,13 @@ _GLOBAL(kvmppc_h_cede) sync /* order setting ceded vs. testing prodded */ lbz r5,VCPU_PRODDED(r3) cmpwi r5,0 - bne 1f + bne kvm_cede_prodded li r0,0 /* set trap to 0 to say hcall is handled */ stw r0,VCPU_TRAP(r3) li r0,H_SUCCESS std r0,VCPU_GPR(R3)(r3) BEGIN_FTR_SECTION - b 2f /* just send it up to host on 970 */ + b kvm_cede_exit /* just send it up to host on 970 */ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) /* @@ -1446,7 +1446,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) or r4,r4,r0 PPC_POPCNTW(R7,R4) cmpw r7,r8 - bge 2f + bge kvm_cede_exit stwcx. r4,0,r6 bne 31b li r0,1 @@ -1555,7 +1555,8 @@ kvm_end_cede: b hcall_real_fallback /* cede when already previously prodded case */ -1: li r0,0 +kvm_cede_prodded: + li r0,0 stb r0,VCPU_PRODDED(r3) sync /* order testing prodded vs. 
clearing ceded */ stb r0,VCPU_CEDED(r3) @@ -1563,7 +1564,8 @@ kvm_end_cede: blr /* we've ceded but we want to give control to the host */ -2: li r3,H_TOO_HARD +kvm_cede_exit: + li r3,H_TOO_HARD blr secondary_too_late: diff --git a/trunk/arch/powerpc/kvm/e500_tlb.c b/trunk/arch/powerpc/kvm/e500_tlb.c index c510fc961302..a2b66717813d 100644 --- a/trunk/arch/powerpc/kvm/e500_tlb.c +++ b/trunk/arch/powerpc/kvm/e500_tlb.c @@ -322,11 +322,11 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref) static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500) { if (vcpu_e500->g2h_tlb1_map) - memset(vcpu_e500->g2h_tlb1_map, - sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0); + memset(vcpu_e500->g2h_tlb1_map, 0, + sizeof(u64) * vcpu_e500->gtlb_params[1].entries); if (vcpu_e500->h2g_tlb1_rmap) - memset(vcpu_e500->h2g_tlb1_rmap, - sizeof(unsigned int) * host_tlb_params[1].entries, 0); + memset(vcpu_e500->h2g_tlb1_rmap, 0, + sizeof(unsigned int) * host_tlb_params[1].entries); } static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) @@ -539,6 +539,9 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe); + + /* Clear i-cache for new pages */ + kvmppc_mmu_flush_icache(pfn); } /* XXX only map the one-one case, for now use TLB0 */ diff --git a/trunk/arch/powerpc/lib/code-patching.c b/trunk/arch/powerpc/lib/code-patching.c index dd223b3eb333..17e5b2364312 100644 --- a/trunk/arch/powerpc/lib/code-patching.c +++ b/trunk/arch/powerpc/lib/code-patching.c @@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr) { int err; - err = __put_user(instr, addr); + __put_user_size(instr, addr, 4, err); if (err) return err; asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr)); diff --git a/trunk/arch/powerpc/lib/copyuser_power7.S b/trunk/arch/powerpc/lib/copyuser_power7.S index f9ede7c6606e..0d24ff15f5f6 100644 --- a/trunk/arch/powerpc/lib/copyuser_power7.S +++ b/trunk/arch/powerpc/lib/copyuser_power7.S @@ -288,7 +288,7 @@ err1; stb r0,0(r3) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl .enter_vmx_usercopy - cmpwi r3,0 + cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STACKFRAMESIZE+48(r1) ld r4,STACKFRAMESIZE+56(r1) @@ -326,38 +326,7 @@ err1; stb r0,0(r3) dcbt r0,r8,0b01010 /* GO */ .machine pop - /* - * We prefetch both the source and destination using enhanced touch - * instructions. We use a stream ID of 0 for the load side and - * 1 for the store side. 
- */ - clrrdi r6,r4,7 - clrrdi r9,r3,7 - ori r9,r9,1 /* stream=1 */ - - srdi r7,r5,7 /* length in cachelines, capped at 0x3FF */ - cmpldi cr1,r7,0x3FF - ble cr1,1f - li r7,0x3FF -1: lis r0,0x0E00 /* depth=7 */ - sldi r7,r7,7 - or r7,r7,r0 - ori r10,r7,1 /* stream=1 */ - - lis r8,0x8000 /* GO=1 */ - clrldi r8,r8,32 - -.machine push -.machine "power4" - dcbt r0,r6,0b01000 - dcbt r0,r7,0b01010 - dcbtst r0,r9,0b01000 - dcbtst r0,r10,0b01010 - eieio - dcbt r0,r8,0b01010 /* GO */ -.machine pop - - beq .Lunwind_stack_nonvmx_copy + beq cr1,.Lunwind_stack_nonvmx_copy /* * If source and destination are not relatively aligned we use a diff --git a/trunk/arch/powerpc/lib/memcpy_power7.S b/trunk/arch/powerpc/lib/memcpy_power7.S index 0efdc51bc716..7ba6c96de778 100644 --- a/trunk/arch/powerpc/lib/memcpy_power7.S +++ b/trunk/arch/powerpc/lib/memcpy_power7.S @@ -222,7 +222,7 @@ _GLOBAL(memcpy_power7) std r0,16(r1) stdu r1,-STACKFRAMESIZE(r1) bl .enter_vmx_copy - cmpwi r3,0 + cmpwi cr1,r3,0 ld r0,STACKFRAMESIZE+16(r1) ld r3,STACKFRAMESIZE+48(r1) ld r4,STACKFRAMESIZE+56(r1) @@ -260,7 +260,7 @@ _GLOBAL(memcpy_power7) dcbt r0,r8,0b01010 /* GO */ .machine pop - beq .Lunwind_stack_nonvmx_copy + beq cr1,.Lunwind_stack_nonvmx_copy /* * If source and destination are not relatively aligned we use a diff --git a/trunk/arch/powerpc/mm/mem.c b/trunk/arch/powerpc/mm/mem.c index baaafde7d135..fbdad0e3929a 100644 --- a/trunk/arch/powerpc/mm/mem.c +++ b/trunk/arch/powerpc/mm/mem.c @@ -469,6 +469,7 @@ void flush_dcache_icache_page(struct page *page) __flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT); #endif } +EXPORT_SYMBOL(flush_dcache_icache_page); void clear_user_page(void *page, unsigned long vaddr, struct page *pg) { diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c index 39b159751c35..59213cfaeca9 100644 --- a/trunk/arch/powerpc/mm/numa.c +++ b/trunk/arch/powerpc/mm/numa.c @@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu, /* * Update the node maps and sysfs entries for each cpu whose home node - * has changed. + * has changed. Returns 1 when the topology has changed, and 0 otherwise. */ int arch_update_cpu_topology(void) { - int cpu, nid, old_nid; + int cpu, nid, old_nid, changed = 0; unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; struct device *dev; @@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void) dev = get_cpu_device(cpu); if (dev) kobject_uevent(&dev->kobj, KOBJ_CHANGE); + changed = 1; } - return 1; + return changed; } static void topology_work_fn(struct work_struct *work) diff --git a/trunk/arch/powerpc/perf/core-book3s.c b/trunk/arch/powerpc/perf/core-book3s.c index 77b49ddda9d3..7cd2dbd6e4c4 100644 --- a/trunk/arch/powerpc/perf/core-book3s.c +++ b/trunk/arch/powerpc/perf/core-book3s.c @@ -1431,7 +1431,7 @@ static void perf_event_interrupt(struct pt_regs *regs) if (!event->hw.idx || is_limited_pmc(event->hw.idx)) continue; val = read_pmc(event->hw.idx); - if ((int)val < 0) { + if (pmc_overflow(val)) { /* event has overflowed */ found = 1; record_and_restart(event, val, regs); diff --git a/trunk/arch/powerpc/platforms/Kconfig.cputype b/trunk/arch/powerpc/platforms/Kconfig.cputype index 30fd01de6bed..72afd2888cad 100644 --- a/trunk/arch/powerpc/platforms/Kconfig.cputype +++ b/trunk/arch/powerpc/platforms/Kconfig.cputype @@ -1,6 +1,7 @@ config PPC64 bool "64-bit kernel" default n + select HAVE_VIRT_CPU_ACCOUNTING help This option selects whether a 32-bit or a 64-bit kernel will be built. 
@@ -337,21 +338,6 @@ config PPC_MM_SLICES default y if (!PPC_FSL_BOOK3E && PPC64 && HUGETLB_PAGE) || (PPC_STD_MMU_64 && PPC_64K_PAGES) default n -config VIRT_CPU_ACCOUNTING - bool "Deterministic task and CPU time accounting" - depends on PPC64 - default y - help - Select this option to enable more accurate task and CPU time - accounting. This is done by reading a CPU counter on each - kernel entry and exit and on transitions within the kernel - between system, softirq and hardirq state, so there is a - small performance impact. This also enables accounting of - stolen time on logically-partitioned systems running on - IBM POWER5-based machines. - - If in doubt, say Y here. - config PPC_HAVE_PMU_SUPPORT bool diff --git a/trunk/arch/powerpc/platforms/powernv/smp.c b/trunk/arch/powerpc/platforms/powernv/smp.c index 3ef46254c35b..7698b6e13c57 100644 --- a/trunk/arch/powerpc/platforms/powernv/smp.c +++ b/trunk/arch/powerpc/platforms/powernv/smp.c @@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void) { unsigned int cpu; - /* If powersave_nap is enabled, use NAP mode, else just - * spin aimlessly - */ - if (!powersave_nap) { - generic_mach_cpu_die(); - return; - } - /* Standard hot unplug procedure */ local_irq_disable(); idle_task_exit(); @@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void) */ mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1); while (!generic_check_cpu_restart(cpu)) { - power7_idle(); + power7_nap(); if (!generic_check_cpu_restart(cpu)) { DBG("CPU%d Unexpected exit while offline !\n", cpu); /* We may be getting an IPI, so we re-enable diff --git a/trunk/arch/powerpc/sysdev/fsl_pci.c b/trunk/arch/powerpc/sysdev/fsl_pci.c index a7b2a600d0a4..c37f46136321 100644 --- a/trunk/arch/powerpc/sysdev/fsl_pci.c +++ b/trunk/arch/powerpc/sysdev/fsl_pci.c @@ -465,7 +465,7 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) iounmap(hose->cfg_data); iounmap(hose->cfg_addr); pcibios_free_controller(hose); - return 0; + return -ENODEV; } setup_pci_cmd(hose); @@ -827,6 +827,7 @@ struct device_node *fsl_pci_primary; void __devinit fsl_pci_init(void) { + int ret; struct device_node *node; struct pci_controller *hose; dma_addr_t max = 0xffffffff; @@ -855,10 +856,12 @@ void __devinit fsl_pci_init(void) if (!fsl_pci_primary) fsl_pci_primary = node; - fsl_add_bridge(node, fsl_pci_primary == node); - hose = pci_find_hose_for_OF_device(node); - max = min(max, hose->dma_window_base_cur + - hose->dma_window_size); + ret = fsl_add_bridge(node, fsl_pci_primary == node); + if (ret == 0) { + hose = pci_find_hose_for_OF_device(node); + max = min(max, hose->dma_window_base_cur + + hose->dma_window_size); + } } } diff --git a/trunk/arch/powerpc/sysdev/mpic_msgr.c b/trunk/arch/powerpc/sysdev/mpic_msgr.c index 483d8fa72e8b..e961f8c4a8f0 100644 --- a/trunk/arch/powerpc/sysdev/mpic_msgr.c +++ b/trunk/arch/powerpc/sysdev/mpic_msgr.c @@ -14,6 +14,9 @@ #include #include #include +#include +#include +#include #include #include #include diff --git a/trunk/arch/powerpc/sysdev/xics/icp-hv.c b/trunk/arch/powerpc/sysdev/xics/icp-hv.c index 14469cf9df68..df0fc5821469 100644 --- a/trunk/arch/powerpc/sysdev/xics/icp-hv.c +++ b/trunk/arch/powerpc/sysdev/xics/icp-hv.c @@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value) static inline void icp_hv_set_qirr(int n_cpu , u8 value) { int hw_cpu = get_hard_smp_processor_id(n_cpu); - long rc = plpar_hcall_norets(H_IPI, hw_cpu, value); + long rc; + + /* Make sure all previous accesses are ordered before IPI sending */ + mb(); + rc = 
plpar_hcall_norets(H_IPI, hw_cpu, value); if (rc != H_SUCCESS) { pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x " "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc); diff --git a/trunk/arch/powerpc/xmon/xmon.c b/trunk/arch/powerpc/xmon/xmon.c index eab3492a45c5..9b49c65ee7a4 100644 --- a/trunk/arch/powerpc/xmon/xmon.c +++ b/trunk/arch/powerpc/xmon/xmon.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -894,13 +895,13 @@ cmds(struct pt_regs *excp) #endif default: printf("Unrecognized command: "); - do { + do { if (' ' < cmd && cmd <= '~') putchar(cmd); else printf("\\x%x", cmd); cmd = inchar(); - } while (cmd != '\n'); + } while (cmd != '\n'); printf(" (type ? for help)\n"); break; } @@ -1097,7 +1098,7 @@ static long check_bp_loc(unsigned long addr) return 1; } -static char *breakpoint_help_string = +static char *breakpoint_help_string = "Breakpoint command usage:\n" "b show breakpoints\n" "b [cnt] set breakpoint at given instr addr\n" @@ -1193,7 +1194,7 @@ bpt_cmds(void) default: termch = cmd; - cmd = skipbl(); + cmd = skipbl(); if (cmd == '?') { printf(breakpoint_help_string); break; @@ -1359,7 +1360,7 @@ static void xmon_show_stack(unsigned long sp, unsigned long lr, sp + REGS_OFFSET); break; } - printf("--- Exception: %lx %s at ", regs.trap, + printf("--- Exception: %lx %s at ", regs.trap, getvecname(TRAP(®s))); pc = regs.nip; lr = regs.link; @@ -1623,14 +1624,14 @@ static void super_regs(void) cmd = skipbl(); if (cmd == '\n') { - unsigned long sp, toc; + unsigned long sp, toc; asm("mr %0,1" : "=r" (sp) :); asm("mr %0,2" : "=r" (toc) :); printf("msr = "REG" sprg0= "REG"\n", mfmsr(), mfspr(SPRN_SPRG0)); printf("pvr = "REG" sprg1= "REG"\n", - mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); + mfspr(SPRN_PVR), mfspr(SPRN_SPRG1)); printf("dec = "REG" sprg2= "REG"\n", mfspr(SPRN_DEC), mfspr(SPRN_SPRG2)); printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3)); @@ -1783,7 +1784,7 @@ byterev(unsigned char *val, int size) static int brev; static int mnoread; -static char *memex_help_string = +static char *memex_help_string = "Memory examine command usage:\n" "m [addr] [flags] examine/change memory\n" " addr is optional. will start where left off.\n" @@ -1798,7 +1799,7 @@ static char *memex_help_string = "NOTE: flags are saved as defaults\n" ""; -static char *memex_subcmd_help_string = +static char *memex_subcmd_help_string = "Memory examine subcommands:\n" " hexval write this val to current location\n" " 'string' write chars from string to this location\n" @@ -2064,7 +2065,7 @@ prdump(unsigned long adrs, long ndump) nr = mread(adrs, temp, r); adrs += nr; for (m = 0; m < r; ++m) { - if ((m & (sizeof(long) - 1)) == 0 && m > 0) + if ((m & (sizeof(long) - 1)) == 0 && m > 0) putchar(' '); if (m < nr) printf("%.2x", temp[m]); @@ -2072,7 +2073,7 @@ prdump(unsigned long adrs, long ndump) printf("%s", fault_chars[fault_type]); } for (; m < 16; ++m) { - if ((m & (sizeof(long) - 1)) == 0) + if ((m & (sizeof(long) - 1)) == 0) putchar(' '); printf(" "); } @@ -2148,45 +2149,28 @@ print_address(unsigned long addr) void dump_log_buf(void) { - const unsigned long size = 128; - unsigned long end, addr; - unsigned char buf[size + 1]; - - addr = 0; - buf[size] = '\0'; - - if (setjmp(bus_error_jmp) != 0) { - printf("Unable to lookup symbol __log_buf!\n"); - return; - } - - catch_memory_errors = 1; - sync(); - addr = kallsyms_lookup_name("__log_buf"); - - if (! 
addr) - printf("Symbol __log_buf not found!\n"); - else { - end = addr + (1 << CONFIG_LOG_BUF_SHIFT); - while (addr < end) { - if (! mread(addr, buf, size)) { - printf("Can't read memory at address 0x%lx\n", addr); - break; - } - - printf("%s", buf); - - if (strlen(buf) < size) - break; - - addr += size; - } - } - - sync(); - /* wait a little while to see if we get a machine check */ - __delay(200); - catch_memory_errors = 0; + struct kmsg_dumper dumper = { .active = 1 }; + unsigned char buf[128]; + size_t len; + + if (setjmp(bus_error_jmp) != 0) { + printf("Error dumping printk buffer!\n"); + return; + } + + catch_memory_errors = 1; + sync(); + + kmsg_dump_rewind_nolock(&dumper); + while (kmsg_dump_get_line_nolock(&dumper, false, buf, sizeof(buf), &len)) { + buf[len] = '\0'; + printf("%s", buf); + } + + sync(); + /* wait a little while to see if we get a machine check */ + __delay(200); + catch_memory_errors = 0; } /* diff --git a/trunk/arch/s390/Kconfig b/trunk/arch/s390/Kconfig index 76de6b68487c..f5ab543396da 100644 --- a/trunk/arch/s390/Kconfig +++ b/trunk/arch/s390/Kconfig @@ -49,9 +49,6 @@ config GENERIC_LOCKBREAK config PGSTE def_bool y if KVM -config VIRT_CPU_ACCOUNTING - def_bool y - config ARCH_SUPPORTS_DEBUG_PAGEALLOC def_bool y @@ -89,6 +86,8 @@ config S390 select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP select HAVE_CMPXCHG_LOCAL + select HAVE_VIRT_CPU_ACCOUNTING + select VIRT_CPU_ACCOUNTING select ARCH_DISCARD_MEMBLOCK select BUILDTIME_EXTABLE_SORT select ARCH_INLINE_SPIN_TRYLOCK @@ -124,6 +123,7 @@ config S390 select GENERIC_TIME_VSYSCALL select GENERIC_CLOCKEVENTS select KTIME_SCALAR if 32BIT + select HAVE_ARCH_SECCOMP_FILTER config SCHED_OMIT_FRAME_POINTER def_bool y diff --git a/trunk/arch/s390/include/asm/cputime.h b/trunk/arch/s390/include/asm/cputime.h index 8709bdef233c..023d5ae24482 100644 --- a/trunk/arch/s390/include/asm/cputime.h +++ b/trunk/arch/s390/include/asm/cputime.h @@ -12,6 +12,9 @@ #include #include + +#define __ARCH_HAS_VTIME_ACCOUNT + /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. 
*/ typedef unsigned long long __nocast cputime_t; diff --git a/trunk/arch/s390/include/asm/elf.h b/trunk/arch/s390/include/asm/elf.h index 32e8449640fa..9b94a160fe7f 100644 --- a/trunk/arch/s390/include/asm/elf.h +++ b/trunk/arch/s390/include/asm/elf.h @@ -180,7 +180,8 @@ extern char elf_platform[]; #define ELF_PLATFORM (elf_platform) #ifndef CONFIG_64BIT -#define SET_PERSONALITY(ex) set_personality(PER_LINUX) +#define SET_PERSONALITY(ex) \ + set_personality(PER_LINUX | (current->personality & (~PER_MASK))) #else /* CONFIG_64BIT */ #define SET_PERSONALITY(ex) \ do { \ diff --git a/trunk/arch/s390/include/asm/hugetlb.h b/trunk/arch/s390/include/asm/hugetlb.h index 799ed0f1643d..2d6e6e380564 100644 --- a/trunk/arch/s390/include/asm/hugetlb.h +++ b/trunk/arch/s390/include/asm/hugetlb.h @@ -66,16 +66,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep) return pte; } -static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, - unsigned long addr, pte_t *ptep) -{ - pte_t pte = huge_ptep_get(ptep); - - mm->context.flush_mm = 1; - pmd_clear((pmd_t *) ptep); - return pte; -} - static inline void __pmd_csp(pmd_t *pmdp) { register unsigned long reg2 asm("2") = pmd_val(*pmdp); @@ -117,6 +107,15 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm, __pmd_csp(pmdp); } +static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + pte_t pte = huge_ptep_get(ptep); + + huge_ptep_invalidate(mm, addr, ptep); + return pte; +} + #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ ({ \ int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \ @@ -131,10 +130,7 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm, ({ \ pte_t __pte = huge_ptep_get(__ptep); \ if (pte_write(__pte)) { \ - (__mm)->context.flush_mm = 1; \ - if (atomic_read(&(__mm)->context.attach_count) > 1 || \ - (__mm) != current->active_mm) \ - huge_ptep_invalidate(__mm, __addr, __ptep); \ + huge_ptep_invalidate(__mm, __addr, __ptep); \ set_huge_pte_at(__mm, __addr, __ptep, \ huge_pte_wrprotect(__pte)); \ } \ diff --git a/trunk/arch/s390/include/asm/posix_types.h b/trunk/arch/s390/include/asm/posix_types.h index 7bcc14e395f0..bf2a2ad2f800 100644 --- a/trunk/arch/s390/include/asm/posix_types.h +++ b/trunk/arch/s390/include/asm/posix_types.h @@ -13,6 +13,7 @@ */ typedef unsigned long __kernel_size_t; +typedef long __kernel_ssize_t; #define __kernel_size_t __kernel_size_t typedef unsigned short __kernel_old_dev_t; @@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t; typedef unsigned short __kernel_ipc_pid_t; typedef unsigned short __kernel_uid_t; typedef unsigned short __kernel_gid_t; -typedef int __kernel_ssize_t; typedef int __kernel_ptrdiff_t; #else /* __s390x__ */ @@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t; typedef int __kernel_ipc_pid_t; typedef unsigned int __kernel_uid_t; typedef unsigned int __kernel_gid_t; -typedef long __kernel_ssize_t; typedef long __kernel_ptrdiff_t; typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ diff --git a/trunk/arch/s390/include/asm/smp.h b/trunk/arch/s390/include/asm/smp.h index a0a8340daafa..ce26ac3cb162 100644 --- a/trunk/arch/s390/include/asm/smp.h +++ b/trunk/arch/s390/include/asm/smp.h @@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data) } static inline int smp_find_processor_id(int address) { return 0; } +static inline int smp_store_status(int cpu) { return 0; } static inline int smp_vcpu_scheduled(int cpu) { return 1; } static inline void 
smp_yield_cpu(int cpu) { } static inline void smp_yield(void) { } diff --git a/trunk/arch/s390/include/asm/sparsemem.h b/trunk/arch/s390/include/asm/sparsemem.h index 0fb34027d3f6..a60d085ddb4d 100644 --- a/trunk/arch/s390/include/asm/sparsemem.h +++ b/trunk/arch/s390/include/asm/sparsemem.h @@ -4,13 +4,11 @@ #ifdef CONFIG_64BIT #define SECTION_SIZE_BITS 28 -#define MAX_PHYSADDR_BITS 46 #define MAX_PHYSMEM_BITS 46 #else #define SECTION_SIZE_BITS 25 -#define MAX_PHYSADDR_BITS 31 #define MAX_PHYSMEM_BITS 31 #endif /* CONFIG_64BIT */ diff --git a/trunk/arch/s390/include/asm/switch_to.h b/trunk/arch/s390/include/asm/switch_to.h index f223068b7822..314cc9426fc4 100644 --- a/trunk/arch/s390/include/asm/switch_to.h +++ b/trunk/arch/s390/include/asm/switch_to.h @@ -89,12 +89,8 @@ static inline void restore_access_regs(unsigned int *acrs) prev = __switch_to(prev,next); \ } while (0) -extern void account_vtime(struct task_struct *, struct task_struct *); -extern void account_tick_vtime(struct task_struct *); - #define finish_arch_switch(prev) do { \ set_fs(current->thread.mm_segment); \ - account_vtime(prev, current); \ } while (0) #endif /* __ASM_SWITCH_TO_H */ diff --git a/trunk/arch/s390/include/asm/syscall.h b/trunk/arch/s390/include/asm/syscall.h index fb214dd9b7e0..fe7b99759e12 100644 --- a/trunk/arch/s390/include/asm/syscall.h +++ b/trunk/arch/s390/include/asm/syscall.h @@ -12,6 +12,7 @@ #ifndef _ASM_SYSCALL_H #define _ASM_SYSCALL_H 1 +#include #include #include #include @@ -87,4 +88,13 @@ static inline void syscall_set_arguments(struct task_struct *task, regs->orig_gpr2 = args[0]; } +static inline int syscall_get_arch(struct task_struct *task, + struct pt_regs *regs) +{ +#ifdef CONFIG_COMPAT + if (test_tsk_thread_flag(task, TIF_31BIT)) + return AUDIT_ARCH_S390; +#endif + return sizeof(long) == 8 ? 
AUDIT_ARCH_S390X : AUDIT_ARCH_S390; +} #endif /* _ASM_SYSCALL_H */ diff --git a/trunk/arch/s390/include/asm/tlbflush.h b/trunk/arch/s390/include/asm/tlbflush.h index 9fde315f3a7c..1d8fe2b17ef6 100644 --- a/trunk/arch/s390/include/asm/tlbflush.h +++ b/trunk/arch/s390/include/asm/tlbflush.h @@ -90,12 +90,10 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) static inline void __tlb_flush_mm_cond(struct mm_struct * mm) { - spin_lock(&mm->page_table_lock); if (mm->context.flush_mm) { __tlb_flush_mm(mm); mm->context.flush_mm = 0; } - spin_unlock(&mm->page_table_lock); } /* diff --git a/trunk/arch/s390/kernel/compat_linux.c b/trunk/arch/s390/kernel/compat_linux.c index d1225089a4bb..f606d935f495 100644 --- a/trunk/arch/s390/kernel/compat_linux.c +++ b/trunk/arch/s390/kernel/compat_linux.c @@ -620,7 +620,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg) return -EFAULT; if (a.offset & ~PAGE_MASK) return -EINVAL; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); } @@ -631,7 +630,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg) if (copy_from_user(&a, arg, sizeof(a))) return -EFAULT; - a.addr = (unsigned long) compat_ptr(a.addr); return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset); } diff --git a/trunk/arch/s390/kernel/compat_wrapper.S b/trunk/arch/s390/kernel/compat_wrapper.S index e835d6d5b7fd..2d82cfcbce5b 100644 --- a/trunk/arch/s390/kernel/compat_wrapper.S +++ b/trunk/arch/s390/kernel/compat_wrapper.S @@ -1635,7 +1635,7 @@ ENTRY(compat_sys_process_vm_readv_wrapper) llgfr %r6,%r6 # unsigned long llgf %r0,164(%r15) # unsigned long stg %r0,160(%r15) - jg sys_process_vm_readv + jg compat_sys_process_vm_readv ENTRY(compat_sys_process_vm_writev_wrapper) lgfr %r2,%r2 # compat_pid_t @@ -1645,4 +1645,4 @@ ENTRY(compat_sys_process_vm_writev_wrapper) llgfr %r6,%r6 # unsigned long llgf %r0,164(%r15) # unsigned long stg %r0,160(%r15) - jg sys_process_vm_writev + jg compat_sys_process_vm_writev diff --git a/trunk/arch/s390/kernel/ptrace.c b/trunk/arch/s390/kernel/ptrace.c index f4eb37680b91..e4be113fbac6 100644 --- a/trunk/arch/s390/kernel/ptrace.c +++ b/trunk/arch/s390/kernel/ptrace.c @@ -719,7 +719,11 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) long ret = 0; /* Do the secure computing check first. */ - secure_computing_strict(regs->gprs[2]); + if (secure_computing(regs->gprs[2])) { + /* seccomp failures shouldn't expose any additional code. */ + ret = -1; + goto out; + } /* * The sysc_tracesys code in entry.S stored the system @@ -745,6 +749,7 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs) regs->gprs[2], regs->orig_gpr2, regs->gprs[3], regs->gprs[4], regs->gprs[5]); +out: return ret ?: regs->gprs[2]; } diff --git a/trunk/arch/s390/kernel/setup.c b/trunk/arch/s390/kernel/setup.c index f86c81e13c37..40b57693de38 100644 --- a/trunk/arch/s390/kernel/setup.c +++ b/trunk/arch/s390/kernel/setup.c @@ -974,11 +974,13 @@ static void __init setup_hwcaps(void) if (MACHINE_HAS_HPAGE) elf_hwcap |= HWCAP_S390_HPAGE; +#if defined(CONFIG_64BIT) /* * 64-bit register support for 31-bit processes * HWCAP_S390_HIGH_GPRS is bit 9. 
*/ elf_hwcap |= HWCAP_S390_HIGH_GPRS; +#endif get_cpu_id(&cpu_id); switch (cpu_id.machine) { diff --git a/trunk/arch/s390/kernel/sys_s390.c b/trunk/arch/s390/kernel/sys_s390.c index b4a29eee41b8..d0964d22adb5 100644 --- a/trunk/arch/s390/kernel/sys_s390.c +++ b/trunk/arch/s390/kernel/sys_s390.c @@ -81,11 +81,12 @@ SYSCALL_DEFINE1(s390_personality, unsigned int, personality) { unsigned int ret; - if (current->personality == PER_LINUX32 && personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/trunk/arch/s390/kernel/vtime.c b/trunk/arch/s390/kernel/vtime.c index 4fc97b40a6e1..cb5093c26d16 100644 --- a/trunk/arch/s390/kernel/vtime.c +++ b/trunk/arch/s390/kernel/vtime.c @@ -99,7 +99,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset) return virt_timer_forward(user + system); } -void account_vtime(struct task_struct *prev, struct task_struct *next) +void vtime_task_switch(struct task_struct *prev) { struct thread_info *ti; @@ -107,7 +107,7 @@ void account_vtime(struct task_struct *prev, struct task_struct *next) ti = task_thread_info(prev); ti->user_timer = S390_lowcore.user_timer; ti->system_timer = S390_lowcore.system_timer; - ti = task_thread_info(next); + ti = task_thread_info(current); S390_lowcore.user_timer = ti->user_timer; S390_lowcore.system_timer = ti->system_timer; } @@ -122,7 +122,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick) * Update process times based on virtual cpu times stored by entry.S * to the lowcore fields user_timer, system_timer & steal_clock. */ -void account_system_vtime(struct task_struct *tsk) +void vtime_account(struct task_struct *tsk) { struct thread_info *ti = task_thread_info(tsk); u64 timer, system; @@ -138,7 +138,7 @@ void account_system_vtime(struct task_struct *tsk) virt_timer_forward(system); } -EXPORT_SYMBOL_GPL(account_system_vtime); +EXPORT_SYMBOL_GPL(vtime_account); void __kprobes vtime_stop_cpu(void) { diff --git a/trunk/arch/s390/lib/uaccess_pt.c b/trunk/arch/s390/lib/uaccess_pt.c index 60ee2b883797..2d37bb861faf 100644 --- a/trunk/arch/s390/lib/uaccess_pt.c +++ b/trunk/arch/s390/lib/uaccess_pt.c @@ -2,69 +2,82 @@ * User access functions based on page table walks for enhanced * system layout without hardware support. * - * Copyright IBM Corp. 2006 + * Copyright IBM Corp. 2006, 2012 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com) */ #include #include #include +#include #include #include #include "uaccess.h" -static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr) + +/* + * Returns kernel address for user virtual address. If the returned address is + * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address + * contains the (negative) exception code. 
+ */ +static __always_inline unsigned long follow_table(struct mm_struct *mm, + unsigned long addr, int write) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; + pte_t *ptep; pgd = pgd_offset(mm, addr); if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd))) - return (pte_t *) 0x3a; + return -0x3aUL; pud = pud_offset(pgd, addr); if (pud_none(*pud) || unlikely(pud_bad(*pud))) - return (pte_t *) 0x3b; + return -0x3bUL; pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - return (pte_t *) 0x10; + if (pmd_none(*pmd)) + return -0x10UL; + if (pmd_huge(*pmd)) { + if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO)) + return -0x04UL; + return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK); + } + if (unlikely(pmd_bad(*pmd))) + return -0x10UL; + + ptep = pte_offset_map(pmd, addr); + if (!pte_present(*ptep)) + return -0x11UL; + if (write && !pte_write(*ptep)) + return -0x04UL; - return pte_offset_map(pmd, addr); + return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK); } static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, size_t n, int write_user) { struct mm_struct *mm = current->mm; - unsigned long offset, pfn, done, size; - pte_t *pte; + unsigned long offset, done, size, kaddr; void *from, *to; done = 0; retry: spin_lock(&mm->page_table_lock); do { - pte = follow_table(mm, uaddr); - if ((unsigned long) pte < 0x1000) + kaddr = follow_table(mm, uaddr, write_user); + if (IS_ERR_VALUE(kaddr)) goto fault; - if (!pte_present(*pte)) { - pte = (pte_t *) 0x11; - goto fault; - } else if (write_user && !pte_write(*pte)) { - pte = (pte_t *) 0x04; - goto fault; - } - pfn = pte_pfn(*pte); - offset = uaddr & (PAGE_SIZE - 1); + offset = uaddr & ~PAGE_MASK; size = min(n - done, PAGE_SIZE - offset); if (write_user) { - to = (void *)((pfn << PAGE_SHIFT) + offset); + to = (void *) kaddr; from = kptr + done; } else { - from = (void *)((pfn << PAGE_SHIFT) + offset); + from = (void *) kaddr; to = kptr + done; } memcpy(to, from, size); @@ -75,7 +88,7 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, return n - done; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(uaddr, (unsigned long) pte, write_user)) + if (__handle_fault(uaddr, -kaddr, write_user)) return n - done; goto retry; } @@ -84,27 +97,22 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr, * Do DAT for user address by page table walk, return kernel address. * This function needs to be called with current->mm->page_table_lock held. 
*/ -static __always_inline unsigned long __dat_user_addr(unsigned long uaddr) +static __always_inline unsigned long __dat_user_addr(unsigned long uaddr, + int write) { struct mm_struct *mm = current->mm; - unsigned long pfn; - pte_t *pte; + unsigned long kaddr; int rc; retry: - pte = follow_table(mm, uaddr); - if ((unsigned long) pte < 0x1000) - goto fault; - if (!pte_present(*pte)) { - pte = (pte_t *) 0x11; + kaddr = follow_table(mm, uaddr, write); + if (IS_ERR_VALUE(kaddr)) goto fault; - } - pfn = pte_pfn(*pte); - return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1)); + return kaddr; fault: spin_unlock(&mm->page_table_lock); - rc = __handle_fault(uaddr, (unsigned long) pte, 0); + rc = __handle_fault(uaddr, -kaddr, write); spin_lock(&mm->page_table_lock); if (!rc) goto retry; @@ -159,11 +167,9 @@ static size_t clear_user_pt(size_t n, void __user *to) static size_t strnlen_user_pt(size_t count, const char __user *src) { - char *addr; unsigned long uaddr = (unsigned long) src; struct mm_struct *mm = current->mm; - unsigned long offset, pfn, done, len; - pte_t *pte; + unsigned long offset, done, len, kaddr; size_t len_str; if (segment_eq(get_fs(), KERNEL_DS)) @@ -172,19 +178,13 @@ static size_t strnlen_user_pt(size_t count, const char __user *src) retry: spin_lock(&mm->page_table_lock); do { - pte = follow_table(mm, uaddr); - if ((unsigned long) pte < 0x1000) - goto fault; - if (!pte_present(*pte)) { - pte = (pte_t *) 0x11; + kaddr = follow_table(mm, uaddr, 0); + if (IS_ERR_VALUE(kaddr)) goto fault; - } - pfn = pte_pfn(*pte); - offset = uaddr & (PAGE_SIZE-1); - addr = (char *)(pfn << PAGE_SHIFT) + offset; + offset = uaddr & ~PAGE_MASK; len = min(count - done, PAGE_SIZE - offset); - len_str = strnlen(addr, len); + len_str = strnlen((char *) kaddr, len); done += len_str; uaddr += len_str; } while ((len_str == len) && (done < count)); @@ -192,7 +192,7 @@ static size_t strnlen_user_pt(size_t count, const char __user *src) return done + 1; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(uaddr, (unsigned long) pte, 0)) + if (__handle_fault(uaddr, -kaddr, 0)) return 0; goto retry; } @@ -225,11 +225,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to, const void __user *from) { struct mm_struct *mm = current->mm; - unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to, - uaddr, done, size, error_code; + unsigned long offset_max, uaddr, done, size, error_code; unsigned long uaddr_from = (unsigned long) from; unsigned long uaddr_to = (unsigned long) to; - pte_t *pte_from, *pte_to; + unsigned long kaddr_to, kaddr_from; int write_user; if (segment_eq(get_fs(), KERNEL_DS)) { @@ -242,38 +241,23 @@ static size_t copy_in_user_pt(size_t n, void __user *to, do { write_user = 0; uaddr = uaddr_from; - pte_from = follow_table(mm, uaddr_from); - error_code = (unsigned long) pte_from; - if (error_code < 0x1000) - goto fault; - if (!pte_present(*pte_from)) { - error_code = 0x11; + kaddr_from = follow_table(mm, uaddr_from, 0); + error_code = kaddr_from; + if (IS_ERR_VALUE(error_code)) goto fault; - } write_user = 1; uaddr = uaddr_to; - pte_to = follow_table(mm, uaddr_to); - error_code = (unsigned long) pte_to; - if (error_code < 0x1000) - goto fault; - if (!pte_present(*pte_to)) { - error_code = 0x11; + kaddr_to = follow_table(mm, uaddr_to, 1); + error_code = (unsigned long) kaddr_to; + if (IS_ERR_VALUE(error_code)) goto fault; - } else if (!pte_write(*pte_to)) { - error_code = 0x04; - goto fault; - } - pfn_from = pte_pfn(*pte_from); - pfn_to = pte_pfn(*pte_to); - offset_from = 
uaddr_from & (PAGE_SIZE-1); - offset_to = uaddr_from & (PAGE_SIZE-1); - offset_max = max(offset_from, offset_to); + offset_max = max(uaddr_from & ~PAGE_MASK, + uaddr_to & ~PAGE_MASK); size = min(n - done, PAGE_SIZE - offset_max); - memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to, - (void *)(pfn_from << PAGE_SHIFT) + offset_from, size); + memcpy((void *) kaddr_to, (void *) kaddr_from, size); done += size; uaddr_from += size; uaddr_to += size; @@ -282,7 +266,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to, return n - done; fault: spin_unlock(&mm->page_table_lock); - if (__handle_fault(uaddr, error_code, write_user)) + if (__handle_fault(uaddr, -error_code, write_user)) return n - done; goto retry; } @@ -341,7 +325,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) return __futex_atomic_op_pt(op, uaddr, oparg, old); spin_lock(¤t->mm->page_table_lock); uaddr = (u32 __force __user *) - __dat_user_addr((__force unsigned long) uaddr); + __dat_user_addr((__force unsigned long) uaddr, 1); if (!uaddr) { spin_unlock(¤t->mm->page_table_lock); return -EFAULT; @@ -378,7 +362,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); spin_lock(¤t->mm->page_table_lock); uaddr = (u32 __force __user *) - __dat_user_addr((__force unsigned long) uaddr); + __dat_user_addr((__force unsigned long) uaddr, 1); if (!uaddr) { spin_unlock(¤t->mm->page_table_lock); return -EFAULT; diff --git a/trunk/arch/s390/oprofile/init.c b/trunk/arch/s390/oprofile/init.c index a1e9d69a9c90..584b93674ea4 100644 --- a/trunk/arch/s390/oprofile/init.c +++ b/trunk/arch/s390/oprofile/init.c @@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf, if (*offset) return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val < oprofile_min_interval) oprofile_hw_interval = oprofile_min_interval; @@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0) return -EINVAL; @@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) @@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) @@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf, return -EINVAL; retval = oprofilefs_ulong_from_user(&val, buf, count); - if (retval) + if (retval <= 0) return retval; if (val != 0 && val != 1) diff --git a/trunk/arch/score/kernel/process.c b/trunk/arch/score/kernel/process.c index 2707023c7563..637970cfd3f4 100644 --- a/trunk/arch/score/kernel/process.c +++ b/trunk/arch/score/kernel/process.c @@ -27,6 +27,7 @@ #include #include #include +#include void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); @@ -50,9 +51,10 @@ void __noreturn cpu_idle(void) { /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) barrier(); - + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/trunk/arch/sh/drivers/dma/dma-sh.c 
b/trunk/arch/sh/drivers/dma/dma-sh.c index 4c171f13b0e8..b22565623142 100644 --- a/trunk/arch/sh/drivers/dma/dma-sh.c +++ b/trunk/arch/sh/drivers/dma/dma-sh.c @@ -335,7 +335,7 @@ static int dmae_irq_init(void) for (n = 0; n < NR_DMAE; n++) { int i = request_irq(get_dma_error_irq(n), dma_err, - IRQF_SHARED, dmae_name[n], NULL); + IRQF_SHARED, dmae_name[n], (void *)dmae_name[n]); if (unlikely(i < 0)) { printk(KERN_ERR "%s request_irq fail\n", dmae_name[n]); return i; diff --git a/trunk/arch/sh/include/asm/sections.h b/trunk/arch/sh/include/asm/sections.h index 4a5350037c8f..1b6199740e98 100644 --- a/trunk/arch/sh/include/asm/sections.h +++ b/trunk/arch/sh/include/asm/sections.h @@ -6,7 +6,6 @@ extern long __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; -extern char _ebss[]; extern char __start_eh_frame[], __stop_eh_frame[]; #endif /* __ASM_SH_SECTIONS_H */ diff --git a/trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h b/trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h index 48d14498e774..2a0ca8780f0d 100644 --- a/trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h +++ b/trunk/arch/sh/include/cpu-sh2a/cpu/sh7269.h @@ -183,18 +183,30 @@ enum { GPIO_FN_DV_DATA1, GPIO_FN_DV_DATA0, GPIO_FN_LCD_CLK, GPIO_FN_LCD_EXTCLK, GPIO_FN_LCD_VSYNC, GPIO_FN_LCD_HSYNC, GPIO_FN_LCD_DE, - GPIO_FN_LCD_DATA23, GPIO_FN_LCD_DATA22, - GPIO_FN_LCD_DATA21, GPIO_FN_LCD_DATA20, - GPIO_FN_LCD_DATA19, GPIO_FN_LCD_DATA18, - GPIO_FN_LCD_DATA17, GPIO_FN_LCD_DATA16, - GPIO_FN_LCD_DATA15, GPIO_FN_LCD_DATA14, - GPIO_FN_LCD_DATA13, GPIO_FN_LCD_DATA12, - GPIO_FN_LCD_DATA11, GPIO_FN_LCD_DATA10, - GPIO_FN_LCD_DATA9, GPIO_FN_LCD_DATA8, - GPIO_FN_LCD_DATA7, GPIO_FN_LCD_DATA6, - GPIO_FN_LCD_DATA5, GPIO_FN_LCD_DATA4, - GPIO_FN_LCD_DATA3, GPIO_FN_LCD_DATA2, - GPIO_FN_LCD_DATA1, GPIO_FN_LCD_DATA0, + GPIO_FN_LCD_DATA23_PG23, GPIO_FN_LCD_DATA22_PG22, + GPIO_FN_LCD_DATA21_PG21, GPIO_FN_LCD_DATA20_PG20, + GPIO_FN_LCD_DATA19_PG19, GPIO_FN_LCD_DATA18_PG18, + GPIO_FN_LCD_DATA17_PG17, GPIO_FN_LCD_DATA16_PG16, + GPIO_FN_LCD_DATA15_PG15, GPIO_FN_LCD_DATA14_PG14, + GPIO_FN_LCD_DATA13_PG13, GPIO_FN_LCD_DATA12_PG12, + GPIO_FN_LCD_DATA11_PG11, GPIO_FN_LCD_DATA10_PG10, + GPIO_FN_LCD_DATA9_PG9, GPIO_FN_LCD_DATA8_PG8, + GPIO_FN_LCD_DATA7_PG7, GPIO_FN_LCD_DATA6_PG6, + GPIO_FN_LCD_DATA5_PG5, GPIO_FN_LCD_DATA4_PG4, + GPIO_FN_LCD_DATA3_PG3, GPIO_FN_LCD_DATA2_PG2, + GPIO_FN_LCD_DATA1_PG1, GPIO_FN_LCD_DATA0_PG0, + GPIO_FN_LCD_DATA23_PJ23, GPIO_FN_LCD_DATA22_PJ22, + GPIO_FN_LCD_DATA21_PJ21, GPIO_FN_LCD_DATA20_PJ20, + GPIO_FN_LCD_DATA19_PJ19, GPIO_FN_LCD_DATA18_PJ18, + GPIO_FN_LCD_DATA17_PJ17, GPIO_FN_LCD_DATA16_PJ16, + GPIO_FN_LCD_DATA15_PJ15, GPIO_FN_LCD_DATA14_PJ14, + GPIO_FN_LCD_DATA13_PJ13, GPIO_FN_LCD_DATA12_PJ12, + GPIO_FN_LCD_DATA11_PJ11, GPIO_FN_LCD_DATA10_PJ10, + GPIO_FN_LCD_DATA9_PJ9, GPIO_FN_LCD_DATA8_PJ8, + GPIO_FN_LCD_DATA7_PJ7, GPIO_FN_LCD_DATA6_PJ6, + GPIO_FN_LCD_DATA5_PJ5, GPIO_FN_LCD_DATA4_PJ4, + GPIO_FN_LCD_DATA3_PJ3, GPIO_FN_LCD_DATA2_PJ2, + GPIO_FN_LCD_DATA1_PJ1, GPIO_FN_LCD_DATA0_PJ0, GPIO_FN_LCD_M_DISP, }; diff --git a/trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c b/trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c index f25127c46eca..039e4587dd9b 100644 --- a/trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c +++ b/trunk/arch/sh/kernel/cpu/sh2a/pinmux-sh7269.c @@ -758,12 +758,22 @@ enum { DV_DATA3_MARK, DV_DATA2_MARK, DV_DATA1_MARK, DV_DATA0_MARK, LCD_CLK_MARK, LCD_EXTCLK_MARK, LCD_VSYNC_MARK, LCD_HSYNC_MARK, LCD_DE_MARK, - LCD_DATA23_MARK, LCD_DATA22_MARK, LCD_DATA21_MARK, LCD_DATA20_MARK, - 
LCD_DATA19_MARK, LCD_DATA18_MARK, LCD_DATA17_MARK, LCD_DATA16_MARK, - LCD_DATA15_MARK, LCD_DATA14_MARK, LCD_DATA13_MARK, LCD_DATA12_MARK, - LCD_DATA11_MARK, LCD_DATA10_MARK, LCD_DATA9_MARK, LCD_DATA8_MARK, - LCD_DATA7_MARK, LCD_DATA6_MARK, LCD_DATA5_MARK, LCD_DATA4_MARK, - LCD_DATA3_MARK, LCD_DATA2_MARK, LCD_DATA1_MARK, LCD_DATA0_MARK, + LCD_DATA23_PG23_MARK, LCD_DATA22_PG22_MARK, LCD_DATA21_PG21_MARK, + LCD_DATA20_PG20_MARK, LCD_DATA19_PG19_MARK, LCD_DATA18_PG18_MARK, + LCD_DATA17_PG17_MARK, LCD_DATA16_PG16_MARK, LCD_DATA15_PG15_MARK, + LCD_DATA14_PG14_MARK, LCD_DATA13_PG13_MARK, LCD_DATA12_PG12_MARK, + LCD_DATA11_PG11_MARK, LCD_DATA10_PG10_MARK, LCD_DATA9_PG9_MARK, + LCD_DATA8_PG8_MARK, LCD_DATA7_PG7_MARK, LCD_DATA6_PG6_MARK, + LCD_DATA5_PG5_MARK, LCD_DATA4_PG4_MARK, LCD_DATA3_PG3_MARK, + LCD_DATA2_PG2_MARK, LCD_DATA1_PG1_MARK, LCD_DATA0_PG0_MARK, + LCD_DATA23_PJ23_MARK, LCD_DATA22_PJ22_MARK, LCD_DATA21_PJ21_MARK, + LCD_DATA20_PJ20_MARK, LCD_DATA19_PJ19_MARK, LCD_DATA18_PJ18_MARK, + LCD_DATA17_PJ17_MARK, LCD_DATA16_PJ16_MARK, LCD_DATA15_PJ15_MARK, + LCD_DATA14_PJ14_MARK, LCD_DATA13_PJ13_MARK, LCD_DATA12_PJ12_MARK, + LCD_DATA11_PJ11_MARK, LCD_DATA10_PJ10_MARK, LCD_DATA9_PJ9_MARK, + LCD_DATA8_PJ8_MARK, LCD_DATA7_PJ7_MARK, LCD_DATA6_PJ6_MARK, + LCD_DATA5_PJ5_MARK, LCD_DATA4_PJ4_MARK, LCD_DATA3_PJ3_MARK, + LCD_DATA2_PJ2_MARK, LCD_DATA1_PJ1_MARK, LCD_DATA0_PJ0_MARK, LCD_TCON6_MARK, LCD_TCON5_MARK, LCD_TCON4_MARK, LCD_TCON3_MARK, LCD_TCON2_MARK, LCD_TCON1_MARK, LCD_TCON0_MARK, LCD_M_DISP_MARK, @@ -1036,6 +1046,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PF1_DATA, PF1MD_000), PINMUX_DATA(BACK_MARK, PF1MD_001), + PINMUX_DATA(SSL10_MARK, PF1MD_011), PINMUX_DATA(TIOC4B_MARK, PF1MD_100), PINMUX_DATA(DACK0_MARK, PF1MD_101), @@ -1049,47 +1060,50 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PG27_DATA, PG27MD_00), PINMUX_DATA(LCD_TCON2_MARK, PG27MD_10), PINMUX_DATA(LCD_EXTCLK_MARK, PG27MD_11), + PINMUX_DATA(LCD_DE_MARK, PG27MD_11), PINMUX_DATA(PG26_DATA, PG26MD_00), PINMUX_DATA(LCD_TCON1_MARK, PG26MD_10), + PINMUX_DATA(LCD_HSYNC_MARK, PG26MD_10), PINMUX_DATA(PG25_DATA, PG25MD_00), PINMUX_DATA(LCD_TCON0_MARK, PG25MD_10), + PINMUX_DATA(LCD_VSYNC_MARK, PG25MD_10), PINMUX_DATA(PG24_DATA, PG24MD_00), PINMUX_DATA(LCD_CLK_MARK, PG24MD_10), PINMUX_DATA(PG23_DATA, PG23MD_000), - PINMUX_DATA(LCD_DATA23_MARK, PG23MD_010), + PINMUX_DATA(LCD_DATA23_PG23_MARK, PG23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PG23MD_011), PINMUX_DATA(TXD5_MARK, PG23MD_100), PINMUX_DATA(PG22_DATA, PG22MD_000), - PINMUX_DATA(LCD_DATA22_MARK, PG22MD_010), + PINMUX_DATA(LCD_DATA22_PG22_MARK, PG22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PG22MD_011), PINMUX_DATA(RXD5_MARK, PG22MD_100), PINMUX_DATA(PG21_DATA, PG21MD_000), PINMUX_DATA(DV_DATA7_MARK, PG21MD_001), - PINMUX_DATA(LCD_DATA21_MARK, PG21MD_010), + PINMUX_DATA(LCD_DATA21_PG21_MARK, PG21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PG21MD_011), PINMUX_DATA(TXD4_MARK, PG21MD_100), PINMUX_DATA(PG20_DATA, PG20MD_000), PINMUX_DATA(DV_DATA6_MARK, PG20MD_001), - PINMUX_DATA(LCD_DATA20_MARK, PG21MD_010), + PINMUX_DATA(LCD_DATA20_PG20_MARK, PG21MD_010), PINMUX_DATA(LCD_TCON3_MARK, PG20MD_011), PINMUX_DATA(RXD4_MARK, PG20MD_100), PINMUX_DATA(PG19_DATA, PG19MD_000), PINMUX_DATA(DV_DATA5_MARK, PG19MD_001), - PINMUX_DATA(LCD_DATA19_MARK, PG19MD_010), + PINMUX_DATA(LCD_DATA19_PG19_MARK, PG19MD_010), PINMUX_DATA(SPDIF_OUT_MARK, PG19MD_011), PINMUX_DATA(SCK5_MARK, PG19MD_100), PINMUX_DATA(PG18_DATA, PG18MD_000), PINMUX_DATA(DV_DATA4_MARK, PG18MD_001), - PINMUX_DATA(LCD_DATA18_MARK, PG18MD_010), + 
PINMUX_DATA(LCD_DATA18_PG18_MARK, PG18MD_010), PINMUX_DATA(SPDIF_IN_MARK, PG18MD_011), PINMUX_DATA(SCK4_MARK, PG18MD_100), @@ -1097,103 +1111,103 @@ static pinmux_enum_t pinmux_data[] = { // we're going with 2 bits PINMUX_DATA(PG17_DATA, PG17MD_00), PINMUX_DATA(WE3ICIOWRAHDQMUU_MARK, PG17MD_01), - PINMUX_DATA(LCD_DATA17_MARK, PG17MD_10), + PINMUX_DATA(LCD_DATA17_PG17_MARK, PG17MD_10), // TODO hardware manual has PG16 3 bits wide in reg picture and 2 bits in description // we're going with 2 bits PINMUX_DATA(PG16_DATA, PG16MD_00), PINMUX_DATA(WE2ICIORDDQMUL_MARK, PG16MD_01), - PINMUX_DATA(LCD_DATA16_MARK, PG16MD_10), + PINMUX_DATA(LCD_DATA16_PG16_MARK, PG16MD_10), PINMUX_DATA(PG15_DATA, PG15MD_00), PINMUX_DATA(D31_MARK, PG15MD_01), - PINMUX_DATA(LCD_DATA15_MARK, PG15MD_10), + PINMUX_DATA(LCD_DATA15_PG15_MARK, PG15MD_10), PINMUX_DATA(PINT7_PG_MARK, PG15MD_11), PINMUX_DATA(PG14_DATA, PG14MD_00), PINMUX_DATA(D30_MARK, PG14MD_01), - PINMUX_DATA(LCD_DATA14_MARK, PG14MD_10), + PINMUX_DATA(LCD_DATA14_PG14_MARK, PG14MD_10), PINMUX_DATA(PINT6_PG_MARK, PG14MD_11), PINMUX_DATA(PG13_DATA, PG13MD_00), PINMUX_DATA(D29_MARK, PG13MD_01), - PINMUX_DATA(LCD_DATA13_MARK, PG13MD_10), + PINMUX_DATA(LCD_DATA13_PG13_MARK, PG13MD_10), PINMUX_DATA(PINT5_PG_MARK, PG13MD_11), PINMUX_DATA(PG12_DATA, PG12MD_00), PINMUX_DATA(D28_MARK, PG12MD_01), - PINMUX_DATA(LCD_DATA12_MARK, PG12MD_10), + PINMUX_DATA(LCD_DATA12_PG12_MARK, PG12MD_10), PINMUX_DATA(PINT4_PG_MARK, PG12MD_11), PINMUX_DATA(PG11_DATA, PG11MD_000), PINMUX_DATA(D27_MARK, PG11MD_001), - PINMUX_DATA(LCD_DATA11_MARK, PG11MD_010), + PINMUX_DATA(LCD_DATA11_PG11_MARK, PG11MD_010), PINMUX_DATA(PINT3_PG_MARK, PG11MD_011), PINMUX_DATA(TIOC3D_MARK, PG11MD_100), PINMUX_DATA(PG10_DATA, PG10MD_000), PINMUX_DATA(D26_MARK, PG10MD_001), - PINMUX_DATA(LCD_DATA10_MARK, PG10MD_010), + PINMUX_DATA(LCD_DATA10_PG10_MARK, PG10MD_010), PINMUX_DATA(PINT2_PG_MARK, PG10MD_011), PINMUX_DATA(TIOC3C_MARK, PG10MD_100), PINMUX_DATA(PG9_DATA, PG9MD_000), PINMUX_DATA(D25_MARK, PG9MD_001), - PINMUX_DATA(LCD_DATA9_MARK, PG9MD_010), + PINMUX_DATA(LCD_DATA9_PG9_MARK, PG9MD_010), PINMUX_DATA(PINT1_PG_MARK, PG9MD_011), PINMUX_DATA(TIOC3B_MARK, PG9MD_100), PINMUX_DATA(PG8_DATA, PG8MD_000), PINMUX_DATA(D24_MARK, PG8MD_001), - PINMUX_DATA(LCD_DATA8_MARK, PG8MD_010), + PINMUX_DATA(LCD_DATA8_PG8_MARK, PG8MD_010), PINMUX_DATA(PINT0_PG_MARK, PG8MD_011), PINMUX_DATA(TIOC3A_MARK, PG8MD_100), PINMUX_DATA(PG7_DATA, PG7MD_000), PINMUX_DATA(D23_MARK, PG7MD_001), - PINMUX_DATA(LCD_DATA7_MARK, PG7MD_010), + PINMUX_DATA(LCD_DATA7_PG7_MARK, PG7MD_010), PINMUX_DATA(IRQ7_PG_MARK, PG7MD_011), PINMUX_DATA(TIOC2B_MARK, PG7MD_100), PINMUX_DATA(PG6_DATA, PG6MD_000), PINMUX_DATA(D22_MARK, PG6MD_001), - PINMUX_DATA(LCD_DATA6_MARK, PG6MD_010), + PINMUX_DATA(LCD_DATA6_PG6_MARK, PG6MD_010), PINMUX_DATA(IRQ6_PG_MARK, PG6MD_011), PINMUX_DATA(TIOC2A_MARK, PG6MD_100), PINMUX_DATA(PG5_DATA, PG5MD_000), PINMUX_DATA(D21_MARK, PG5MD_001), - PINMUX_DATA(LCD_DATA5_MARK, PG5MD_010), + PINMUX_DATA(LCD_DATA5_PG5_MARK, PG5MD_010), PINMUX_DATA(IRQ5_PG_MARK, PG5MD_011), PINMUX_DATA(TIOC1B_MARK, PG5MD_100), PINMUX_DATA(PG4_DATA, PG4MD_000), PINMUX_DATA(D20_MARK, PG4MD_001), - PINMUX_DATA(LCD_DATA4_MARK, PG4MD_010), + PINMUX_DATA(LCD_DATA4_PG4_MARK, PG4MD_010), PINMUX_DATA(IRQ4_PG_MARK, PG4MD_011), PINMUX_DATA(TIOC1A_MARK, PG4MD_100), PINMUX_DATA(PG3_DATA, PG3MD_000), PINMUX_DATA(D19_MARK, PG3MD_001), - PINMUX_DATA(LCD_DATA3_MARK, PG3MD_010), + PINMUX_DATA(LCD_DATA3_PG3_MARK, PG3MD_010), PINMUX_DATA(IRQ3_PG_MARK, PG3MD_011), 
PINMUX_DATA(TIOC0D_MARK, PG3MD_100), PINMUX_DATA(PG2_DATA, PG2MD_000), PINMUX_DATA(D18_MARK, PG2MD_001), - PINMUX_DATA(LCD_DATA2_MARK, PG2MD_010), + PINMUX_DATA(LCD_DATA2_PG2_MARK, PG2MD_010), PINMUX_DATA(IRQ2_PG_MARK, PG2MD_011), PINMUX_DATA(TIOC0C_MARK, PG2MD_100), PINMUX_DATA(PG1_DATA, PG1MD_000), PINMUX_DATA(D17_MARK, PG1MD_001), - PINMUX_DATA(LCD_DATA1_MARK, PG1MD_010), + PINMUX_DATA(LCD_DATA1_PG1_MARK, PG1MD_010), PINMUX_DATA(IRQ1_PG_MARK, PG1MD_011), PINMUX_DATA(TIOC0B_MARK, PG1MD_100), PINMUX_DATA(PG0_DATA, PG0MD_000), PINMUX_DATA(D16_MARK, PG0MD_001), - PINMUX_DATA(LCD_DATA0_MARK, PG0MD_010), + PINMUX_DATA(LCD_DATA0_PG0_MARK, PG0MD_010), PINMUX_DATA(IRQ0_PG_MARK, PG0MD_011), PINMUX_DATA(TIOC0A_MARK, PG0MD_100), @@ -1275,14 +1289,14 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ23_DATA, PJ23MD_000), PINMUX_DATA(DV_DATA23_MARK, PJ23MD_001), - PINMUX_DATA(LCD_DATA23_MARK, PJ23MD_010), + PINMUX_DATA(LCD_DATA23_PJ23_MARK, PJ23MD_010), PINMUX_DATA(LCD_TCON6_MARK, PJ23MD_011), PINMUX_DATA(IRQ3_PJ_MARK, PJ23MD_100), PINMUX_DATA(CTX1_MARK, PJ23MD_101), PINMUX_DATA(PJ22_DATA, PJ22MD_000), PINMUX_DATA(DV_DATA22_MARK, PJ22MD_001), - PINMUX_DATA(LCD_DATA22_MARK, PJ22MD_010), + PINMUX_DATA(LCD_DATA22_PJ22_MARK, PJ22MD_010), PINMUX_DATA(LCD_TCON5_MARK, PJ22MD_011), PINMUX_DATA(IRQ2_PJ_MARK, PJ22MD_100), PINMUX_DATA(CRX1_MARK, PJ22MD_101), @@ -1290,14 +1304,14 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ21_DATA, PJ21MD_000), PINMUX_DATA(DV_DATA21_MARK, PJ21MD_001), - PINMUX_DATA(LCD_DATA21_MARK, PJ21MD_010), + PINMUX_DATA(LCD_DATA21_PJ21_MARK, PJ21MD_010), PINMUX_DATA(LCD_TCON4_MARK, PJ21MD_011), PINMUX_DATA(IRQ1_PJ_MARK, PJ21MD_100), PINMUX_DATA(CTX2_MARK, PJ21MD_101), PINMUX_DATA(PJ20_DATA, PJ20MD_000), PINMUX_DATA(DV_DATA20_MARK, PJ20MD_001), - PINMUX_DATA(LCD_DATA20_MARK, PJ20MD_010), + PINMUX_DATA(LCD_DATA20_PJ20_MARK, PJ20MD_010), PINMUX_DATA(LCD_TCON3_MARK, PJ20MD_011), PINMUX_DATA(IRQ0_PJ_MARK, PJ20MD_100), PINMUX_DATA(CRX2_MARK, PJ20MD_101), @@ -1305,7 +1319,7 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ19_DATA, PJ19MD_000), PINMUX_DATA(DV_DATA19_MARK, PJ19MD_001), - PINMUX_DATA(LCD_DATA19_MARK, PJ19MD_010), + PINMUX_DATA(LCD_DATA19_PJ19_MARK, PJ19MD_010), PINMUX_DATA(MISO0_PJ19_MARK, PJ19MD_011), PINMUX_DATA(TIOC0D_MARK, PJ19MD_100), PINMUX_DATA(SIOFRXD_MARK, PJ19MD_101), @@ -1313,126 +1327,126 @@ static pinmux_enum_t pinmux_data[] = { PINMUX_DATA(PJ18_DATA, PJ18MD_000), PINMUX_DATA(DV_DATA18_MARK, PJ18MD_001), - PINMUX_DATA(LCD_DATA18_MARK, PJ18MD_010), + PINMUX_DATA(LCD_DATA18_PJ18_MARK, PJ18MD_010), PINMUX_DATA(MOSI0_PJ18_MARK, PJ18MD_011), PINMUX_DATA(TIOC0C_MARK, PJ18MD_100), PINMUX_DATA(SIOFTXD_MARK, PJ18MD_101), PINMUX_DATA(PJ17_DATA, PJ17MD_000), PINMUX_DATA(DV_DATA17_MARK, PJ17MD_001), - PINMUX_DATA(LCD_DATA17_MARK, PJ17MD_010), + PINMUX_DATA(LCD_DATA17_PJ17_MARK, PJ17MD_010), PINMUX_DATA(SSL00_PJ17_MARK, PJ17MD_011), PINMUX_DATA(TIOC0B_MARK, PJ17MD_100), PINMUX_DATA(SIOFSYNC_MARK, PJ17MD_101), PINMUX_DATA(PJ16_DATA, PJ16MD_000), PINMUX_DATA(DV_DATA16_MARK, PJ16MD_001), - PINMUX_DATA(LCD_DATA16_MARK, PJ16MD_010), + PINMUX_DATA(LCD_DATA16_PJ16_MARK, PJ16MD_010), PINMUX_DATA(RSPCK0_PJ16_MARK, PJ16MD_011), PINMUX_DATA(TIOC0A_MARK, PJ16MD_100), PINMUX_DATA(SIOFSCK_MARK, PJ16MD_101), PINMUX_DATA(PJ15_DATA, PJ15MD_000), PINMUX_DATA(DV_DATA15_MARK, PJ15MD_001), - PINMUX_DATA(LCD_DATA15_MARK, PJ15MD_010), + PINMUX_DATA(LCD_DATA15_PJ15_MARK, PJ15MD_010), PINMUX_DATA(PINT7_PJ_MARK, PJ15MD_011), PINMUX_DATA(PWM2H_MARK, PJ15MD_100), PINMUX_DATA(TXD7_MARK, 
PJ15MD_101), PINMUX_DATA(PJ14_DATA, PJ14MD_000), PINMUX_DATA(DV_DATA14_MARK, PJ14MD_001), - PINMUX_DATA(LCD_DATA14_MARK, PJ14MD_010), + PINMUX_DATA(LCD_DATA14_PJ14_MARK, PJ14MD_010), PINMUX_DATA(PINT6_PJ_MARK, PJ14MD_011), PINMUX_DATA(PWM2G_MARK, PJ14MD_100), PINMUX_DATA(TXD6_MARK, PJ14MD_101), PINMUX_DATA(PJ13_DATA, PJ13MD_000), PINMUX_DATA(DV_DATA13_MARK, PJ13MD_001), - PINMUX_DATA(LCD_DATA13_MARK, PJ13MD_010), + PINMUX_DATA(LCD_DATA13_PJ13_MARK, PJ13MD_010), PINMUX_DATA(PINT5_PJ_MARK, PJ13MD_011), PINMUX_DATA(PWM2F_MARK, PJ13MD_100), PINMUX_DATA(TXD5_MARK, PJ13MD_101), PINMUX_DATA(PJ12_DATA, PJ12MD_000), PINMUX_DATA(DV_DATA12_MARK, PJ12MD_001), - PINMUX_DATA(LCD_DATA12_MARK, PJ12MD_010), + PINMUX_DATA(LCD_DATA12_PJ12_MARK, PJ12MD_010), PINMUX_DATA(PINT4_PJ_MARK, PJ12MD_011), PINMUX_DATA(PWM2E_MARK, PJ12MD_100), PINMUX_DATA(SCK7_MARK, PJ12MD_101), PINMUX_DATA(PJ11_DATA, PJ11MD_000), PINMUX_DATA(DV_DATA11_MARK, PJ11MD_001), - PINMUX_DATA(LCD_DATA11_MARK, PJ11MD_010), + PINMUX_DATA(LCD_DATA11_PJ11_MARK, PJ11MD_010), PINMUX_DATA(PINT3_PJ_MARK, PJ11MD_011), PINMUX_DATA(PWM2D_MARK, PJ11MD_100), PINMUX_DATA(SCK6_MARK, PJ11MD_101), PINMUX_DATA(PJ10_DATA, PJ10MD_000), PINMUX_DATA(DV_DATA10_MARK, PJ10MD_001), - PINMUX_DATA(LCD_DATA10_MARK, PJ10MD_010), + PINMUX_DATA(LCD_DATA10_PJ10_MARK, PJ10MD_010), PINMUX_DATA(PINT2_PJ_MARK, PJ10MD_011), PINMUX_DATA(PWM2C_MARK, PJ10MD_100), PINMUX_DATA(SCK5_MARK, PJ10MD_101), PINMUX_DATA(PJ9_DATA, PJ9MD_000), PINMUX_DATA(DV_DATA9_MARK, PJ9MD_001), - PINMUX_DATA(LCD_DATA9_MARK, PJ9MD_010), + PINMUX_DATA(LCD_DATA9_PJ9_MARK, PJ9MD_010), PINMUX_DATA(PINT1_PJ_MARK, PJ9MD_011), PINMUX_DATA(PWM2B_MARK, PJ9MD_100), PINMUX_DATA(RTS5_MARK, PJ9MD_101), PINMUX_DATA(PJ8_DATA, PJ8MD_000), PINMUX_DATA(DV_DATA8_MARK, PJ8MD_001), - PINMUX_DATA(LCD_DATA8_MARK, PJ8MD_010), + PINMUX_DATA(LCD_DATA8_PJ8_MARK, PJ8MD_010), PINMUX_DATA(PINT0_PJ_MARK, PJ8MD_011), PINMUX_DATA(PWM2A_MARK, PJ8MD_100), PINMUX_DATA(CTS5_MARK, PJ8MD_101), PINMUX_DATA(PJ7_DATA, PJ7MD_000), PINMUX_DATA(DV_DATA7_MARK, PJ7MD_001), - PINMUX_DATA(LCD_DATA7_MARK, PJ7MD_010), + PINMUX_DATA(LCD_DATA7_PJ7_MARK, PJ7MD_010), PINMUX_DATA(SD_D2_MARK, PJ7MD_011), PINMUX_DATA(PWM1H_MARK, PJ7MD_100), PINMUX_DATA(PJ6_DATA, PJ6MD_000), PINMUX_DATA(DV_DATA6_MARK, PJ6MD_001), - PINMUX_DATA(LCD_DATA6_MARK, PJ6MD_010), + PINMUX_DATA(LCD_DATA6_PJ6_MARK, PJ6MD_010), PINMUX_DATA(SD_D3_MARK, PJ6MD_011), PINMUX_DATA(PWM1G_MARK, PJ6MD_100), PINMUX_DATA(PJ5_DATA, PJ5MD_000), PINMUX_DATA(DV_DATA5_MARK, PJ5MD_001), - PINMUX_DATA(LCD_DATA5_MARK, PJ5MD_010), + PINMUX_DATA(LCD_DATA5_PJ5_MARK, PJ5MD_010), PINMUX_DATA(SD_CMD_MARK, PJ5MD_011), PINMUX_DATA(PWM1F_MARK, PJ5MD_100), PINMUX_DATA(PJ4_DATA, PJ4MD_000), PINMUX_DATA(DV_DATA4_MARK, PJ4MD_001), - PINMUX_DATA(LCD_DATA4_MARK, PJ4MD_010), + PINMUX_DATA(LCD_DATA4_PJ4_MARK, PJ4MD_010), PINMUX_DATA(SD_CLK_MARK, PJ4MD_011), PINMUX_DATA(PWM1E_MARK, PJ4MD_100), PINMUX_DATA(PJ3_DATA, PJ3MD_000), PINMUX_DATA(DV_DATA3_MARK, PJ3MD_001), - PINMUX_DATA(LCD_DATA3_MARK, PJ3MD_010), + PINMUX_DATA(LCD_DATA3_PJ3_MARK, PJ3MD_010), PINMUX_DATA(SD_D0_MARK, PJ3MD_011), PINMUX_DATA(PWM1D_MARK, PJ3MD_100), PINMUX_DATA(PJ2_DATA, PJ2MD_000), PINMUX_DATA(DV_DATA2_MARK, PJ2MD_001), - PINMUX_DATA(LCD_DATA2_MARK, PJ2MD_010), + PINMUX_DATA(LCD_DATA2_PJ2_MARK, PJ2MD_010), PINMUX_DATA(SD_D1_MARK, PJ2MD_011), PINMUX_DATA(PWM1C_MARK, PJ2MD_100), PINMUX_DATA(PJ1_DATA, PJ1MD_000), PINMUX_DATA(DV_DATA1_MARK, PJ1MD_001), - PINMUX_DATA(LCD_DATA1_MARK, PJ1MD_010), + PINMUX_DATA(LCD_DATA1_PJ1_MARK, PJ1MD_010), PINMUX_DATA(SD_WP_MARK, 
PJ1MD_011), PINMUX_DATA(PWM1B_MARK, PJ1MD_100), PINMUX_DATA(PJ0_DATA, PJ0MD_000), PINMUX_DATA(DV_DATA0_MARK, PJ0MD_001), - PINMUX_DATA(LCD_DATA0_MARK, PJ0MD_010), + PINMUX_DATA(LCD_DATA0_PJ0_MARK, PJ0MD_010), PINMUX_DATA(SD_CD_MARK, PJ0MD_011), PINMUX_DATA(PWM1A_MARK, PJ0MD_100), }; @@ -1877,30 +1891,55 @@ static struct pinmux_gpio pinmux_gpios[] = { PINMUX_GPIO(GPIO_FN_LCD_HSYNC, LCD_HSYNC_MARK), PINMUX_GPIO(GPIO_FN_LCD_DE, LCD_DE_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA23, LCD_DATA23_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA22, LCD_DATA22_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA21, LCD_DATA21_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA20, LCD_DATA20_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA19, LCD_DATA19_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA18, LCD_DATA18_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA17, LCD_DATA17_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA16, LCD_DATA16_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA15, LCD_DATA15_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA14, LCD_DATA14_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA13, LCD_DATA13_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA12, LCD_DATA12_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA11, LCD_DATA11_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA10, LCD_DATA10_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA9, LCD_DATA9_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA8, LCD_DATA8_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA7, LCD_DATA7_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA6, LCD_DATA6_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA5, LCD_DATA5_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA4, LCD_DATA4_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA3, LCD_DATA3_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA2, LCD_DATA2_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA1, LCD_DATA1_MARK), - PINMUX_GPIO(GPIO_FN_LCD_DATA0, LCD_DATA0_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA23_PG23, LCD_DATA23_PG23_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA22_PG22, LCD_DATA22_PG22_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA21_PG21, LCD_DATA21_PG21_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA20_PG20, LCD_DATA20_PG20_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA19_PG19, LCD_DATA19_PG19_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA18_PG18, LCD_DATA18_PG18_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA17_PG17, LCD_DATA17_PG17_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA16_PG16, LCD_DATA16_PG16_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA15_PG15, LCD_DATA15_PG15_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA14_PG14, LCD_DATA14_PG14_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA13_PG13, LCD_DATA13_PG13_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA12_PG12, LCD_DATA12_PG12_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA11_PG11, LCD_DATA11_PG11_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA10_PG10, LCD_DATA10_PG10_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA9_PG9, LCD_DATA9_PG9_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA8_PG8, LCD_DATA8_PG8_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA7_PG7, LCD_DATA7_PG7_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA6_PG6, LCD_DATA6_PG6_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA5_PG5, LCD_DATA5_PG5_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA4_PG4, LCD_DATA4_PG4_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA3_PG3, LCD_DATA3_PG3_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA2_PG2, LCD_DATA2_PG2_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA1_PG1, LCD_DATA1_PG1_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA0_PG0, LCD_DATA0_PG0_MARK), + + PINMUX_GPIO(GPIO_FN_LCD_DATA23_PJ23, LCD_DATA23_PJ23_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA22_PJ22, LCD_DATA22_PJ22_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA21_PJ21, LCD_DATA21_PJ21_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA20_PJ20, LCD_DATA20_PJ20_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA19_PJ19, LCD_DATA19_PJ19_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA18_PJ18, LCD_DATA18_PJ18_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA17_PJ17, LCD_DATA17_PJ17_MARK), 
+ PINMUX_GPIO(GPIO_FN_LCD_DATA16_PJ16, LCD_DATA16_PJ16_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA15_PJ15, LCD_DATA15_PJ15_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA14_PJ14, LCD_DATA14_PJ14_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA13_PJ13, LCD_DATA13_PJ13_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA12_PJ12, LCD_DATA12_PJ12_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA11_PJ11, LCD_DATA11_PJ11_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA10_PJ10, LCD_DATA10_PJ10_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA9_PJ9, LCD_DATA9_PJ9_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA8_PJ8, LCD_DATA8_PJ8_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA7_PJ7, LCD_DATA7_PJ7_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA6_PJ6, LCD_DATA6_PJ6_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA5_PJ5, LCD_DATA5_PJ5_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA4_PJ4, LCD_DATA4_PJ4_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA3_PJ3, LCD_DATA3_PJ3_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA2_PJ2, LCD_DATA2_PJ2_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA1_PJ1, LCD_DATA1_PJ1_MARK), + PINMUX_GPIO(GPIO_FN_LCD_DATA0_PJ0, LCD_DATA0_PJ0_MARK), PINMUX_GPIO(GPIO_FN_LCD_M_DISP, LCD_M_DISP_MARK), }; diff --git a/trunk/arch/sh/kernel/cpu/sh5/entry.S b/trunk/arch/sh/kernel/cpu/sh5/entry.S index b7cf6a547f11..7e605b95592a 100644 --- a/trunk/arch/sh/kernel/cpu/sh5/entry.S +++ b/trunk/arch/sh/kernel/cpu/sh5/entry.S @@ -933,7 +933,7 @@ ret_with_reschedule: pta restore_all, tr1 - movi _TIF_SIGPENDING, r8 + movi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), r8 and r8, r7, r8 pta work_notifysig, tr0 bne r8, ZERO, tr0 diff --git a/trunk/arch/sh/kernel/entry-common.S b/trunk/arch/sh/kernel/entry-common.S index f67601cb3f1f..b96489d8b27d 100644 --- a/trunk/arch/sh/kernel/entry-common.S +++ b/trunk/arch/sh/kernel/entry-common.S @@ -139,7 +139,7 @@ work_pending: ! r8: current_thread_info ! t: result of "tst #_TIF_NEED_RESCHED, r0" bf/s work_resched - tst #_TIF_SIGPENDING, r0 + tst #(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME), r0 work_notifysig: bt/s __restore_all mov r15, r4 diff --git a/trunk/arch/sh/kernel/setup.c b/trunk/arch/sh/kernel/setup.c index 7b57bf1dc855..ebe7a7d97215 100644 --- a/trunk/arch/sh/kernel/setup.c +++ b/trunk/arch/sh/kernel/setup.c @@ -273,7 +273,7 @@ void __init setup_arch(char **cmdline_p) data_resource.start = virt_to_phys(_etext); data_resource.end = virt_to_phys(_edata)-1; bss_resource.start = virt_to_phys(__bss_start); - bss_resource.end = virt_to_phys(_ebss)-1; + bss_resource.end = virt_to_phys(__bss_stop)-1; #ifdef CONFIG_CMDLINE_OVERWRITE strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line)); diff --git a/trunk/arch/sh/kernel/sh_ksyms_32.c b/trunk/arch/sh/kernel/sh_ksyms_32.c index 3896f26efa4a..2a0a596ebf67 100644 --- a/trunk/arch/sh/kernel/sh_ksyms_32.c +++ b/trunk/arch/sh/kernel/sh_ksyms_32.c @@ -19,7 +19,6 @@ EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(_ebss); EXPORT_SYMBOL(empty_zero_page); #define DECLARE_EXPORT(name) \ diff --git a/trunk/arch/sh/kernel/vmlinux.lds.S b/trunk/arch/sh/kernel/vmlinux.lds.S index c98905f71e28..db88cbf9eafd 100644 --- a/trunk/arch/sh/kernel/vmlinux.lds.S +++ b/trunk/arch/sh/kernel/vmlinux.lds.S @@ -78,7 +78,6 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_end = .; BSS_SECTION(0, PAGE_SIZE, 4) - _ebss = .; /* uClinux MTD sucks */ _end = . ; STABS_DEBUG diff --git a/trunk/arch/sh/lib/mcount.S b/trunk/arch/sh/lib/mcount.S index 84a57761f17e..60164e65d665 100644 --- a/trunk/arch/sh/lib/mcount.S +++ b/trunk/arch/sh/lib/mcount.S @@ -39,7 +39,7 @@ * * Make sure the stack pointer contains a valid address. 
Valid * addresses for kernel stacks are anywhere after the bss - * (after _ebss) and anywhere in init_thread_union (init_stack). + * (after __bss_stop) and anywhere in init_thread_union (init_stack). */ #define STACK_CHECK() \ mov #(THREAD_SIZE >> 10), r0; \ @@ -60,7 +60,7 @@ cmp/hi r2, r1; \ bf stack_panic; \ \ - /* If sp > _ebss then we're OK. */ \ + /* If sp > __bss_stop then we're OK. */ \ mov.l .L_ebss, r1; \ cmp/hi r1, r15; \ bt 1f; \ @@ -70,7 +70,7 @@ cmp/hs r1, r15; \ bf stack_panic; \ \ - /* If sp > init_stack && sp < _ebss, not OK. */ \ + /* If sp > init_stack && sp < __bss_stop, not OK. */ \ add r0, r1; \ cmp/hs r1, r15; \ bt stack_panic; \ @@ -292,8 +292,6 @@ stack_panic: nop .align 2 -.L_ebss: - .long _ebss .L_init_thread_union: .long init_thread_union .Lpanic: diff --git a/trunk/arch/sparc/kernel/module.c b/trunk/arch/sparc/kernel/module.c index 15e0a1693976..f1ddc0d23679 100644 --- a/trunk/arch/sparc/kernel/module.c +++ b/trunk/arch/sparc/kernel/module.c @@ -48,9 +48,7 @@ void *module_alloc(unsigned long size) return NULL; ret = module_map(size); - if (!ret) - ret = ERR_PTR(-ENOMEM); - else + if (ret) memset(ret, 0, size); return ret; @@ -116,6 +114,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, v = sym->st_value + rel[i].r_addend; switch (ELF_R_TYPE(rel[i].r_info) & 0xff) { + case R_SPARC_DISP32: + v -= (Elf_Addr) location; + *loc32 = v; + break; #ifdef CONFIG_SPARC64 case R_SPARC_64: location[0] = v >> 56; @@ -128,11 +130,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, location[7] = v >> 0; break; - case R_SPARC_DISP32: - v -= (Elf_Addr) location; - *loc32 = v; - break; - case R_SPARC_WDISP19: v -= (Elf_Addr) location; *loc32 = (*loc32 & ~0x7ffff) | diff --git a/trunk/arch/sparc/kernel/sys_sparc_64.c b/trunk/arch/sparc/kernel/sys_sparc_64.c index 0dc1f5786081..11c6c9603e71 100644 --- a/trunk/arch/sparc/kernel/sys_sparc_64.c +++ b/trunk/arch/sparc/kernel/sys_sparc_64.c @@ -502,12 +502,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) { int ret; - if (current->personality == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/trunk/arch/sparc/mm/init_64.c b/trunk/arch/sparc/mm/init_64.c index 6026fdd1b2ed..d58edf5fefdb 100644 --- a/trunk/arch/sparc/mm/init_64.c +++ b/trunk/arch/sparc/mm/init_64.c @@ -2020,6 +2020,9 @@ EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_table[VMEMMAP_SIZE]; +static long __meminitdata addr_start, addr_end; +static int __meminitdata node_start; + int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) { unsigned long vstart = (unsigned long) start; @@ -2050,15 +2053,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) *vmem_pp = pte_base | __pa(block); - printk(KERN_INFO "[%p-%p] page_structs=%lu " - "node=%d entry=%lu/%lu\n", start, block, nr, - node, - addr >> VMEMMAP_CHUNK_SHIFT, - VMEMMAP_SIZE); + /* check to see if we have contiguous blocks */ + if (addr_end != addr || node_start != node) { + if (addr_start) + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = addr; + node_start = node; + } + addr_end = addr + VMEMMAP_CHUNK; } } return 0; } + +void __meminit 
vmemmap_populate_print_last(void) +{ + if (addr_start) { + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = 0; + addr_end = 0; + node_start = 0; + } +} #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, diff --git a/trunk/arch/tile/include/asm/topology.h b/trunk/arch/tile/include/asm/topology.h index 7a7ce390534f..d5e86c9f74fd 100644 --- a/trunk/arch/tile/include/asm/topology.h +++ b/trunk/arch/tile/include/asm/topology.h @@ -69,7 +69,6 @@ static inline const struct cpumask *cpumask_of_node(int node) | 1*SD_BALANCE_FORK \ | 0*SD_BALANCE_WAKE \ | 0*SD_WAKE_AFFINE \ - | 0*SD_PREFER_LOCAL \ | 0*SD_SHARE_CPUPOWER \ | 0*SD_SHARE_PKG_RESOURCES \ | 0*SD_SERIALIZE \ diff --git a/trunk/arch/tile/include/gxio/iorpc_trio.h b/trunk/arch/tile/include/gxio/iorpc_trio.h index 15fb77992083..58105c31228b 100644 --- a/trunk/arch/tile/include/gxio/iorpc_trio.h +++ b/trunk/arch/tile/include/gxio/iorpc_trio.h @@ -25,21 +25,23 @@ #include #include -#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400) +#define GXIO_TRIO_OP_DEALLOC_ASID IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400) +#define GXIO_TRIO_OP_ALLOC_ASIDS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1401) -#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402) +#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404) -#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e) -#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f) +#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412) -#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417) -#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418) -#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419) -#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a) +#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414) -#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c) -#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d) -#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e) +#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e) +#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141f) +#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1420) +#define GXIO_TRIO_OP_CONFIG_MSI_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1421) + +#define GXIO_TRIO_OP_SET_MPS_MRS IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1423) +#define GXIO_TRIO_OP_FORCE_RC_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1424) +#define GXIO_TRIO_OP_FORCE_EP_LINK_UP IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1425) #define GXIO_TRIO_OP_GET_MMIO_BASE IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000) #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001) diff --git a/trunk/arch/um/drivers/mconsole_kern.c b/trunk/arch/um/drivers/mconsole_kern.c index 664a60e8dfb4..c17de0db6736 100644 --- a/trunk/arch/um/drivers/mconsole_kern.c +++ b/trunk/arch/um/drivers/mconsole_kern.c @@ -705,6 +705,7 @@ static void stack_proc(void *arg) struct task_struct *from = current, *to = arg; to->thread.saved_task = from; + rcu_switch(from, to); switch_to(from, to, from); } diff --git 
a/trunk/arch/um/include/asm/processor-generic.h b/trunk/arch/um/include/asm/processor-generic.h index 69f1c57a8d0d..33a6a2423bd2 100644 --- a/trunk/arch/um/include/asm/processor-generic.h +++ b/trunk/arch/um/include/asm/processor-generic.h @@ -20,14 +20,6 @@ struct mm_struct; struct thread_struct { struct task_struct *saved_task; - /* - * This flag is set to 1 before calling do_fork (and analyzed in - * copy_thread) to mark that we are begin called from userspace (fork / - * vfork / clone), and reset to 0 after. It is left to 0 when called - * from kernelspace (i.e. kernel_thread() or fork_idle(), - * as of 2.6.11). - */ - int forking; struct pt_regs regs; int singlestep_syscall; void *fault_addr; @@ -58,7 +50,6 @@ struct thread_struct { #define INIT_THREAD \ { \ - .forking = 0, \ .regs = EMPTY_REGS, \ .fault_addr = NULL, \ .prev_sched = NULL, \ diff --git a/trunk/arch/um/include/shared/common-offsets.h b/trunk/arch/um/include/shared/common-offsets.h index 40db8f71deae..2df313b6a586 100644 --- a/trunk/arch/um/include/shared/common-offsets.h +++ b/trunk/arch/um/include/shared/common-offsets.h @@ -7,16 +7,6 @@ DEFINE(UM_KERN_PAGE_MASK, PAGE_MASK); DEFINE(UM_KERN_PAGE_SHIFT, PAGE_SHIFT); DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC); -DEFINE_STR(UM_KERN_EMERG, KERN_EMERG); -DEFINE_STR(UM_KERN_ALERT, KERN_ALERT); -DEFINE_STR(UM_KERN_CRIT, KERN_CRIT); -DEFINE_STR(UM_KERN_ERR, KERN_ERR); -DEFINE_STR(UM_KERN_WARNING, KERN_WARNING); -DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE); -DEFINE_STR(UM_KERN_INFO, KERN_INFO); -DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG); -DEFINE_STR(UM_KERN_CONT, KERN_CONT); - DEFINE(UM_ELF_CLASS, ELF_CLASS); DEFINE(UM_ELFCLASS32, ELFCLASS32); DEFINE(UM_ELFCLASS64, ELFCLASS64); diff --git a/trunk/arch/um/include/shared/user.h b/trunk/arch/um/include/shared/user.h index 4fa82c055aab..cef068563336 100644 --- a/trunk/arch/um/include/shared/user.h +++ b/trunk/arch/um/include/shared/user.h @@ -26,6 +26,17 @@ extern void panic(const char *fmt, ...) __attribute__ ((format (printf, 1, 2))); +/* Requires preincluding include/linux/kern_levels.h */ +#define UM_KERN_EMERG KERN_EMERG +#define UM_KERN_ALERT KERN_ALERT +#define UM_KERN_CRIT KERN_CRIT +#define UM_KERN_ERR KERN_ERR +#define UM_KERN_WARNING KERN_WARNING +#define UM_KERN_NOTICE KERN_NOTICE +#define UM_KERN_INFO KERN_INFO +#define UM_KERN_DEBUG KERN_DEBUG +#define UM_KERN_CONT KERN_CONT + #ifdef UML_CONFIG_PRINTK extern int printk(const char *fmt, ...) 
__attribute__ ((format (printf, 1, 2))); diff --git a/trunk/arch/um/kernel/exec.c b/trunk/arch/um/kernel/exec.c index 6cade9366364..8c82786da823 100644 --- a/trunk/arch/um/kernel/exec.c +++ b/trunk/arch/um/kernel/exec.c @@ -39,34 +39,21 @@ void flush_thread(void) void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp) { + get_safe_registers(regs->regs.gp, regs->regs.fp); PT_REGS_IP(regs) = eip; PT_REGS_SP(regs) = esp; -} -EXPORT_SYMBOL(start_thread); - -static long execve1(const char *file, - const char __user *const __user *argv, - const char __user *const __user *env) -{ - long error; - - error = do_execve(file, argv, env, ¤t->thread.regs); - if (error == 0) { - task_lock(current); - current->ptrace &= ~PT_DTRACE; + current->ptrace &= ~PT_DTRACE; #ifdef SUBARCH_EXECVE1 - SUBARCH_EXECVE1(¤t->thread.regs.regs); + SUBARCH_EXECVE1(regs->regs); #endif - task_unlock(current); - } - return error; } +EXPORT_SYMBOL(start_thread); long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env) { long err; - err = execve1(file, argv, env); + err = do_execve(file, argv, env, ¤t->thread.regs); if (!err) UML_LONGJMP(current->thread.exec_buf, 1); return err; @@ -81,7 +68,7 @@ long sys_execve(const char __user *file, const char __user *const __user *argv, filename = getname(file); error = PTR_ERR(filename); if (IS_ERR(filename)) goto out; - error = execve1(filename, argv, env); + error = do_execve(filename, argv, env, ¤t->thread.regs); putname(filename); out: return error; diff --git a/trunk/arch/um/kernel/process.c b/trunk/arch/um/kernel/process.c index 57fc7028714a..c5f5afa50745 100644 --- a/trunk/arch/um/kernel/process.c +++ b/trunk/arch/um/kernel/process.c @@ -181,11 +181,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, struct pt_regs *regs) { void (*handler)(void); + int kthread = current->flags & PF_KTHREAD; int ret = 0; p->thread = (struct thread_struct) INIT_THREAD; - if (current->thread.forking) { + if (!kthread) { memcpy(&p->thread.regs.regs, ®s->regs, sizeof(p->thread.regs.regs)); PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0); @@ -195,8 +196,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, handler = fork_handler; arch_copy_thread(¤t->thread.arch, &p->thread.arch); - } - else { + } else { get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp); p->thread.request.u.thread = current->thread.request.u.thread; handler = new_thread_handler; @@ -204,7 +204,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, new_thread(task_stack_page(p), &p->thread.switch_buf, handler); - if (current->thread.forking) { + if (!kthread) { clear_flushed_tls(p); /* diff --git a/trunk/arch/um/kernel/signal.c b/trunk/arch/um/kernel/signal.c index 7362d58efc29..cc9c2350e417 100644 --- a/trunk/arch/um/kernel/signal.c +++ b/trunk/arch/um/kernel/signal.c @@ -22,9 +22,13 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr, struct k_sigaction *ka, siginfo_t *info) { sigset_t *oldset = sigmask_to_save(); + int singlestep = 0; unsigned long sp; int err; + if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) + singlestep = 1; + /* Did we come from a system call? */ if (PT_REGS_SYSCALL_NR(regs) >= 0) { /* If so, check system call restarting.. 
*/ @@ -61,7 +65,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr, if (err) force_sigsegv(signr, current); else - signal_delivered(signr, info, ka, regs, 0); + signal_delivered(signr, info, ka, regs, singlestep); } static int kern_do_signal(struct pt_regs *regs) diff --git a/trunk/arch/um/kernel/syscall.c b/trunk/arch/um/kernel/syscall.c index f958cb876ee3..a4c6d8eee74c 100644 --- a/trunk/arch/um/kernel/syscall.c +++ b/trunk/arch/um/kernel/syscall.c @@ -17,25 +17,25 @@ long sys_fork(void) { - long ret; - - current->thread.forking = 1; - ret = do_fork(SIGCHLD, UPT_SP(¤t->thread.regs.regs), + return do_fork(SIGCHLD, UPT_SP(¤t->thread.regs.regs), ¤t->thread.regs, 0, NULL, NULL); - current->thread.forking = 0; - return ret; } long sys_vfork(void) { - long ret; - - current->thread.forking = 1; - ret = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, + return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, UPT_SP(¤t->thread.regs.regs), ¤t->thread.regs, 0, NULL, NULL); - current->thread.forking = 0; - return ret; +} + +long sys_clone(unsigned long clone_flags, unsigned long newsp, + void __user *parent_tid, void __user *child_tid) +{ + if (!newsp) + newsp = UPT_SP(¤t->thread.regs.regs); + + return do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, + child_tid); } long old_mmap(unsigned long addr, unsigned long len, diff --git a/trunk/arch/um/os-Linux/time.c b/trunk/arch/um/os-Linux/time.c index f60238559af3..0748fe0c8a73 100644 --- a/trunk/arch/um/os-Linux/time.c +++ b/trunk/arch/um/os-Linux/time.c @@ -114,7 +114,7 @@ static void deliver_alarm(void) skew += this_tick - last_tick; while (skew >= one_tick) { - alarm_handler(SIGVTALRM, NULL); + alarm_handler(SIGVTALRM, NULL, NULL); skew -= one_tick; } diff --git a/trunk/arch/um/scripts/Makefile.rules b/trunk/arch/um/scripts/Makefile.rules index d50270d26b42..15889df9b466 100644 --- a/trunk/arch/um/scripts/Makefile.rules +++ b/trunk/arch/um/scripts/Makefile.rules @@ -8,7 +8,7 @@ USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m) $(USER_SINGLE_OBJS)) USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file)) $(USER_OBJS:.o=.%): \ - c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include user.h $(CFLAGS_$(basetarget).o) + c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include $(srctree)/include/linux/kern_levels.h -include user.h $(CFLAGS_$(basetarget).o) # These are like USER_OBJS but filter USER_CFLAGS through unprofile instead of # using it directly. 
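Aside on the deliver_alarm() change just above: alarm_handler() is now invoked with three arguments (the signal number plus two pointers), which is the shape of a siginfo-style handler. Below is a minimal, self-contained userspace sketch of that handler shape; the sigaction()/setitimer() scaffolding and the tick counter are illustrative additions and not code from this patch.

#include <signal.h>
#include <string.h>
#include <sys/time.h>

static volatile sig_atomic_t ticks;

/* Three-argument (SA_SIGINFO) form: signal number, siginfo, ucontext. */
static void alarm_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig; (void)info; (void)ucontext;
	ticks++;			/* async-signal-safe work only */
}

int main(void)
{
	struct sigaction sa;
	struct itimerval it = { { 0, 10000 }, { 0, 10000 } };

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = alarm_handler;	/* note sa_sigaction, not sa_handler */
	sa.sa_flags = SA_SIGINFO;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGVTALRM, &sa, NULL);

	/* SIGVTALRM fires as virtual (CPU) time is consumed. */
	setitimer(ITIMER_VIRTUAL, &it, NULL);
	while (ticks < 5)
		;			/* spin so virtual time advances */
	return 0;
}

The exact type of the UML handler's third parameter is not visible in this hunk, so the sketch only illustrates the three-argument calling shape the new call site assumes.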
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index ba2657c49217..6cd6f24e1223 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -7,11 +7,13 @@ config 64BIT Say no to build a 32-bit kernel - formerly known as i386 config X86_32 - def_bool !64BIT + def_bool y + depends on !64BIT select CLKSRC_I8253 config X86_64 - def_bool 64BIT + def_bool y + depends on 64BIT select X86_DEV_DMA_OPS ### Arch settings @@ -36,6 +38,7 @@ config X86 select HAVE_KRETPROBES select HAVE_OPTPROBES select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FENTRY if X86_64 select HAVE_C_RECORDMCOUNT select HAVE_DYNAMIC_FTRACE select HAVE_FUNCTION_TRACER @@ -60,6 +63,8 @@ config X86 select HAVE_MIXED_BREAKPOINTS_REGS select PERF_EVENTS select HAVE_PERF_EVENTS_NMI + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP select ANON_INODES select HAVE_ALIGNED_STRUCT_PAGE if SLUB && !M386 select HAVE_CMPXCHG_LOCAL if !M386 @@ -97,9 +102,12 @@ config X86 select KTIME_SCALAR if X86_32 select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER + select HAVE_RCU_USER_QS if X86_64 + select HAVE_IRQ_TIME_ACCOUNTING config INSTRUCTION_DECODER - def_bool (KPROBES || PERF_EVENTS || UPROBES) + def_bool y + depends on KPROBES || PERF_EVENTS || UPROBES config OUTPUT_FORMAT string @@ -127,13 +135,15 @@ config SBUS bool config NEED_DMA_MAP_STATE - def_bool (X86_64 || INTEL_IOMMU || DMA_API_DEBUG) + def_bool y + depends on X86_64 || INTEL_IOMMU || DMA_API_DEBUG config NEED_SG_DMA_LENGTH def_bool y config GENERIC_ISA_DMA - def_bool ISA_DMA_API + def_bool y + depends on ISA_DMA_API config GENERIC_BUG def_bool y @@ -150,13 +160,16 @@ config GENERIC_GPIO bool config ARCH_MAY_HAVE_PC_FDC - def_bool ISA_DMA_API + def_bool y + depends on ISA_DMA_API config RWSEM_GENERIC_SPINLOCK - def_bool !X86_XADD + def_bool y + depends on !X86_XADD config RWSEM_XCHGADD_ALGORITHM - def_bool X86_XADD + def_bool y + depends on X86_XADD config GENERIC_CALIBRATE_DELAY def_bool y @@ -746,13 +759,14 @@ config SWIOTLB def_bool y if X86_64 ---help--- Support for software bounce buffers used on x86-64 systems - which don't have a hardware IOMMU (e.g. the current generation - of Intel's x86-64 CPUs). Using this PCI devices which can only - access 32-bits of memory can be used on systems with more than - 3 GB of memory. If unsure, say Y. + which don't have a hardware IOMMU. Using this PCI devices + which can only access 32-bits of memory can be used on systems + with more than 3 GB of memory. + If unsure, say Y. config IOMMU_HELPER - def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU) + def_bool y + depends on CALGARY_IOMMU || GART_IOMMU || SWIOTLB || AMD_IOMMU config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" @@ -796,17 +810,6 @@ config SCHED_MC making when dealing with multi-core CPU chips at a cost of slightly increased overhead in some places. If unsure say N here. -config IRQ_TIME_ACCOUNTING - bool "Fine granularity task level IRQ time accounting" - default n - ---help--- - Select this option to enable fine granularity task irq time - accounting. This is done by reading a timestamp on each - transitions between softirq and hardirq state, so there can be a - small performance impact. - - If in doubt, say N here. - source "kernel/Kconfig.preempt" config X86_UP_APIC @@ -871,6 +874,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS config X86_MCE bool "Machine Check / overheating reporting" + default y ---help--- Machine Check support allows the processor to notify the kernel if it detects a problem (e.g. 
overheating, data corruption). @@ -1159,10 +1163,12 @@ config X86_PAE consumes more pagetable space per process. config ARCH_PHYS_ADDR_T_64BIT - def_bool X86_64 || X86_PAE + def_bool y + depends on X86_64 || X86_PAE config ARCH_DMA_ADDR_T_64BIT - def_bool X86_64 || HIGHMEM64G + def_bool y + depends on X86_64 || HIGHMEM64G config DIRECT_GBPAGES bool "Enable 1GB pages for kernel pagetables" if EXPERT @@ -1285,8 +1291,8 @@ config ARCH_SELECT_MEMORY_MODEL depends on ARCH_SPARSEMEM_ENABLE config ARCH_MEMORY_PROBE - def_bool X86_64 - depends on MEMORY_HOTPLUG + def_bool y + depends on X86_64 && MEMORY_HOTPLUG config ARCH_PROC_KCORE_TEXT def_bool y @@ -1527,7 +1533,7 @@ config SECCOMP If unsure, say Y. Only embedded should say N here. config CC_STACKPROTECTOR - bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)" + bool "Enable -fstack-protector buffer overflow detection" ---help--- This option turns on the -fstack-protector GCC feature. This feature puts, at the beginning of functions, a canary value on @@ -1975,7 +1981,6 @@ config PCI_MMCONFIG config PCI_CNB20LE_QUIRK bool "Read CNB20LE Host Bridge Windows" if EXPERT - default n depends on PCI && EXPERIMENTAL help Read the PCI windows out of the CNB20LE host bridge. This allows @@ -2186,18 +2191,18 @@ config COMPAT depends on IA32_EMULATION || X86_X32 select ARCH_WANT_OLD_COMPAT_IPC +if COMPAT config COMPAT_FOR_U64_ALIGNMENT - def_bool COMPAT - depends on X86_64 + def_bool y config SYSVIPC_COMPAT def_bool y - depends on COMPAT && SYSVIPC + depends on SYSVIPC config KEYS_COMPAT - bool - depends on COMPAT && KEYS - default y + def_bool y + depends on KEYS +endif endmenu diff --git a/trunk/arch/x86/Kconfig.cpu b/trunk/arch/x86/Kconfig.cpu index 706e12e9984b..f3b86d0df44e 100644 --- a/trunk/arch/x86/Kconfig.cpu +++ b/trunk/arch/x86/Kconfig.cpu @@ -306,7 +306,8 @@ config X86_INTERNODE_CACHE_SHIFT default X86_L1_CACHE_SHIFT config X86_CMPXCHG - def_bool X86_64 || (X86_32 && !M386) + def_bool y + depends on X86_64 || (X86_32 && !M386) config X86_L1_CACHE_SHIFT int @@ -317,7 +318,7 @@ config X86_L1_CACHE_SHIFT config X86_XADD def_bool y - depends on X86_64 || !M386 + depends on !M386 config X86_PPRO_FENCE bool "PentiumPro memory ordering errata workaround" diff --git a/trunk/arch/x86/Makefile b/trunk/arch/x86/Makefile index b0c5276861ec..474ca35b1bce 100644 --- a/trunk/arch/x86/Makefile +++ b/trunk/arch/x86/Makefile @@ -27,6 +27,10 @@ ifeq ($(CONFIG_X86_32),y) KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return + # Never want PIC in a 32-bit kernel, prevent breakage with GCC built + # with nonstandard options + KBUILD_CFLAGS += -fno-pic + # prevent gcc from keeping the stack 16 byte aligned KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=2) @@ -138,7 +142,7 @@ KBUILD_CFLAGS += $(call cc-option,-mno-avx,) KBUILD_CFLAGS += $(mflags-y) KBUILD_AFLAGS += $(mflags-y) -archscripts: +archscripts: scripts_basic $(Q)$(MAKE) $(build)=arch/x86/tools relocs ### diff --git a/trunk/arch/x86/boot/Makefile b/trunk/arch/x86/boot/Makefile index 5a747dd884db..f7535bedc33f 100644 --- a/trunk/arch/x86/boot/Makefile +++ b/trunk/arch/x86/boot/Makefile @@ -57,7 +57,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \ -Wall -Wstrict-prototypes \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/code16gcc.h \ - -fno-strict-aliasing -fomit-frame-pointer \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ $(call cc-option, 
-fno-unit-at-a-time)) \ diff --git a/trunk/arch/x86/boot/compressed/Makefile b/trunk/arch/x86/boot/compressed/Makefile index e398bb5d63bb..8a84501acb1b 100644 --- a/trunk/arch/x86/boot/compressed/Makefile +++ b/trunk/arch/x86/boot/compressed/Makefile @@ -28,6 +28,9 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ $(obj)/string.o $(obj)/cmdline.o $(obj)/early_serial_console.o \ $(obj)/piggy.o +$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone +$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone + ifeq ($(CONFIG_EFI_STUB), y) VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o endif diff --git a/trunk/arch/x86/boot/compressed/eboot.c b/trunk/arch/x86/boot/compressed/eboot.c index b3e0227df2c9..c760e073963e 100644 --- a/trunk/arch/x86/boot/compressed/eboot.c +++ b/trunk/arch/x86/boot/compressed/eboot.c @@ -276,8 +276,9 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, nr_gops = size / sizeof(void *); for (i = 0; i < nr_gops; i++) { struct efi_graphics_output_mode_info *info; - efi_guid_t pciio_proto = EFI_PCI_IO_PROTOCOL_GUID; - void *pciio; + efi_guid_t conout_proto = EFI_CONSOLE_OUT_DEVICE_GUID; + bool conout_found = false; + void *dummy; void *h = gop_handle[i]; status = efi_call_phys3(sys_table->boottime->handle_protocol, @@ -285,19 +286,21 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, if (status != EFI_SUCCESS) continue; - efi_call_phys3(sys_table->boottime->handle_protocol, - h, &pciio_proto, &pciio); + status = efi_call_phys3(sys_table->boottime->handle_protocol, + h, &conout_proto, &dummy); + + if (status == EFI_SUCCESS) + conout_found = true; status = efi_call_phys4(gop->query_mode, gop, gop->mode->mode, &size, &info); - if (status == EFI_SUCCESS && (!first_gop || pciio)) { + if (status == EFI_SUCCESS && (!first_gop || conout_found)) { /* - * Apple provide GOPs that are not backed by - * real hardware (they're used to handle - * multiple displays). The workaround is to - * search for a GOP implementing the PCIIO - * protocol, and if one isn't found, to just - * fallback to the first GOP. + * Systems that use the UEFI Console Splitter may + * provide multiple GOP devices, not all of which are + * backed by real hardware. The workaround is to search + * for a GOP implementing the ConOut protocol, and if + * one isn't found, to just fall back to the first GOP. */ width = info->horizontal_resolution; height = info->vertical_resolution; @@ -308,10 +311,10 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, pixels_per_scan_line = info->pixels_per_scan_line; /* - * Once we've found a GOP supporting PCIIO, + * Once we've found a GOP supporting ConOut, * don't bother looking any further. 
*/ - if (pciio) + if (conout_found) break; first_gop = gop; @@ -328,7 +331,6 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, si->lfb_width = width; si->lfb_height = height; si->lfb_base = fb_base; - si->lfb_size = fb_size; si->pages = 1; if (pixel_format == PIXEL_RGB_RESERVED_8BIT_PER_COLOR) { @@ -376,6 +378,10 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, si->rsvd_pos = 0; } + si->lfb_size = si->lfb_linelength * si->lfb_height; + + si->capabilities |= VIDEO_CAPABILITY_SKIP_QUIRKS; + free_handle: efi_call_phys1(sys_table->boottime->free_pool, gop_handle); return status; diff --git a/trunk/arch/x86/boot/compressed/eboot.h b/trunk/arch/x86/boot/compressed/eboot.h index 3b6e15627c55..e5b0a8f91c5f 100644 --- a/trunk/arch/x86/boot/compressed/eboot.h +++ b/trunk/arch/x86/boot/compressed/eboot.h @@ -14,6 +14,10 @@ #define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT) #define EFI_READ_CHUNK_SIZE (1024 * 1024) +#define EFI_CONSOLE_OUT_DEVICE_GUID \ + EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x0, 0x90, 0x27, \ + 0x3f, 0xc1, 0x4d) + #define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0 #define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1 #define PIXEL_BIT_MASK 2 diff --git a/trunk/arch/x86/boot/header.S b/trunk/arch/x86/boot/header.S index b4e15dd6786a..2a017441b8b2 100644 --- a/trunk/arch/x86/boot/header.S +++ b/trunk/arch/x86/boot/header.S @@ -32,10 +32,6 @@ SYSSEG = 0x1000 /* historical load address >> 4 */ #define SVGA_MODE ASK_VGA #endif -#ifndef RAMDISK -#define RAMDISK 0 -#endif - #ifndef ROOT_RDONLY #define ROOT_RDONLY 1 #endif diff --git a/trunk/arch/x86/configs/i386_defconfig b/trunk/arch/x86/configs/i386_defconfig index 119db67dcb03..5598547281a7 100644 --- a/trunk/arch/x86/configs/i386_defconfig +++ b/trunk/arch/x86/configs/i386_defconfig @@ -8,6 +8,8 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=18 CONFIG_CGROUPS=y CONFIG_CGROUP_FREEZER=y @@ -34,8 +36,6 @@ CONFIG_SGI_PARTITION=y CONFIG_SUN_PARTITION=y CONFIG_KARMA_PARTITION=y CONFIG_EFI_PARTITION=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_SMP=y CONFIG_X86_GENERIC=y CONFIG_HPET_TIMER=y @@ -144,8 +144,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEBUG_DEVRES=y CONFIG_CONNECTOR=y CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y @@ -231,8 +229,6 @@ CONFIG_SND_HRTIMER=y CONFIG_SND_HDA_INTEL=y CONFIG_SND_HDA_HWDEP=y CONFIG_HIDRAW=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y CONFIG_HID_GYRATION=y CONFIG_LOGITECH_FF=y CONFIG_HID_NTRIG=y @@ -243,11 +239,11 @@ CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_HID_TOPSEED=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_DEBUG=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_DEVICEFS=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_TT_NEWSCHED is not set @@ -262,10 +258,9 @@ CONFIG_RTC_CLASS=y CONFIG_DMADEVICES=y CONFIG_EEEPC_LAPTOP=y CONFIG_EFI_VARS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set @@ -280,7 +275,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y 
CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y @@ -299,13 +293,11 @@ CONFIG_DEBUG_KERNEL=y CONFIG_SCHEDSTATS=y CONFIG_TIMER_STATS=y CONFIG_DEBUG_STACK_USAGE=y -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_PROVIDE_OHCI1394_DMA_INIT=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_RODATA_TEST is not set -CONFIG_DEBUG_NX_TEST=m CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y CONFIG_KEYS_DEBUG_PROC_KEYS=y @@ -316,4 +308,3 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y CONFIG_CRYPTO_AES_586=y # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC_T10DIF=y diff --git a/trunk/arch/x86/configs/x86_64_defconfig b/trunk/arch/x86/configs/x86_64_defconfig index 76eb2903809f..671524d0f6c0 100644 --- a/trunk/arch/x86/configs/x86_64_defconfig +++ b/trunk/arch/x86/configs/x86_64_defconfig @@ -8,6 +8,8 @@ CONFIG_TASK_DELAY_ACCT=y CONFIG_TASK_XACCT=y CONFIG_TASK_IO_ACCOUNTING=y CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_HIGH_RES_TIMERS=y CONFIG_LOG_BUF_SHIFT=18 CONFIG_CGROUPS=y CONFIG_CGROUP_FREEZER=y @@ -34,8 +36,6 @@ CONFIG_SGI_PARTITION=y CONFIG_SUN_PARTITION=y CONFIG_KARMA_PARTITION=y CONFIG_EFI_PARTITION=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y CONFIG_SMP=y CONFIG_CALGARY_IOMMU=y CONFIG_NR_CPUS=64 @@ -144,8 +144,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_DEBUG_DEVRES=y CONFIG_CONNECTOR=y CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=16384 CONFIG_BLK_DEV_SD=y CONFIG_BLK_DEV_SR=y CONFIG_BLK_DEV_SR_VENDOR=y @@ -227,8 +225,6 @@ CONFIG_SND_HRTIMER=y CONFIG_SND_HDA_INTEL=y CONFIG_SND_HDA_HWDEP=y CONFIG_HIDRAW=y -CONFIG_HID_PID=y -CONFIG_USB_HIDDEV=y CONFIG_HID_GYRATION=y CONFIG_LOGITECH_FF=y CONFIG_HID_NTRIG=y @@ -239,11 +235,11 @@ CONFIG_HID_SAMSUNG=y CONFIG_HID_SONY=y CONFIG_HID_SUNPLUS=y CONFIG_HID_TOPSEED=y +CONFIG_HID_PID=y +CONFIG_USB_HIDDEV=y CONFIG_USB=y CONFIG_USB_DEBUG=y CONFIG_USB_ANNOUNCE_NEW_DEVICES=y -CONFIG_USB_DEVICEFS=y -# CONFIG_USB_DEVICE_CLASS is not set CONFIG_USB_MON=y CONFIG_USB_EHCI_HCD=y # CONFIG_USB_EHCI_TT_NEWSCHED is not set @@ -262,10 +258,9 @@ CONFIG_AMD_IOMMU_STATS=y CONFIG_INTEL_IOMMU=y # CONFIG_INTEL_IOMMU_DEFAULT_ON is not set CONFIG_EFI_VARS=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y +CONFIG_EXT4_FS=y +CONFIG_EXT4_FS_POSIX_ACL=y +CONFIG_EXT4_FS_SECURITY=y CONFIG_QUOTA=y CONFIG_QUOTA_NETLINK_INTERFACE=y # CONFIG_PRINT_QUOTA_WARNING is not set @@ -280,7 +275,6 @@ CONFIG_PROC_KCORE=y CONFIG_TMPFS_POSIX_ACL=y CONFIG_HUGETLBFS=y CONFIG_NFS_FS=y -CONFIG_NFS_V3=y CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y CONFIG_ROOT_NFS=y @@ -298,13 +292,11 @@ CONFIG_DEBUG_KERNEL=y CONFIG_SCHEDSTATS=y CONFIG_TIMER_STATS=y CONFIG_DEBUG_STACK_USAGE=y -CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_BLK_DEV_IO_TRACE=y CONFIG_PROVIDE_OHCI1394_DMA_INIT=y CONFIG_EARLY_PRINTK_DBGP=y CONFIG_DEBUG_STACKOVERFLOW=y # CONFIG_DEBUG_RODATA_TEST is not set -CONFIG_DEBUG_NX_TEST=m CONFIG_DEBUG_BOOT_PARAMS=y CONFIG_OPTIMIZE_INLINING=y CONFIG_KEYS_DEBUG_PROC_KEYS=y @@ -314,4 +306,3 @@ CONFIG_SECURITY_SELINUX=y CONFIG_SECURITY_SELINUX_BOOTPARAM=y CONFIG_SECURITY_SELINUX_DISABLE=y # CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_CRC_T10DIF=y diff --git a/trunk/arch/x86/ia32/ia32_signal.c b/trunk/arch/x86/ia32/ia32_signal.c index 673ac9b63d6b..8c77c64fbd27 100644 --- a/trunk/arch/x86/ia32/ia32_signal.c +++ b/trunk/arch/x86/ia32/ia32_signal.c @@ -162,7 +162,8 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr, } seg = get_fs(); set_fs(KERNEL_DS); - ret = 
do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp); + ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL), + (stack_t __force __user *) &uoss, regs->sp); set_fs(seg); if (ret >= 0 && uoss_ptr) { if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t))) @@ -250,7 +251,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, get_user_ex(tmp, &sc->fpstate); buf = compat_ptr(tmp); - err |= restore_i387_xstate_ia32(buf); + err |= restore_xstate_sig(buf, 1); get_user_ex(*pax, &sc->ax); } get_user_catch(err); @@ -361,7 +362,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc, */ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, - void **fpstate) + void __user **fpstate) { unsigned long sp; @@ -381,9 +382,12 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, sp = (unsigned long) ka->sa.sa_restorer; if (used_math()) { - sp = sp - sig_xstate_ia32_size; - *fpstate = (struct _fpstate_ia32 *) sp; - if (save_i387_xstate_ia32(*fpstate) < 0) + unsigned long fx_aligned, math_size; + + sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size); + *fpstate = (struct _fpstate_ia32 __user *) sp; + if (save_xstate_sig(*fpstate, (void __user *)fx_aligned, + math_size) < 0) return (void __user *) -1L; } @@ -448,7 +452,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka, * These are actually not used anymore, but left because some * gdb versions depend on them as a marker. */ - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); + put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); } put_user_catch(err); if (err) @@ -529,7 +533,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, * Not actually used anymore, but left because some gdb * versions need it. 
*/ - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode); + put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode); } put_user_catch(err); if (err) diff --git a/trunk/arch/x86/ia32/sys_ia32.c b/trunk/arch/x86/ia32/sys_ia32.c index 4540bece0946..c5b938d92eab 100644 --- a/trunk/arch/x86/ia32/sys_ia32.c +++ b/trunk/arch/x86/ia32/sys_ia32.c @@ -287,7 +287,7 @@ asmlinkage long sys32_sigaction(int sig, struct old_sigaction32 __user *act, return ret; } -asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int *stat_addr, +asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr, int options) { return compat_sys_wait4(pid, stat_addr, options, NULL); diff --git a/trunk/arch/x86/include/asm/alternative.h b/trunk/arch/x86/include/asm/alternative.h index 70780689599a..444704c8e186 100644 --- a/trunk/arch/x86/include/asm/alternative.h +++ b/trunk/arch/x86/include/asm/alternative.h @@ -60,7 +60,7 @@ extern void alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end); extern void alternatives_smp_module_del(struct module *mod); -extern void alternatives_smp_switch(int smp); +extern void alternatives_enable_smp(void); extern int alternatives_text_reserved(void *start, void *end); extern bool skip_smp_alternatives; #else @@ -68,7 +68,7 @@ static inline void alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end) {} static inline void alternatives_smp_module_del(struct module *mod) {} -static inline void alternatives_smp_switch(int smp) {} +static inline void alternatives_enable_smp(void) {} static inline int alternatives_text_reserved(void *start, void *end) { return 0; diff --git a/trunk/arch/x86/include/asm/bitops.h b/trunk/arch/x86/include/asm/bitops.h index 72f5009deb5a..6dfd0195bb55 100644 --- a/trunk/arch/x86/include/asm/bitops.h +++ b/trunk/arch/x86/include/asm/bitops.h @@ -355,7 +355,7 @@ static int test_bit(int nr, const volatile unsigned long *addr); */ static inline unsigned long __ffs(unsigned long word) { - asm("bsf %1,%0" + asm("rep; bsf %1,%0" : "=r" (word) : "rm" (word)); return word; @@ -369,7 +369,7 @@ static inline unsigned long __ffs(unsigned long word) */ static inline unsigned long ffz(unsigned long word) { - asm("bsf %1,%0" + asm("rep; bsf %1,%0" : "=r" (word) : "r" (~word)); return word; @@ -417,10 +417,9 @@ static inline int ffs(int x) * We cannot do this on 32 bits because at the very least some * 486 CPUs did not behave this way. */ - long tmp = -1; asm("bsfl %1,%0" : "=r" (r) - : "rm" (x), "0" (tmp)); + : "rm" (x), "0" (-1)); #elif defined(CONFIG_X86_CMOV) asm("bsfl %1,%0\n\t" "cmovzl %2,%0" @@ -459,10 +458,9 @@ static inline int fls(int x) * We cannot do this on 32 bits because at the very least some * 486 CPUs did not behave this way. */ - long tmp = -1; asm("bsrl %1,%0" : "=r" (r) - : "rm" (x), "0" (tmp)); + : "rm" (x), "0" (-1)); #elif defined(CONFIG_X86_CMOV) asm("bsrl %1,%0\n\t" "cmovzl %2,%0" @@ -490,13 +488,13 @@ static inline int fls(int x) #ifdef CONFIG_X86_64 static __always_inline int fls64(__u64 x) { - long bitpos = -1; + int bitpos = -1; /* * AMD64 says BSRQ won't clobber the dest reg if x==0; Intel64 says the * dest reg is undefined if x==0, but their CPU architect says its * value is written to set it to the same as before. 
*/ - asm("bsrq %1,%0" + asm("bsrq %1,%q0" : "+r" (bitpos) : "rm" (x)); return bitpos + 1; diff --git a/trunk/arch/x86/include/asm/calling.h b/trunk/arch/x86/include/asm/calling.h index a9e3a740f697..7f8422a28a46 100644 --- a/trunk/arch/x86/include/asm/calling.h +++ b/trunk/arch/x86/include/asm/calling.h @@ -49,38 +49,36 @@ For 32-bit we have the following conventions - kernel is built with #include "dwarf2.h" /* - * 64-bit system call stack frame layout defines and helpers, for - * assembly code (note that the seemingly unnecessary parentheses - * are to prevent cpp from inserting spaces in expressions that get - * passed to macros): + * 64-bit system call stack frame layout defines and helpers, + * for assembly code: */ -#define R15 (0) -#define R14 (8) -#define R13 (16) -#define R12 (24) -#define RBP (32) -#define RBX (40) +#define R15 0 +#define R14 8 +#define R13 16 +#define R12 24 +#define RBP 32 +#define RBX 40 /* arguments: interrupts/non tracing syscalls only save up to here: */ -#define R11 (48) -#define R10 (56) -#define R9 (64) -#define R8 (72) -#define RAX (80) -#define RCX (88) -#define RDX (96) -#define RSI (104) -#define RDI (112) -#define ORIG_RAX (120) /* + error_code */ +#define R11 48 +#define R10 56 +#define R9 64 +#define R8 72 +#define RAX 80 +#define RCX 88 +#define RDX 96 +#define RSI 104 +#define RDI 112 +#define ORIG_RAX 120 /* + error_code */ /* end of arguments */ /* cpu exception frame or undefined in case of fast syscall: */ -#define RIP (128) -#define CS (136) -#define EFLAGS (144) -#define RSP (152) -#define SS (160) +#define RIP 128 +#define CS 136 +#define EFLAGS 144 +#define RSP 152 +#define SS 160 #define ARGOFFSET R11 #define SWFRAME ORIG_RAX diff --git a/trunk/arch/x86/include/asm/cpufeature.h b/trunk/arch/x86/include/asm/cpufeature.h index 6b7ee5ff6820..16cae425d1f8 100644 --- a/trunk/arch/x86/include/asm/cpufeature.h +++ b/trunk/arch/x86/include/asm/cpufeature.h @@ -97,6 +97,7 @@ #define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ #define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ #define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ +#define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */ /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ @@ -209,6 +210,7 @@ #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ +#define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ #if defined(__KERNEL__) && !defined(__ASSEMBLY__) @@ -299,12 +301,14 @@ extern const char * const x86_power_flags[32]; #define cpu_has_xmm4_2 boot_cpu_has(X86_FEATURE_XMM4_2) #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) +#define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) #define cpu_has_perfctr_core boot_cpu_has(X86_FEATURE_PERFCTR_CORE) #define cpu_has_cx8 boot_cpu_has(X86_FEATURE_CX8) #define cpu_has_cx16 boot_cpu_has(X86_FEATURE_CX16) +#define cpu_has_eager_fpu boot_cpu_has(X86_FEATURE_EAGER_FPU) #if defined(CONFIG_X86_INVLPG) || defined(CONFIG_X86_64) # define cpu_has_invlpg 1 diff --git 
a/trunk/arch/x86/include/asm/fpu-internal.h b/trunk/arch/x86/include/asm/fpu-internal.h index 75f4c6d6a331..92f3c6ed817f 100644 --- a/trunk/arch/x86/include/asm/fpu-internal.h +++ b/trunk/arch/x86/include/asm/fpu-internal.h @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -21,42 +22,74 @@ #include #include -extern unsigned int sig_xstate_size; +#ifdef CONFIG_X86_64 +# include +# include +int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + compat_sigset_t *set, struct pt_regs *regs); +int ia32_setup_frame(int sig, struct k_sigaction *ka, + compat_sigset_t *set, struct pt_regs *regs); +#else +# define user_i387_ia32_struct user_i387_struct +# define user32_fxsr_struct user_fxsr_struct +# define ia32_setup_frame __setup_frame +# define ia32_setup_rt_frame __setup_rt_frame +#endif + +extern unsigned int mxcsr_feature_mask; extern void fpu_init(void); +extern void eager_fpu_init(void); DECLARE_PER_CPU(struct task_struct *, fpu_owner_task); +extern void convert_from_fxsr(struct user_i387_ia32_struct *env, + struct task_struct *tsk); +extern void convert_to_fxsr(struct task_struct *tsk, + const struct user_i387_ia32_struct *env); + extern user_regset_active_fn fpregs_active, xfpregs_active; extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get, xstateregs_get; extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set, xstateregs_set; - /* * xstateregs_active == fpregs_active. Please refer to the comment * at the definition of fpregs_active. */ #define xstateregs_active fpregs_active -extern struct _fpx_sw_bytes fx_sw_reserved; -#ifdef CONFIG_IA32_EMULATION -extern unsigned int sig_xstate_ia32_size; -extern struct _fpx_sw_bytes fx_sw_reserved_ia32; -struct _fpstate_ia32; -struct _xstate_ia32; -extern int save_i387_xstate_ia32(void __user *buf); -extern int restore_i387_xstate_ia32(void __user *buf); -#endif - #ifdef CONFIG_MATH_EMULATION +# define HAVE_HWFP (boot_cpu_data.hard_math) extern void finit_soft_fpu(struct i387_soft_struct *soft); #else +# define HAVE_HWFP 1 static inline void finit_soft_fpu(struct i387_soft_struct *soft) {} #endif +static inline int is_ia32_compat_frame(void) +{ + return config_enabled(CONFIG_IA32_EMULATION) && + test_thread_flag(TIF_IA32); +} + +static inline int is_ia32_frame(void) +{ + return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame(); +} + +static inline int is_x32_frame(void) +{ + return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32); +} + #define X87_FSW_ES (1 << 7) /* Exception Summary */ +static __always_inline __pure bool use_eager_fpu(void) +{ + return static_cpu_has(X86_FEATURE_EAGER_FPU); +} + static __always_inline __pure bool use_xsaveopt(void) { return static_cpu_has(X86_FEATURE_XSAVEOPT); @@ -72,6 +105,13 @@ static __always_inline __pure bool use_fxsr(void) return static_cpu_has(X86_FEATURE_FXSR); } +static inline void fx_finit(struct i387_fxsave_struct *fx) +{ + memset(fx, 0, xstate_size); + fx->cwd = 0x37f; + fx->mxcsr = MXCSR_DEFAULT; +} + extern void __sanitize_i387_state(struct task_struct *); static inline void sanitize_i387_state(struct task_struct *tsk) @@ -81,131 +121,88 @@ static inline void sanitize_i387_state(struct task_struct *tsk) __sanitize_i387_state(tsk); } -#ifdef CONFIG_X86_64 -static inline int fxrstor_checking(struct i387_fxsave_struct *fx) +#define check_insn(insn, output, input...) 
\ +({ \ + int err; \ + asm volatile("1:" #insn "\n\t" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: movl $-1,%[err]\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : [err] "=r" (err), output \ + : "0"(0), input); \ + err; \ +}) + +static inline int fsave_user(struct i387_fsave_struct __user *fx) { - int err; - - /* See comment in fxsave() below. */ -#ifdef CONFIG_AS_FXSAVEQ - asm volatile("1: fxrstorq %[fx]\n\t" - "2:\n" - ".section .fixup,\"ax\"\n" - "3: movl $-1,%[err]\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) - : [err] "=r" (err) - : [fx] "m" (*fx), "0" (0)); -#else - asm volatile("1: rex64/fxrstor (%[fx])\n\t" - "2:\n" - ".section .fixup,\"ax\"\n" - "3: movl $-1,%[err]\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) - : [err] "=r" (err) - : [fx] "R" (fx), "m" (*fx), "0" (0)); -#endif - return err; + return check_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx)); } static inline int fxsave_user(struct i387_fxsave_struct __user *fx) { - int err; + if (config_enabled(CONFIG_X86_32)) + return check_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx)); + else if (config_enabled(CONFIG_AS_FXSAVEQ)) + return check_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx)); - /* - * Clear the bytes not touched by the fxsave and reserved - * for the SW usage. - */ - err = __clear_user(&fx->sw_reserved, - sizeof(struct _fpx_sw_bytes)); - if (unlikely(err)) - return -EFAULT; - - /* See comment in fxsave() below. */ -#ifdef CONFIG_AS_FXSAVEQ - asm volatile("1: fxsaveq %[fx]\n\t" - "2:\n" - ".section .fixup,\"ax\"\n" - "3: movl $-1,%[err]\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) - : [err] "=r" (err), [fx] "=m" (*fx) - : "0" (0)); -#else - asm volatile("1: rex64/fxsave (%[fx])\n\t" - "2:\n" - ".section .fixup,\"ax\"\n" - "3: movl $-1,%[err]\n" - " jmp 2b\n" - ".previous\n" - _ASM_EXTABLE(1b, 3b) - : [err] "=r" (err), "=m" (*fx) - : [fx] "R" (fx), "0" (0)); -#endif - if (unlikely(err) && - __clear_user(fx, sizeof(struct i387_fxsave_struct))) - err = -EFAULT; - /* No need to clear here because the caller clears USED_MATH */ - return err; + /* See comment in fpu_fxsave() below. */ + return check_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx)); } -static inline void fpu_fxsave(struct fpu *fpu) +static inline int fxrstor_checking(struct i387_fxsave_struct *fx) { - /* Using "rex64; fxsave %0" is broken because, if the memory operand - uses any extended registers for addressing, a second REX prefix - will be generated (to the assembler, rex64 followed by semicolon - is a separate instruction), and hence the 64-bitness is lost. */ + if (config_enabled(CONFIG_X86_32)) + return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx)); + else if (config_enabled(CONFIG_AS_FXSAVEQ)) + return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx)); -#ifdef CONFIG_AS_FXSAVEQ - /* Using "fxsaveq %0" would be the ideal choice, but is only supported - starting with gas 2.16. */ - __asm__ __volatile__("fxsaveq %0" - : "=m" (fpu->state->fxsave)); -#else - /* Using, as a workaround, the properly prefixed form below isn't - accepted by any binutils version so far released, complaining that - the same type of prefix is used twice if an extended register is - needed for addressing (fix submitted to mainline 2005-11-21). - asm volatile("rex64/fxsave %0" - : "=m" (fpu->state->fxsave)); - This, however, we can work around by forcing the compiler to select - an addressing mode that doesn't require extended registers. 
*/ - asm volatile("rex64/fxsave (%[fx])" - : "=m" (fpu->state->fxsave) - : [fx] "R" (&fpu->state->fxsave)); -#endif + /* See comment in fpu_fxsave() below. */ + return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), + "m" (*fx)); } -#else /* CONFIG_X86_32 */ - -/* perform fxrstor iff the processor has extended states, otherwise frstor */ -static inline int fxrstor_checking(struct i387_fxsave_struct *fx) +static inline int frstor_checking(struct i387_fsave_struct *fx) { - /* - * The "nop" is needed to make the instructions the same - * length. - */ - alternative_input( - "nop ; frstor %1", - "fxrstor %1", - X86_FEATURE_FXSR, - "m" (*fx)); - - return 0; + return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx)); } static inline void fpu_fxsave(struct fpu *fpu) { - asm volatile("fxsave %[fx]" - : [fx] "=m" (fpu->state->fxsave)); + if (config_enabled(CONFIG_X86_32)) + asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave)); + else if (config_enabled(CONFIG_AS_FXSAVEQ)) + asm volatile("fxsaveq %0" : "=m" (fpu->state->fxsave)); + else { + /* Using "rex64; fxsave %0" is broken because, if the memory + * operand uses any extended registers for addressing, a second + * REX prefix will be generated (to the assembler, rex64 + * followed by semicolon is a separate instruction), and hence + * the 64-bitness is lost. + * + * Using "fxsaveq %0" would be the ideal choice, but is only + * supported starting with gas 2.16. + * + * Using, as a workaround, the properly prefixed form below + * isn't accepted by any binutils version so far released, + * complaining that the same type of prefix is used twice if + * an extended register is needed for addressing (fix submitted + * to mainline 2005-11-21). + * + * asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave)); + * + * This, however, we can work around by forcing the compiler to + * select an addressing mode that doesn't require extended + * registers. + */ + asm volatile( "rex64/fxsave (%[fx])" + : "=m" (fpu->state->fxsave) + : [fx] "R" (&fpu->state->fxsave)); + } } -#endif /* CONFIG_X86_64 */ - /* * These must be called with preempt disabled. Returns * 'true' if the FPU state is still intact. @@ -248,17 +245,14 @@ static inline int __save_init_fpu(struct task_struct *tsk) return fpu_save_init(&tsk->thread.fpu); } -static inline int fpu_fxrstor_checking(struct fpu *fpu) -{ - return fxrstor_checking(&fpu->state->fxsave); -} - static inline int fpu_restore_checking(struct fpu *fpu) { if (use_xsave()) - return fpu_xrstor_checking(fpu); + return fpu_xrstor_checking(&fpu->state->xsave); + else if (use_fxsr()) + return fxrstor_checking(&fpu->state->fxsave); else - return fpu_fxrstor_checking(fpu); + return frstor_checking(&fpu->state->fsave); } static inline int restore_fpu_checking(struct task_struct *tsk) @@ -310,15 +304,52 @@ static inline void __thread_set_has_fpu(struct task_struct *tsk) static inline void __thread_fpu_end(struct task_struct *tsk) { __thread_clear_has_fpu(tsk); - stts(); + if (!use_eager_fpu()) + stts(); } static inline void __thread_fpu_begin(struct task_struct *tsk) { - clts(); + if (!use_eager_fpu()) + clts(); __thread_set_has_fpu(tsk); } +static inline void __drop_fpu(struct task_struct *tsk) +{ + if (__thread_has_fpu(tsk)) { + /* Ignore delayed exceptions from user space */ + asm volatile("1: fwait\n" + "2:\n" + _ASM_EXTABLE(1b, 2b)); + __thread_fpu_end(tsk); + } +} + +static inline void drop_fpu(struct task_struct *tsk) +{ + /* + * Forget coprocessor state.. 
+ */ + preempt_disable(); + tsk->fpu_counter = 0; + __drop_fpu(tsk); + clear_used_math(); + preempt_enable(); +} + +static inline void drop_init_fpu(struct task_struct *tsk) +{ + if (!use_eager_fpu()) + drop_fpu(tsk); + else { + if (use_xsave()) + xrstor_state(init_xstate_buf, -1); + else + fxrstor_checking(&init_xstate_buf->i387); + } +} + /* * FPU state switching for scheduling. * @@ -352,7 +383,12 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta { fpu_switch_t fpu; - fpu.preload = tsk_used_math(new) && new->fpu_counter > 5; + /* + * If the task has used the math, pre-load the FPU on xsave processors + * or if the past 5 consecutive context-switches used math. + */ + fpu.preload = tsk_used_math(new) && (use_eager_fpu() || + new->fpu_counter > 5); if (__thread_has_fpu(old)) { if (!__save_init_fpu(old)) cpu = ~0; @@ -364,14 +400,14 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta new->fpu_counter++; __thread_set_has_fpu(new); prefetch(new->thread.fpu.state); - } else + } else if (!use_eager_fpu()) stts(); } else { old->fpu_counter = 0; old->thread.fpu.last_cpu = ~0; if (fpu.preload) { new->fpu_counter++; - if (fpu_lazy_restore(new, cpu)) + if (!use_eager_fpu() && fpu_lazy_restore(new, cpu)) fpu.preload = 0; else prefetch(new->thread.fpu.state); @@ -391,44 +427,40 @@ static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu) { if (fpu.preload) { if (unlikely(restore_fpu_checking(new))) - __thread_fpu_end(new); + drop_init_fpu(new); } } /* * Signal frame handlers... */ -extern int save_i387_xstate(void __user *buf); -extern int restore_i387_xstate(void __user *buf); +extern int save_xstate_sig(void __user *buf, void __user *fx, int size); +extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size); -static inline void __clear_fpu(struct task_struct *tsk) +static inline int xstate_sigframe_size(void) { - if (__thread_has_fpu(tsk)) { - /* Ignore delayed exceptions from user space */ - asm volatile("1: fwait\n" - "2:\n" - _ASM_EXTABLE(1b, 2b)); - __thread_fpu_end(tsk); + return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size; +} + +static inline int restore_xstate_sig(void __user *buf, int ia32_frame) +{ + void __user *buf_fx = buf; + int size = xstate_sigframe_size(); + + if (ia32_frame && use_fxsr()) { + buf_fx = buf + sizeof(struct i387_fsave_struct); + size += sizeof(struct i387_fsave_struct); } + + return __restore_xstate_sig(buf, buf_fx, size); } /* - * The actual user_fpu_begin/end() functions - * need to be preemption-safe. + * Need to be preemption-safe. * - * NOTE! user_fpu_end() must be used only after you - * have saved the FP state, and user_fpu_begin() must - * be used only immediately before restoring it. - * These functions do not do any save/restore on - * their own. + * NOTE! user_fpu_begin() must be used only immediately before restoring + * it. This function does not do any save/restore on their own. 
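The fpu.preload condition introduced in switch_fpu_prepare() above can be read as a small stand-alone predicate; a hedged sketch with assumed parameter names (not kernel API):

	#include <stdbool.h>

	/* Mirrors the patched heuristic: always preload when the CPU runs in
	 * eager-FPU mode, otherwise only after more than five consecutive
	 * context switches that actually used the FPU. */
	static bool should_preload_fpu(bool task_used_math, bool eager_fpu,
				       unsigned int fpu_counter)
	{
		return task_used_math && (eager_fpu || fpu_counter > 5);
	}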
*/ -static inline void user_fpu_end(void) -{ - preempt_disable(); - __thread_fpu_end(current); - preempt_enable(); -} - static inline void user_fpu_begin(void) { preempt_disable(); @@ -437,25 +469,32 @@ static inline void user_fpu_begin(void) preempt_enable(); } +static inline void __save_fpu(struct task_struct *tsk) +{ + if (use_xsave()) + xsave_state(&tsk->thread.fpu.state->xsave, -1); + else + fpu_fxsave(&tsk->thread.fpu); +} + /* * These disable preemption on their own and are safe */ static inline void save_init_fpu(struct task_struct *tsk) { WARN_ON_ONCE(!__thread_has_fpu(tsk)); + + if (use_eager_fpu()) { + __save_fpu(tsk); + return; + } + preempt_disable(); __save_init_fpu(tsk); __thread_fpu_end(tsk); preempt_enable(); } -static inline void clear_fpu(struct task_struct *tsk) -{ - preempt_disable(); - __clear_fpu(tsk); - preempt_enable(); -} - /* * i387 state interaction */ @@ -510,11 +549,34 @@ static inline void fpu_free(struct fpu *fpu) } } -static inline void fpu_copy(struct fpu *dst, struct fpu *src) +static inline void fpu_copy(struct task_struct *dst, struct task_struct *src) { - memcpy(dst->state, src->state, xstate_size); + if (use_eager_fpu()) { + memset(&dst->thread.fpu.state->xsave, 0, xstate_size); + __save_fpu(dst); + } else { + struct fpu *dfpu = &dst->thread.fpu; + struct fpu *sfpu = &src->thread.fpu; + + unlazy_fpu(src); + memcpy(dfpu->state, sfpu->state, xstate_size); + } } -extern void fpu_finit(struct fpu *fpu); +static inline unsigned long +alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx, + unsigned long *size) +{ + unsigned long frame_size = xstate_sigframe_size(); + + *buf_fx = sp = round_down(sp - frame_size, 64); + if (ia32_frame && use_fxsr()) { + frame_size += sizeof(struct i387_fsave_struct); + sp -= sizeof(struct i387_fsave_struct); + } + + *size = frame_size; + return sp; +} #endif diff --git a/trunk/arch/x86/include/asm/ftrace.h b/trunk/arch/x86/include/asm/ftrace.h index b0767bc08740..9a25b522d377 100644 --- a/trunk/arch/x86/include/asm/ftrace.h +++ b/trunk/arch/x86/include/asm/ftrace.h @@ -3,38 +3,54 @@ #ifdef __ASSEMBLY__ - .macro MCOUNT_SAVE_FRAME - /* taken from glibc */ - subq $0x38, %rsp - movq %rax, (%rsp) - movq %rcx, 8(%rsp) - movq %rdx, 16(%rsp) - movq %rsi, 24(%rsp) - movq %rdi, 32(%rsp) - movq %r8, 40(%rsp) - movq %r9, 48(%rsp) + /* skip is set if the stack was already partially adjusted */ + .macro MCOUNT_SAVE_FRAME skip=0 + /* + * We add enough stack to save all regs. 
+ */ + subq $(SS+8-\skip), %rsp + movq %rax, RAX(%rsp) + movq %rcx, RCX(%rsp) + movq %rdx, RDX(%rsp) + movq %rsi, RSI(%rsp) + movq %rdi, RDI(%rsp) + movq %r8, R8(%rsp) + movq %r9, R9(%rsp) + /* Move RIP to its proper location */ + movq SS+8(%rsp), %rdx + movq %rdx, RIP(%rsp) .endm - .macro MCOUNT_RESTORE_FRAME - movq 48(%rsp), %r9 - movq 40(%rsp), %r8 - movq 32(%rsp), %rdi - movq 24(%rsp), %rsi - movq 16(%rsp), %rdx - movq 8(%rsp), %rcx - movq (%rsp), %rax - addq $0x38, %rsp + .macro MCOUNT_RESTORE_FRAME skip=0 + movq R9(%rsp), %r9 + movq R8(%rsp), %r8 + movq RDI(%rsp), %rdi + movq RSI(%rsp), %rsi + movq RDX(%rsp), %rdx + movq RCX(%rsp), %rcx + movq RAX(%rsp), %rax + addq $(SS+8-\skip), %rsp .endm #endif #ifdef CONFIG_FUNCTION_TRACER -#define MCOUNT_ADDR ((long)(mcount)) +#ifdef CC_USING_FENTRY +# define MCOUNT_ADDR ((long)(__fentry__)) +#else +# define MCOUNT_ADDR ((long)(mcount)) +#endif #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ +#ifdef CONFIG_DYNAMIC_FTRACE +#define ARCH_SUPPORTS_FTRACE_OPS 1 +#define ARCH_SUPPORTS_FTRACE_SAVE_REGS +#endif + #ifndef __ASSEMBLY__ extern void mcount(void); extern atomic_t modifying_ftrace_code; +extern void __fentry__(void); static inline unsigned long ftrace_call_adjust(unsigned long addr) { diff --git a/trunk/arch/x86/include/asm/hpet.h b/trunk/arch/x86/include/asm/hpet.h index 2c392d663dce..434e2106cc87 100644 --- a/trunk/arch/x86/include/asm/hpet.h +++ b/trunk/arch/x86/include/asm/hpet.h @@ -35,8 +35,6 @@ #define HPET_ID_NUMBER_SHIFT 8 #define HPET_ID_VENDOR_SHIFT 16 -#define HPET_ID_VENDOR_8086 0x8086 - #define HPET_CFG_ENABLE 0x001 #define HPET_CFG_LEGACY 0x002 #define HPET_LEGACY_8254 2 diff --git a/trunk/arch/x86/include/asm/i387.h b/trunk/arch/x86/include/asm/i387.h index 257d9cca214f..ed8089d69094 100644 --- a/trunk/arch/x86/include/asm/i387.h +++ b/trunk/arch/x86/include/asm/i387.h @@ -19,12 +19,37 @@ struct pt_regs; struct user_i387_struct; extern int init_fpu(struct task_struct *child); +extern void fpu_finit(struct fpu *fpu); extern int dump_fpu(struct pt_regs *, struct user_i387_struct *); extern void math_state_restore(void); extern bool irq_fpu_usable(void); -extern void kernel_fpu_begin(void); -extern void kernel_fpu_end(void); + +/* + * Careful: __kernel_fpu_begin/end() must be called with preempt disabled + * and they don't touch the preempt state on their own. + * If you enable preemption after __kernel_fpu_begin(), preempt notifier + * should call the __kernel_fpu_end() to prevent the kernel/user FPU + * state from getting corrupted. KVM for example uses this model. + * + * All other cases use kernel_fpu_begin/end() which disable preemption + * during kernel FPU usage. 
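A minimal usage sketch for the kernel_fpu_begin()/kernel_fpu_end() pair declared below, using a hypothetical in-kernel SIMD helper (the function name and the 16-byte store are illustrative only):

	/* Wraps the FPU-touching region exactly like the xor_sse_*() routines
	 * converted later in this patch. */
	static void example_zero_16_bytes(void *dst)
	{
		kernel_fpu_begin();	/* warns if the FPU is unusable, disables preemption */
		asm volatile("pxor %%xmm0, %%xmm0\n\t"
			     "movups %%xmm0, (%0)"
			     : : "r" (dst) : "memory", "xmm0");
		kernel_fpu_end();	/* re-enables preemption */
	}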
+ */ +extern void __kernel_fpu_begin(void); +extern void __kernel_fpu_end(void); + +static inline void kernel_fpu_begin(void) +{ + WARN_ON_ONCE(!irq_fpu_usable()); + preempt_disable(); + __kernel_fpu_begin(); +} + +static inline void kernel_fpu_end(void) +{ + __kernel_fpu_end(); + preempt_enable(); +} /* * Some instructions like VIA's padlock instructions generate a spurious diff --git a/trunk/arch/x86/include/asm/iommu_table.h b/trunk/arch/x86/include/asm/iommu_table.h index f229b13a5f30..f42a04735a0a 100644 --- a/trunk/arch/x86/include/asm/iommu_table.h +++ b/trunk/arch/x86/include/asm/iommu_table.h @@ -48,7 +48,7 @@ struct iommu_table_entry { #define __IOMMU_INIT(_detect, _depend, _early_init, _late_init, _finish)\ - static const struct iommu_table_entry const \ + static const struct iommu_table_entry \ __iommu_entry_##_detect __used \ __attribute__ ((unused, __section__(".iommu_table"), \ aligned((sizeof(void *))))) \ @@ -63,10 +63,10 @@ struct iommu_table_entry { * to stop detecting the other IOMMUs after yours has been detected. */ #define IOMMU_INIT_POST(_detect) \ - __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 0) + __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 0) #define IOMMU_INIT_POST_FINISH(detect) \ - __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, 0, 0, 1) + __IOMMU_INIT(_detect, pci_swiotlb_detect_4gb, NULL, NULL, 1) /* * A more sophisticated version of IOMMU_INIT. This variant requires: diff --git a/trunk/arch/x86/include/asm/kprobes.h b/trunk/arch/x86/include/asm/kprobes.h index 547882539157..d3ddd17405d0 100644 --- a/trunk/arch/x86/include/asm/kprobes.h +++ b/trunk/arch/x86/include/asm/kprobes.h @@ -27,6 +27,7 @@ #include #define __ARCH_WANT_KPROBES_INSN_SLOT +#define ARCH_SUPPORTS_KPROBES_ON_FTRACE struct pt_regs; struct kprobe; diff --git a/trunk/arch/x86/include/asm/kvm.h b/trunk/arch/x86/include/asm/kvm.h index 246617efd67f..41e08cb6a092 100644 --- a/trunk/arch/x86/include/asm/kvm.h +++ b/trunk/arch/x86/include/asm/kvm.h @@ -9,6 +9,22 @@ #include #include +#define DE_VECTOR 0 +#define DB_VECTOR 1 +#define BP_VECTOR 3 +#define OF_VECTOR 4 +#define BR_VECTOR 5 +#define UD_VECTOR 6 +#define NM_VECTOR 7 +#define DF_VECTOR 8 +#define TS_VECTOR 10 +#define NP_VECTOR 11 +#define SS_VECTOR 12 +#define GP_VECTOR 13 +#define PF_VECTOR 14 +#define MF_VECTOR 16 +#define MC_VECTOR 18 + /* Select x86 specific features in */ #define __KVM_HAVE_PIT #define __KVM_HAVE_IOAPIC diff --git a/trunk/arch/x86/include/asm/kvm_host.h b/trunk/arch/x86/include/asm/kvm_host.h index 09155d64cf7e..1eaa6b056670 100644 --- a/trunk/arch/x86/include/asm/kvm_host.h +++ b/trunk/arch/x86/include/asm/kvm_host.h @@ -75,22 +75,6 @@ #define KVM_HPAGE_MASK(x) (~(KVM_HPAGE_SIZE(x) - 1)) #define KVM_PAGES_PER_HPAGE(x) (KVM_HPAGE_SIZE(x) / PAGE_SIZE) -#define DE_VECTOR 0 -#define DB_VECTOR 1 -#define BP_VECTOR 3 -#define OF_VECTOR 4 -#define BR_VECTOR 5 -#define UD_VECTOR 6 -#define NM_VECTOR 7 -#define DF_VECTOR 8 -#define TS_VECTOR 10 -#define NP_VECTOR 11 -#define SS_VECTOR 12 -#define GP_VECTOR 13 -#define PF_VECTOR 14 -#define MF_VECTOR 16 -#define MC_VECTOR 18 - #define SELECTOR_TI_MASK (1 << 2) #define SELECTOR_RPL_MASK 0x03 diff --git a/trunk/arch/x86/include/asm/mce.h b/trunk/arch/x86/include/asm/mce.h index 441520e4174f..54d73b1f00a0 100644 --- a/trunk/arch/x86/include/asm/mce.h +++ b/trunk/arch/x86/include/asm/mce.h @@ -33,6 +33,14 @@ #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ #define MCI_STATUS_AR 
(1ULL<<55) /* Action required */ +#define MCACOD 0xffff /* MCA Error Code */ + +/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ +#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ +#define MCACOD_SCRUBMSK 0xfff0 +#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ +#define MCACOD_DATA 0x0134 /* Data Load */ +#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ /* MCi_MISC register defines */ #define MCI_MISC_ADDR_LSB(m) ((m) & 0x3f) @@ -108,19 +116,9 @@ struct mce_log { /* Software defined banks */ #define MCE_EXTENDED_BANK 128 #define MCE_THERMAL_BANK MCE_EXTENDED_BANK + 0 - -#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) /* MCE_AMD */ -#define K8_MCE_THRESHOLD_BANK_0 (MCE_THRESHOLD_BASE + 0 * 9) -#define K8_MCE_THRESHOLD_BANK_1 (MCE_THRESHOLD_BASE + 1 * 9) -#define K8_MCE_THRESHOLD_BANK_2 (MCE_THRESHOLD_BASE + 2 * 9) -#define K8_MCE_THRESHOLD_BANK_3 (MCE_THRESHOLD_BASE + 3 * 9) -#define K8_MCE_THRESHOLD_BANK_4 (MCE_THRESHOLD_BASE + 4 * 9) -#define K8_MCE_THRESHOLD_BANK_5 (MCE_THRESHOLD_BASE + 5 * 9) -#define K8_MCE_THRESHOLD_DRAM_ECC (MCE_THRESHOLD_BANK_4 + 0) - +#define K8_MCE_THRESHOLD_BASE (MCE_EXTENDED_BANK + 1) #ifdef __KERNEL__ - extern void mce_register_decode_chain(struct notifier_block *nb); extern void mce_unregister_decode_chain(struct notifier_block *nb); @@ -163,6 +161,7 @@ DECLARE_PER_CPU(struct device *, mce_device); #ifdef CONFIG_X86_MCE_INTEL extern int mce_cmci_disabled; extern int mce_ignore_ce; +extern int mce_bios_cmci_threshold; void mce_intel_feature_init(struct cpuinfo_x86 *c); void cmci_clear(void); void cmci_reenable(void); diff --git a/trunk/arch/x86/include/asm/perf_event.h b/trunk/arch/x86/include/asm/perf_event.h index dab39350e51e..4fabcdf1cfa7 100644 --- a/trunk/arch/x86/include/asm/perf_event.h +++ b/trunk/arch/x86/include/asm/perf_event.h @@ -196,11 +196,16 @@ static inline u32 get_ibs_caps(void) { return 0; } extern void perf_events_lapic_init(void); /* - * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups. - * This flag is otherwise unused and ABI specified to be 0, so nobody should - * care what we do with it. + * Abuse bits {3,5} of the cpu eflags register. These flags are otherwise + * unused and ABI specified to be 0, so nobody should care what we do with + * them. + * + * EXACT - the IP points to the exact instruction that triggered the + * event (HW bugs exempt). + * VM - original X86_VM_MASK; see set_linear_ip(). 
*/ #define PERF_EFLAGS_EXACT (1UL << 3) +#define PERF_EFLAGS_VM (1UL << 5) struct pt_regs; extern unsigned long perf_instruction_pointer(struct pt_regs *regs); @@ -257,4 +262,6 @@ static inline void perf_check_microcode(void) { } static inline void amd_pmu_disable_virt(void) { } #endif +#define arch_perf_out_copy_user copy_from_user_nmi + #endif /* _ASM_X86_PERF_EVENT_H */ diff --git a/trunk/arch/x86/include/asm/perf_regs.h b/trunk/arch/x86/include/asm/perf_regs.h new file mode 100644 index 000000000000..3f2207bfd17b --- /dev/null +++ b/trunk/arch/x86/include/asm/perf_regs.h @@ -0,0 +1,33 @@ +#ifndef _ASM_X86_PERF_REGS_H +#define _ASM_X86_PERF_REGS_H + +enum perf_event_x86_regs { + PERF_REG_X86_AX, + PERF_REG_X86_BX, + PERF_REG_X86_CX, + PERF_REG_X86_DX, + PERF_REG_X86_SI, + PERF_REG_X86_DI, + PERF_REG_X86_BP, + PERF_REG_X86_SP, + PERF_REG_X86_IP, + PERF_REG_X86_FLAGS, + PERF_REG_X86_CS, + PERF_REG_X86_SS, + PERF_REG_X86_DS, + PERF_REG_X86_ES, + PERF_REG_X86_FS, + PERF_REG_X86_GS, + PERF_REG_X86_R8, + PERF_REG_X86_R9, + PERF_REG_X86_R10, + PERF_REG_X86_R11, + PERF_REG_X86_R12, + PERF_REG_X86_R13, + PERF_REG_X86_R14, + PERF_REG_X86_R15, + + PERF_REG_X86_32_MAX = PERF_REG_X86_GS + 1, + PERF_REG_X86_64_MAX = PERF_REG_X86_R15 + 1, +}; +#endif /* _ASM_X86_PERF_REGS_H */ diff --git a/trunk/arch/x86/include/asm/processor.h b/trunk/arch/x86/include/asm/processor.h index d048cad9bcad..b98c0d958ebb 100644 --- a/trunk/arch/x86/include/asm/processor.h +++ b/trunk/arch/x86/include/asm/processor.h @@ -423,7 +423,6 @@ DECLARE_INIT_PER_CPU(irq_stack_union); DECLARE_PER_CPU(char *, irq_stack_ptr); DECLARE_PER_CPU(unsigned int, irq_count); -extern unsigned long kernel_eflags; extern asmlinkage void ignore_sysret(void); #else /* X86_64 */ #ifdef CONFIG_CC_STACKPROTECTOR @@ -759,6 +758,8 @@ static inline void update_debugctlmsr(unsigned long debugctlmsr) wrmsrl(MSR_IA32_DEBUGCTLMSR, debugctlmsr); } +extern void set_task_blockstep(struct task_struct *task, bool on); + /* * from system description table in BIOS. Mostly for MCA use, but * others may find it useful: diff --git a/trunk/arch/x86/include/asm/rcu.h b/trunk/arch/x86/include/asm/rcu.h new file mode 100644 index 000000000000..d1ac07a23979 --- /dev/null +++ b/trunk/arch/x86/include/asm/rcu.h @@ -0,0 +1,32 @@ +#ifndef _ASM_X86_RCU_H +#define _ASM_X86_RCU_H + +#ifndef __ASSEMBLY__ + +#include +#include + +static inline void exception_enter(struct pt_regs *regs) +{ + rcu_user_exit(); +} + +static inline void exception_exit(struct pt_regs *regs) +{ +#ifdef CONFIG_RCU_USER_QS + if (user_mode(regs)) + rcu_user_enter(); +#endif +} + +#else /* __ASSEMBLY__ */ + +#ifdef CONFIG_RCU_USER_QS +# define SCHEDULE_USER call schedule_user +#else +# define SCHEDULE_USER call schedule +#endif + +#endif /* !__ASSEMBLY__ */ + +#endif diff --git a/trunk/arch/x86/include/asm/signal.h b/trunk/arch/x86/include/asm/signal.h index 598457cbd0f8..323973f4abf1 100644 --- a/trunk/arch/x86/include/asm/signal.h +++ b/trunk/arch/x86/include/asm/signal.h @@ -31,6 +31,10 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; +#ifndef CONFIG_COMPAT +typedef sigset_t compat_sigset_t; +#endif + #else /* Here we must cater to libcs that poke about in kernel headers. */ diff --git a/trunk/arch/x86/include/asm/spinlock.h b/trunk/arch/x86/include/asm/spinlock.h index b315a33867f2..33692eaabab5 100644 --- a/trunk/arch/x86/include/asm/spinlock.h +++ b/trunk/arch/x86/include/asm/spinlock.h @@ -12,8 +12,7 @@ * Simple spin lock operations. 
There are two variants, one clears IRQ's * on the local processor, one does not. * - * These are fair FIFO ticket locks, which are currently limited to 256 - * CPUs. + * These are fair FIFO ticket locks, which support up to 2^16 CPUs. * * (the type definitions are in asm/spinlock_types.h) */ diff --git a/trunk/arch/x86/include/asm/svm.h b/trunk/arch/x86/include/asm/svm.h index f2b83bc7d784..cdf5674dd23a 100644 --- a/trunk/arch/x86/include/asm/svm.h +++ b/trunk/arch/x86/include/asm/svm.h @@ -1,6 +1,135 @@ #ifndef __SVM_H #define __SVM_H +#define SVM_EXIT_READ_CR0 0x000 +#define SVM_EXIT_READ_CR3 0x003 +#define SVM_EXIT_READ_CR4 0x004 +#define SVM_EXIT_READ_CR8 0x008 +#define SVM_EXIT_WRITE_CR0 0x010 +#define SVM_EXIT_WRITE_CR3 0x013 +#define SVM_EXIT_WRITE_CR4 0x014 +#define SVM_EXIT_WRITE_CR8 0x018 +#define SVM_EXIT_READ_DR0 0x020 +#define SVM_EXIT_READ_DR1 0x021 +#define SVM_EXIT_READ_DR2 0x022 +#define SVM_EXIT_READ_DR3 0x023 +#define SVM_EXIT_READ_DR4 0x024 +#define SVM_EXIT_READ_DR5 0x025 +#define SVM_EXIT_READ_DR6 0x026 +#define SVM_EXIT_READ_DR7 0x027 +#define SVM_EXIT_WRITE_DR0 0x030 +#define SVM_EXIT_WRITE_DR1 0x031 +#define SVM_EXIT_WRITE_DR2 0x032 +#define SVM_EXIT_WRITE_DR3 0x033 +#define SVM_EXIT_WRITE_DR4 0x034 +#define SVM_EXIT_WRITE_DR5 0x035 +#define SVM_EXIT_WRITE_DR6 0x036 +#define SVM_EXIT_WRITE_DR7 0x037 +#define SVM_EXIT_EXCP_BASE 0x040 +#define SVM_EXIT_INTR 0x060 +#define SVM_EXIT_NMI 0x061 +#define SVM_EXIT_SMI 0x062 +#define SVM_EXIT_INIT 0x063 +#define SVM_EXIT_VINTR 0x064 +#define SVM_EXIT_CR0_SEL_WRITE 0x065 +#define SVM_EXIT_IDTR_READ 0x066 +#define SVM_EXIT_GDTR_READ 0x067 +#define SVM_EXIT_LDTR_READ 0x068 +#define SVM_EXIT_TR_READ 0x069 +#define SVM_EXIT_IDTR_WRITE 0x06a +#define SVM_EXIT_GDTR_WRITE 0x06b +#define SVM_EXIT_LDTR_WRITE 0x06c +#define SVM_EXIT_TR_WRITE 0x06d +#define SVM_EXIT_RDTSC 0x06e +#define SVM_EXIT_RDPMC 0x06f +#define SVM_EXIT_PUSHF 0x070 +#define SVM_EXIT_POPF 0x071 +#define SVM_EXIT_CPUID 0x072 +#define SVM_EXIT_RSM 0x073 +#define SVM_EXIT_IRET 0x074 +#define SVM_EXIT_SWINT 0x075 +#define SVM_EXIT_INVD 0x076 +#define SVM_EXIT_PAUSE 0x077 +#define SVM_EXIT_HLT 0x078 +#define SVM_EXIT_INVLPG 0x079 +#define SVM_EXIT_INVLPGA 0x07a +#define SVM_EXIT_IOIO 0x07b +#define SVM_EXIT_MSR 0x07c +#define SVM_EXIT_TASK_SWITCH 0x07d +#define SVM_EXIT_FERR_FREEZE 0x07e +#define SVM_EXIT_SHUTDOWN 0x07f +#define SVM_EXIT_VMRUN 0x080 +#define SVM_EXIT_VMMCALL 0x081 +#define SVM_EXIT_VMLOAD 0x082 +#define SVM_EXIT_VMSAVE 0x083 +#define SVM_EXIT_STGI 0x084 +#define SVM_EXIT_CLGI 0x085 +#define SVM_EXIT_SKINIT 0x086 +#define SVM_EXIT_RDTSCP 0x087 +#define SVM_EXIT_ICEBP 0x088 +#define SVM_EXIT_WBINVD 0x089 +#define SVM_EXIT_MONITOR 0x08a +#define SVM_EXIT_MWAIT 0x08b +#define SVM_EXIT_MWAIT_COND 0x08c +#define SVM_EXIT_XSETBV 0x08d +#define SVM_EXIT_NPF 0x400 + +#define SVM_EXIT_ERR -1 + +#define SVM_EXIT_REASONS \ + { SVM_EXIT_READ_CR0, "read_cr0" }, \ + { SVM_EXIT_READ_CR3, "read_cr3" }, \ + { SVM_EXIT_READ_CR4, "read_cr4" }, \ + { SVM_EXIT_READ_CR8, "read_cr8" }, \ + { SVM_EXIT_WRITE_CR0, "write_cr0" }, \ + { SVM_EXIT_WRITE_CR3, "write_cr3" }, \ + { SVM_EXIT_WRITE_CR4, "write_cr4" }, \ + { SVM_EXIT_WRITE_CR8, "write_cr8" }, \ + { SVM_EXIT_READ_DR0, "read_dr0" }, \ + { SVM_EXIT_READ_DR1, "read_dr1" }, \ + { SVM_EXIT_READ_DR2, "read_dr2" }, \ + { SVM_EXIT_READ_DR3, "read_dr3" }, \ + { SVM_EXIT_WRITE_DR0, "write_dr0" }, \ + { SVM_EXIT_WRITE_DR1, "write_dr1" }, \ + { SVM_EXIT_WRITE_DR2, "write_dr2" }, \ + { SVM_EXIT_WRITE_DR3, "write_dr3" }, \ + { 
SVM_EXIT_WRITE_DR5, "write_dr5" }, \ + { SVM_EXIT_WRITE_DR7, "write_dr7" }, \ + { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \ + { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \ + { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \ + { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \ + { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \ + { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \ + { SVM_EXIT_INTR, "interrupt" }, \ + { SVM_EXIT_NMI, "nmi" }, \ + { SVM_EXIT_SMI, "smi" }, \ + { SVM_EXIT_INIT, "init" }, \ + { SVM_EXIT_VINTR, "vintr" }, \ + { SVM_EXIT_CPUID, "cpuid" }, \ + { SVM_EXIT_INVD, "invd" }, \ + { SVM_EXIT_HLT, "hlt" }, \ + { SVM_EXIT_INVLPG, "invlpg" }, \ + { SVM_EXIT_INVLPGA, "invlpga" }, \ + { SVM_EXIT_IOIO, "io" }, \ + { SVM_EXIT_MSR, "msr" }, \ + { SVM_EXIT_TASK_SWITCH, "task_switch" }, \ + { SVM_EXIT_SHUTDOWN, "shutdown" }, \ + { SVM_EXIT_VMRUN, "vmrun" }, \ + { SVM_EXIT_VMMCALL, "hypercall" }, \ + { SVM_EXIT_VMLOAD, "vmload" }, \ + { SVM_EXIT_VMSAVE, "vmsave" }, \ + { SVM_EXIT_STGI, "stgi" }, \ + { SVM_EXIT_CLGI, "clgi" }, \ + { SVM_EXIT_SKINIT, "skinit" }, \ + { SVM_EXIT_WBINVD, "wbinvd" }, \ + { SVM_EXIT_MONITOR, "monitor" }, \ + { SVM_EXIT_MWAIT, "mwait" }, \ + { SVM_EXIT_XSETBV, "xsetbv" }, \ + { SVM_EXIT_NPF, "npf" } + +#ifdef __KERNEL__ + enum { INTERCEPT_INTR, INTERCEPT_NMI, @@ -264,81 +393,6 @@ struct __attribute__ ((__packed__)) vmcb { #define SVM_EXITINFO_REG_MASK 0x0F -#define SVM_EXIT_READ_CR0 0x000 -#define SVM_EXIT_READ_CR3 0x003 -#define SVM_EXIT_READ_CR4 0x004 -#define SVM_EXIT_READ_CR8 0x008 -#define SVM_EXIT_WRITE_CR0 0x010 -#define SVM_EXIT_WRITE_CR3 0x013 -#define SVM_EXIT_WRITE_CR4 0x014 -#define SVM_EXIT_WRITE_CR8 0x018 -#define SVM_EXIT_READ_DR0 0x020 -#define SVM_EXIT_READ_DR1 0x021 -#define SVM_EXIT_READ_DR2 0x022 -#define SVM_EXIT_READ_DR3 0x023 -#define SVM_EXIT_READ_DR4 0x024 -#define SVM_EXIT_READ_DR5 0x025 -#define SVM_EXIT_READ_DR6 0x026 -#define SVM_EXIT_READ_DR7 0x027 -#define SVM_EXIT_WRITE_DR0 0x030 -#define SVM_EXIT_WRITE_DR1 0x031 -#define SVM_EXIT_WRITE_DR2 0x032 -#define SVM_EXIT_WRITE_DR3 0x033 -#define SVM_EXIT_WRITE_DR4 0x034 -#define SVM_EXIT_WRITE_DR5 0x035 -#define SVM_EXIT_WRITE_DR6 0x036 -#define SVM_EXIT_WRITE_DR7 0x037 -#define SVM_EXIT_EXCP_BASE 0x040 -#define SVM_EXIT_INTR 0x060 -#define SVM_EXIT_NMI 0x061 -#define SVM_EXIT_SMI 0x062 -#define SVM_EXIT_INIT 0x063 -#define SVM_EXIT_VINTR 0x064 -#define SVM_EXIT_CR0_SEL_WRITE 0x065 -#define SVM_EXIT_IDTR_READ 0x066 -#define SVM_EXIT_GDTR_READ 0x067 -#define SVM_EXIT_LDTR_READ 0x068 -#define SVM_EXIT_TR_READ 0x069 -#define SVM_EXIT_IDTR_WRITE 0x06a -#define SVM_EXIT_GDTR_WRITE 0x06b -#define SVM_EXIT_LDTR_WRITE 0x06c -#define SVM_EXIT_TR_WRITE 0x06d -#define SVM_EXIT_RDTSC 0x06e -#define SVM_EXIT_RDPMC 0x06f -#define SVM_EXIT_PUSHF 0x070 -#define SVM_EXIT_POPF 0x071 -#define SVM_EXIT_CPUID 0x072 -#define SVM_EXIT_RSM 0x073 -#define SVM_EXIT_IRET 0x074 -#define SVM_EXIT_SWINT 0x075 -#define SVM_EXIT_INVD 0x076 -#define SVM_EXIT_PAUSE 0x077 -#define SVM_EXIT_HLT 0x078 -#define SVM_EXIT_INVLPG 0x079 -#define SVM_EXIT_INVLPGA 0x07a -#define SVM_EXIT_IOIO 0x07b -#define SVM_EXIT_MSR 0x07c -#define SVM_EXIT_TASK_SWITCH 0x07d -#define SVM_EXIT_FERR_FREEZE 0x07e -#define SVM_EXIT_SHUTDOWN 0x07f -#define SVM_EXIT_VMRUN 0x080 -#define SVM_EXIT_VMMCALL 0x081 -#define SVM_EXIT_VMLOAD 0x082 -#define SVM_EXIT_VMSAVE 0x083 -#define SVM_EXIT_STGI 0x084 -#define SVM_EXIT_CLGI 0x085 -#define SVM_EXIT_SKINIT 0x086 -#define SVM_EXIT_RDTSCP 0x087 -#define SVM_EXIT_ICEBP 0x088 -#define SVM_EXIT_WBINVD 
0x089 -#define SVM_EXIT_MONITOR 0x08a -#define SVM_EXIT_MWAIT 0x08b -#define SVM_EXIT_MWAIT_COND 0x08c -#define SVM_EXIT_XSETBV 0x08d -#define SVM_EXIT_NPF 0x400 - -#define SVM_EXIT_ERR -1 - #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP) #define SVM_VMLOAD ".byte 0x0f, 0x01, 0xda" @@ -350,3 +404,4 @@ struct __attribute__ ((__packed__)) vmcb { #endif +#endif diff --git a/trunk/arch/x86/include/asm/sys_ia32.h b/trunk/arch/x86/include/asm/sys_ia32.h index 3fda9db48819..4ca1c611b552 100644 --- a/trunk/arch/x86/include/asm/sys_ia32.h +++ b/trunk/arch/x86/include/asm/sys_ia32.h @@ -40,7 +40,7 @@ asmlinkage long sys32_sigaction(int, struct old_sigaction32 __user *, struct old_sigaction32 __user *); asmlinkage long sys32_alarm(unsigned int); -asmlinkage long sys32_waitpid(compat_pid_t, unsigned int *, int); +asmlinkage long sys32_waitpid(compat_pid_t, unsigned int __user *, int); asmlinkage long sys32_sysfs(int, u32, u32); asmlinkage long sys32_sched_rr_get_interval(compat_pid_t, diff --git a/trunk/arch/x86/include/asm/thread_info.h b/trunk/arch/x86/include/asm/thread_info.h index 89f794f007ec..c535d847e3b5 100644 --- a/trunk/arch/x86/include/asm/thread_info.h +++ b/trunk/arch/x86/include/asm/thread_info.h @@ -89,6 +89,7 @@ struct thread_info { #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_IA32 17 /* IA32 compatibility process */ #define TIF_FORK 18 /* ret_from_fork */ +#define TIF_NOHZ 19 /* in adaptive nohz mode */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_DEBUG 21 /* uses debug registers */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ @@ -114,6 +115,7 @@ struct thread_info { #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_IA32 (1 << TIF_IA32) #define _TIF_FORK (1 << TIF_FORK) +#define _TIF_NOHZ (1 << TIF_NOHZ) #define _TIF_DEBUG (1 << TIF_DEBUG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) #define _TIF_FORCED_TF (1 << TIF_FORCED_TF) @@ -126,12 +128,13 @@ struct thread_info { /* work to do in syscall_trace_enter() */ #define _TIF_WORK_SYSCALL_ENTRY \ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \ - _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT) + _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \ + _TIF_NOHZ) /* work to do in syscall_trace_leave() */ #define _TIF_WORK_SYSCALL_EXIT \ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \ - _TIF_SYSCALL_TRACEPOINT) + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ) /* work to do on interrupt/exception return */ #define _TIF_WORK_MASK \ @@ -141,7 +144,8 @@ struct thread_info { /* work to do on any return to user space */ #define _TIF_ALLWORK_MASK \ - ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT) + ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \ + _TIF_NOHZ) /* Only used for 64 bit */ #define _TIF_DO_NOTIFY_MASK \ diff --git a/trunk/arch/x86/include/asm/uprobes.h b/trunk/arch/x86/include/asm/uprobes.h index f3971bbcd1de..8ff8be7835ab 100644 --- a/trunk/arch/x86/include/asm/uprobes.h +++ b/trunk/arch/x86/include/asm/uprobes.h @@ -42,10 +42,11 @@ struct arch_uprobe { }; struct arch_uprobe_task { - unsigned long saved_trap_nr; #ifdef CONFIG_X86_64 unsigned long saved_scratch_register; #endif + unsigned int saved_trap_nr; + unsigned int saved_tf; }; extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); diff --git a/trunk/arch/x86/include/asm/vdso.h b/trunk/arch/x86/include/asm/vdso.h index bb0522850b74..fddb53d63915 100644 --- a/trunk/arch/x86/include/asm/vdso.h +++ 
b/trunk/arch/x86/include/asm/vdso.h @@ -11,7 +11,8 @@ extern const char VDSO32_PRELINK[]; #define VDSO32_SYMBOL(base, name) \ ({ \ extern const char VDSO32_##name[]; \ - (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \ + (void __user *)(VDSO32_##name - VDSO32_PRELINK + \ + (unsigned long)(base)); \ }) #endif diff --git a/trunk/arch/x86/include/asm/vmx.h b/trunk/arch/x86/include/asm/vmx.h index 74fcb963595b..36ec21c36d68 100644 --- a/trunk/arch/x86/include/asm/vmx.h +++ b/trunk/arch/x86/include/asm/vmx.h @@ -25,6 +25,88 @@ * */ +#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 + +#define EXIT_REASON_EXCEPTION_NMI 0 +#define EXIT_REASON_EXTERNAL_INTERRUPT 1 +#define EXIT_REASON_TRIPLE_FAULT 2 + +#define EXIT_REASON_PENDING_INTERRUPT 7 +#define EXIT_REASON_NMI_WINDOW 8 +#define EXIT_REASON_TASK_SWITCH 9 +#define EXIT_REASON_CPUID 10 +#define EXIT_REASON_HLT 12 +#define EXIT_REASON_INVD 13 +#define EXIT_REASON_INVLPG 14 +#define EXIT_REASON_RDPMC 15 +#define EXIT_REASON_RDTSC 16 +#define EXIT_REASON_VMCALL 18 +#define EXIT_REASON_VMCLEAR 19 +#define EXIT_REASON_VMLAUNCH 20 +#define EXIT_REASON_VMPTRLD 21 +#define EXIT_REASON_VMPTRST 22 +#define EXIT_REASON_VMREAD 23 +#define EXIT_REASON_VMRESUME 24 +#define EXIT_REASON_VMWRITE 25 +#define EXIT_REASON_VMOFF 26 +#define EXIT_REASON_VMON 27 +#define EXIT_REASON_CR_ACCESS 28 +#define EXIT_REASON_DR_ACCESS 29 +#define EXIT_REASON_IO_INSTRUCTION 30 +#define EXIT_REASON_MSR_READ 31 +#define EXIT_REASON_MSR_WRITE 32 +#define EXIT_REASON_INVALID_STATE 33 +#define EXIT_REASON_MWAIT_INSTRUCTION 36 +#define EXIT_REASON_MONITOR_INSTRUCTION 39 +#define EXIT_REASON_PAUSE_INSTRUCTION 40 +#define EXIT_REASON_MCE_DURING_VMENTRY 41 +#define EXIT_REASON_TPR_BELOW_THRESHOLD 43 +#define EXIT_REASON_APIC_ACCESS 44 +#define EXIT_REASON_EPT_VIOLATION 48 +#define EXIT_REASON_EPT_MISCONFIG 49 +#define EXIT_REASON_WBINVD 54 +#define EXIT_REASON_XSETBV 55 +#define EXIT_REASON_INVPCID 58 + +#define VMX_EXIT_REASONS \ + { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ + { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \ + { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \ + { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \ + { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \ + { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ + { EXIT_REASON_CPUID, "CPUID" }, \ + { EXIT_REASON_HLT, "HLT" }, \ + { EXIT_REASON_INVLPG, "INVLPG" }, \ + { EXIT_REASON_RDPMC, "RDPMC" }, \ + { EXIT_REASON_RDTSC, "RDTSC" }, \ + { EXIT_REASON_VMCALL, "VMCALL" }, \ + { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \ + { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \ + { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \ + { EXIT_REASON_VMPTRST, "VMPTRST" }, \ + { EXIT_REASON_VMREAD, "VMREAD" }, \ + { EXIT_REASON_VMRESUME, "VMRESUME" }, \ + { EXIT_REASON_VMWRITE, "VMWRITE" }, \ + { EXIT_REASON_VMOFF, "VMOFF" }, \ + { EXIT_REASON_VMON, "VMON" }, \ + { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \ + { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \ + { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ + { EXIT_REASON_MSR_READ, "MSR_READ" }, \ + { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ + { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ + { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ + { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \ + { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ + { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ + { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ + { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ + { EXIT_REASON_EPT_MISCONFIG, 
"EPT_MISCONFIG" }, \ + { EXIT_REASON_WBINVD, "WBINVD" } + +#ifdef __KERNEL__ + #include /* @@ -241,49 +323,6 @@ enum vmcs_field { HOST_RIP = 0x00006c16, }; -#define VMX_EXIT_REASONS_FAILED_VMENTRY 0x80000000 - -#define EXIT_REASON_EXCEPTION_NMI 0 -#define EXIT_REASON_EXTERNAL_INTERRUPT 1 -#define EXIT_REASON_TRIPLE_FAULT 2 - -#define EXIT_REASON_PENDING_INTERRUPT 7 -#define EXIT_REASON_NMI_WINDOW 8 -#define EXIT_REASON_TASK_SWITCH 9 -#define EXIT_REASON_CPUID 10 -#define EXIT_REASON_HLT 12 -#define EXIT_REASON_INVD 13 -#define EXIT_REASON_INVLPG 14 -#define EXIT_REASON_RDPMC 15 -#define EXIT_REASON_RDTSC 16 -#define EXIT_REASON_VMCALL 18 -#define EXIT_REASON_VMCLEAR 19 -#define EXIT_REASON_VMLAUNCH 20 -#define EXIT_REASON_VMPTRLD 21 -#define EXIT_REASON_VMPTRST 22 -#define EXIT_REASON_VMREAD 23 -#define EXIT_REASON_VMRESUME 24 -#define EXIT_REASON_VMWRITE 25 -#define EXIT_REASON_VMOFF 26 -#define EXIT_REASON_VMON 27 -#define EXIT_REASON_CR_ACCESS 28 -#define EXIT_REASON_DR_ACCESS 29 -#define EXIT_REASON_IO_INSTRUCTION 30 -#define EXIT_REASON_MSR_READ 31 -#define EXIT_REASON_MSR_WRITE 32 -#define EXIT_REASON_INVALID_STATE 33 -#define EXIT_REASON_MWAIT_INSTRUCTION 36 -#define EXIT_REASON_MONITOR_INSTRUCTION 39 -#define EXIT_REASON_PAUSE_INSTRUCTION 40 -#define EXIT_REASON_MCE_DURING_VMENTRY 41 -#define EXIT_REASON_TPR_BELOW_THRESHOLD 43 -#define EXIT_REASON_APIC_ACCESS 44 -#define EXIT_REASON_EPT_VIOLATION 48 -#define EXIT_REASON_EPT_MISCONFIG 49 -#define EXIT_REASON_WBINVD 54 -#define EXIT_REASON_XSETBV 55 -#define EXIT_REASON_INVPCID 58 - /* * Interruption-information format */ @@ -488,3 +527,5 @@ enum vm_instruction_error_number { }; #endif + +#endif diff --git a/trunk/arch/x86/include/asm/xen/page.h b/trunk/arch/x86/include/asm/xen/page.h index 93971e841dd5..472b9b783019 100644 --- a/trunk/arch/x86/include/asm/xen/page.h +++ b/trunk/arch/x86/include/asm/xen/page.h @@ -51,7 +51,8 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s, extern int m2p_add_override(unsigned long mfn, struct page *page, struct gnttab_map_grant_ref *kmap_op); -extern int m2p_remove_override(struct page *page, bool clear_pte); +extern int m2p_remove_override(struct page *page, + struct gnttab_map_grant_ref *kmap_op); extern struct page *m2p_find_override(unsigned long mfn); extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); diff --git a/trunk/arch/x86/include/asm/xor_32.h b/trunk/arch/x86/include/asm/xor_32.h index 454570891bdc..aabd5850bdb9 100644 --- a/trunk/arch/x86/include/asm/xor_32.h +++ b/trunk/arch/x86/include/asm/xor_32.h @@ -534,38 +534,6 @@ static struct xor_block_template xor_block_p5_mmx = { * Copyright (C) 1999 Zach Brown (with obvious credit due Ingo) */ -#define XMMS_SAVE \ -do { \ - preempt_disable(); \ - cr0 = read_cr0(); \ - clts(); \ - asm volatile( \ - "movups %%xmm0,(%0) ;\n\t" \ - "movups %%xmm1,0x10(%0) ;\n\t" \ - "movups %%xmm2,0x20(%0) ;\n\t" \ - "movups %%xmm3,0x30(%0) ;\n\t" \ - : \ - : "r" (xmm_save) \ - : "memory"); \ -} while (0) - -#define XMMS_RESTORE \ -do { \ - asm volatile( \ - "sfence ;\n\t" \ - "movups (%0),%%xmm0 ;\n\t" \ - "movups 0x10(%0),%%xmm1 ;\n\t" \ - "movups 0x20(%0),%%xmm2 ;\n\t" \ - "movups 0x30(%0),%%xmm3 ;\n\t" \ - : \ - : "r" (xmm_save) \ - : "memory"); \ - write_cr0(cr0); \ - preempt_enable(); \ -} while (0) - -#define ALIGN16 __attribute__((aligned(16))) - #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" #define PF0(x) " prefetchnta "PF_OFFS(x)"(%1) ;\n" @@ -587,10 +555,8 @@ static void 
xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned long lines = bytes >> 8; - char xmm_save[16*4] ALIGN16; - int cr0; - XMMS_SAVE; + kernel_fpu_begin(); asm volatile( #undef BLOCK @@ -633,7 +599,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) : : "memory"); - XMMS_RESTORE; + kernel_fpu_end(); } static void @@ -641,10 +607,8 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned long lines = bytes >> 8; - char xmm_save[16*4] ALIGN16; - int cr0; - XMMS_SAVE; + kernel_fpu_begin(); asm volatile( #undef BLOCK @@ -694,7 +658,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, : : "memory" ); - XMMS_RESTORE; + kernel_fpu_end(); } static void @@ -702,10 +666,8 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned long lines = bytes >> 8; - char xmm_save[16*4] ALIGN16; - int cr0; - XMMS_SAVE; + kernel_fpu_begin(); asm volatile( #undef BLOCK @@ -762,7 +724,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, : : "memory" ); - XMMS_RESTORE; + kernel_fpu_end(); } static void @@ -770,10 +732,8 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned long lines = bytes >> 8; - char xmm_save[16*4] ALIGN16; - int cr0; - XMMS_SAVE; + kernel_fpu_begin(); /* Make sure GCC forgets anything it knows about p4 or p5, such that it won't pass to the asm volatile below a @@ -850,7 +810,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, like assuming they have some legal value. */ asm("" : "=r" (p4), "=r" (p5)); - XMMS_RESTORE; + kernel_fpu_end(); } static struct xor_block_template xor_block_pIII_sse = { diff --git a/trunk/arch/x86/include/asm/xor_64.h b/trunk/arch/x86/include/asm/xor_64.h index b9b2323e90fe..5fc06d0b7eb5 100644 --- a/trunk/arch/x86/include/asm/xor_64.h +++ b/trunk/arch/x86/include/asm/xor_64.h @@ -34,41 +34,7 @@ * no advantages to be gotten from x86-64 here anyways. */ -typedef struct { - unsigned long a, b; -} __attribute__((aligned(16))) xmm_store_t; - -/* Doesn't use gcc to save the XMM registers, because there is no easy way to - tell it to do a clts before the register saving. 
*/ -#define XMMS_SAVE \ -do { \ - preempt_disable(); \ - asm volatile( \ - "movq %%cr0,%0 ;\n\t" \ - "clts ;\n\t" \ - "movups %%xmm0,(%1) ;\n\t" \ - "movups %%xmm1,0x10(%1) ;\n\t" \ - "movups %%xmm2,0x20(%1) ;\n\t" \ - "movups %%xmm3,0x30(%1) ;\n\t" \ - : "=&r" (cr0) \ - : "r" (xmm_save) \ - : "memory"); \ -} while (0) - -#define XMMS_RESTORE \ -do { \ - asm volatile( \ - "sfence ;\n\t" \ - "movups (%1),%%xmm0 ;\n\t" \ - "movups 0x10(%1),%%xmm1 ;\n\t" \ - "movups 0x20(%1),%%xmm2 ;\n\t" \ - "movups 0x30(%1),%%xmm3 ;\n\t" \ - "movq %0,%%cr0 ;\n\t" \ - : \ - : "r" (cr0), "r" (xmm_save) \ - : "memory"); \ - preempt_enable(); \ -} while (0) +#include #define OFFS(x) "16*("#x")" #define PF_OFFS(x) "256+16*("#x")" @@ -91,10 +57,8 @@ static void xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) { unsigned int lines = bytes >> 8; - unsigned long cr0; - xmm_store_t xmm_save[4]; - XMMS_SAVE; + kernel_fpu_begin(); asm volatile( #undef BLOCK @@ -135,7 +99,7 @@ xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) : [inc] "r" (256UL) : "memory"); - XMMS_RESTORE; + kernel_fpu_end(); } static void @@ -143,11 +107,8 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3) { unsigned int lines = bytes >> 8; - xmm_store_t xmm_save[4]; - unsigned long cr0; - - XMMS_SAVE; + kernel_fpu_begin(); asm volatile( #undef BLOCK #define BLOCK(i) \ @@ -194,7 +155,7 @@ xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2, [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3) : [inc] "r" (256UL) : "memory"); - XMMS_RESTORE; + kernel_fpu_end(); } static void @@ -202,10 +163,8 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { unsigned int lines = bytes >> 8; - xmm_store_t xmm_save[4]; - unsigned long cr0; - XMMS_SAVE; + kernel_fpu_begin(); asm volatile( #undef BLOCK @@ -261,7 +220,7 @@ xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2, : [inc] "r" (256UL) : "memory" ); - XMMS_RESTORE; + kernel_fpu_end(); } static void @@ -269,10 +228,8 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4, unsigned long *p5) { unsigned int lines = bytes >> 8; - xmm_store_t xmm_save[4]; - unsigned long cr0; - XMMS_SAVE; + kernel_fpu_begin(); asm volatile( #undef BLOCK @@ -336,7 +293,7 @@ xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2, : [inc] "r" (256UL) : "memory"); - XMMS_RESTORE; + kernel_fpu_end(); } static struct xor_block_template xor_block_sse = { diff --git a/trunk/arch/x86/include/asm/xor_avx.h b/trunk/arch/x86/include/asm/xor_avx.h index 2510d35f480e..7ea79c5fa1f2 100644 --- a/trunk/arch/x86/include/asm/xor_avx.h +++ b/trunk/arch/x86/include/asm/xor_avx.h @@ -20,32 +20,6 @@ #include #include -#define ALIGN32 __aligned(32) - -#define YMM_SAVED_REGS 4 - -#define YMMS_SAVE \ -do { \ - preempt_disable(); \ - cr0 = read_cr0(); \ - clts(); \ - asm volatile("vmovaps %%ymm0, %0" : "=m" (ymm_save[0]) : : "memory"); \ - asm volatile("vmovaps %%ymm1, %0" : "=m" (ymm_save[32]) : : "memory"); \ - asm volatile("vmovaps %%ymm2, %0" : "=m" (ymm_save[64]) : : "memory"); \ - asm volatile("vmovaps %%ymm3, %0" : "=m" (ymm_save[96]) : : "memory"); \ -} while (0); - -#define YMMS_RESTORE \ -do { \ - asm volatile("sfence" : : : "memory"); \ - asm volatile("vmovaps %0, %%ymm3" : : "m" (ymm_save[96])); \ - asm volatile("vmovaps %0, %%ymm2" : : "m" (ymm_save[64])); \ - asm volatile("vmovaps %0, %%ymm1" : : "m" (ymm_save[32])); \ 
- asm volatile("vmovaps %0, %%ymm0" : : "m" (ymm_save[0])); \ - write_cr0(cr0); \ - preempt_enable(); \ -} while (0); - #define BLOCK4(i) \ BLOCK(32 * i, 0) \ BLOCK(32 * (i + 1), 1) \ @@ -60,10 +34,9 @@ do { \ static void xor_avx_2(unsigned long bytes, unsigned long *p0, unsigned long *p1) { - unsigned long cr0, lines = bytes >> 9; - char ymm_save[32 * YMM_SAVED_REGS] ALIGN32; + unsigned long lines = bytes >> 9; - YMMS_SAVE + kernel_fpu_begin(); while (lines--) { #undef BLOCK @@ -82,16 +55,15 @@ do { \ p1 = (unsigned long *)((uintptr_t)p1 + 512); } - YMMS_RESTORE + kernel_fpu_end(); } static void xor_avx_3(unsigned long bytes, unsigned long *p0, unsigned long *p1, unsigned long *p2) { - unsigned long cr0, lines = bytes >> 9; - char ymm_save[32 * YMM_SAVED_REGS] ALIGN32; + unsigned long lines = bytes >> 9; - YMMS_SAVE + kernel_fpu_begin(); while (lines--) { #undef BLOCK @@ -113,16 +85,15 @@ do { \ p2 = (unsigned long *)((uintptr_t)p2 + 512); } - YMMS_RESTORE + kernel_fpu_end(); } static void xor_avx_4(unsigned long bytes, unsigned long *p0, unsigned long *p1, unsigned long *p2, unsigned long *p3) { - unsigned long cr0, lines = bytes >> 9; - char ymm_save[32 * YMM_SAVED_REGS] ALIGN32; + unsigned long lines = bytes >> 9; - YMMS_SAVE + kernel_fpu_begin(); while (lines--) { #undef BLOCK @@ -147,16 +118,15 @@ do { \ p3 = (unsigned long *)((uintptr_t)p3 + 512); } - YMMS_RESTORE + kernel_fpu_end(); } static void xor_avx_5(unsigned long bytes, unsigned long *p0, unsigned long *p1, unsigned long *p2, unsigned long *p3, unsigned long *p4) { - unsigned long cr0, lines = bytes >> 9; - char ymm_save[32 * YMM_SAVED_REGS] ALIGN32; + unsigned long lines = bytes >> 9; - YMMS_SAVE + kernel_fpu_begin(); while (lines--) { #undef BLOCK @@ -184,7 +154,7 @@ do { \ p4 = (unsigned long *)((uintptr_t)p4 + 512); } - YMMS_RESTORE + kernel_fpu_end(); } static struct xor_block_template xor_block_avx = { diff --git a/trunk/arch/x86/include/asm/xsave.h b/trunk/arch/x86/include/asm/xsave.h index 8a1b6f9b594a..2ddee1b87793 100644 --- a/trunk/arch/x86/include/asm/xsave.h +++ b/trunk/arch/x86/include/asm/xsave.h @@ -34,17 +34,14 @@ extern unsigned int xstate_size; extern u64 pcntxt_mask; extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS]; +extern struct xsave_struct *init_xstate_buf; extern void xsave_init(void); extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask); extern int init_fpu(struct task_struct *child); -extern int check_for_xstate(struct i387_fxsave_struct __user *buf, - void __user *fpstate, - struct _fpx_sw_bytes *sw); -static inline int fpu_xrstor_checking(struct fpu *fpu) +static inline int fpu_xrstor_checking(struct xsave_struct *fx) { - struct xsave_struct *fx = &fpu->state->xsave; int err; asm volatile("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n\t" @@ -69,8 +66,7 @@ static inline int xsave_user(struct xsave_struct __user *buf) * Clear the xsave header first, so that reserved fields are * initialized to zero. 
*/ - err = __clear_user(&buf->xsave_hdr, - sizeof(struct xsave_hdr_struct)); + err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr)); if (unlikely(err)) return -EFAULT; @@ -84,9 +80,6 @@ static inline int xsave_user(struct xsave_struct __user *buf) : [err] "=r" (err) : "D" (buf), "a" (-1), "d" (-1), "0" (0) : "memory"); - if (unlikely(err) && __clear_user(buf, xstate_size)) - err = -EFAULT; - /* No need to clear here because the caller clears USED_MATH */ return err; } diff --git a/trunk/arch/x86/kernel/Makefile b/trunk/arch/x86/kernel/Makefile index 8215e5652d97..8d7a619718b5 100644 --- a/trunk/arch/x86/kernel/Makefile +++ b/trunk/arch/x86/kernel/Makefile @@ -100,6 +100,8 @@ obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o obj-$(CONFIG_OF) += devicetree.o obj-$(CONFIG_UPROBES) += uprobes.o +obj-$(CONFIG_PERF_EVENTS) += perf_regs.o + ### # 64 bit specific files ifeq ($(CONFIG_X86_64),y) diff --git a/trunk/arch/x86/kernel/acpi/boot.c b/trunk/arch/x86/kernel/acpi/boot.c index b2297e58c6ed..e651f7a589ac 100644 --- a/trunk/arch/x86/kernel/acpi/boot.c +++ b/trunk/arch/x86/kernel/acpi/boot.c @@ -656,7 +656,7 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) acpi_register_lapic(physid, ACPI_MADT_ENABLED); /* - * If mp_register_lapic successfully generates a new logical cpu + * If acpi_register_lapic successfully generates a new logical cpu * number, then the following will get us exactly what was mapped */ cpumask_andnot(new_map, cpu_present_mask, tmp_map); diff --git a/trunk/arch/x86/kernel/acpi/sleep.c b/trunk/arch/x86/kernel/acpi/sleep.c index 95bf99de9058..1b8e5a03d942 100644 --- a/trunk/arch/x86/kernel/acpi/sleep.c +++ b/trunk/arch/x86/kernel/acpi/sleep.c @@ -25,10 +25,6 @@ unsigned long acpi_realmode_flags; static char temp_stack[4096]; #endif -asmlinkage void acpi_enter_s3(void) -{ - acpi_enter_sleep_state(3, wake_sleep_flags); -} /** * acpi_suspend_lowlevel - save kernel state * diff --git a/trunk/arch/x86/kernel/acpi/sleep.h b/trunk/arch/x86/kernel/acpi/sleep.h index 5653a5791ec9..67f59f8c6956 100644 --- a/trunk/arch/x86/kernel/acpi/sleep.h +++ b/trunk/arch/x86/kernel/acpi/sleep.h @@ -2,7 +2,6 @@ * Variables and functions used by the code in sleep.c */ -#include #include extern unsigned long saved_video_mode; @@ -11,7 +10,6 @@ extern long saved_magic; extern int wakeup_pmode_return; extern u8 wake_sleep_flags; -extern asmlinkage void acpi_enter_s3(void); extern unsigned long acpi_copy_wakeup_routine(unsigned long); extern void wakeup_long64(void); diff --git a/trunk/arch/x86/kernel/acpi/wakeup_32.S b/trunk/arch/x86/kernel/acpi/wakeup_32.S index 72610839f03b..13ab720573e3 100644 --- a/trunk/arch/x86/kernel/acpi/wakeup_32.S +++ b/trunk/arch/x86/kernel/acpi/wakeup_32.S @@ -74,7 +74,9 @@ restore_registers: ENTRY(do_suspend_lowlevel) call save_processor_state call save_registers - call acpi_enter_s3 + pushl $3 + call acpi_enter_sleep_state + addl $4, %esp # In case of S3 failure, we'll emerge here. 
Jump # to ret_point to recover diff --git a/trunk/arch/x86/kernel/acpi/wakeup_64.S b/trunk/arch/x86/kernel/acpi/wakeup_64.S index 014d1d28c397..8ea5164cbd04 100644 --- a/trunk/arch/x86/kernel/acpi/wakeup_64.S +++ b/trunk/arch/x86/kernel/acpi/wakeup_64.S @@ -71,7 +71,9 @@ ENTRY(do_suspend_lowlevel) movq %rsi, saved_rsi addq $8, %rsp - call acpi_enter_s3 + movl $3, %edi + xorl %eax, %eax + call acpi_enter_sleep_state /* in case something went wrong, restore the machine status and go on */ jmp resume_point diff --git a/trunk/arch/x86/kernel/alternative.c b/trunk/arch/x86/kernel/alternative.c index 931280ff8299..ef5ccca79a6c 100644 --- a/trunk/arch/x86/kernel/alternative.c +++ b/trunk/arch/x86/kernel/alternative.c @@ -23,19 +23,6 @@ #define MAX_PATCH_LEN (255-1) -#ifdef CONFIG_HOTPLUG_CPU -static int smp_alt_once; - -static int __init bootonly(char *str) -{ - smp_alt_once = 1; - return 1; -} -__setup("smp-alt-boot", bootonly); -#else -#define smp_alt_once 1 -#endif - static int __initdata_or_module debug_alternative; static int __init debug_alt(char *str) @@ -165,7 +152,7 @@ static const unsigned char * const k7_nops[ASM_NOP_MAX+2] = #endif #ifdef P6_NOP1 -static const unsigned char __initconst_or_module p6nops[] = +static const unsigned char p6nops[] = { P6_NOP1, P6_NOP2, @@ -224,7 +211,7 @@ void __init arch_init_ideal_nops(void) ideal_nops = intel_nops; #endif } - + break; default: #ifdef CONFIG_X86_64 ideal_nops = k8_nops; @@ -317,7 +304,7 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end, /* turn DS segment override prefix into lock prefix */ if (*ptr == 0x3e) text_poke(ptr, ((unsigned char []){0xf0}), 1); - }; + } mutex_unlock(&text_mutex); } @@ -326,9 +313,6 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end, { const s32 *poff; - if (noreplace_smp) - return; - mutex_lock(&text_mutex); for (poff = start; poff < end; poff++) { u8 *ptr = (u8 *)poff + *poff; @@ -338,7 +322,7 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end, /* turn lock prefix into DS segment override prefix */ if (*ptr == 0xf0) text_poke(ptr, ((unsigned char []){0x3E}), 1); - }; + } mutex_unlock(&text_mutex); } @@ -359,7 +343,7 @@ struct smp_alt_module { }; static LIST_HEAD(smp_alt_modules); static DEFINE_MUTEX(smp_alt); -static int smp_mode = 1; /* protected by smp_alt */ +static bool uniproc_patched = false; /* protected by smp_alt */ void __init_or_module alternatives_smp_module_add(struct module *mod, char *name, @@ -368,19 +352,18 @@ void __init_or_module alternatives_smp_module_add(struct module *mod, { struct smp_alt_module *smp; - if (noreplace_smp) - return; + mutex_lock(&smp_alt); + if (!uniproc_patched) + goto unlock; - if (smp_alt_once) { - if (boot_cpu_has(X86_FEATURE_UP)) - alternatives_smp_unlock(locks, locks_end, - text, text_end); - return; - } + if (num_possible_cpus() == 1) + /* Don't bother remembering, we'll never have to undo it. */ + goto smp_unlock; smp = kzalloc(sizeof(*smp), GFP_KERNEL); if (NULL == smp) - return; /* we'll run the (safe but slow) SMP code then ... */ + /* we'll run the (safe but slow) SMP code then ... 
*/ + goto unlock; smp->mod = mod; smp->name = name; @@ -392,11 +375,10 @@ void __init_or_module alternatives_smp_module_add(struct module *mod, __func__, smp->locks, smp->locks_end, smp->text, smp->text_end, smp->name); - mutex_lock(&smp_alt); list_add_tail(&smp->next, &smp_alt_modules); - if (boot_cpu_has(X86_FEATURE_UP)) - alternatives_smp_unlock(smp->locks, smp->locks_end, - smp->text, smp->text_end); +smp_unlock: + alternatives_smp_unlock(locks, locks_end, text, text_end); +unlock: mutex_unlock(&smp_alt); } @@ -404,24 +386,18 @@ void __init_or_module alternatives_smp_module_del(struct module *mod) { struct smp_alt_module *item; - if (smp_alt_once || noreplace_smp) - return; - mutex_lock(&smp_alt); list_for_each_entry(item, &smp_alt_modules, next) { if (mod != item->mod) continue; list_del(&item->next); - mutex_unlock(&smp_alt); - DPRINTK("%s: %s\n", __func__, item->name); kfree(item); - return; + break; } mutex_unlock(&smp_alt); } -bool skip_smp_alternatives; -void alternatives_smp_switch(int smp) +void alternatives_enable_smp(void) { struct smp_alt_module *mod; @@ -436,34 +412,21 @@ void alternatives_smp_switch(int smp) pr_info("lockdep: fixing up alternatives\n"); #endif - if (noreplace_smp || smp_alt_once || skip_smp_alternatives) - return; - BUG_ON(!smp && (num_online_cpus() > 1)); + /* Why bother if there are no other CPUs? */ + BUG_ON(num_possible_cpus() == 1); mutex_lock(&smp_alt); - /* - * Avoid unnecessary switches because it forces JIT based VMs to - * throw away all cached translations, which can be quite costly. - */ - if (smp == smp_mode) { - /* nothing */ - } else if (smp) { + if (uniproc_patched) { pr_info("switching to SMP code\n"); + BUG_ON(num_online_cpus() != 1); clear_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); clear_cpu_cap(&cpu_data(0), X86_FEATURE_UP); list_for_each_entry(mod, &smp_alt_modules, next) alternatives_smp_lock(mod->locks, mod->locks_end, mod->text, mod->text_end); - } else { - pr_info("switching to UP code\n"); - set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); - set_cpu_cap(&cpu_data(0), X86_FEATURE_UP); - list_for_each_entry(mod, &smp_alt_modules, next) - alternatives_smp_unlock(mod->locks, mod->locks_end, - mod->text, mod->text_end); + uniproc_patched = false; } - smp_mode = smp; mutex_unlock(&smp_alt); } @@ -540,40 +503,22 @@ void __init alternative_instructions(void) apply_alternatives(__alt_instructions, __alt_instructions_end); - /* switch to patch-once-at-boottime-only mode and free the - * tables in case we know the number of CPUs will never ever - * change */ -#ifdef CONFIG_HOTPLUG_CPU - if (num_possible_cpus() < 2) - smp_alt_once = 1; -#endif - #ifdef CONFIG_SMP - if (smp_alt_once) { - if (1 == num_possible_cpus()) { - pr_info("switching to UP code\n"); - set_cpu_cap(&boot_cpu_data, X86_FEATURE_UP); - set_cpu_cap(&cpu_data(0), X86_FEATURE_UP); - - alternatives_smp_unlock(__smp_locks, __smp_locks_end, - _text, _etext); - } - } else { + /* Patch to UP if other cpus not imminent. 
*/ + if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) { + uniproc_patched = true; alternatives_smp_module_add(NULL, "core kernel", __smp_locks, __smp_locks_end, _text, _etext); - - /* Only switch to UP mode if we don't immediately boot others */ - if (num_present_cpus() == 1 || setup_max_cpus <= 1) - alternatives_smp_switch(0); } -#endif - apply_paravirt(__parainstructions, __parainstructions_end); - if (smp_alt_once) + if (!uniproc_patched || num_possible_cpus() == 1) free_init_pages("SMP alternatives", (unsigned long)__smp_locks, (unsigned long)__smp_locks_end); +#endif + + apply_paravirt(__parainstructions, __parainstructions_end); restart_nmi(); } diff --git a/trunk/arch/x86/kernel/apic/apic.c b/trunk/arch/x86/kernel/apic/apic.c index 24deb3082328..b17416e72fbd 100644 --- a/trunk/arch/x86/kernel/apic/apic.c +++ b/trunk/arch/x86/kernel/apic/apic.c @@ -1934,7 +1934,7 @@ void smp_error_interrupt(struct pt_regs *regs) apic_printk(APIC_DEBUG, KERN_CONT " : %s", error_interrupt_reason[i]); i++; v1 >>= 1; - }; + } apic_printk(APIC_DEBUG, KERN_CONT "\n"); diff --git a/trunk/arch/x86/kernel/apic/io_apic.c b/trunk/arch/x86/kernel/apic/io_apic.c index 406eee784684..c265593ec2cd 100644 --- a/trunk/arch/x86/kernel/apic/io_apic.c +++ b/trunk/arch/x86/kernel/apic/io_apic.c @@ -1204,7 +1204,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) BUG_ON(!cfg->vector); vector = cfg->vector; - for_each_cpu(cpu, cfg->domain) + for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) per_cpu(vector_irq, cpu)[vector] = -1; cfg->vector = 0; @@ -1212,7 +1212,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg) if (likely(!cfg->move_in_progress)) return; - for_each_cpu(cpu, cfg->old_domain) { + for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { if (per_cpu(vector_irq, cpu)[vector] != irq) @@ -1356,6 +1356,16 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg, if (!IO_APIC_IRQ(irq)) return; + /* + * For legacy irqs, cfg->domain starts with cpu 0. Now that IO-APIC + * can handle this irq and the apic driver is finialized at this point, + * update the cfg->domain. + */ + if (irq < legacy_pic->nr_legacy_irqs && + cpumask_equal(cfg->domain, cpumask_of(0))) + apic->vector_allocation_domain(0, cfg->domain, + apic->target_cpus()); + if (assign_irq_vector(irq, cfg, apic->target_cpus())) return; diff --git a/trunk/arch/x86/kernel/cpu/bugs.c b/trunk/arch/x86/kernel/cpu/bugs.c index c97bb7b5a9f8..d0e910da16c5 100644 --- a/trunk/arch/x86/kernel/cpu/bugs.c +++ b/trunk/arch/x86/kernel/cpu/bugs.c @@ -165,10 +165,15 @@ void __init check_bugs(void) print_cpu_info(&boot_cpu_data); #endif check_config(); - check_fpu(); check_hlt(); check_popad(); init_utsname()->machine[1] = '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); alternative_instructions(); + + /* + * kernel_fpu_begin/end() in check_fpu() relies on the patched + * alternative instructions. 
+ */ + check_fpu(); } diff --git a/trunk/arch/x86/kernel/cpu/common.c b/trunk/arch/x86/kernel/cpu/common.c index 080f4a737e3e..532691b6c8fe 100644 --- a/trunk/arch/x86/kernel/cpu/common.c +++ b/trunk/arch/x86/kernel/cpu/common.c @@ -144,6 +144,8 @@ static int __init x86_xsave_setup(char *s) { setup_clear_cpu_cap(X86_FEATURE_XSAVE); setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); + setup_clear_cpu_cap(X86_FEATURE_AVX); + setup_clear_cpu_cap(X86_FEATURE_AVX2); return 1; } __setup("noxsave", x86_xsave_setup); @@ -1020,14 +1022,16 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) printk(KERN_CONT "%s ", vendor); if (c->x86_model_id[0]) - printk(KERN_CONT "%s", c->x86_model_id); + printk(KERN_CONT "%s", strim(c->x86_model_id)); else printk(KERN_CONT "%d86", c->x86); + printk(KERN_CONT " (fam: %02x, model: %02x", c->x86, c->x86_model); + if (c->x86_mask || c->cpuid_level >= 0) - printk(KERN_CONT " stepping %02x\n", c->x86_mask); + printk(KERN_CONT ", stepping: %02x)\n", c->x86_mask); else - printk(KERN_CONT "\n"); + printk(KERN_CONT ")\n"); print_cpu_msr(c); } @@ -1113,8 +1117,6 @@ void syscall_init(void) X86_EFLAGS_TF|X86_EFLAGS_DF|X86_EFLAGS_IF|X86_EFLAGS_IOPL); } -unsigned long kernel_eflags; - /* * Copies of the original ist values from the tss are only accessed during * debugging, no special alignment required. @@ -1294,9 +1296,6 @@ void __cpuinit cpu_init(void) dbg_restore_debug_regs(); fpu_init(); - xsave_init(); - - raw_local_save_flags(kernel_eflags); if (is_uv_system()) uv_cpu_init(); @@ -1349,6 +1348,5 @@ void __cpuinit cpu_init(void) dbg_restore_debug_regs(); fpu_init(); - xsave_init(); } #endif diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c b/trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c index fc4beb393577..ddc72f839332 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -78,6 +78,7 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs) } static cpumask_var_t mce_inject_cpumask; +static DEFINE_MUTEX(mce_inject_mutex); static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) { @@ -194,7 +195,11 @@ static void raise_mce(struct mce *m) put_online_cpus(); } else #endif + { + preempt_disable(); raise_local(); + preempt_enable(); + } } /* Error injection interface */ @@ -225,7 +230,10 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf, * so do it a jiffie or two later everywhere. 
*/ schedule_timeout(2); + + mutex_lock(&mce_inject_mutex); raise_mce(&m); + mutex_unlock(&mce_inject_mutex); return usize; } diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce-internal.h b/trunk/arch/x86/kernel/cpu/mcheck/mce-internal.h index ed44c8a65858..6a05c1d327a9 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -28,6 +28,18 @@ extern int mce_ser; extern struct mce_bank *mce_banks; +#ifdef CONFIG_X86_MCE_INTEL +unsigned long mce_intel_adjust_timer(unsigned long interval); +void mce_intel_cmci_poll(void); +void mce_intel_hcpu_update(unsigned long cpu); +#else +# define mce_intel_adjust_timer mce_adjust_timer_default +static inline void mce_intel_cmci_poll(void) { } +static inline void mce_intel_hcpu_update(unsigned long cpu) { } +#endif + +void mce_timer_kick(unsigned long interval); + #ifdef CONFIG_ACPI_APEI int apei_write_mce(struct mce *m); ssize_t apei_read_mce(struct mce *m, u64 *record_id); diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c b/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c index 413c2ced887c..13017626f9a8 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce-severity.c @@ -55,13 +55,6 @@ static struct severity { #define MCI_UC_S (MCI_STATUS_UC|MCI_STATUS_S) #define MCI_UC_SAR (MCI_STATUS_UC|MCI_STATUS_S|MCI_STATUS_AR) #define MCI_ADDR (MCI_STATUS_ADDRV|MCI_STATUS_MISCV) -#define MCACOD 0xffff -/* Architecturally defined codes from SDM Vol. 3B Chapter 15 */ -#define MCACOD_SCRUB 0x00C0 /* 0xC0-0xCF Memory Scrubbing */ -#define MCACOD_SCRUBMSK 0xfff0 -#define MCACOD_L3WB 0x017A /* L3 Explicit Writeback */ -#define MCACOD_DATA 0x0134 /* Data Load */ -#define MCACOD_INSTR 0x0150 /* Instruction Fetch */ MCESEV( NO, "Invalid", diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce.c b/trunk/arch/x86/kernel/cpu/mcheck/mce.c index 5e095f873e3e..29e87d3b2843 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce.c @@ -83,6 +83,7 @@ static int mce_dont_log_ce __read_mostly; int mce_cmci_disabled __read_mostly; int mce_ignore_ce __read_mostly; int mce_ser __read_mostly; +int mce_bios_cmci_threshold __read_mostly; struct mce_bank *mce_banks __read_mostly; @@ -103,6 +104,8 @@ DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = { static DEFINE_PER_CPU(struct work_struct, mce_work); +static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. @@ -650,14 +653,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll); * Do a quick check if any of the events requires a panic. * This decides if we keep the events around or clear them. 
*/ -static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp) +static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp, + struct pt_regs *regs) { int i, ret = 0; for (i = 0; i < banks; i++) { m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); - if (m->status & MCI_STATUS_VAL) + if (m->status & MCI_STATUS_VAL) { __set_bit(i, validp); + if (quirk_no_way_out) + quirk_no_way_out(i, m, regs); + } if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) ret = 1; } @@ -1040,7 +1047,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) *final = m; memset(valid_banks, 0, sizeof(valid_banks)); - no_way_out = mce_no_way_out(&m, &msg, valid_banks); + no_way_out = mce_no_way_out(&m, &msg, valid_banks, regs); barrier(); @@ -1260,6 +1267,14 @@ static unsigned long check_interval = 5 * 60; /* 5 minutes */ static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ static DEFINE_PER_CPU(struct timer_list, mce_timer); +static unsigned long mce_adjust_timer_default(unsigned long interval) +{ + return interval; +} + +static unsigned long (*mce_adjust_timer)(unsigned long interval) = + mce_adjust_timer_default; + static void mce_timer_fn(unsigned long data) { struct timer_list *t = &__get_cpu_var(mce_timer); @@ -1270,6 +1285,7 @@ static void mce_timer_fn(unsigned long data) if (mce_available(__this_cpu_ptr(&cpu_info))) { machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_poll_banks)); + mce_intel_cmci_poll(); } /* @@ -1277,14 +1293,38 @@ static void mce_timer_fn(unsigned long data) * polling interval, otherwise increase the polling interval. */ iv = __this_cpu_read(mce_next_interval); - if (mce_notify_irq()) + if (mce_notify_irq()) { iv = max(iv / 2, (unsigned long) HZ/100); - else + } else { iv = min(iv * 2, round_jiffies_relative(check_interval * HZ)); + iv = mce_adjust_timer(iv); + } __this_cpu_write(mce_next_interval, iv); + /* Might have become 0 after CMCI storm subsided */ + if (iv) { + t->expires = jiffies + iv; + add_timer_on(t, smp_processor_id()); + } +} - t->expires = jiffies + iv; - add_timer_on(t, smp_processor_id()); +/* + * Ensure that the timer is firing in @interval from now. + */ +void mce_timer_kick(unsigned long interval) +{ + struct timer_list *t = &__get_cpu_var(mce_timer); + unsigned long when = jiffies + interval; + unsigned long iv = __this_cpu_read(mce_next_interval); + + if (timer_pending(t)) { + if (time_before(when, t->expires)) + mod_timer_pinned(t, when); + } else { + t->expires = round_jiffies(when); + add_timer_on(t, smp_processor_id()); + } + if (interval < iv) + __this_cpu_write(mce_next_interval, interval); } /* Must not be called in IRQ context where del_timer_sync() can deadlock */ @@ -1418,6 +1458,34 @@ static void __mcheck_cpu_init_generic(void) } } +/* + * During IFU recovery Sandy Bridge -EP4S processors set the RIPV and + * EIPV bits in MCG_STATUS to zero on the affected logical processor (SDM + * Vol 3B Table 15-20). But this confuses both the code that determines + * whether the machine check occurred in kernel or user mode, and also + * the severity assessment code. Pretend that EIPV was set, and take the + * ip/cs values from the pt_regs that mce_gather_info() ignored earlier. 
+ */ +static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) +{ + if (bank != 0) + return; + if ((m->mcgstatus & (MCG_STATUS_EIPV|MCG_STATUS_RIPV)) != 0) + return; + if ((m->status & (MCI_STATUS_OVER|MCI_STATUS_UC| + MCI_STATUS_EN|MCI_STATUS_MISCV|MCI_STATUS_ADDRV| + MCI_STATUS_PCC|MCI_STATUS_S|MCI_STATUS_AR| + MCACOD)) != + (MCI_STATUS_UC|MCI_STATUS_EN| + MCI_STATUS_MISCV|MCI_STATUS_ADDRV|MCI_STATUS_S| + MCI_STATUS_AR|MCACOD_INSTR)) + return; + + m->mcgstatus |= MCG_STATUS_EIPV; + m->ip = regs->ip; + m->cs = regs->cs; +} + /* Add per CPU specific workarounds here */ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) { @@ -1515,6 +1583,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) */ if (c->x86 == 6 && c->x86_model <= 13 && mce_bootlog < 0) mce_bootlog = 0; + + if (c->x86 == 6 && c->x86_model == 45) + quirk_no_way_out = quirk_sandybridge_ifu; } if (monarch_timeout < 0) monarch_timeout = 0; @@ -1548,6 +1619,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) switch (c->x86_vendor) { case X86_VENDOR_INTEL: mce_intel_feature_init(c); + mce_adjust_timer = mce_intel_adjust_timer; break; case X86_VENDOR_AMD: mce_amd_feature_init(c); @@ -1557,23 +1629,28 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c) } } -static void __mcheck_cpu_init_timer(void) +static void mce_start_timer(unsigned int cpu, struct timer_list *t) { - struct timer_list *t = &__get_cpu_var(mce_timer); - unsigned long iv = check_interval * HZ; + unsigned long iv = mce_adjust_timer(check_interval * HZ); - setup_timer(t, mce_timer_fn, smp_processor_id()); + __this_cpu_write(mce_next_interval, iv); - if (mce_ignore_ce) + if (mce_ignore_ce || !iv) return; - __this_cpu_write(mce_next_interval, iv); - if (!iv) - return; t->expires = round_jiffies(jiffies + iv); add_timer_on(t, smp_processor_id()); } +static void __mcheck_cpu_init_timer(void) +{ + struct timer_list *t = &__get_cpu_var(mce_timer); + unsigned int cpu = smp_processor_id(); + + setup_timer(t, mce_timer_fn, cpu); + mce_start_timer(cpu, t); +} + /* Handle unconfigured int18 (should never happen) */ static void unexpected_machine_check(struct pt_regs *regs, long error_code) { @@ -1870,6 +1947,7 @@ static struct miscdevice mce_chrdev_device = { * check, or 0 to not wait * mce=bootlog Log MCEs from before booting. Disabled by default on AMD. * mce=nobootlog Don't log MCEs from before booting. 
+ * mce=bios_cmci_threshold Don't program the CMCI threshold */ static int __init mcheck_enable(char *str) { @@ -1889,6 +1967,8 @@ static int __init mcheck_enable(char *str) mce_ignore_ce = 1; else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) mce_bootlog = (str[0] == 'b'); + else if (!strcmp(str, "bios_cmci_threshold")) + mce_bios_cmci_threshold = 1; else if (isdigit(str[0])) { get_option(&str, &tolerant); if (*str == ',') { @@ -2129,6 +2209,11 @@ static struct dev_ext_attribute dev_attr_cmci_disabled = { &mce_cmci_disabled }; +static struct dev_ext_attribute dev_attr_bios_cmci_threshold = { + __ATTR(bios_cmci_threshold, 0444, device_show_int, NULL), + &mce_bios_cmci_threshold +}; + static struct device_attribute *mce_device_attrs[] = { &dev_attr_tolerant.attr, &dev_attr_check_interval.attr, @@ -2137,6 +2222,7 @@ static struct device_attribute *mce_device_attrs[] = { &dev_attr_dont_log_ce.attr, &dev_attr_ignore_ce.attr, &dev_attr_cmci_disabled.attr, + &dev_attr_bios_cmci_threshold.attr, NULL }; @@ -2257,38 +2343,33 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) unsigned int cpu = (unsigned long)hcpu; struct timer_list *t = &per_cpu(mce_timer, cpu); - switch (action) { + switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: - case CPU_ONLINE_FROZEN: mce_device_create(cpu); if (threshold_cpu_callback) threshold_cpu_callback(action, cpu); break; case CPU_DEAD: - case CPU_DEAD_FROZEN: if (threshold_cpu_callback) threshold_cpu_callback(action, cpu); mce_device_remove(cpu); + mce_intel_hcpu_update(cpu); break; case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - del_timer_sync(t); smp_call_function_single(cpu, mce_disable_cpu, &action, 1); + del_timer_sync(t); break; case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - if (!mce_ignore_ce && check_interval) { - t->expires = round_jiffies(jiffies + - per_cpu(mce_next_interval, cpu)); - add_timer_on(t, cpu); - } smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); + mce_start_timer(cpu, t); break; - case CPU_POST_DEAD: + } + + if (action == CPU_POST_DEAD) { /* intentionally ignoring frozen here */ cmci_rediscover(cpu); - break; } + return NOTIFY_OK; } diff --git a/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c b/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c index 38e49bc95ffc..5f88abf07e9c 100644 --- a/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c +++ b/trunk/arch/x86/kernel/cpu/mcheck/mce_intel.c @@ -15,6 +15,8 @@ #include #include +#include "mce-internal.h" + /* * Support for Intel Correct Machine Check Interrupts. This allows * the CPU to raise an interrupt when a corrected machine check happened. 
@@ -30,7 +32,22 @@ static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned); */ static DEFINE_RAW_SPINLOCK(cmci_discover_lock); -#define CMCI_THRESHOLD 1 +#define CMCI_THRESHOLD 1 +#define CMCI_POLL_INTERVAL (30 * HZ) +#define CMCI_STORM_INTERVAL (1 * HZ) +#define CMCI_STORM_THRESHOLD 15 + +static DEFINE_PER_CPU(unsigned long, cmci_time_stamp); +static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt); +static DEFINE_PER_CPU(unsigned int, cmci_storm_state); + +enum { + CMCI_STORM_NONE, + CMCI_STORM_ACTIVE, + CMCI_STORM_SUBSIDED, +}; + +static atomic_t cmci_storm_on_cpus; static int cmci_supported(int *banks) { @@ -53,6 +70,93 @@ static int cmci_supported(int *banks) return !!(cap & MCG_CMCI_P); } +void mce_intel_cmci_poll(void) +{ + if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE) + return; + machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); +} + +void mce_intel_hcpu_update(unsigned long cpu) +{ + if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE) + atomic_dec(&cmci_storm_on_cpus); + + per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE; +} + +unsigned long mce_intel_adjust_timer(unsigned long interval) +{ + int r; + + if (interval < CMCI_POLL_INTERVAL) + return interval; + + switch (__this_cpu_read(cmci_storm_state)) { + case CMCI_STORM_ACTIVE: + /* + * We switch back to interrupt mode once the poll timer has + * silenced itself. That means no events recorded and the + * timer interval is back to our poll interval. + */ + __this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED); + r = atomic_sub_return(1, &cmci_storm_on_cpus); + if (r == 0) + pr_notice("CMCI storm subsided: switching to interrupt mode\n"); + /* FALLTHROUGH */ + + case CMCI_STORM_SUBSIDED: + /* + * We wait for all cpus to go back to SUBSIDED + * state. When that happens we switch back to + * interrupt mode. + */ + if (!atomic_read(&cmci_storm_on_cpus)) { + __this_cpu_write(cmci_storm_state, CMCI_STORM_NONE); + cmci_reenable(); + cmci_recheck(); + } + return CMCI_POLL_INTERVAL; + default: + /* + * We have shiny weather. Let the poll do whatever it + * thinks. + */ + return interval; + } +} + +static bool cmci_storm_detect(void) +{ + unsigned int cnt = __this_cpu_read(cmci_storm_cnt); + unsigned long ts = __this_cpu_read(cmci_time_stamp); + unsigned long now = jiffies; + int r; + + if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE) + return true; + + if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) { + cnt++; + } else { + cnt = 1; + __this_cpu_write(cmci_time_stamp, now); + } + __this_cpu_write(cmci_storm_cnt, cnt); + + if (cnt <= CMCI_STORM_THRESHOLD) + return false; + + cmci_clear(); + __this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE); + r = atomic_add_return(1, &cmci_storm_on_cpus); + mce_timer_kick(CMCI_POLL_INTERVAL); + + if (r == 1) + pr_notice("CMCI storm detected: switching to poll mode\n"); + return true; +} + /* * The interrupt handler. This is called on every event. * Just call the poller directly to log any events. @@ -61,33 +165,28 @@ static int cmci_supported(int *banks) */ static void intel_threshold_interrupt(void) { + if (cmci_storm_detect()) + return; machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned)); mce_notify_irq(); } -static void print_update(char *type, int *hdr, int num) -{ - if (*hdr == 0) - printk(KERN_INFO "CPU %d MCA banks", smp_processor_id()); - *hdr = 1; - printk(KERN_CONT " %s:%d", type, num); -} - /* * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks * on this CPU. 
Use the algorithm recommended in the SDM to discover shared * banks. */ -static void cmci_discover(int banks, int boot) +static void cmci_discover(int banks) { unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned); unsigned long flags; - int hdr = 0; int i; + int bios_wrong_thresh = 0; raw_spin_lock_irqsave(&cmci_discover_lock, flags); for (i = 0; i < banks; i++) { u64 val; + int bios_zero_thresh = 0; if (test_bit(i, owned)) continue; @@ -96,29 +195,52 @@ static void cmci_discover(int banks, int boot) /* Already owned by someone else? */ if (val & MCI_CTL2_CMCI_EN) { - if (test_and_clear_bit(i, owned) && !boot) - print_update("SHD", &hdr, i); + clear_bit(i, owned); __clear_bit(i, __get_cpu_var(mce_poll_banks)); continue; } - val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK; - val |= MCI_CTL2_CMCI_EN | CMCI_THRESHOLD; + if (!mce_bios_cmci_threshold) { + val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK; + val |= CMCI_THRESHOLD; + } else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) { + /* + * If bios_cmci_threshold boot option was specified + * but the threshold is zero, we'll try to initialize + * it to 1. + */ + bios_zero_thresh = 1; + val |= CMCI_THRESHOLD; + } + + val |= MCI_CTL2_CMCI_EN; wrmsrl(MSR_IA32_MCx_CTL2(i), val); rdmsrl(MSR_IA32_MCx_CTL2(i), val); /* Did the enable bit stick? -- the bank supports CMCI */ if (val & MCI_CTL2_CMCI_EN) { - if (!test_and_set_bit(i, owned) && !boot) - print_update("CMCI", &hdr, i); + set_bit(i, owned); __clear_bit(i, __get_cpu_var(mce_poll_banks)); + /* + * We are able to set thresholds for some banks that + * had a threshold of 0. This means the BIOS has not + * set the thresholds properly or does not work with + * this boot option. Note down now and report later. + */ + if (mce_bios_cmci_threshold && bios_zero_thresh && + (val & MCI_CTL2_CMCI_THRESHOLD_MASK)) + bios_wrong_thresh = 1; } else { WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks))); } } raw_spin_unlock_irqrestore(&cmci_discover_lock, flags); - if (hdr) - printk(KERN_CONT "\n"); + if (mce_bios_cmci_threshold && bios_wrong_thresh) { + pr_info_once( + "bios_cmci_threshold: Some banks do not have valid thresholds set\n"); + pr_info_once( + "bios_cmci_threshold: Make sure your BIOS supports this boot option\n"); + } } /* @@ -156,7 +278,7 @@ void cmci_clear(void) continue; /* Disable CMCI */ rdmsrl(MSR_IA32_MCx_CTL2(i), val); - val &= ~(MCI_CTL2_CMCI_EN|MCI_CTL2_CMCI_THRESHOLD_MASK); + val &= ~MCI_CTL2_CMCI_EN; wrmsrl(MSR_IA32_MCx_CTL2(i), val); __clear_bit(i, __get_cpu_var(mce_banks_owned)); } @@ -186,7 +308,7 @@ void cmci_rediscover(int dying) continue; /* Recheck banks in case CPUs don't all have the same */ if (cmci_supported(&banks)) - cmci_discover(banks, 0); + cmci_discover(banks); } set_cpus_allowed_ptr(current, old); @@ -200,7 +322,7 @@ void cmci_reenable(void) { int banks; if (cmci_supported(&banks)) - cmci_discover(banks, 0); + cmci_discover(banks); } static void intel_init_cmci(void) @@ -211,7 +333,7 @@ static void intel_init_cmci(void) return; mce_threshold_vector = intel_threshold_interrupt; - cmci_discover(banks, 1); + cmci_discover(banks); /* * For CPU #0 this runs with still disabled APIC, but that's * ok because only the vector is set up. 
We still do another diff --git a/trunk/arch/x86/kernel/cpu/perf_event.c b/trunk/arch/x86/kernel/cpu/perf_event.c index 29557aa06dda..915b876edd1e 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.c +++ b/trunk/arch/x86/kernel/cpu/perf_event.c @@ -32,6 +32,8 @@ #include #include #include +#include +#include #include "perf_event.h" @@ -1738,6 +1740,29 @@ valid_user_frame(const void __user *fp, unsigned long size) return (__range_not_ok(fp, size, TASK_SIZE) == 0); } +static unsigned long get_segment_base(unsigned int segment) +{ + struct desc_struct *desc; + int idx = segment >> 3; + + if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { + if (idx > LDT_ENTRIES) + return 0; + + if (idx > current->active_mm->context.size) + return 0; + + desc = current->active_mm->context.ldt; + } else { + if (idx > GDT_ENTRIES) + return 0; + + desc = __this_cpu_ptr(&gdt_page.gdt[0]); + } + + return get_desc_base(desc + idx); +} + #ifdef CONFIG_COMPAT #include @@ -1746,13 +1771,17 @@ static inline int perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) { /* 32-bit process in 64-bit kernel. */ + unsigned long ss_base, cs_base; struct stack_frame_ia32 frame; const void __user *fp; if (!test_thread_flag(TIF_IA32)) return 0; - fp = compat_ptr(regs->bp); + cs_base = get_segment_base(regs->cs); + ss_base = get_segment_base(regs->ss); + + fp = compat_ptr(ss_base + regs->bp); while (entry->nr < PERF_MAX_STACK_DEPTH) { unsigned long bytes; frame.next_frame = 0; @@ -1765,8 +1794,8 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry) if (!valid_user_frame(fp, sizeof(frame))) break; - perf_callchain_store(entry, frame.return_address); - fp = compat_ptr(frame.next_frame); + perf_callchain_store(entry, cs_base + frame.return_address); + fp = compat_ptr(ss_base + frame.next_frame); } return 1; } @@ -1789,6 +1818,12 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) return; } + /* + * We don't know what to do with VM86 stacks.. ignore them for now. + */ + if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM)) + return; + fp = (void __user *)regs->bp; perf_callchain_store(entry, regs->ip); @@ -1816,16 +1851,50 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) } } -unsigned long perf_instruction_pointer(struct pt_regs *regs) +/* + * Deal with code segment offsets for the various execution modes: + * + * VM86 - the good olde 16 bit days, where the linear address is + * 20 bits and we use regs->ip + 0x10 * regs->cs. + * + * IA32 - Where we need to look at GDT/LDT segment descriptor tables + * to figure out what the 32bit base address is. + * + * X32 - has TIF_X32 set, but is running in x86_64 + * + * X86_64 - CS,DS,SS,ES are all zero based. + */ +static unsigned long code_segment_base(struct pt_regs *regs) { - unsigned long ip; + /* + * If we are in VM86 mode, add the segment offset to convert to a + * linear address. + */ + if (regs->flags & X86_VM_MASK) + return 0x10 * regs->cs; + + /* + * For IA32 we look at the GDT/LDT segment base to convert the + * effective IP to a linear address. 
+ */ +#ifdef CONFIG_X86_32 + if (user_mode(regs) && regs->cs != __USER_CS) + return get_segment_base(regs->cs); +#else + if (test_thread_flag(TIF_IA32)) { + if (user_mode(regs) && regs->cs != __USER32_CS) + return get_segment_base(regs->cs); + } +#endif + return 0; +} +unsigned long perf_instruction_pointer(struct pt_regs *regs) +{ if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) - ip = perf_guest_cbs->get_guest_ip(); - else - ip = instruction_pointer(regs); + return perf_guest_cbs->get_guest_ip(); - return ip; + return regs->ip + code_segment_base(regs); } unsigned long perf_misc_flags(struct pt_regs *regs) @@ -1838,7 +1907,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs) else misc |= PERF_RECORD_MISC_GUEST_KERNEL; } else { - if (!kernel_ip(regs->ip)) + if (user_mode(regs)) misc |= PERF_RECORD_MISC_USER; else misc |= PERF_RECORD_MISC_KERNEL; diff --git a/trunk/arch/x86/kernel/cpu/perf_event.h b/trunk/arch/x86/kernel/cpu/perf_event.h index 821d53b696d1..8b6defe7eefc 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event.h +++ b/trunk/arch/x86/kernel/cpu/perf_event.h @@ -516,6 +516,26 @@ static inline bool kernel_ip(unsigned long ip) #endif } +/* + * Not all PMUs provide the right context information to place the reported IP + * into full context. Specifically segment registers are typically not + * supplied. + * + * Assuming the address is a linear address (it is for IBS), we fake the CS and + * vm86 mode using the known zero-based code segment and 'fix up' the registers + * to reflect this. + * + * Intel PEBS/LBR appear to typically provide the effective address, nothing + * much we can do about that but pray and treat it like a linear address. + */ +static inline void set_linear_ip(struct pt_regs *regs, unsigned long ip) +{ + regs->cs = kernel_ip(ip) ? 
__KERNEL_CS : __USER_CS; + if (regs->flags & X86_VM_MASK) + regs->flags ^= (PERF_EFLAGS_VM | X86_VM_MASK); + regs->ip = ip; +} + #ifdef CONFIG_CPU_SUP_AMD int amd_pmu_init(void); @@ -566,6 +586,8 @@ extern struct event_constraint intel_westmere_pebs_event_constraints[]; extern struct event_constraint intel_snb_pebs_event_constraints[]; +extern struct event_constraint intel_ivb_pebs_event_constraints[]; + struct event_constraint *intel_pebs_constraints(struct perf_event *event); void intel_pmu_pebs_enable(struct perf_event *event); diff --git a/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c index da9bcdcd9856..eebd5ffe1bba 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -13,6 +13,8 @@ #include +#include "perf_event.h" + static u32 ibs_caps; #if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) @@ -207,6 +209,15 @@ static int perf_ibs_precise_event(struct perf_event *event, u64 *config) return -EOPNOTSUPP; } +static const struct perf_event_attr ibs_notsupp = { + .exclude_user = 1, + .exclude_kernel = 1, + .exclude_hv = 1, + .exclude_idle = 1, + .exclude_host = 1, + .exclude_guest = 1, +}; + static int perf_ibs_init(struct perf_event *event) { struct hw_perf_event *hwc = &event->hw; @@ -227,6 +238,9 @@ static int perf_ibs_init(struct perf_event *event) if (event->pmu != &perf_ibs->pmu) return -ENOENT; + if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp)) + return -EINVAL; + if (config & ~perf_ibs->config_mask) return -EINVAL; @@ -536,7 +550,7 @@ static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs) if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) { regs.flags &= ~PERF_EFLAGS_EXACT; } else { - instruction_pointer_set(®s, ibs_data.regs[1]); + set_linear_ip(®s, ibs_data.regs[1]); regs.flags |= PERF_EFLAGS_EXACT; } diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c index 382366977d4c..6bca492b8547 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c @@ -1522,8 +1522,16 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; + /* + * If PMU counter has PEBS enabled it is not enough to disable counter + * on a guest entry since PEBS memory write can overshoot guest entry + * and corrupt guest memory. Disabling PEBS solves the problem. 
+ */ + arr[1].msr = MSR_IA32_PEBS_ENABLE; + arr[1].host = cpuc->pebs_enabled; + arr[1].guest = 0; - *nr = 1; + *nr = 2; return arr; } @@ -2000,6 +2008,7 @@ __init int intel_pmu_init(void) break; case 28: /* Atom */ + case 54: /* Cedariew */ memcpy(hw_cache_event_ids, atom_hw_cache_event_ids, sizeof(hw_cache_event_ids)); @@ -2039,7 +2048,6 @@ __init int intel_pmu_init(void) case 42: /* SandyBridge */ case 45: /* SandyBridge, "Romely-EP" */ x86_add_quirk(intel_sandybridge_quirk); - case 58: /* IvyBridge */ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, @@ -2064,6 +2072,29 @@ __init int intel_pmu_init(void) pr_cont("SandyBridge events, "); break; + case 58: /* IvyBridge */ + memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, + sizeof(hw_cache_event_ids)); + memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, + sizeof(hw_cache_extra_regs)); + + intel_pmu_lbr_init_snb(); + + x86_pmu.event_constraints = intel_snb_event_constraints; + x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints; + x86_pmu.pebs_aliases = intel_pebs_aliases_snb; + x86_pmu.extra_regs = intel_snb_extra_regs; + /* all extra regs are per-cpu when HT is on */ + x86_pmu.er_flags |= ERF_HAS_RSP_1; + x86_pmu.er_flags |= ERF_NO_HT_SHARING; + + /* UOPS_ISSUED.ANY,c=1,i=1 to count stall cycles */ + intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = + X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); + + pr_cont("IvyBridge events, "); + break; + default: switch (x86_pmu.version) { diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c index 629ae0b7ad90..826054a4f2ee 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -407,6 +407,20 @@ struct event_constraint intel_snb_pebs_event_constraints[] = { EVENT_CONSTRAINT_END }; +struct event_constraint intel_ivb_pebs_event_constraints[] = { + INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PRECDIST */ + INTEL_UEVENT_CONSTRAINT(0x01c2, 0xf), /* UOPS_RETIRED.ALL */ + INTEL_UEVENT_CONSTRAINT(0x02c2, 0xf), /* UOPS_RETIRED.RETIRE_SLOTS */ + INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ + INTEL_EVENT_CONSTRAINT(0xd3, 0xf), /* MEM_LOAD_UOPS_LLC_MISS_RETIRED.* */ + EVENT_CONSTRAINT_END +}; + struct event_constraint *intel_pebs_constraints(struct perf_event *event) { struct event_constraint *c; @@ -499,7 +513,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) * We sampled a branch insn, rewind using the LBR stack */ if (ip == to) { - regs->ip = from; + set_linear_ip(regs, from); return 1; } @@ -529,7 +543,7 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs) } while (to < ip); if (to == ip) { - regs->ip = old_to; + set_linear_ip(regs, old_to); return 1; } @@ -569,7 +583,8 @@ static void __intel_pmu_pebs_event(struct perf_event *event, * A possible PERF_SAMPLE_REGS will have to transfer all regs. 
*/ regs = *iregs; - regs.ip = pebs->ip; + regs.flags = pebs->flags; + set_linear_ip(®s, pebs->ip); regs.bp = pebs->bp; regs.sp = pebs->sp; diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 520b4265fcd2..da02e9cc3754 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -686,7 +686,8 @@ void intel_pmu_lbr_init_atom(void) * to have an operational LBR which can freeze * on PMU interrupt */ - if (boot_cpu_data.x86_mask < 10) { + if (boot_cpu_data.x86_model == 28 + && boot_cpu_data.x86_mask < 10) { pr_cont("LBR disabled due to erratum"); return; } diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 7563fda9f033..99d96a4978b5 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.c @@ -661,6 +661,11 @@ static void snb_uncore_msr_init_box(struct intel_uncore_box *box) } } +static struct uncore_event_desc snb_uncore_events[] = { + INTEL_UNCORE_EVENT_DESC(clockticks, "event=0xff,umask=0x00"), + { /* end: all zeroes */ }, +}; + static struct attribute *snb_uncore_formats_attr[] = { &format_attr_event.attr, &format_attr_umask.attr, @@ -704,6 +709,7 @@ static struct intel_uncore_type snb_uncore_cbox = { .constraints = snb_uncore_cbox_constraints, .ops = &snb_uncore_msr_ops, .format_group = &snb_uncore_format_group, + .event_descs = snb_uncore_events, }; static struct intel_uncore_type *snb_msr_uncores[] = { @@ -796,7 +802,6 @@ static struct intel_uncore_type *nhm_msr_uncores[] = { DEFINE_UNCORE_FORMAT_ATTR(event5, event, "config:1-5"); DEFINE_UNCORE_FORMAT_ATTR(counter, counter, "config:6-7"); -DEFINE_UNCORE_FORMAT_ATTR(mm_cfg, mm_cfg, "config:63"); DEFINE_UNCORE_FORMAT_ATTR(match, match, "config1:0-63"); DEFINE_UNCORE_FORMAT_ATTR(mask, mask, "config2:0-63"); @@ -902,16 +907,21 @@ static struct attribute_group nhmex_uncore_cbox_format_group = { .attrs = nhmex_uncore_cbox_formats_attr, }; +/* msr offset for each instance of cbox */ +static unsigned nhmex_cbox_msr_offsets[] = { + 0x0, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x240, 0x2c0, +}; + static struct intel_uncore_type nhmex_uncore_cbox = { .name = "cbox", .num_counters = 6, - .num_boxes = 8, + .num_boxes = 10, .perf_ctr_bits = 48, .event_ctl = NHMEX_C0_MSR_PMON_EV_SEL0, .perf_ctr = NHMEX_C0_MSR_PMON_CTR0, .event_mask = NHMEX_PMON_RAW_EVENT_MASK, .box_ctl = NHMEX_C0_MSR_PMON_GLOBAL_CTL, - .msr_offset = NHMEX_C_MSR_OFFSET, + .msr_offsets = nhmex_cbox_msr_offsets, .pair_ctr_ctl = 1, .ops = &nhmex_uncore_ops, .format_group = &nhmex_uncore_cbox_format_group @@ -1032,24 +1042,22 @@ static struct intel_uncore_type nhmex_uncore_bbox = { static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event) { - struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; - struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; + struct hw_perf_event *hwc = &event->hw; + struct hw_perf_event_extra *reg1 = &hwc->extra_reg; + struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - if (event->attr.config & NHMEX_S_PMON_MM_CFG_EN) { - reg1->config = event->attr.config1; - reg2->config = event->attr.config2; - } else { - reg1->config = ~0ULL; - reg2->config = ~0ULL; - } + /* only TO_R_PROG_EV event uses the match/mask register */ + if ((hwc->config & NHMEX_PMON_CTL_EV_SEL_MASK) != + NHMEX_S_EVENT_TO_R_PROG_EV) + return 0; if (box->pmu->pmu_idx == 0) reg1->reg = 
NHMEX_S0_MSR_MM_CFG; else reg1->reg = NHMEX_S1_MSR_MM_CFG; - reg1->idx = 0; - + reg1->config = event->attr.config1; + reg2->config = event->attr.config2; return 0; } @@ -1059,8 +1067,8 @@ static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event_extra *reg1 = &hwc->extra_reg; struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - wrmsrl(reg1->reg, 0); - if (reg1->config != ~0ULL || reg2->config != ~0ULL) { + if (reg1->idx != EXTRA_REG_NONE) { + wrmsrl(reg1->reg, 0); wrmsrl(reg1->reg + 1, reg1->config); wrmsrl(reg1->reg + 2, reg2->config); wrmsrl(reg1->reg, NHMEX_S_PMON_MM_CFG_EN); @@ -1074,7 +1082,6 @@ static struct attribute *nhmex_uncore_sbox_formats_attr[] = { &format_attr_edge.attr, &format_attr_inv.attr, &format_attr_thresh8.attr, - &format_attr_mm_cfg.attr, &format_attr_match.attr, &format_attr_mask.attr, NULL, @@ -1142,6 +1149,9 @@ static struct extra_reg nhmex_uncore_mbox_extra_regs[] = { EVENT_EXTRA_END }; +/* Nehalem-EX or Westmere-EX ? */ +bool uncore_nhmex; + static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config) { struct intel_uncore_extra_reg *er; @@ -1171,18 +1181,29 @@ static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 return false; /* mask of the shared fields */ - mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; + if (uncore_nhmex) + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK; + else + mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK; er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC]; raw_spin_lock_irqsave(&er->lock, flags); /* add mask of the non-shared field if it's in use */ - if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) - mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (__BITS_VALUE(atomic_read(&er->ref), idx, 8)) { + if (uncore_nhmex) + mask |= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + mask |= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + } if (!atomic_read(&er->ref) || !((er->config ^ config) & mask)) { atomic_add(1 << (idx * 8), &er->ref); - mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | - NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (uncore_nhmex) + mask = NHMEX_M_PMON_ZDP_CTL_FVC_MASK | + NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + mask = WSMEX_M_PMON_ZDP_CTL_FVC_MASK | + WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); er->config &= ~mask; er->config |= (config & mask); ret = true; @@ -1216,7 +1237,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) /* get the non-shared control bits and shift them */ idx = orig_idx - EXTRA_REG_NHMEX_M_ZDP_CTL_FVC; - config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + if (uncore_nhmex) + config &= NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); + else + config &= WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(idx); if (new_idx > orig_idx) { idx = new_idx - orig_idx; config <<= 3 * idx; @@ -1226,6 +1250,10 @@ u64 nhmex_mbox_alter_er(struct perf_event *event, int new_idx, bool modify) } /* add the shared control bits back */ + if (uncore_nhmex) + config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; + else + config |= WSMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; config |= NHMEX_M_PMON_ZDP_CTL_FVC_MASK & reg1->config; if (modify) { /* adjust the main event selector */ @@ -1264,7 +1292,8 @@ nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event } /* for the match/mask registers */ - if ((uncore_box_is_fake(box) || !reg2->alloc) && + if (reg2->idx != EXTRA_REG_NONE && + (uncore_box_is_fake(box) || !reg2->alloc) && !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config)) goto fail; @@ -1278,7 
+1307,8 @@ nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event if (idx[0] != 0xff && idx[0] != __BITS_VALUE(reg1->idx, 0, 8)) nhmex_mbox_alter_er(event, idx[0], true); reg1->alloc |= alloc; - reg2->alloc = 1; + if (reg2->idx != EXTRA_REG_NONE) + reg2->alloc = 1; } return NULL; fail: @@ -1342,9 +1372,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event struct extra_reg *er; unsigned msr; int reg_idx = 0; - - if (WARN_ON_ONCE(reg1->idx != -1)) - return -EINVAL; /* * The mbox events may require 2 extra MSRs at the most. But only * the lower 32 bits in these MSRs are significant, so we can use @@ -1355,11 +1382,6 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event continue; if (event->attr.config1 & ~er->valid_mask) return -EINVAL; - if (er->idx == __BITS_VALUE(reg1->idx, 0, 8) || - er->idx == __BITS_VALUE(reg1->idx, 1, 8)) - continue; - if (WARN_ON_ONCE(reg_idx >= 2)) - return -EINVAL; msr = er->msr + type->msr_offset * box->pmu->pmu_idx; if (WARN_ON_ONCE(msr >= 0xffff || er->idx >= 0xff)) @@ -1368,6 +1390,8 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event /* always use the 32~63 bits to pass the PLD config */ if (er->idx == EXTRA_REG_NHMEX_M_PLD) reg_idx = 1; + else if (WARN_ON_ONCE(reg_idx > 0)) + return -EINVAL; reg1->idx &= ~(0xff << (reg_idx * 8)); reg1->reg &= ~(0xffff << (reg_idx * 16)); @@ -1376,17 +1400,21 @@ static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event reg1->config = event->attr.config1; reg_idx++; } - /* use config2 to pass the filter config */ - reg2->idx = EXTRA_REG_NHMEX_M_FILTER; - if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) - reg2->config = event->attr.config2; - else - reg2->config = ~0ULL; - if (box->pmu->pmu_idx == 0) - reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; - else - reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; - + /* + * The mbox only provides ability to perform address matching + * for the PLD events. 
+ */ + if (reg_idx == 2) { + reg2->idx = EXTRA_REG_NHMEX_M_FILTER; + if (event->attr.config2 & NHMEX_M_PMON_MM_CFG_EN) + reg2->config = event->attr.config2; + else + reg2->config = ~0ULL; + if (box->pmu->pmu_idx == 0) + reg2->reg = NHMEX_M0_MSR_PMU_MM_CFG; + else + reg2->reg = NHMEX_M1_MSR_PMU_MM_CFG; + } return 0; } @@ -1422,34 +1450,36 @@ static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct per wrmsrl(__BITS_VALUE(reg1->reg, 1, 16), nhmex_mbox_shared_reg_config(box, idx)); - wrmsrl(reg2->reg, 0); - if (reg2->config != ~0ULL) { - wrmsrl(reg2->reg + 1, - reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); - wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & - (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); - wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + if (reg2->idx != EXTRA_REG_NONE) { + wrmsrl(reg2->reg, 0); + if (reg2->config != ~0ULL) { + wrmsrl(reg2->reg + 1, + reg2->config & NHMEX_M_PMON_ADDR_MATCH_MASK); + wrmsrl(reg2->reg + 2, NHMEX_M_PMON_ADDR_MASK_MASK & + (reg2->config >> NHMEX_M_PMON_ADDR_MASK_SHIFT)); + wrmsrl(reg2->reg, NHMEX_M_PMON_MM_CFG_EN); + } } wrmsrl(hwc->config_base, hwc->config | NHMEX_PMON_CTL_EN_BIT0); } -DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); -DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); -DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); -DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); -DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); -DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); -DEFINE_UNCORE_FORMAT_ATTR(filter_cfg, filter_cfg, "config2:63"); -DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); -DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); -DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); -DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(count_mode, count_mode, "config:2-3"); +DEFINE_UNCORE_FORMAT_ATTR(storage_mode, storage_mode, "config:4-5"); +DEFINE_UNCORE_FORMAT_ATTR(wrap_mode, wrap_mode, "config:6"); +DEFINE_UNCORE_FORMAT_ATTR(flag_mode, flag_mode, "config:7"); +DEFINE_UNCORE_FORMAT_ATTR(inc_sel, inc_sel, "config:9-13"); +DEFINE_UNCORE_FORMAT_ATTR(set_flag_sel, set_flag_sel, "config:19-21"); +DEFINE_UNCORE_FORMAT_ATTR(filter_cfg_en, filter_cfg_en, "config2:63"); +DEFINE_UNCORE_FORMAT_ATTR(filter_match, filter_match, "config2:0-33"); +DEFINE_UNCORE_FORMAT_ATTR(filter_mask, filter_mask, "config2:34-61"); +DEFINE_UNCORE_FORMAT_ATTR(dsp, dsp, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(thr, thr, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(fvc, fvc, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pgt, pgt, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(map, map, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(iss, iss, "config1:0-31"); +DEFINE_UNCORE_FORMAT_ATTR(pld, pld, "config1:32-63"); static struct attribute *nhmex_uncore_mbox_formats_attr[] = { &format_attr_count_mode.attr, @@ -1458,7 +1488,7 @@ static struct attribute *nhmex_uncore_mbox_formats_attr[] = { &format_attr_flag_mode.attr, &format_attr_inc_sel.attr, &format_attr_set_flag_sel.attr, - &format_attr_filter_cfg.attr, + &format_attr_filter_cfg_en.attr, &format_attr_filter_match.attr, &format_attr_filter_mask.attr, &format_attr_dsp.attr, @@ 
-1482,6 +1512,12 @@ static struct uncore_event_desc nhmex_uncore_mbox_events[] = { { /* end: all zeroes */ }, }; +static struct uncore_event_desc wsmex_uncore_mbox_events[] = { + INTEL_UNCORE_EVENT_DESC(bbox_cmds_read, "inc_sel=0xd,fvc=0x5000"), + INTEL_UNCORE_EVENT_DESC(bbox_cmds_write, "inc_sel=0xd,fvc=0x5040"), + { /* end: all zeroes */ }, +}; + static struct intel_uncore_ops nhmex_uncore_mbox_ops = { NHMEX_UNCORE_OPS_COMMON_INIT(), .enable_event = nhmex_mbox_msr_enable_event, @@ -1513,7 +1549,7 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) struct hw_perf_event_extra *reg1 = &hwc->extra_reg; int port; - /* adjust the main event selector */ + /* adjust the main event selector and extra register index */ if (reg1->idx % 2) { reg1->idx--; hwc->config -= 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; @@ -1522,29 +1558,17 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event) hwc->config += 1 << NHMEX_R_PMON_CTL_EV_SEL_SHIFT; } - /* adjust address or config of extra register */ + /* adjust extra register config */ port = reg1->idx / 6 + box->pmu->pmu_idx * 4; switch (reg1->idx % 6) { - case 0: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); - break; - case 1: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); - break; case 2: - /* the 8~15 bits to the 0~7 bits */ + /* shift the 8~15 bits to the 0~7 bits */ reg1->config >>= 8; break; case 3: - /* the 0~7 bits to the 8~15 bits */ + /* shift the 0~7 bits to the 8~15 bits */ reg1->config <<= 8; break; - case 4: - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); - break; - case 5: - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); - break; }; } @@ -1671,7 +1695,7 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &event->hw.extra_reg; struct hw_perf_event_extra *reg2 = &event->hw.branch_reg; - int port, idx; + int idx; idx = (event->hw.config & NHMEX_R_PMON_CTL_EV_SEL_MASK) >> NHMEX_R_PMON_CTL_EV_SEL_SHIFT; @@ -1681,27 +1705,11 @@ static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event reg1->idx = idx; reg1->config = event->attr.config1; - port = idx / 6 + box->pmu->pmu_idx * 4; - idx %= 6; - switch (idx) { - case 0: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG0(port); - break; - case 1: - reg1->reg = NHMEX_R_MSR_PORTN_IPERF_CFG1(port); - break; - case 2: - case 3: - reg1->reg = NHMEX_R_MSR_PORTN_QLX_CFG(port); - break; + switch (idx % 6) { case 4: case 5: - if (idx == 4) - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port); - else - reg1->reg = NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port); - reg2->config = event->attr.config2; hwc->config |= event->attr.config & (~0ULL << 32); + reg2->config = event->attr.config2; break; }; return 0; @@ -1727,28 +1735,34 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per struct hw_perf_event *hwc = &event->hw; struct hw_perf_event_extra *reg1 = &hwc->extra_reg; struct hw_perf_event_extra *reg2 = &hwc->branch_reg; - int idx, er_idx; + int idx, port; - idx = reg1->idx % 6; - er_idx = idx; - if (er_idx > 2) - er_idx--; - er_idx += (reg1->idx / 6) * 5; + idx = reg1->idx; + port = idx / 6 + box->pmu->pmu_idx * 4; - switch (idx) { + switch (idx % 6) { case 0: + wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG0(port), reg1->config); + break; case 1: - wrmsrl(reg1->reg, reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_IPERF_CFG1(port), reg1->config); break; case 2: case 3: - wrmsrl(reg1->reg, 
nhmex_rbox_shared_reg_config(box, er_idx)); + wrmsrl(NHMEX_R_MSR_PORTN_QLX_CFG(port), + nhmex_rbox_shared_reg_config(box, 2 + (idx / 6) * 5)); break; case 4: + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MM_CFG(port), + hwc->config >> 32); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MATCH(port), reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET1_MASK(port), reg2->config); + break; case 5: - wrmsrl(reg1->reg, reg1->config); - wrmsrl(reg1->reg + 1, hwc->config >> 32); - wrmsrl(reg1->reg + 2, reg2->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MM_CFG(port), + hwc->config >> 32); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MATCH(port), reg1->config); + wrmsrl(NHMEX_R_MSR_PORTN_XBR_SET2_MASK(port), reg2->config); break; }; @@ -1756,8 +1770,8 @@ static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct per (hwc->config & NHMEX_R_PMON_CTL_EV_SEL_MASK)); } -DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config:32-63"); -DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config1:0-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_mm_cfg, xbr_mm_cfg, "config:32-63"); +DEFINE_UNCORE_FORMAT_ATTR(xbr_match, xbr_match, "config1:0-63"); DEFINE_UNCORE_FORMAT_ATTR(xbr_mask, xbr_mask, "config2:0-63"); DEFINE_UNCORE_FORMAT_ATTR(qlx_cfg, qlx_cfg, "config1:0-15"); DEFINE_UNCORE_FORMAT_ATTR(iperf_cfg, iperf_cfg, "config1:0-31"); @@ -1936,7 +1950,7 @@ struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cp static struct intel_uncore_box * uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu) { - static struct intel_uncore_box *box; + struct intel_uncore_box *box; box = *per_cpu_ptr(pmu->box, cpu); if (box) @@ -2303,6 +2317,7 @@ int uncore_pmu_event_init(struct perf_event *event) event->hw.idx = -1; event->hw.last_tag = ~0ULL; event->hw.extra_reg.idx = EXTRA_REG_NONE; + event->hw.branch_reg.idx = EXTRA_REG_NONE; if (event->attr.config == UNCORE_FIXED_EVENT) { /* no fixed counter */ @@ -2332,6 +2347,27 @@ int uncore_pmu_event_init(struct perf_event *event) return ret; } +static ssize_t uncore_get_attr_cpumask(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int n = cpulist_scnprintf(buf, PAGE_SIZE - 2, &uncore_cpu_mask); + + buf[n++] = '\n'; + buf[n] = '\0'; + return n; +} + +static DEVICE_ATTR(cpumask, S_IRUGO, uncore_get_attr_cpumask, NULL); + +static struct attribute *uncore_pmu_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static struct attribute_group uncore_pmu_attr_group = { + .attrs = uncore_pmu_attrs, +}; + static int __init uncore_pmu_register(struct intel_uncore_pmu *pmu) { int ret; @@ -2369,11 +2405,11 @@ static void __init uncore_type_exit(struct intel_uncore_type *type) free_percpu(type->pmus[i].box); kfree(type->pmus); type->pmus = NULL; - kfree(type->attr_groups[1]); - type->attr_groups[1] = NULL; + kfree(type->events_group); + type->events_group = NULL; } -static void uncore_types_exit(struct intel_uncore_type **types) +static void __init uncore_types_exit(struct intel_uncore_type **types) { int i; for (i = 0; types[i]; i++) @@ -2422,9 +2458,10 @@ static int __init uncore_type_init(struct intel_uncore_type *type) for (j = 0; j < i; j++) attrs[j] = &type->event_descs[j].attr.attr; - type->attr_groups[1] = events_group; + type->events_group = events_group; } + type->pmu_group = &uncore_pmu_attr_group; type->pmus = pmus; return 0; fail: @@ -2814,7 +2851,13 @@ static int __init uncore_cpu_init(void) snbep_uncore_cbox.num_boxes = max_cores; msr_uncores = snbep_msr_uncores; break; - case 46: + case 46: /* Nehalem-EX */ + uncore_nhmex = true; + case 47: /* Westmere-EX 
aka. Xeon E7 */ + if (!uncore_nhmex) + nhmex_uncore_mbox.event_descs = wsmex_uncore_mbox_events; + if (nhmex_uncore_cbox.num_boxes > max_cores) + nhmex_uncore_cbox.num_boxes = max_cores; msr_uncores = nhmex_msr_uncores; break; default: diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h index f3851892e077..e68a4550e952 100644 --- a/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -5,7 +5,7 @@ #include "perf_event.h" #define UNCORE_PMU_NAME_LEN 32 -#define UNCORE_PMU_HRTIMER_INTERVAL (60 * NSEC_PER_SEC) +#define UNCORE_PMU_HRTIMER_INTERVAL (60LL * NSEC_PER_SEC) #define UNCORE_FIXED_EVENT 0xff #define UNCORE_PMC_IDX_MAX_GENERIC 8 @@ -230,6 +230,7 @@ #define NHMEX_S1_MSR_MASK 0xe5a #define NHMEX_S_PMON_MM_CFG_EN (0x1ULL << 63) +#define NHMEX_S_EVENT_TO_R_PROG_EV 0 /* NHM-EX Mbox */ #define NHMEX_M0_MSR_GLOBAL_CTL 0xca0 @@ -275,18 +276,12 @@ NHMEX_M_PMON_CTL_INC_SEL_MASK | \ NHMEX_M_PMON_CTL_SET_FLAG_SEL_MASK) - -#define NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK 0x1f -#define NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK (0x7 << 5) -#define NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK (0x7 << 8) -#define NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR (1 << 23) -#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK \ - (NHMEX_M_PMON_ZDP_CTL_FVC_FVID_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_BCMD_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_RSP_MASK | \ - NHMEX_M_PMON_ZDP_CTL_FVC_PBOX_INIT_ERR) +#define NHMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 11) - 1) | (1 << 23)) #define NHMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (11 + 3 * (n))) +#define WSMEX_M_PMON_ZDP_CTL_FVC_MASK (((1 << 12) - 1) | (1 << 24)) +#define WSMEX_M_PMON_ZDP_CTL_FVC_EVENT_MASK(n) (0x7 << (12 + 3 * (n))) + /* * use the 9~13 bits to select event If the 7th bit is not set, * otherwise use the 19~21 bits to select event. @@ -368,15 +363,18 @@ struct intel_uncore_type { unsigned num_shared_regs:8; unsigned single_fixed:1; unsigned pair_ctr_ctl:1; + unsigned *msr_offsets; struct event_constraint unconstrainted; struct event_constraint *constraints; struct intel_uncore_pmu *pmus; struct intel_uncore_ops *ops; struct uncore_event_desc *event_descs; - const struct attribute_group *attr_groups[3]; + const struct attribute_group *attr_groups[4]; }; -#define format_group attr_groups[0] +#define pmu_group attr_groups[0] +#define format_group attr_groups[1] +#define events_group attr_groups[2] struct intel_uncore_ops { void (*init_box)(struct intel_uncore_box *); @@ -485,29 +483,31 @@ unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx) return idx * 8 + box->pmu->type->perf_ctr; } -static inline -unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) +static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box) +{ + struct intel_uncore_pmu *pmu = box->pmu; + return pmu->type->msr_offsets ? 
+ pmu->type->msr_offsets[pmu->pmu_idx] : + pmu->type->msr_offset * pmu->pmu_idx; +} + +static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box) { if (!box->pmu->type->box_ctl) return 0; - return box->pmu->type->box_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->box_ctl + uncore_msr_box_offset(box); } -static inline -unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) +static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box) { if (!box->pmu->type->fixed_ctl) return 0; - return box->pmu->type->fixed_ctl + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box); } -static inline -unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) +static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box) { - return box->pmu->type->fixed_ctr + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box); } static inline @@ -515,7 +515,7 @@ unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) { return box->pmu->type->event_ctl + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + uncore_msr_box_offset(box); } static inline @@ -523,7 +523,7 @@ unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) { return box->pmu->type->perf_ctr + (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + - box->pmu->type->msr_offset * box->pmu->pmu_idx; + uncore_msr_box_offset(box); } static inline diff --git a/trunk/arch/x86/kernel/cpu/proc.c b/trunk/arch/x86/kernel/cpu/proc.c index 8022c6681485..fbd895562292 100644 --- a/trunk/arch/x86/kernel/cpu/proc.c +++ b/trunk/arch/x86/kernel/cpu/proc.c @@ -140,10 +140,7 @@ static int show_cpuinfo(struct seq_file *m, void *v) static void *c_start(struct seq_file *m, loff_t *pos) { - if (*pos == 0) /* just in case, cpu 0 is not the first */ - *pos = cpumask_first(cpu_online_mask); - else - *pos = cpumask_next(*pos - 1, cpu_online_mask); + *pos = cpumask_next(*pos - 1, cpu_online_mask); if ((*pos) < nr_cpu_ids) return &cpu_data(*pos); return NULL; diff --git a/trunk/arch/x86/kernel/cpuid.c b/trunk/arch/x86/kernel/cpuid.c index 39472dd2323f..60c78917190c 100644 --- a/trunk/arch/x86/kernel/cpuid.c +++ b/trunk/arch/x86/kernel/cpuid.c @@ -199,12 +199,14 @@ static int __init cpuid_init(void) goto out_chrdev; } cpuid_class->devnode = cpuid_devnode; + get_online_cpus(); for_each_online_cpu(i) { err = cpuid_device_create(i); if (err != 0) goto out_class; } register_hotcpu_notifier(&cpuid_class_cpu_notifier); + put_online_cpus(); err = 0; goto out; @@ -214,6 +216,7 @@ static int __init cpuid_init(void) for_each_online_cpu(i) { cpuid_device_destroy(i); } + put_online_cpus(); class_destroy(cpuid_class); out_chrdev: __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); @@ -225,11 +228,13 @@ static void __exit cpuid_exit(void) { int cpu = 0; + get_online_cpus(); for_each_online_cpu(cpu) cpuid_device_destroy(cpu); class_destroy(cpuid_class); __unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid"); unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); + put_online_cpus(); } module_init(cpuid_init); diff --git a/trunk/arch/x86/kernel/devicetree.c b/trunk/arch/x86/kernel/devicetree.c index 3ae2ced4a874..b1581527a236 100644 --- a/trunk/arch/x86/kernel/devicetree.c +++ b/trunk/arch/x86/kernel/devicetree.c @@ -342,6 +342,47 @@ const struct irq_domain_ops ioapic_irq_domain_ops = { .xlate = ioapic_xlate, }; +static void 
dt_add_ioapic_domain(unsigned int ioapic_num, + struct device_node *np) +{ + struct irq_domain *id; + struct mp_ioapic_gsi *gsi_cfg; + int ret; + int num; + + gsi_cfg = mp_ioapic_gsi_routing(ioapic_num); + num = gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1; + + id = irq_domain_add_linear(np, num, &ioapic_irq_domain_ops, + (void *)ioapic_num); + BUG_ON(!id); + if (gsi_cfg->gsi_base == 0) { + /* + * The first NR_IRQS_LEGACY irq descs are allocated in + * early_irq_init() and need just a mapping. The + * remaining irqs need both. All of them are preallocated + * and assigned so we can keep the 1:1 mapping which the ioapic + * is having. + */ + ret = irq_domain_associate_many(id, 0, 0, NR_IRQS_LEGACY); + if (ret) + pr_err("Error mapping legacy IRQs: %d\n", ret); + + if (num > NR_IRQS_LEGACY) { + ret = irq_create_strict_mappings(id, NR_IRQS_LEGACY, + NR_IRQS_LEGACY, num - NR_IRQS_LEGACY); + if (ret) + pr_err("Error creating mapping for the " + "remaining IRQs: %d\n", ret); + } + irq_set_default_host(id); + } else { + ret = irq_create_strict_mappings(id, gsi_cfg->gsi_base, 0, num); + if (ret) + pr_err("Error creating IRQ mapping: %d\n", ret); + } +} + static void __init ioapic_add_ofnode(struct device_node *np) { struct resource r; @@ -356,15 +397,7 @@ static void __init ioapic_add_ofnode(struct device_node *np) for (i = 0; i < nr_ioapics; i++) { if (r.start == mpc_ioapic_addr(i)) { - struct irq_domain *id; - struct mp_ioapic_gsi *gsi_cfg; - - gsi_cfg = mp_ioapic_gsi_routing(i); - - id = irq_domain_add_legacy(np, 32, gsi_cfg->gsi_base, 0, - &ioapic_irq_domain_ops, - (void*)i); - BUG_ON(!id); + dt_add_ioapic_domain(i, np); return; } } diff --git a/trunk/arch/x86/kernel/entry_32.S b/trunk/arch/x86/kernel/entry_32.S index 623f28837476..f438a44bf8f9 100644 --- a/trunk/arch/x86/kernel/entry_32.S +++ b/trunk/arch/x86/kernel/entry_32.S @@ -1109,17 +1109,21 @@ ENTRY(ftrace_caller) pushl %eax pushl %ecx pushl %edx - movl 0xc(%esp), %eax + pushl $0 /* Pass NULL as regs pointer */ + movl 4*4(%esp), %eax movl 0x4(%ebp), %edx + leal function_trace_op, %ecx subl $MCOUNT_INSN_SIZE, %eax .globl ftrace_call ftrace_call: call ftrace_stub + addl $4,%esp /* skip NULL pointer */ popl %edx popl %ecx popl %eax +ftrace_ret: #ifdef CONFIG_FUNCTION_GRAPH_TRACER .globl ftrace_graph_call ftrace_graph_call: @@ -1131,6 +1135,71 @@ ftrace_stub: ret END(ftrace_caller) +ENTRY(ftrace_regs_caller) + pushf /* push flags before compare (in cs location) */ + cmpl $0, function_trace_stop + jne ftrace_restore_flags + + /* + * i386 does not save SS and ESP when coming from kernel. + * Instead, to get sp, ®s->sp is used (see ptrace.h). + * Unfortunately, that means eflags must be at the same location + * as the current return ip is. We move the return ip into the + * ip location, and move flags into the return ip location. 
+ */ + pushl 4(%esp) /* save return ip into ip slot */ + + pushl $0 /* Load 0 into orig_ax */ + pushl %gs + pushl %fs + pushl %es + pushl %ds + pushl %eax + pushl %ebp + pushl %edi + pushl %esi + pushl %edx + pushl %ecx + pushl %ebx + + movl 13*4(%esp), %eax /* Get the saved flags */ + movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */ + /* clobbering return ip */ + movl $__KERNEL_CS,13*4(%esp) + + movl 12*4(%esp), %eax /* Load ip (1st parameter) */ + subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ + movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ + leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ + pushl %esp /* Save pt_regs as 4th parameter */ + +GLOBAL(ftrace_regs_call) + call ftrace_stub + + addl $4, %esp /* Skip pt_regs */ + movl 14*4(%esp), %eax /* Move flags back into cs */ + movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */ + movl 12*4(%esp), %eax /* Get return ip from regs->ip */ + movl %eax, 14*4(%esp) /* Put return ip back for ret */ + + popl %ebx + popl %ecx + popl %edx + popl %esi + popl %edi + popl %ebp + popl %eax + popl %ds + popl %es + popl %fs + popl %gs + addl $8, %esp /* Skip orig_ax and ip */ + popf /* Pop flags at end (no addl to corrupt flags) */ + jmp ftrace_ret + +ftrace_restore_flags: + popf + jmp ftrace_stub #else /* ! CONFIG_DYNAMIC_FTRACE */ ENTRY(mcount) @@ -1171,9 +1240,6 @@ END(mcount) #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) - cmpl $0, function_trace_stop - jne ftrace_stub - pushl %eax pushl %ecx pushl %edx diff --git a/trunk/arch/x86/kernel/entry_64.S b/trunk/arch/x86/kernel/entry_64.S index 69babd8c834f..066334be7b74 100644 --- a/trunk/arch/x86/kernel/entry_64.S +++ b/trunk/arch/x86/kernel/entry_64.S @@ -56,6 +56,7 @@ #include #include #include +#include #include /* Avoid __ASSEMBLER__'ifying just for this. 
*/ @@ -68,25 +69,51 @@ .section .entry.text, "ax" #ifdef CONFIG_FUNCTION_TRACER + +#ifdef CC_USING_FENTRY +# define function_hook __fentry__ +#else +# define function_hook mcount +#endif + #ifdef CONFIG_DYNAMIC_FTRACE -ENTRY(mcount) + +ENTRY(function_hook) retq -END(mcount) +END(function_hook) + +/* skip is set if stack has been adjusted */ +.macro ftrace_caller_setup skip=0 + MCOUNT_SAVE_FRAME \skip + + /* Load the ftrace_ops into the 3rd parameter */ + leaq function_trace_op, %rdx + + /* Load ip into the first parameter */ + movq RIP(%rsp), %rdi + subq $MCOUNT_INSN_SIZE, %rdi + /* Load the parent_ip into the second parameter */ +#ifdef CC_USING_FENTRY + movq SS+16(%rsp), %rsi +#else + movq 8(%rbp), %rsi +#endif +.endm ENTRY(ftrace_caller) + /* Check if tracing was disabled (quick check) */ cmpl $0, function_trace_stop jne ftrace_stub - MCOUNT_SAVE_FRAME - - movq 0x38(%rsp), %rdi - movq 8(%rbp), %rsi - subq $MCOUNT_INSN_SIZE, %rdi + ftrace_caller_setup + /* regs go into 4th parameter (but make it NULL) */ + movq $0, %rcx GLOBAL(ftrace_call) call ftrace_stub MCOUNT_RESTORE_FRAME +ftrace_return: #ifdef CONFIG_FUNCTION_GRAPH_TRACER GLOBAL(ftrace_graph_call) @@ -97,8 +124,78 @@ GLOBAL(ftrace_stub) retq END(ftrace_caller) +ENTRY(ftrace_regs_caller) + /* Save the current flags before compare (in SS location)*/ + pushfq + + /* Check if tracing was disabled (quick check) */ + cmpl $0, function_trace_stop + jne ftrace_restore_flags + + /* skip=8 to skip flags saved in SS */ + ftrace_caller_setup 8 + + /* Save the rest of pt_regs */ + movq %r15, R15(%rsp) + movq %r14, R14(%rsp) + movq %r13, R13(%rsp) + movq %r12, R12(%rsp) + movq %r11, R11(%rsp) + movq %r10, R10(%rsp) + movq %rbp, RBP(%rsp) + movq %rbx, RBX(%rsp) + /* Copy saved flags */ + movq SS(%rsp), %rcx + movq %rcx, EFLAGS(%rsp) + /* Kernel segments */ + movq $__KERNEL_DS, %rcx + movq %rcx, SS(%rsp) + movq $__KERNEL_CS, %rcx + movq %rcx, CS(%rsp) + /* Stack - skipping return address */ + leaq SS+16(%rsp), %rcx + movq %rcx, RSP(%rsp) + + /* regs go into 4th parameter */ + leaq (%rsp), %rcx + +GLOBAL(ftrace_regs_call) + call ftrace_stub + + /* Copy flags back to SS, to restore them */ + movq EFLAGS(%rsp), %rax + movq %rax, SS(%rsp) + + /* Handlers can change the RIP */ + movq RIP(%rsp), %rax + movq %rax, SS+8(%rsp) + + /* restore the rest of pt_regs */ + movq R15(%rsp), %r15 + movq R14(%rsp), %r14 + movq R13(%rsp), %r13 + movq R12(%rsp), %r12 + movq R10(%rsp), %r10 + movq RBP(%rsp), %rbp + movq RBX(%rsp), %rbx + + /* skip=8 to skip flags saved in SS */ + MCOUNT_RESTORE_FRAME 8 + + /* Restore flags */ + popfq + + jmp ftrace_return +ftrace_restore_flags: + popfq + jmp ftrace_stub + +END(ftrace_regs_caller) + + #else /* ! 
CONFIG_DYNAMIC_FTRACE */ -ENTRY(mcount) + +ENTRY(function_hook) cmpl $0, function_trace_stop jne ftrace_stub @@ -119,8 +216,12 @@ GLOBAL(ftrace_stub) trace: MCOUNT_SAVE_FRAME - movq 0x38(%rsp), %rdi + movq RIP(%rsp), %rdi +#ifdef CC_USING_FENTRY + movq SS+16(%rsp), %rsi +#else movq 8(%rbp), %rsi +#endif subq $MCOUNT_INSN_SIZE, %rdi call *ftrace_trace_function @@ -128,20 +229,22 @@ trace: MCOUNT_RESTORE_FRAME jmp ftrace_stub -END(mcount) +END(function_hook) #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_FUNCTION_GRAPH_TRACER ENTRY(ftrace_graph_caller) - cmpl $0, function_trace_stop - jne ftrace_stub - MCOUNT_SAVE_FRAME +#ifdef CC_USING_FENTRY + leaq SS+16(%rsp), %rdi + movq $0, %rdx /* No framepointers needed */ +#else leaq 8(%rbp), %rdi - movq 0x38(%rsp), %rsi movq (%rbp), %rdx +#endif + movq RIP(%rsp), %rsi subq $MCOUNT_INSN_SIZE, %rsi call prepare_ftrace_return @@ -342,15 +445,15 @@ ENDPROC(native_usergs_sysret64) .macro SAVE_ARGS_IRQ cld /* start from rbp in pt_regs and jump over */ - movq_cfi rdi, RDI-RBP - movq_cfi rsi, RSI-RBP - movq_cfi rdx, RDX-RBP - movq_cfi rcx, RCX-RBP - movq_cfi rax, RAX-RBP - movq_cfi r8, R8-RBP - movq_cfi r9, R9-RBP - movq_cfi r10, R10-RBP - movq_cfi r11, R11-RBP + movq_cfi rdi, (RDI-RBP) + movq_cfi rsi, (RSI-RBP) + movq_cfi rdx, (RDX-RBP) + movq_cfi rcx, (RCX-RBP) + movq_cfi rax, (RAX-RBP) + movq_cfi r8, (R8-RBP) + movq_cfi r9, (R9-RBP) + movq_cfi r10, (R10-RBP) + movq_cfi r11, (R11-RBP) /* Save rbp so that we can unwind from get_irq_regs() */ movq_cfi rbp, 0 @@ -384,7 +487,7 @@ ENDPROC(native_usergs_sysret64) .endm ENTRY(save_rest) - PARTIAL_FRAME 1 REST_SKIP+8 + PARTIAL_FRAME 1 (REST_SKIP+8) movq 5*8+16(%rsp), %r11 /* save return address */ movq_cfi rbx, RBX+16 movq_cfi rbp, RBP+16 @@ -440,7 +543,7 @@ ENTRY(ret_from_fork) LOCK ; btr $TIF_FORK,TI_flags(%r8) - pushq_cfi kernel_eflags(%rip) + pushq_cfi $0x0002 popfq_cfi # reset kernel eflags call schedule_tail # rdi: 'prev' task parameter @@ -565,7 +668,7 @@ sysret_careful: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi - call schedule + SCHEDULE_USER popq_cfi %rdi jmp sysret_check @@ -678,7 +781,7 @@ int_careful: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi - call schedule + SCHEDULE_USER popq_cfi %rdi DISABLE_INTERRUPTS(CLBR_NONE) TRACE_IRQS_OFF @@ -974,7 +1077,7 @@ retint_careful: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_NONE) pushq_cfi %rdi - call schedule + SCHEDULE_USER popq_cfi %rdi GET_THREAD_INFO(%rcx) DISABLE_INTERRUPTS(CLBR_NONE) @@ -1449,7 +1552,7 @@ paranoid_userspace: paranoid_schedule: TRACE_IRQS_ON ENABLE_INTERRUPTS(CLBR_ANY) - call schedule + SCHEDULE_USER DISABLE_INTERRUPTS(CLBR_ANY) TRACE_IRQS_OFF jmp paranoid_userspace diff --git a/trunk/arch/x86/kernel/ftrace.c b/trunk/arch/x86/kernel/ftrace.c index c3a7cb4bf6e6..1d414029f1d8 100644 --- a/trunk/arch/x86/kernel/ftrace.c +++ b/trunk/arch/x86/kernel/ftrace.c @@ -206,6 +206,21 @@ static int ftrace_modify_code(unsigned long ip, unsigned const char *old_code, unsigned const char *new_code); +/* + * Should never be called: + * As it is only called by __ftrace_replace_code() which is called by + * ftrace_replace_code() that x86 overrides, and by ftrace_update_code() + * which is called to turn mcount into nops or nops into function calls + * but not to convert a function from not using regs to one that uses + * regs, which ftrace_modify_call() is for. 
+ */ +int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + WARN_ON(1); + return -EINVAL; +} + int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip = (unsigned long)(&ftrace_call); @@ -220,6 +235,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func) ret = ftrace_modify_code(ip, old, new); + /* Also update the regs callback function */ + if (!ret) { + ip = (unsigned long)(&ftrace_regs_call); + memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE); + new = ftrace_call_replace(ip, (unsigned long)func); + ret = ftrace_modify_code(ip, old, new); + } + atomic_dec(&modifying_ftrace_code); return ret; @@ -299,6 +322,32 @@ static int add_brk_on_nop(struct dyn_ftrace *rec) return add_break(rec->ip, old); } +/* + * If the record has the FTRACE_FL_REGS set, that means that it + * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS + * is not not set, then it wants to convert to the normal callback. + */ +static unsigned long get_ftrace_addr(struct dyn_ftrace *rec) +{ + if (rec->flags & FTRACE_FL_REGS) + return (unsigned long)FTRACE_REGS_ADDR; + else + return (unsigned long)FTRACE_ADDR; +} + +/* + * The FTRACE_FL_REGS_EN is set when the record already points to + * a function that saves all the regs. Basically the '_EN' version + * represents the current state of the function. + */ +static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec) +{ + if (rec->flags & FTRACE_FL_REGS_EN) + return (unsigned long)FTRACE_REGS_ADDR; + else + return (unsigned long)FTRACE_ADDR; +} + static int add_breakpoints(struct dyn_ftrace *rec, int enable) { unsigned long ftrace_addr; @@ -306,7 +355,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable) ret = ftrace_test_record(rec, enable); - ftrace_addr = (unsigned long)FTRACE_ADDR; + ftrace_addr = get_ftrace_addr(rec); switch (ret) { case FTRACE_UPDATE_IGNORE: @@ -316,6 +365,10 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable) /* converting nop to call */ return add_brk_on_nop(rec); + case FTRACE_UPDATE_MODIFY_CALL_REGS: + case FTRACE_UPDATE_MODIFY_CALL: + ftrace_addr = get_ftrace_old_addr(rec); + /* fall through */ case FTRACE_UPDATE_MAKE_NOP: /* converting a call to a nop */ return add_brk_on_call(rec, ftrace_addr); @@ -360,13 +413,21 @@ static int remove_breakpoint(struct dyn_ftrace *rec) * If not, don't touch the breakpoint, we make just create * a disaster. 
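The two trampoline addresses chosen by get_ftrace_addr()/get_ftrace_old_addr() are what a tracer selects between when it asks ftrace to hand its handler a full register set. A minimal sketch of such a consumer follows, assuming the four-argument callback signature and the FTRACE_OPS_FL_SAVE_REGS flag that this series introduces; sample_ops, sample_regs_handler and last_sp are illustrative names, not code from this patch:

#include <linux/ftrace.h>

static unsigned long last_sp;

/* Reached through ftrace_regs_caller; with SAVE_REGS the trampoline
 * hands over a pt_regs snapshot that may be inspected (or, as the
 * kprobes changes below do, have regs->ip rewritten). */
static void notrace sample_regs_handler(unsigned long ip,
					unsigned long parent_ip,
					struct ftrace_ops *op,
					struct pt_regs *regs)
{
	if (regs)
		last_sp = regs->sp;
}

static struct ftrace_ops sample_ops = {
	.func	= sample_regs_handler,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

/* register_ftrace_function(&sample_ops) would switch the patched call
 * sites to the regs-saving trampoline; unregistering switches back. */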
*/ - ftrace_addr = (unsigned long)FTRACE_ADDR; + ftrace_addr = get_ftrace_addr(rec); + nop = ftrace_call_replace(ip, ftrace_addr); + + if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0) + goto update; + + /* Check both ftrace_addr and ftrace_old_addr */ + ftrace_addr = get_ftrace_old_addr(rec); nop = ftrace_call_replace(ip, ftrace_addr); if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) return -EINVAL; } + update: return probe_kernel_write((void *)ip, &nop[0], 1); } @@ -405,12 +466,14 @@ static int add_update(struct dyn_ftrace *rec, int enable) ret = ftrace_test_record(rec, enable); - ftrace_addr = (unsigned long)FTRACE_ADDR; + ftrace_addr = get_ftrace_addr(rec); switch (ret) { case FTRACE_UPDATE_IGNORE: return 0; + case FTRACE_UPDATE_MODIFY_CALL_REGS: + case FTRACE_UPDATE_MODIFY_CALL: case FTRACE_UPDATE_MAKE_CALL: /* converting nop to call */ return add_update_call(rec, ftrace_addr); @@ -455,12 +518,14 @@ static int finish_update(struct dyn_ftrace *rec, int enable) ret = ftrace_update_record(rec, enable); - ftrace_addr = (unsigned long)FTRACE_ADDR; + ftrace_addr = get_ftrace_addr(rec); switch (ret) { case FTRACE_UPDATE_IGNORE: return 0; + case FTRACE_UPDATE_MODIFY_CALL_REGS: + case FTRACE_UPDATE_MODIFY_CALL: case FTRACE_UPDATE_MAKE_CALL: /* converting nop to call */ return finish_update_call(rec, ftrace_addr); diff --git a/trunk/arch/x86/kernel/i387.c b/trunk/arch/x86/kernel/i387.c index f250431fb505..675a05012449 100644 --- a/trunk/arch/x86/kernel/i387.c +++ b/trunk/arch/x86/kernel/i387.c @@ -19,24 +19,17 @@ #include #include -#ifdef CONFIG_X86_64 -# include -# include -#else -# define save_i387_xstate_ia32 save_i387_xstate -# define restore_i387_xstate_ia32 restore_i387_xstate -# define _fpstate_ia32 _fpstate -# define _xstate_ia32 _xstate -# define sig_xstate_ia32_size sig_xstate_size -# define fx_sw_reserved_ia32 fx_sw_reserved -# define user_i387_ia32_struct user_i387_struct -# define user32_fxsr_struct user_fxsr_struct -#endif - /* * Were we in an interrupt that interrupted kernel mode? * - * We can do a kernel_fpu_begin/end() pair *ONLY* if that + * For now, with eagerfpu we will return interrupted kernel FPU + * state as not-idle. TBD: Ideally we can change the return value + * to something like __thread_has_fpu(current). But we need to + * be careful of doing __thread_clear_has_fpu() before saving + * the FPU etc for supporting nested uses etc. For now, take + * the simple route! 
+ * + * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that * pair does nothing at all: the thread must not have fpu (so * that we don't try to save the FPU state), and TS must * be set (so that the clts/stts pair does nothing that is @@ -44,6 +37,9 @@ */ static inline bool interrupted_kernel_fpu_idle(void) { + if (use_eager_fpu()) + return 0; + return !__thread_has_fpu(current) && (read_cr0() & X86_CR0_TS); } @@ -77,29 +73,29 @@ bool irq_fpu_usable(void) } EXPORT_SYMBOL(irq_fpu_usable); -void kernel_fpu_begin(void) +void __kernel_fpu_begin(void) { struct task_struct *me = current; - WARN_ON_ONCE(!irq_fpu_usable()); - preempt_disable(); if (__thread_has_fpu(me)) { __save_init_fpu(me); __thread_clear_has_fpu(me); - /* We do 'stts()' in kernel_fpu_end() */ - } else { + /* We do 'stts()' in __kernel_fpu_end() */ + } else if (!use_eager_fpu()) { this_cpu_write(fpu_owner_task, NULL); clts(); } } -EXPORT_SYMBOL(kernel_fpu_begin); +EXPORT_SYMBOL(__kernel_fpu_begin); -void kernel_fpu_end(void) +void __kernel_fpu_end(void) { - stts(); - preempt_enable(); + if (use_eager_fpu()) + math_state_restore(); + else + stts(); } -EXPORT_SYMBOL(kernel_fpu_end); +EXPORT_SYMBOL(__kernel_fpu_end); void unlazy_fpu(struct task_struct *tsk) { @@ -113,23 +109,15 @@ void unlazy_fpu(struct task_struct *tsk) } EXPORT_SYMBOL(unlazy_fpu); -#ifdef CONFIG_MATH_EMULATION -# define HAVE_HWFP (boot_cpu_data.hard_math) -#else -# define HAVE_HWFP 1 -#endif - -static unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; +unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; unsigned int xstate_size; EXPORT_SYMBOL_GPL(xstate_size); -unsigned int sig_xstate_ia32_size = sizeof(struct _fpstate_ia32); static struct i387_fxsave_struct fx_scratch __cpuinitdata; static void __cpuinit mxcsr_feature_mask_init(void) { unsigned long mask = 0; - clts(); if (cpu_has_fxsr) { memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); asm volatile("fxsave %0" : : "m" (fx_scratch)); @@ -138,7 +126,6 @@ static void __cpuinit mxcsr_feature_mask_init(void) mask = 0x0000ffbf; } mxcsr_feature_mask &= mask; - stts(); } static void __cpuinit init_thread_xstate(void) @@ -192,9 +179,8 @@ void __cpuinit fpu_init(void) init_thread_xstate(); mxcsr_feature_mask_init(); - /* clean state in init */ - current_thread_info()->status = 0; - clear_used_math(); + xsave_init(); + eager_fpu_init(); } void fpu_finit(struct fpu *fpu) @@ -205,12 +191,7 @@ void fpu_finit(struct fpu *fpu) } if (cpu_has_fxsr) { - struct i387_fxsave_struct *fx = &fpu->state->fxsave; - - memset(fx, 0, xstate_size); - fx->cwd = 0x37f; - if (cpu_has_xmm) - fx->mxcsr = MXCSR_DEFAULT; + fx_finit(&fpu->state->fxsave); } else { struct i387_fsave_struct *fp = &fpu->state->fsave; memset(fp, 0, xstate_size); @@ -454,7 +435,7 @@ static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave) * FXSR floating point environment conversions. 
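From a caller's point of view the begin/end contract is unchanged by the eager-FPU rework; what moves is where the preempt handling lives. A minimal usage sketch, assuming kernel_fpu_begin()/kernel_fpu_end() remain available as wrappers that add the preempt_disable()/preempt_enable() pair around the __-prefixed exports above (the helper below is an illustrative stand-in, not kernel code):

#include <linux/types.h>
#include <asm/i387.h>

static u32 sum_block_simd(const u8 *buf, size_t len)
{
	u32 sum = 0;

	kernel_fpu_begin();	/* lazy FPU: save owner state, clts(); eager: just save */
	/* SIMD work would go here; no sleeping, no user-memory faults */
	while (len--)
		sum += *buf++;	/* stand-in for the vectorized loop */
	kernel_fpu_end();	/* lazy: stts(); eager: math_state_restore() */

	return sum;
}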
*/ -static void +void convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) { struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; @@ -491,8 +472,8 @@ convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk) memcpy(&to[i], &from[i], sizeof(to[0])); } -static void convert_to_fxsr(struct task_struct *tsk, - const struct user_i387_ia32_struct *env) +void convert_to_fxsr(struct task_struct *tsk, + const struct user_i387_ia32_struct *env) { struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave; @@ -588,223 +569,6 @@ int fpregs_set(struct task_struct *target, const struct user_regset *regset, return ret; } -/* - * Signal frame handlers. - */ - -static inline int save_i387_fsave(struct _fpstate_ia32 __user *buf) -{ - struct task_struct *tsk = current; - struct i387_fsave_struct *fp = &tsk->thread.fpu.state->fsave; - - fp->status = fp->swd; - if (__copy_to_user(buf, fp, sizeof(struct i387_fsave_struct))) - return -1; - return 1; -} - -static int save_i387_fxsave(struct _fpstate_ia32 __user *buf) -{ - struct task_struct *tsk = current; - struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave; - struct user_i387_ia32_struct env; - int err = 0; - - convert_from_fxsr(&env, tsk); - if (__copy_to_user(buf, &env, sizeof(env))) - return -1; - - err |= __put_user(fx->swd, &buf->status); - err |= __put_user(X86_FXSR_MAGIC, &buf->magic); - if (err) - return -1; - - if (__copy_to_user(&buf->_fxsr_env[0], fx, xstate_size)) - return -1; - return 1; -} - -static int save_i387_xsave(void __user *buf) -{ - struct task_struct *tsk = current; - struct _fpstate_ia32 __user *fx = buf; - int err = 0; - - - sanitize_i387_state(tsk); - - /* - * For legacy compatible, we always set FP/SSE bits in the bit - * vector while saving the state to the user context. - * This will enable us capturing any changes(during sigreturn) to - * the FP/SSE bits by the legacy applications which don't touch - * xstate_bv in the xsave header. - * - * xsave aware applications can change the xstate_bv in the xsave - * header as well as change any contents in the memory layout. - * xrestore as part of sigreturn will capture all the changes. - */ - tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE; - - if (save_i387_fxsave(fx) < 0) - return -1; - - err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved_ia32, - sizeof(struct _fpx_sw_bytes)); - err |= __put_user(FP_XSTATE_MAGIC2, - (__u32 __user *) (buf + sig_xstate_ia32_size - - FP_XSTATE_MAGIC2_SIZE)); - if (err) - return -1; - - return 1; -} - -int save_i387_xstate_ia32(void __user *buf) -{ - struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf; - struct task_struct *tsk = current; - - if (!used_math()) - return 0; - - if (!access_ok(VERIFY_WRITE, buf, sig_xstate_ia32_size)) - return -EACCES; - /* - * This will cause a "finit" to be triggered by the next - * attempted FPU operation by the 'current' process. - */ - clear_used_math(); - - if (!HAVE_HWFP) { - return fpregs_soft_get(current, NULL, - 0, sizeof(struct user_i387_ia32_struct), - NULL, fp) ? 
-1 : 1; - } - - unlazy_fpu(tsk); - - if (cpu_has_xsave) - return save_i387_xsave(fp); - if (cpu_has_fxsr) - return save_i387_fxsave(fp); - else - return save_i387_fsave(fp); -} - -static inline int restore_i387_fsave(struct _fpstate_ia32 __user *buf) -{ - struct task_struct *tsk = current; - - return __copy_from_user(&tsk->thread.fpu.state->fsave, buf, - sizeof(struct i387_fsave_struct)); -} - -static int restore_i387_fxsave(struct _fpstate_ia32 __user *buf, - unsigned int size) -{ - struct task_struct *tsk = current; - struct user_i387_ia32_struct env; - int err; - - err = __copy_from_user(&tsk->thread.fpu.state->fxsave, &buf->_fxsr_env[0], - size); - /* mxcsr reserved bits must be masked to zero for security reasons */ - tsk->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask; - if (err || __copy_from_user(&env, buf, sizeof(env))) - return 1; - convert_to_fxsr(tsk, &env); - - return 0; -} - -static int restore_i387_xsave(void __user *buf) -{ - struct _fpx_sw_bytes fx_sw_user; - struct _fpstate_ia32 __user *fx_user = - ((struct _fpstate_ia32 __user *) buf); - struct i387_fxsave_struct __user *fx = - (struct i387_fxsave_struct __user *) &fx_user->_fxsr_env[0]; - struct xsave_hdr_struct *xsave_hdr = - ¤t->thread.fpu.state->xsave.xsave_hdr; - u64 mask; - int err; - - if (check_for_xstate(fx, buf, &fx_sw_user)) - goto fx_only; - - mask = fx_sw_user.xstate_bv; - - err = restore_i387_fxsave(buf, fx_sw_user.xstate_size); - - xsave_hdr->xstate_bv &= pcntxt_mask; - /* - * These bits must be zero. - */ - xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0; - - /* - * Init the state that is not present in the memory layout - * and enabled by the OS. - */ - mask = ~(pcntxt_mask & ~mask); - xsave_hdr->xstate_bv &= mask; - - return err; -fx_only: - /* - * Couldn't find the extended state information in the memory - * layout. Restore the FP/SSE and init the other extended state - * enabled by the OS. - */ - xsave_hdr->xstate_bv = XSTATE_FPSSE; - return restore_i387_fxsave(buf, sizeof(struct i387_fxsave_struct)); -} - -int restore_i387_xstate_ia32(void __user *buf) -{ - int err; - struct task_struct *tsk = current; - struct _fpstate_ia32 __user *fp = (struct _fpstate_ia32 __user *) buf; - - if (HAVE_HWFP) - clear_fpu(tsk); - - if (!buf) { - if (used_math()) { - clear_fpu(tsk); - clear_used_math(); - } - - return 0; - } else - if (!access_ok(VERIFY_READ, buf, sig_xstate_ia32_size)) - return -EACCES; - - if (!used_math()) { - err = init_fpu(tsk); - if (err) - return err; - } - - if (HAVE_HWFP) { - if (cpu_has_xsave) - err = restore_i387_xsave(buf); - else if (cpu_has_fxsr) - err = restore_i387_fxsave(fp, sizeof(struct - i387_fxsave_struct)); - else - err = restore_i387_fsave(fp); - } else { - err = fpregs_soft_set(current, NULL, - 0, sizeof(struct user_i387_ia32_struct), - NULL, fp) != 0; - } - set_used_math(); - - return err; -} - /* * FPU state for core dumps. * This is only used for a.out dumps now. diff --git a/trunk/arch/x86/kernel/i8259.c b/trunk/arch/x86/kernel/i8259.c index 36d1853e91af..9a5c460404dc 100644 --- a/trunk/arch/x86/kernel/i8259.c +++ b/trunk/arch/x86/kernel/i8259.c @@ -263,7 +263,7 @@ static void i8259A_shutdown(void) * out of. 
*/ outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ - outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-1 */ + outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ } static struct syscore_ops i8259_syscore_ops = { diff --git a/trunk/arch/x86/kernel/irq.c b/trunk/arch/x86/kernel/irq.c index 355b13f6de8d..e4595f105910 100644 --- a/trunk/arch/x86/kernel/irq.c +++ b/trunk/arch/x86/kernel/irq.c @@ -270,7 +270,7 @@ void fixup_irqs(void) if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { break_affinity = 1; - affinity = cpu_all_mask; + affinity = cpu_online_mask; } chip = irq_data_get_irq_chip(data); @@ -328,6 +328,7 @@ void fixup_irqs(void) chip->irq_retrigger(data); raw_spin_unlock(&desc->lock); } + __this_cpu_write(vector_irq[vector], -1); } } #endif diff --git a/trunk/arch/x86/kernel/kdebugfs.c b/trunk/arch/x86/kernel/kdebugfs.c index 1d5d31ea686b..dc1404bf8e4b 100644 --- a/trunk/arch/x86/kernel/kdebugfs.c +++ b/trunk/arch/x86/kernel/kdebugfs.c @@ -107,7 +107,7 @@ static int __init create_setup_data_nodes(struct dentry *parent) { struct setup_data_node *node; struct setup_data *data; - int error = -ENOMEM; + int error; struct dentry *d; struct page *pg; u64 pa_data; @@ -121,8 +121,10 @@ static int __init create_setup_data_nodes(struct dentry *parent) while (pa_data) { node = kmalloc(sizeof(*node), GFP_KERNEL); - if (!node) + if (!node) { + error = -ENOMEM; goto err_dir; + } pg = pfn_to_page((pa_data+sizeof(*data)-1) >> PAGE_SHIFT); if (PageHighMem(pg)) { diff --git a/trunk/arch/x86/kernel/kprobes.c b/trunk/arch/x86/kernel/kprobes.c index e2f751efb7b1..57916c0d3cf6 100644 --- a/trunk/arch/x86/kernel/kprobes.c +++ b/trunk/arch/x86/kernel/kprobes.c @@ -541,6 +541,23 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb return 1; } +#ifdef KPROBES_CAN_USE_FTRACE +static void __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) +{ + /* + * Emulate singlestep (and also recover regs->ip) + * as if there is a 5byte nop + */ + regs->ip = (unsigned long)p->addr + MCOUNT_INSN_SIZE; + if (unlikely(p->post_handler)) { + kcb->kprobe_status = KPROBE_HIT_SSDONE; + p->post_handler(p, regs, 0); + } + __this_cpu_write(current_kprobe, NULL); +} +#endif + /* * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled throughout this function. 
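For users of the kprobes API nothing changes when a probe lands on an ftrace-managed mcount/fentry site: the same registration is then served through kprobe_ftrace_handler() and skip_singlestep() instead of an int3 breakpoint plus hardware single-step. A sketch, with a hypothetical target symbol, not code from this patch:

#include <linux/kprobes.h>

static int sample_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* regs->ip has been adjusted so the handler sees a breakpoint-style hit */
	return 0;
}

static struct kprobe sample_kp = {
	.symbol_name	= "do_fork",	/* hypothetical target */
	.pre_handler	= sample_pre,
};

/* register_kprobe(&sample_kp): on an ftrace call site the "single step"
 * is emulated by advancing regs->ip by MCOUNT_INSN_SIZE, as
 * skip_singlestep() above does. */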
@@ -599,6 +616,12 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) } else if (kprobe_running()) { p = __this_cpu_read(current_kprobe); if (p->break_handler && p->break_handler(p, regs)) { +#ifdef KPROBES_CAN_USE_FTRACE + if (kprobe_ftrace(p)) { + skip_singlestep(p, regs, kcb); + return 1; + } +#endif setup_singlestep(p, regs, kcb, 0); return 1; } @@ -1052,6 +1075,50 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } +#ifdef KPROBES_CAN_USE_FTRACE +/* Ftrace callback handler for kprobes */ +void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) +{ + struct kprobe *p; + struct kprobe_ctlblk *kcb; + unsigned long flags; + + /* Disable irq for emulating a breakpoint and avoiding preempt */ + local_irq_save(flags); + + p = get_kprobe((kprobe_opcode_t *)ip); + if (unlikely(!p) || kprobe_disabled(p)) + goto end; + + kcb = get_kprobe_ctlblk(); + if (kprobe_running()) { + kprobes_inc_nmissed_count(p); + } else { + /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ + regs->ip = ip + sizeof(kprobe_opcode_t); + + __this_cpu_write(current_kprobe, p); + kcb->kprobe_status = KPROBE_HIT_ACTIVE; + if (!p->pre_handler || !p->pre_handler(p, regs)) + skip_singlestep(p, regs, kcb); + /* + * If pre_handler returns !0, it sets regs->ip and + * resets current kprobe. + */ + } +end: + local_irq_restore(flags); +} + +int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) +{ + p->ainsn.insn = NULL; + p->ainsn.boostable = -1; + return 0; +} +#endif + int __init arch_init_kprobes(void) { return arch_init_optprobes(); diff --git a/trunk/arch/x86/kernel/microcode_amd.c b/trunk/arch/x86/kernel/microcode_amd.c index 8a2ce8fd41c0..82746f942cd8 100644 --- a/trunk/arch/x86/kernel/microcode_amd.c +++ b/trunk/arch/x86/kernel/microcode_amd.c @@ -143,11 +143,12 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr, unsigned int *current_size) { struct microcode_header_amd *mc_hdr; - unsigned int actual_size; + unsigned int actual_size, patch_size; u16 equiv_cpu_id; /* size of the current patch we're staring at */ - *current_size = *(u32 *)(ucode_ptr + 4) + SECTION_HDR_SIZE; + patch_size = *(u32 *)(ucode_ptr + 4); + *current_size = patch_size + SECTION_HDR_SIZE; equiv_cpu_id = find_equiv_id(); if (!equiv_cpu_id) @@ -174,7 +175,7 @@ static int get_matching_microcode(int cpu, const u8 *ucode_ptr, /* * now that the header looks sane, verify its size */ - actual_size = verify_ucode_size(cpu, *current_size, leftover_size); + actual_size = verify_ucode_size(cpu, patch_size, leftover_size); if (!actual_size) return 0; diff --git a/trunk/arch/x86/kernel/microcode_core.c b/trunk/arch/x86/kernel/microcode_core.c index 4873e62db6a1..9e5bcf1e2376 100644 --- a/trunk/arch/x86/kernel/microcode_core.c +++ b/trunk/arch/x86/kernel/microcode_core.c @@ -225,6 +225,9 @@ static ssize_t microcode_write(struct file *file, const char __user *buf, if (do_microcode_update(buf, len) == 0) ret = (ssize_t)len; + if (ret > 0) + perf_check_microcode(); + mutex_unlock(µcode_mutex); put_online_cpus(); diff --git a/trunk/arch/x86/kernel/msr.c b/trunk/arch/x86/kernel/msr.c index eb113693f043..a7c5661f8496 100644 --- a/trunk/arch/x86/kernel/msr.c +++ b/trunk/arch/x86/kernel/msr.c @@ -257,12 +257,14 @@ static int __init msr_init(void) goto out_chrdev; } msr_class->devnode = msr_devnode; + get_online_cpus(); for_each_online_cpu(i) { err = msr_device_create(i); if (err != 0) goto out_class; } 
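The cpuid and msr changes follow the same shape: hold the CPU-hotplug lock across the initial per-CPU scan and the notifier registration so no CPU can appear or vanish in between. Condensed into one hypothetical init routine; my_device_create and my_cpu_notifier are stand-ins for the real per-driver pieces:

#include <linux/cpu.h>
#include <linux/init.h>

static int my_device_create(int cpu) { return 0; }	/* stand-in helper */
static struct notifier_block my_cpu_notifier;		/* stand-in notifier */

static int __init percpu_dev_init(void)
{
	int cpu, err = 0;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		err = my_device_create(cpu);
		if (err)
			break;
	}
	if (!err)
		register_hotcpu_notifier(&my_cpu_notifier);
	put_online_cpus();

	return err;
}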
register_hotcpu_notifier(&msr_class_cpu_notifier); + put_online_cpus(); err = 0; goto out; @@ -271,6 +273,7 @@ static int __init msr_init(void) i = 0; for_each_online_cpu(i) msr_device_destroy(i); + put_online_cpus(); class_destroy(msr_class); out_chrdev: __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); @@ -281,11 +284,13 @@ static int __init msr_init(void) static void __exit msr_exit(void) { int cpu = 0; + get_online_cpus(); for_each_online_cpu(cpu) msr_device_destroy(cpu); class_destroy(msr_class); __unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr"); unregister_hotcpu_notifier(&msr_class_cpu_notifier); + put_online_cpus(); } module_init(msr_init); diff --git a/trunk/arch/x86/kernel/perf_regs.c b/trunk/arch/x86/kernel/perf_regs.c new file mode 100644 index 000000000000..e309cc5c276e --- /dev/null +++ b/trunk/arch/x86/kernel/perf_regs.c @@ -0,0 +1,105 @@ +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_X86_32 +#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX +#else +#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX +#endif + +#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r) + +static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = { + PT_REGS_OFFSET(PERF_REG_X86_AX, ax), + PT_REGS_OFFSET(PERF_REG_X86_BX, bx), + PT_REGS_OFFSET(PERF_REG_X86_CX, cx), + PT_REGS_OFFSET(PERF_REG_X86_DX, dx), + PT_REGS_OFFSET(PERF_REG_X86_SI, si), + PT_REGS_OFFSET(PERF_REG_X86_DI, di), + PT_REGS_OFFSET(PERF_REG_X86_BP, bp), + PT_REGS_OFFSET(PERF_REG_X86_SP, sp), + PT_REGS_OFFSET(PERF_REG_X86_IP, ip), + PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags), + PT_REGS_OFFSET(PERF_REG_X86_CS, cs), + PT_REGS_OFFSET(PERF_REG_X86_SS, ss), +#ifdef CONFIG_X86_32 + PT_REGS_OFFSET(PERF_REG_X86_DS, ds), + PT_REGS_OFFSET(PERF_REG_X86_ES, es), + PT_REGS_OFFSET(PERF_REG_X86_FS, fs), + PT_REGS_OFFSET(PERF_REG_X86_GS, gs), +#else + /* + * The pt_regs struct does not store + * ds, es, fs, gs in 64 bit mode. 
+ */ + (unsigned int) -1, + (unsigned int) -1, + (unsigned int) -1, + (unsigned int) -1, +#endif +#ifdef CONFIG_X86_64 + PT_REGS_OFFSET(PERF_REG_X86_R8, r8), + PT_REGS_OFFSET(PERF_REG_X86_R9, r9), + PT_REGS_OFFSET(PERF_REG_X86_R10, r10), + PT_REGS_OFFSET(PERF_REG_X86_R11, r11), + PT_REGS_OFFSET(PERF_REG_X86_R12, r12), + PT_REGS_OFFSET(PERF_REG_X86_R13, r13), + PT_REGS_OFFSET(PERF_REG_X86_R14, r14), + PT_REGS_OFFSET(PERF_REG_X86_R15, r15), +#endif +}; + +u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset))) + return 0; + + return regs_get_register(regs, pt_regs_offset[idx]); +} + +#define REG_RESERVED (~((1ULL << PERF_REG_X86_MAX) - 1ULL)) + +#ifdef CONFIG_X86_32 +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_32; +} +#else /* CONFIG_X86_64 */ +#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ + (1ULL << PERF_REG_X86_ES) | \ + (1ULL << PERF_REG_X86_FS) | \ + (1ULL << PERF_REG_X86_GS)) + +int perf_reg_validate(u64 mask) +{ + if (!mask || mask & REG_RESERVED) + return -EINVAL; + + if (mask & REG_NOSUPPORT) + return -EINVAL; + + return 0; +} + +u64 perf_reg_abi(struct task_struct *task) +{ + if (test_tsk_thread_flag(task, TIF_IA32)) + return PERF_SAMPLE_REGS_ABI_32; + else + return PERF_SAMPLE_REGS_ABI_64; +} +#endif /* CONFIG_X86_32 */ diff --git a/trunk/arch/x86/kernel/probe_roms.c b/trunk/arch/x86/kernel/probe_roms.c index 0bc72e2069e3..d5f15c3f7b25 100644 --- a/trunk/arch/x86/kernel/probe_roms.c +++ b/trunk/arch/x86/kernel/probe_roms.c @@ -150,7 +150,7 @@ static struct resource *find_oprom(struct pci_dev *pdev) return oprom; } -void *pci_map_biosrom(struct pci_dev *pdev) +void __iomem *pci_map_biosrom(struct pci_dev *pdev) { struct resource *oprom = find_oprom(pdev); diff --git a/trunk/arch/x86/kernel/process.c b/trunk/arch/x86/kernel/process.c index ef6a8456f719..dc3567e083f9 100644 --- a/trunk/arch/x86/kernel/process.c +++ b/trunk/arch/x86/kernel/process.c @@ -66,15 +66,13 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) { int ret; - unlazy_fpu(src); - *dst = *src; if (fpu_allocated(&src->thread.fpu)) { memset(&dst->thread.fpu, 0, sizeof(dst->thread.fpu)); ret = fpu_alloc(&dst->thread.fpu); if (ret) return ret; - fpu_copy(&dst->thread.fpu, &src->thread.fpu); + fpu_copy(dst, src); } return 0; } @@ -97,16 +95,6 @@ void arch_task_cache_init(void) SLAB_PANIC | SLAB_NOTRACK, NULL); } -static inline void drop_fpu(struct task_struct *tsk) -{ - /* - * Forget coprocessor state.. - */ - tsk->fpu_counter = 0; - clear_fpu(tsk); - clear_used_math(); -} - /* * Free current thread data structures etc.. */ @@ -163,7 +151,13 @@ void flush_thread(void) flush_ptrace_hw_breakpoint(tsk); memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); - drop_fpu(tsk); + drop_init_fpu(tsk); + /* + * Free the FPU state for non xsave platforms. They get reallocated + * lazily at the first use. 
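The perf_regs.c table earlier in this hunk is an instance of a small, general technique: index a designated-initializer array of offsetof() values instead of writing a switch over register names. A standalone sketch under that assumption; the struct, enum and reg_get() helper are illustrative, not the kernel's:

#include <stddef.h>

struct sample_regs { unsigned long ax, bx, ip, sp; };

enum { SREG_AX, SREG_BX, SREG_IP, SREG_SP, SREG_MAX };

static const size_t sreg_offset[SREG_MAX] = {
	[SREG_AX] = offsetof(struct sample_regs, ax),
	[SREG_BX] = offsetof(struct sample_regs, bx),
	[SREG_IP] = offsetof(struct sample_regs, ip),
	[SREG_SP] = offsetof(struct sample_regs, sp),
};

/* Fetch one register by index, as perf_reg_value() does through
 * regs_get_register(). */
static unsigned long reg_get(const struct sample_regs *r, int idx)
{
	return *(const unsigned long *)((const char *)r + sreg_offset[idx]);
}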
+ */ + if (!use_eager_fpu()) + free_thread_xstate(tsk); } static void hard_disable_TSC(void) diff --git a/trunk/arch/x86/kernel/process_32.c b/trunk/arch/x86/kernel/process_32.c index 516fa186121b..b9ff83c7135b 100644 --- a/trunk/arch/x86/kernel/process_32.c +++ b/trunk/arch/x86/kernel/process_32.c @@ -190,10 +190,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) regs->cs = __USER_CS; regs->ip = new_ip; regs->sp = new_sp; - /* - * Free the old FP and other extended state - */ - free_thread_xstate(current); } EXPORT_SYMBOL_GPL(start_thread); diff --git a/trunk/arch/x86/kernel/process_64.c b/trunk/arch/x86/kernel/process_64.c index 0a980c9d7cb8..8a6d20ce1978 100644 --- a/trunk/arch/x86/kernel/process_64.c +++ b/trunk/arch/x86/kernel/process_64.c @@ -232,10 +232,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip, regs->cs = _cs; regs->ss = _ss; regs->flags = X86_EFLAGS_IF; - /* - * Free the old FP and other extended state - */ - free_thread_xstate(current); } void diff --git a/trunk/arch/x86/kernel/ptrace.c b/trunk/arch/x86/kernel/ptrace.c index c4c6a5c2bf0f..b00b33a18390 100644 --- a/trunk/arch/x86/kernel/ptrace.c +++ b/trunk/arch/x86/kernel/ptrace.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -1332,9 +1333,6 @@ static const struct user_regset_view user_x86_64_view = { #define genregs32_get genregs_get #define genregs32_set genregs_set -#define user_i387_ia32_struct user_i387_struct -#define user32_fxsr_struct user_fxsr_struct - #endif /* CONFIG_X86_64 */ #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION @@ -1463,6 +1461,8 @@ long syscall_trace_enter(struct pt_regs *regs) { long ret = 0; + rcu_user_exit(); + /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. @@ -1526,4 +1526,6 @@ void syscall_trace_leave(struct pt_regs *regs) !test_thread_flag(TIF_SYSCALL_EMU); if (step || test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, step); + + rcu_user_enter(); } diff --git a/trunk/arch/x86/kernel/signal.c b/trunk/arch/x86/kernel/signal.c index b280908a376e..3160c26db5e7 100644 --- a/trunk/arch/x86/kernel/signal.c +++ b/trunk/arch/x86/kernel/signal.c @@ -114,7 +114,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, regs->orig_ax = -1; /* disable syscall checks */ get_user_ex(buf, &sc->fpstate); - err |= restore_i387_xstate(buf); + err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32)); get_user_ex(*pax, &sc->ax); } get_user_catch(err); @@ -206,35 +206,32 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, void __user **fpstate) { /* Default to using normal stack */ + unsigned long math_size = 0; unsigned long sp = regs->sp; + unsigned long buf_fx = 0; int onsigstack = on_sig_stack(sp); -#ifdef CONFIG_X86_64 /* redzone */ - sp -= 128; -#endif /* CONFIG_X86_64 */ + if (config_enabled(CONFIG_X86_64)) + sp -= 128; if (!onsigstack) { /* This is the X/Open sanctioned signal stack switching. */ if (ka->sa.sa_flags & SA_ONSTACK) { if (current->sas_ss_size) sp = current->sas_ss_sp + current->sas_ss_size; - } else { -#ifdef CONFIG_X86_32 - /* This is the legacy signal stack switching. 
*/ - if ((regs->ss & 0xffff) != __USER_DS && - !(ka->sa.sa_flags & SA_RESTORER) && - ka->sa.sa_restorer) + } else if (config_enabled(CONFIG_X86_32) && + (regs->ss & 0xffff) != __USER_DS && + !(ka->sa.sa_flags & SA_RESTORER) && + ka->sa.sa_restorer) { + /* This is the legacy signal stack switching. */ sp = (unsigned long) ka->sa.sa_restorer; -#endif /* CONFIG_X86_32 */ } } if (used_math()) { - sp -= sig_xstate_size; -#ifdef CONFIG_X86_64 - sp = round_down(sp, 64); -#endif /* CONFIG_X86_64 */ + sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32), + &buf_fx, &math_size); *fpstate = (void __user *)sp; } @@ -247,8 +244,9 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, if (onsigstack && !likely(on_sig_stack(sp))) return (void __user *)-1L; - /* save i387 state */ - if (used_math() && save_i387_xstate(*fpstate) < 0) + /* save i387 and extended state */ + if (used_math() && + save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0) return (void __user *)-1L; return (void __user *)sp; @@ -474,6 +472,74 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, } #endif /* CONFIG_X86_32 */ +static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, + siginfo_t *info, compat_sigset_t *set, + struct pt_regs *regs) +{ +#ifdef CONFIG_X86_X32_ABI + struct rt_sigframe_x32 __user *frame; + void __user *restorer; + int err = 0; + void __user *fpstate = NULL; + + frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); + + if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) + return -EFAULT; + + if (ka->sa.sa_flags & SA_SIGINFO) { + if (copy_siginfo_to_user32(&frame->info, info)) + return -EFAULT; + } + + put_user_try { + /* Create the ucontext. */ + if (cpu_has_xsave) + put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); + else + put_user_ex(0, &frame->uc.uc_flags); + put_user_ex(0, &frame->uc.uc_link); + put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); + put_user_ex(sas_ss_flags(regs->sp), + &frame->uc.uc_stack.ss_flags); + put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); + put_user_ex(0, &frame->uc.uc__pad0); + err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, + regs, set->sig[0]); + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + if (ka->sa.sa_flags & SA_RESTORER) { + restorer = ka->sa.sa_restorer; + } else { + /* could use a vstub here */ + restorer = NULL; + err |= -EFAULT; + } + put_user_ex(restorer, &frame->pretcode); + } put_user_catch(err); + + if (err) + return -EFAULT; + + /* Set up registers for signal handler */ + regs->sp = (unsigned long) frame; + regs->ip = (unsigned long) ka->sa.sa_handler; + + /* We use the x32 calling convention here... */ + regs->di = sig; + regs->si = (unsigned long) &frame->info; + regs->dx = (unsigned long) &frame->uc; + + loadsegment(ds, __USER_DS); + loadsegment(es, __USER_DS); + + regs->cs = __USER_CS; + regs->ss = __USER_DS; +#endif /* CONFIG_X86_X32_ABI */ + + return 0; +} + #ifdef CONFIG_X86_32 /* * Atomically swap in the new signal mask, and wait for a signal. 
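The get_sigframe() rework above trades #ifdef blocks for config_enabled(), so both sides are always parsed and type-checked while the dead branch still folds away at compile time. A minimal sketch of the idiom; place_frame() is an illustrative name, not a kernel function:

#include <linux/kconfig.h>

static unsigned long place_frame(unsigned long sp)
{
	/* Equivalent to the old "#ifdef CONFIG_X86_64 ... #endif" redzone skip */
	if (config_enabled(CONFIG_X86_64))
		sp -= 128;

	return sp;
}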
@@ -612,55 +678,22 @@ static int signr_convert(int sig) return sig; } -#ifdef CONFIG_X86_32 - -#define is_ia32 1 -#define ia32_setup_frame __setup_frame -#define ia32_setup_rt_frame __setup_rt_frame - -#else /* !CONFIG_X86_32 */ - -#ifdef CONFIG_IA32_EMULATION -#define is_ia32 test_thread_flag(TIF_IA32) -#else /* !CONFIG_IA32_EMULATION */ -#define is_ia32 0 -#endif /* CONFIG_IA32_EMULATION */ - -#ifdef CONFIG_X86_X32_ABI -#define is_x32 test_thread_flag(TIF_X32) - -static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, - siginfo_t *info, compat_sigset_t *set, - struct pt_regs *regs); -#else /* !CONFIG_X86_X32_ABI */ -#define is_x32 0 -#endif /* CONFIG_X86_X32_ABI */ - -int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, - sigset_t *set, struct pt_regs *regs); -int ia32_setup_frame(int sig, struct k_sigaction *ka, - sigset_t *set, struct pt_regs *regs); - -#endif /* CONFIG_X86_32 */ - static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, struct pt_regs *regs) { int usig = signr_convert(sig); sigset_t *set = sigmask_to_save(); + compat_sigset_t *cset = (compat_sigset_t *) set; /* Set up the stack frame */ - if (is_ia32) { + if (is_ia32_frame()) { if (ka->sa.sa_flags & SA_SIGINFO) - return ia32_setup_rt_frame(usig, ka, info, set, regs); + return ia32_setup_rt_frame(usig, ka, info, cset, regs); else - return ia32_setup_frame(usig, ka, set, regs); -#ifdef CONFIG_X86_X32_ABI - } else if (is_x32) { - return x32_setup_rt_frame(usig, ka, info, - (compat_sigset_t *)set, regs); -#endif + return ia32_setup_frame(usig, ka, cset, regs); + } else if (is_x32_frame()) { + return x32_setup_rt_frame(usig, ka, info, cset, regs); } else { return __setup_rt_frame(sig, ka, info, set, regs); } @@ -779,6 +812,8 @@ static void do_signal(struct pt_regs *regs) void do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) { + rcu_user_exit(); + #ifdef CONFIG_X86_MCE /* notify userspace of pending MCEs */ if (thread_info_flags & _TIF_MCE_NOTIFY) @@ -804,6 +839,8 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags) #ifdef CONFIG_X86_32 clear_thread_flag(TIF_IRET); #endif /* CONFIG_X86_32 */ + + rcu_user_enter(); } void signal_fault(struct pt_regs *regs, void __user *frame, char *where) @@ -824,72 +861,6 @@ void signal_fault(struct pt_regs *regs, void __user *frame, char *where) } #ifdef CONFIG_X86_X32_ABI -static int x32_setup_rt_frame(int sig, struct k_sigaction *ka, - siginfo_t *info, compat_sigset_t *set, - struct pt_regs *regs) -{ - struct rt_sigframe_x32 __user *frame; - void __user *restorer; - int err = 0; - void __user *fpstate = NULL; - - frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); - - if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) - return -EFAULT; - - if (ka->sa.sa_flags & SA_SIGINFO) { - if (copy_siginfo_to_user32(&frame->info, info)) - return -EFAULT; - } - - put_user_try { - /* Create the ucontext. 
*/ - if (cpu_has_xsave) - put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags); - else - put_user_ex(0, &frame->uc.uc_flags); - put_user_ex(0, &frame->uc.uc_link); - put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp); - put_user_ex(sas_ss_flags(regs->sp), - &frame->uc.uc_stack.ss_flags); - put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size); - put_user_ex(0, &frame->uc.uc__pad0); - err |= setup_sigcontext(&frame->uc.uc_mcontext, fpstate, - regs, set->sig[0]); - err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); - - if (ka->sa.sa_flags & SA_RESTORER) { - restorer = ka->sa.sa_restorer; - } else { - /* could use a vstub here */ - restorer = NULL; - err |= -EFAULT; - } - put_user_ex(restorer, &frame->pretcode); - } put_user_catch(err); - - if (err) - return -EFAULT; - - /* Set up registers for signal handler */ - regs->sp = (unsigned long) frame; - regs->ip = (unsigned long) ka->sa.sa_handler; - - /* We use the x32 calling convention here... */ - regs->di = sig; - regs->si = (unsigned long) &frame->info; - regs->dx = (unsigned long) &frame->uc; - - loadsegment(ds, __USER_DS); - loadsegment(es, __USER_DS); - - regs->cs = __USER_CS; - regs->ss = __USER_DS; - - return 0; -} - asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs) { struct rt_sigframe_x32 __user *frame; diff --git a/trunk/arch/x86/kernel/smpboot.c b/trunk/arch/x86/kernel/smpboot.c index 7c5a8c314c02..c80a33bc528b 100644 --- a/trunk/arch/x86/kernel/smpboot.c +++ b/trunk/arch/x86/kernel/smpboot.c @@ -665,7 +665,8 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) unsigned long boot_error = 0; int timeout; - alternatives_smp_switch(1); + /* Just in case we booted with a single CPU. */ + alternatives_enable_smp(); idle->thread.sp = (unsigned long) (((struct pt_regs *) (THREAD_SIZE + task_stack_page(idle))) - 1); @@ -1053,20 +1054,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus) preempt_enable(); } -void arch_disable_nonboot_cpus_begin(void) -{ - /* - * Avoid the smp alternatives switch during the disable_nonboot_cpus(). - * In the suspend path, we will be back in the SMP mode shortly anyways. - */ - skip_smp_alternatives = true; -} - -void arch_disable_nonboot_cpus_end(void) -{ - skip_smp_alternatives = false; -} - void arch_enable_nonboot_cpus_begin(void) { set_mtrr_aps_delayed_init(); @@ -1256,9 +1243,6 @@ void native_cpu_die(unsigned int cpu) if (per_cpu(cpu_state, cpu) == CPU_DEAD) { if (system_state == SYSTEM_RUNNING) pr_info("CPU %u is now offline\n", cpu); - - if (1 == num_online_cpus()) - alternatives_smp_switch(0); return; } msleep(100); diff --git a/trunk/arch/x86/kernel/step.c b/trunk/arch/x86/kernel/step.c index c346d1161488..cd3b2438a980 100644 --- a/trunk/arch/x86/kernel/step.c +++ b/trunk/arch/x86/kernel/step.c @@ -157,6 +157,33 @@ static int enable_single_step(struct task_struct *child) return 1; } +void set_task_blockstep(struct task_struct *task, bool on) +{ + unsigned long debugctl; + + /* + * Ensure irq/preemption can't change debugctl in between. + * Note also that both TIF_BLOCKSTEP and debugctl should + * be changed atomically wrt preemption. + * FIXME: this means that set/clear TIF_BLOCKSTEP is simply + * wrong if task != current, SIGKILL can wakeup the stopped + * tracee and set/clear can play with the running task, this + * can confuse the next __switch_to_xtra(). 
+ */ + local_irq_disable(); + debugctl = get_debugctlmsr(); + if (on) { + debugctl |= DEBUGCTLMSR_BTF; + set_tsk_thread_flag(task, TIF_BLOCKSTEP); + } else { + debugctl &= ~DEBUGCTLMSR_BTF; + clear_tsk_thread_flag(task, TIF_BLOCKSTEP); + } + if (task == current) + update_debugctlmsr(debugctl); + local_irq_enable(); +} + /* * Enable single or block step. */ @@ -169,19 +196,10 @@ static void enable_step(struct task_struct *child, bool block) * So no one should try to use debugger block stepping in a program * that uses user-mode single stepping itself. */ - if (enable_single_step(child) && block) { - unsigned long debugctl = get_debugctlmsr(); - - debugctl |= DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); - set_tsk_thread_flag(child, TIF_BLOCKSTEP); - } else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) { - unsigned long debugctl = get_debugctlmsr(); - - debugctl &= ~DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); - clear_tsk_thread_flag(child, TIF_BLOCKSTEP); - } + if (enable_single_step(child) && block) + set_task_blockstep(child, true); + else if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) + set_task_blockstep(child, false); } void user_enable_single_step(struct task_struct *child) @@ -199,13 +217,8 @@ void user_disable_single_step(struct task_struct *child) /* * Make sure block stepping (BTF) is disabled. */ - if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) { - unsigned long debugctl = get_debugctlmsr(); - - debugctl &= ~DEBUGCTLMSR_BTF; - update_debugctlmsr(debugctl); - clear_tsk_thread_flag(child, TIF_BLOCKSTEP); - } + if (test_tsk_thread_flag(child, TIF_BLOCKSTEP)) + set_task_blockstep(child, false); /* Always clear TIF_SINGLESTEP... */ clear_tsk_thread_flag(child, TIF_SINGLESTEP); diff --git a/trunk/arch/x86/kernel/traps.c b/trunk/arch/x86/kernel/traps.c index b481341c9369..8276dc6794cc 100644 --- a/trunk/arch/x86/kernel/traps.c +++ b/trunk/arch/x86/kernel/traps.c @@ -55,6 +55,7 @@ #include #include #include +#include #include @@ -107,30 +108,45 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) dec_preempt_count(); } -static void __kprobes -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, - long error_code, siginfo_t *info) +static int __kprobes +do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, + struct pt_regs *regs, long error_code) { - struct task_struct *tsk = current; - #ifdef CONFIG_X86_32 if (regs->flags & X86_VM_MASK) { /* - * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. + * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86. * On nmi (interrupt 2), do_trap should not be called. 
*/ - if (trapnr < X86_TRAP_UD) - goto vm86_trap; - goto trap_signal; + if (trapnr < X86_TRAP_UD) { + if (!handle_vm86_trap((struct kernel_vm86_regs *) regs, + error_code, trapnr)) + return 0; + } + return -1; } #endif + if (!user_mode(regs)) { + if (!fixup_exception(regs)) { + tsk->thread.error_code = error_code; + tsk->thread.trap_nr = trapnr; + die(str, regs, error_code); + } + return 0; + } - if (!user_mode(regs)) - goto kernel_trap; + return -1; +} -#ifdef CONFIG_X86_32 -trap_signal: -#endif +static void __kprobes +do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, + long error_code, siginfo_t *info) +{ + struct task_struct *tsk = current; + + + if (!do_trap_no_signal(tsk, trapnr, str, regs, error_code)) + return; /* * We want error_code and trap_nr set for userspace faults and * kernelspace faults which result in die(), but not @@ -158,33 +174,20 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, force_sig_info(signr, info, tsk); else force_sig(signr, tsk); - return; - -kernel_trap: - if (!fixup_exception(regs)) { - tsk->thread.error_code = error_code; - tsk->thread.trap_nr = trapnr; - die(str, regs, error_code); - } - return; - -#ifdef CONFIG_X86_32 -vm86_trap: - if (handle_vm86_trap((struct kernel_vm86_regs *) regs, - error_code, trapnr)) - goto trap_signal; - return; -#endif } #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ - == NOTIFY_STOP) \ + exception_enter(regs); \ + if (notify_die(DIE_TRAP, str, regs, error_code, \ + trapnr, signr) == NOTIFY_STOP) { \ + exception_exit(regs); \ return; \ + } \ conditional_sti(regs); \ do_trap(trapnr, signr, str, regs, error_code, NULL); \ + exception_exit(regs); \ } #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ @@ -195,11 +198,15 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ info.si_errno = 0; \ info.si_code = sicode; \ info.si_addr = (void __user *)siaddr; \ - if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ - == NOTIFY_STOP) \ + exception_enter(regs); \ + if (notify_die(DIE_TRAP, str, regs, error_code, \ + trapnr, signr) == NOTIFY_STOP) { \ + exception_exit(regs); \ return; \ + } \ conditional_sti(regs); \ do_trap(trapnr, signr, str, regs, error_code, &info); \ + exception_exit(regs); \ } DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, @@ -222,12 +229,14 @@ DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, /* Runs on IST stack */ dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code) { + exception_enter(regs); if (notify_die(DIE_TRAP, "stack segment", regs, error_code, - X86_TRAP_SS, SIGBUS) == NOTIFY_STOP) - return; - preempt_conditional_sti(regs); - do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); - preempt_conditional_cli(regs); + X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) { + preempt_conditional_sti(regs); + do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL); + preempt_conditional_cli(regs); + } + exception_exit(regs); } dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) @@ -235,6 +244,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) static const char str[] = "double fault"; struct task_struct *tsk = current; + exception_enter(regs); /* Return not checked because double check cannot be ignored */ notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, 
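The reworked DO_ERROR()/DO_ERROR_INFO() macros now bracket the whole handler with exception_enter()/exception_exit(), including the NOTIFY_STOP early-return path. As an illustration (not part of the patch itself), an instantiation along the lines of DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) would expand to roughly:

dotraplinkage void do_overflow(struct pt_regs *regs, long error_code)
{
	exception_enter(regs);
	if (notify_die(DIE_TRAP, "overflow", regs, error_code,
		       X86_TRAP_OF, SIGSEGV) == NOTIFY_STOP) {
		exception_exit(regs);
		return;
	}
	conditional_sti(regs);
	do_trap(X86_TRAP_OF, SIGSEGV, "overflow", regs, error_code, NULL);
	exception_exit(regs);
}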
SIGSEGV); @@ -255,16 +265,29 @@ do_general_protection(struct pt_regs *regs, long error_code) { struct task_struct *tsk; + exception_enter(regs); conditional_sti(regs); #ifdef CONFIG_X86_32 - if (regs->flags & X86_VM_MASK) - goto gp_in_vm86; + if (regs->flags & X86_VM_MASK) { + local_irq_enable(); + handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); + goto exit; + } #endif tsk = current; - if (!user_mode(regs)) - goto gp_in_kernel; + if (!user_mode(regs)) { + if (fixup_exception(regs)) + goto exit; + + tsk->thread.error_code = error_code; + tsk->thread.trap_nr = X86_TRAP_GP; + if (notify_die(DIE_GPF, "general protection fault", regs, error_code, + X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) + die("general protection fault", regs, error_code); + goto exit; + } tsk->thread.error_code = error_code; tsk->thread.trap_nr = X86_TRAP_GP; @@ -279,25 +302,8 @@ do_general_protection(struct pt_regs *regs, long error_code) } force_sig(SIGSEGV, tsk); - return; - -#ifdef CONFIG_X86_32 -gp_in_vm86: - local_irq_enable(); - handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code); - return; -#endif - -gp_in_kernel: - if (fixup_exception(regs)) - return; - - tsk->thread.error_code = error_code; - tsk->thread.trap_nr = X86_TRAP_GP; - if (notify_die(DIE_GPF, "general protection fault", regs, error_code, - X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP) - return; - die("general protection fault", regs, error_code); +exit: + exception_exit(regs); } /* May run on IST stack. */ @@ -312,15 +318,16 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co ftrace_int3_handler(regs)) return; #endif + exception_enter(regs); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) - return; + goto exit; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) - return; + goto exit; /* * Let others (NMI) know that the debug stack is in use @@ -331,6 +338,8 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL); preempt_conditional_cli(regs); debug_stack_usage_dec(); +exit: + exception_exit(regs); } #ifdef CONFIG_X86_64 @@ -391,6 +400,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) unsigned long dr6; int si_code; + exception_enter(regs); + get_debugreg(dr6, 6); /* Filter out all the reserved bits which are preset to 1 */ @@ -406,7 +417,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) /* Catch kmemcheck conditions first of all! 
*/ if ((dr6 & DR_STEP) && kmemcheck_trap(regs)) - return; + goto exit; /* DR6 may or may not be cleared by the CPU */ set_debugreg(0, 6); @@ -421,7 +432,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) if (notify_die(DIE_DEBUG, "debug", regs, PTR_ERR(&dr6), error_code, SIGTRAP) == NOTIFY_STOP) - return; + goto exit; /* * Let others (NMI) know that the debug stack is in use @@ -437,7 +448,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) X86_TRAP_DB); preempt_conditional_cli(regs); debug_stack_usage_dec(); - return; + goto exit; } /* @@ -458,7 +469,8 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) preempt_conditional_cli(regs); debug_stack_usage_dec(); - return; +exit: + exception_exit(regs); } /* @@ -555,14 +567,17 @@ dotraplinkage void do_coprocessor_error(struct pt_regs *regs, long error_code) #ifdef CONFIG_X86_32 ignore_fpu_irq = 1; #endif - + exception_enter(regs); math_error(regs, error_code, X86_TRAP_MF); + exception_exit(regs); } dotraplinkage void do_simd_coprocessor_error(struct pt_regs *regs, long error_code) { + exception_enter(regs); math_error(regs, error_code, X86_TRAP_XF); + exception_exit(regs); } dotraplinkage void @@ -613,11 +628,12 @@ void math_state_restore(void) } __thread_fpu_begin(tsk); + /* * Paranoid restore. send a SIGSEGV if we fail to restore the state. */ if (unlikely(restore_fpu_checking(tsk))) { - __thread_fpu_end(tsk); + drop_init_fpu(tsk); force_sig(SIGSEGV, tsk); return; } @@ -629,6 +645,9 @@ EXPORT_SYMBOL_GPL(math_state_restore); dotraplinkage void __kprobes do_device_not_available(struct pt_regs *regs, long error_code) { + exception_enter(regs); + BUG_ON(use_eager_fpu()); + #ifdef CONFIG_MATH_EMULATION if (read_cr0() & X86_CR0_EM) { struct math_emu_info info = { }; @@ -637,6 +656,7 @@ do_device_not_available(struct pt_regs *regs, long error_code) info.regs = regs; math_emulate(&info); + exception_exit(regs); return; } #endif @@ -644,12 +664,15 @@ do_device_not_available(struct pt_regs *regs, long error_code) #ifdef CONFIG_X86_32 conditional_sti(regs); #endif + exception_exit(regs); } #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) { siginfo_t info; + + exception_enter(regs); local_irq_enable(); info.si_signo = SIGILL; @@ -657,10 +680,11 @@ dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) info.si_code = ILL_BADSTK; info.si_addr = NULL; if (notify_die(DIE_TRAP, "iret exception", regs, error_code, - X86_TRAP_IRET, SIGILL) == NOTIFY_STOP) - return; - do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, - &info); + X86_TRAP_IRET, SIGILL) != NOTIFY_STOP) { + do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code, + &info); + } + exception_exit(regs); } #endif diff --git a/trunk/arch/x86/kernel/uprobes.c b/trunk/arch/x86/kernel/uprobes.c index 36fd42091fa7..9538f00827a9 100644 --- a/trunk/arch/x86/kernel/uprobes.c +++ b/trunk/arch/x86/kernel/uprobes.c @@ -41,6 +41,9 @@ /* Adjust the return address of a call insn */ #define UPROBE_FIX_CALL 0x2 +/* Instruction will modify TF, don't change it */ +#define UPROBE_FIX_SETF 0x4 + #define UPROBE_FIX_RIP_AX 0x8000 #define UPROBE_FIX_RIP_CX 0x4000 @@ -239,6 +242,10 @@ static void prepare_fixups(struct arch_uprobe *auprobe, struct insn *insn) insn_get_opcode(insn); /* should be a nop */ switch (OPCODE1(insn)) { + case 0x9d: + /* popf */ + auprobe->fixups |= UPROBE_FIX_SETF; + break; case 0xc3: /* ret/lret */ case 0xcb: case 0xc2: @@ 
-646,7 +653,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) * Skip these instructions as per the currently known x86 ISA. * 0x66* { 0x90 | 0x0f 0x1f | 0x0f 0x19 | 0x87 0xc0 } */ -bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +static bool __skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) { int i; @@ -673,3 +680,46 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) } return false; } + +bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + bool ret = __skip_sstep(auprobe, regs); + if (ret && (regs->flags & X86_EFLAGS_TF)) + send_sig(SIGTRAP, current, 0); + return ret; +} + +void arch_uprobe_enable_step(struct arch_uprobe *auprobe) +{ + struct task_struct *task = current; + struct arch_uprobe_task *autask = &task->utask->autask; + struct pt_regs *regs = task_pt_regs(task); + + autask->saved_tf = !!(regs->flags & X86_EFLAGS_TF); + + regs->flags |= X86_EFLAGS_TF; + if (test_tsk_thread_flag(task, TIF_BLOCKSTEP)) + set_task_blockstep(task, false); +} + +void arch_uprobe_disable_step(struct arch_uprobe *auprobe) +{ + struct task_struct *task = current; + struct arch_uprobe_task *autask = &task->utask->autask; + bool trapped = (task->utask->state == UTASK_SSTEP_TRAPPED); + struct pt_regs *regs = task_pt_regs(task); + /* + * The state of TIF_BLOCKSTEP was not saved so we can get an extra + * SIGTRAP if we do not clear TF. We need to examine the opcode to + * make it right. + */ + if (unlikely(trapped)) { + if (!autask->saved_tf) + regs->flags &= ~X86_EFLAGS_TF; + } else { + if (autask->saved_tf) + send_sig(SIGTRAP, task, 0); + else if (!(auprobe->fixups & UPROBE_FIX_SETF)) + regs->flags &= ~X86_EFLAGS_TF; + } +} diff --git a/trunk/arch/x86/kernel/x8664_ksyms_64.c b/trunk/arch/x86/kernel/x8664_ksyms_64.c index 6020f6f5927c..1330dd102950 100644 --- a/trunk/arch/x86/kernel/x8664_ksyms_64.c +++ b/trunk/arch/x86/kernel/x8664_ksyms_64.c @@ -13,9 +13,13 @@ #include #ifdef CONFIG_FUNCTION_TRACER -/* mcount is defined in assembly */ +/* mcount and __fentry__ are defined in assembly */ +#ifdef CC_USING_FENTRY +EXPORT_SYMBOL(__fentry__); +#else EXPORT_SYMBOL(mcount); #endif +#endif EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); diff --git a/trunk/arch/x86/kernel/xsave.c b/trunk/arch/x86/kernel/xsave.c index 3d3e20709119..4e89b3dd408d 100644 --- a/trunk/arch/x86/kernel/xsave.c +++ b/trunk/arch/x86/kernel/xsave.c @@ -10,9 +10,7 @@ #include #include #include -#ifdef CONFIG_IA32_EMULATION -#include -#endif +#include #include /* @@ -23,13 +21,9 @@ u64 pcntxt_mask; /* * Represents init state for the supported extended state. */ -static struct xsave_struct *init_xstate_buf; - -struct _fpx_sw_bytes fx_sw_reserved; -#ifdef CONFIG_IA32_EMULATION -struct _fpx_sw_bytes fx_sw_reserved_ia32; -#endif +struct xsave_struct *init_xstate_buf; +static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32; static unsigned int *xstate_offsets, *xstate_sizes, xstate_features; /* @@ -44,9 +38,9 @@ static unsigned int *xstate_offsets, *xstate_sizes, xstate_features; */ void __sanitize_i387_state(struct task_struct *tsk) { - u64 xstate_bv; - int feature_bit = 0x2; struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave; + int feature_bit = 0x2; + u64 xstate_bv; if (!fx) return; @@ -104,213 +98,326 @@ void __sanitize_i387_state(struct task_struct *tsk) * Check for the presence of extended state information in the * user fpstate pointer in the sigcontext. 
*/ -int check_for_xstate(struct i387_fxsave_struct __user *buf, - void __user *fpstate, - struct _fpx_sw_bytes *fx_sw_user) +static inline int check_for_xstate(struct i387_fxsave_struct __user *buf, + void __user *fpstate, + struct _fpx_sw_bytes *fx_sw) { int min_xstate_size = sizeof(struct i387_fxsave_struct) + sizeof(struct xsave_hdr_struct); unsigned int magic2; - int err; - err = __copy_from_user(fx_sw_user, &buf->sw_reserved[0], - sizeof(struct _fpx_sw_bytes)); - if (err) - return -EFAULT; + if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw))) + return -1; - /* - * First Magic check failed. - */ - if (fx_sw_user->magic1 != FP_XSTATE_MAGIC1) - return -EINVAL; + /* Check for the first magic field and other error scenarios. */ + if (fx_sw->magic1 != FP_XSTATE_MAGIC1 || + fx_sw->xstate_size < min_xstate_size || + fx_sw->xstate_size > xstate_size || + fx_sw->xstate_size > fx_sw->extended_size) + return -1; - /* - * Check for error scenarios. - */ - if (fx_sw_user->xstate_size < min_xstate_size || - fx_sw_user->xstate_size > xstate_size || - fx_sw_user->xstate_size > fx_sw_user->extended_size) - return -EINVAL; - - err = __get_user(magic2, (__u32 *) (((void *)fpstate) + - fx_sw_user->extended_size - - FP_XSTATE_MAGIC2_SIZE)); - if (err) - return err; /* * Check for the presence of second magic word at the end of memory * layout. This detects the case where the user just copied the legacy * fpstate layout with out copying the extended state information * in the memory layout. */ - if (magic2 != FP_XSTATE_MAGIC2) - return -EFAULT; + if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size)) + || magic2 != FP_XSTATE_MAGIC2) + return -1; return 0; } -#ifdef CONFIG_X86_64 /* * Signal frame handlers. */ - -int save_i387_xstate(void __user *buf) +static inline int save_fsave_header(struct task_struct *tsk, void __user *buf) { - struct task_struct *tsk = current; - int err = 0; - - if (!access_ok(VERIFY_WRITE, buf, sig_xstate_size)) - return -EACCES; + if (use_fxsr()) { + struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; + struct user_i387_ia32_struct env; + struct _fpstate_ia32 __user *fp = buf; - BUG_ON(sig_xstate_size < xstate_size); + convert_from_fxsr(&env, tsk); - if ((unsigned long)buf % 64) - pr_err("%s: bad fpstate %p\n", __func__, buf); - - if (!used_math()) - return 0; - - if (user_has_fpu()) { - if (use_xsave()) - err = xsave_user(buf); - else - err = fxsave_user(buf); - - if (err) - return err; - user_fpu_end(); + if (__copy_to_user(buf, &env, sizeof(env)) || + __put_user(xsave->i387.swd, &fp->status) || + __put_user(X86_FXSR_MAGIC, &fp->magic)) + return -1; } else { - sanitize_i387_state(tsk); - if (__copy_to_user(buf, &tsk->thread.fpu.state->fxsave, - xstate_size)) + struct i387_fsave_struct __user *fp = buf; + u32 swd; + if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status)) return -1; } - clear_used_math(); /* trigger finit */ + return 0; +} - if (use_xsave()) { - struct _fpstate __user *fx = buf; - struct _xstate __user *x = buf; - u64 xstate_bv; +static inline int save_xstate_epilog(void __user *buf, int ia32_frame) +{ + struct xsave_struct __user *x = buf; + struct _fpx_sw_bytes *sw_bytes; + u32 xstate_bv; + int err; - err = __copy_to_user(&fx->sw_reserved, &fx_sw_reserved, - sizeof(struct _fpx_sw_bytes)); + /* Setup the bytes not touched by the [f]xsave and reserved for SW. */ + sw_bytes = ia32_frame ? 
&fx_sw_reserved_ia32 : &fx_sw_reserved; + err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes)); - err |= __put_user(FP_XSTATE_MAGIC2, - (__u32 __user *) (buf + sig_xstate_size - - FP_XSTATE_MAGIC2_SIZE)); + if (!use_xsave()) + return err; - /* - * Read the xstate_bv which we copied (directly from the cpu or - * from the state in task struct) to the user buffers and - * set the FP/SSE bits. - */ - err |= __get_user(xstate_bv, &x->xstate_hdr.xstate_bv); + err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size)); - /* - * For legacy compatible, we always set FP/SSE bits in the bit - * vector while saving the state to the user context. This will - * enable us capturing any changes(during sigreturn) to - * the FP/SSE bits by the legacy applications which don't touch - * xstate_bv in the xsave header. - * - * xsave aware apps can change the xstate_bv in the xsave - * header as well as change any contents in the memory layout. - * xrestore as part of sigreturn will capture all the changes. - */ - xstate_bv |= XSTATE_FPSSE; + /* + * Read the xstate_bv which we copied (directly from the cpu or + * from the state in task struct) to the user buffers. + */ + err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv); - err |= __put_user(xstate_bv, &x->xstate_hdr.xstate_bv); + /* + * For legacy compatible, we always set FP/SSE bits in the bit + * vector while saving the state to the user context. This will + * enable us capturing any changes(during sigreturn) to + * the FP/SSE bits by the legacy applications which don't touch + * xstate_bv in the xsave header. + * + * xsave aware apps can change the xstate_bv in the xsave + * header as well as change any contents in the memory layout. + * xrestore as part of sigreturn will capture all the changes. + */ + xstate_bv |= XSTATE_FPSSE; - if (err) - return err; - } + err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv); - return 1; + return err; +} + +static inline int save_user_xstate(struct xsave_struct __user *buf) +{ + int err; + + if (use_xsave()) + err = xsave_user(buf); + else if (use_fxsr()) + err = fxsave_user((struct i387_fxsave_struct __user *) buf); + else + err = fsave_user((struct i387_fsave_struct __user *) buf); + + if (unlikely(err) && __clear_user(buf, xstate_size)) + err = -EFAULT; + return err; } /* - * Restore the extended state if present. Otherwise, restore the FP/SSE - * state. + * Save the fpu, extended register state to the user signal frame. + * + * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save + * state is copied. + * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'. + * + * buf == buf_fx for 64-bit frames and 32-bit fsave frame. + * buf != buf_fx for 32-bit frames with fxstate. + * + * If the fpu, extended register state is live, save the state directly + * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise, + * copy the thread's fpu state to the user frame starting at 'buf_fx'. + * + * If this is a 32-bit frame with fxstate, put a fsave header before + * the aligned state at 'buf_fx'. + * + * For [f]xsave state, update the SW reserved fields in the [f]xsave frame + * indicating the absence/presence of the extended state to the user. 
*/ -static int restore_user_xstate(void __user *buf) +int save_xstate_sig(void __user *buf, void __user *buf_fx, int size) { - struct _fpx_sw_bytes fx_sw_user; - u64 mask; - int err; + struct xsave_struct *xsave = ¤t->thread.fpu.state->xsave; + struct task_struct *tsk = current; + int ia32_fxstate = (buf != buf_fx); - if (((unsigned long)buf % 64) || - check_for_xstate(buf, buf, &fx_sw_user)) - goto fx_only; + ia32_fxstate &= (config_enabled(CONFIG_X86_32) || + config_enabled(CONFIG_IA32_EMULATION)); - mask = fx_sw_user.xstate_bv; + if (!access_ok(VERIFY_WRITE, buf, size)) + return -EACCES; - /* - * restore the state passed by the user. - */ - err = xrestore_user(buf, mask); - if (err) - return err; + if (!HAVE_HWFP) + return fpregs_soft_get(current, NULL, 0, + sizeof(struct user_i387_ia32_struct), NULL, + (struct _fpstate_ia32 __user *) buf) ? -1 : 1; - /* - * init the state skipped by the user. - */ - mask = pcntxt_mask & ~mask; - if (unlikely(mask)) - xrstor_state(init_xstate_buf, mask); + if (user_has_fpu()) { + /* Save the live register state to the user directly. */ + if (save_user_xstate(buf_fx)) + return -1; + /* Update the thread's fxstate to save the fsave header. */ + if (ia32_fxstate) + fpu_fxsave(&tsk->thread.fpu); + } else { + sanitize_i387_state(tsk); + if (__copy_to_user(buf_fx, xsave, xstate_size)) + return -1; + } + + /* Save the fsave header for the 32-bit frames. */ + if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf)) + return -1; + + if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate)) + return -1; + + drop_init_fpu(tsk); /* trigger finit */ return 0; +} -fx_only: - /* - * couldn't find the extended state information in the - * memory layout. Restore just the FP/SSE and init all - * the other extended state. - */ - xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); - return fxrstor_checking((__force struct i387_fxsave_struct *)buf); +static inline void +sanitize_restored_xstate(struct task_struct *tsk, + struct user_i387_ia32_struct *ia32_env, + u64 xstate_bv, int fx_only) +{ + struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; + struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr; + + if (use_xsave()) { + /* These bits must be zero. */ + xsave_hdr->reserved1[0] = xsave_hdr->reserved1[1] = 0; + + /* + * Init the state that is not present in the memory + * layout and not enabled by the OS. + */ + if (fx_only) + xsave_hdr->xstate_bv = XSTATE_FPSSE; + else + xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv); + } + + if (use_fxsr()) { + /* + * mscsr reserved bits must be masked to zero for security + * reasons. + */ + xsave->i387.mxcsr &= mxcsr_feature_mask; + + convert_to_fxsr(tsk, ia32_env); + } } /* - * This restores directly out of user space. Exceptions are handled. + * Restore the extended state if present. Otherwise, restore the FP/SSE state. 
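save_xstate_sig() advertises the extended state through the _fpx_sw_bytes block kept in the software-reserved tail of the 512-byte fxsave image, plus the FP_XSTATE_MAGIC2 word that follows the xsave area. A hedged user-space sketch of peeking at that block from a signal handler; it assumes the usual x86_64 glibc ucontext layout, and the offset 464 and the magic value are copied from the kernel's asm/sigcontext.h definitions rather than taken from this patch:

#define _GNU_SOURCE
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

#define FP_XSTATE_MAGIC1 0x46505853u	/* "FPXS", from asm/sigcontext.h */

/* Assumed layout of the sw_reserved bytes at offset 464 of the fxsave image. */
struct fpx_sw_bytes {
	uint32_t magic1;
	uint32_t extended_size;
	uint64_t xstate_bv;
	uint32_t xstate_size;
	uint32_t padding[7];
};

static void handler(int sig, siginfo_t *si, void *uc_void)
{
	ucontext_t *uc = uc_void;
	const unsigned char *fx = (const unsigned char *)uc->uc_mcontext.fpregs;
	struct fpx_sw_bytes sw;

	(void)sig;
	(void)si;
	memcpy(&sw, fx + 464, sizeof(sw));
	if (sw.magic1 == FP_XSTATE_MAGIC1)
		printf("xstate in sigframe: xstate_bv=%#llx size=%u\n",
		       (unsigned long long)sw.xstate_bv, sw.xstate_size);
	else
		printf("legacy fxsave-only signal frame\n");
}

int main(void)
{
	struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}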
*/ -int restore_i387_xstate(void __user *buf) +static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only) { + if (use_xsave()) { + if ((unsigned long)buf % 64 || fx_only) { + u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE; + xrstor_state(init_xstate_buf, init_bv); + return fxrstor_checking((__force void *) buf); + } else { + u64 init_bv = pcntxt_mask & ~xbv; + if (unlikely(init_bv)) + xrstor_state(init_xstate_buf, init_bv); + return xrestore_user(buf, xbv); + } + } else if (use_fxsr()) { + return fxrstor_checking((__force void *) buf); + } else + return frstor_checking((__force void *) buf); +} + +int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size) +{ + int ia32_fxstate = (buf != buf_fx); struct task_struct *tsk = current; - int err = 0; + int state_size = xstate_size; + u64 xstate_bv = 0; + int fx_only = 0; + + ia32_fxstate &= (config_enabled(CONFIG_X86_32) || + config_enabled(CONFIG_IA32_EMULATION)); if (!buf) { - if (used_math()) - goto clear; + drop_init_fpu(tsk); return 0; - } else - if (!access_ok(VERIFY_READ, buf, sig_xstate_size)) - return -EACCES; + } - if (!used_math()) { - err = init_fpu(tsk); - if (err) - return err; + if (!access_ok(VERIFY_READ, buf, size)) + return -EACCES; + + if (!used_math() && init_fpu(tsk)) + return -1; + + if (!HAVE_HWFP) { + return fpregs_soft_set(current, NULL, + 0, sizeof(struct user_i387_ia32_struct), + NULL, buf) != 0; } - user_fpu_begin(); - if (use_xsave()) - err = restore_user_xstate(buf); - else - err = fxrstor_checking((__force struct i387_fxsave_struct *) - buf); - if (unlikely(err)) { + if (use_xsave()) { + struct _fpx_sw_bytes fx_sw_user; + if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) { + /* + * Couldn't find the extended state information in the + * memory layout. Restore just the FP/SSE and init all + * the other extended state. + */ + state_size = sizeof(struct i387_fxsave_struct); + fx_only = 1; + } else { + state_size = fx_sw_user.xstate_size; + xstate_bv = fx_sw_user.xstate_bv; + } + } + + if (ia32_fxstate) { + /* + * For 32-bit frames with fxstate, copy the user state to the + * thread's fpu state, reconstruct fxstate from the fsave + * header. Sanitize the copied state etc. + */ + struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave; + struct user_i387_ia32_struct env; + int err = 0; + + /* + * Drop the current fpu which clears used_math(). This ensures + * that any context-switch during the copy of the new state, + * avoids the intermediate state from getting restored/saved. + * Thus avoiding the new restored state from getting corrupted. + * We will be ready to restore/save the state only after + * set_used_math() is again set. + */ + drop_fpu(tsk); + + if (__copy_from_user(xsave, buf_fx, state_size) || + __copy_from_user(&env, buf, sizeof(env))) { + err = -1; + } else { + sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only); + set_used_math(); + } + + if (use_eager_fpu()) + math_state_restore(); + + return err; + } else { /* - * Encountered an error while doing the restore from the - * user buffer, clear the fpu state. + * For 64-bit frames and 32-bit fsave frames, restore the user + * state to the registers directly (with exceptions handled). 
*/ -clear: - clear_fpu(tsk); - clear_used_math(); + user_fpu_begin(); + if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) { + drop_init_fpu(tsk); + return -1; + } } - return err; + + return 0; } -#endif /* * Prepare the SW reserved portion of the fxsave memory layout, indicating @@ -321,31 +428,22 @@ int restore_i387_xstate(void __user *buf) */ static void prepare_fx_sw_frame(void) { - int size_extended = (xstate_size - sizeof(struct i387_fxsave_struct)) + - FP_XSTATE_MAGIC2_SIZE; + int fsave_header_size = sizeof(struct i387_fsave_struct); + int size = xstate_size + FP_XSTATE_MAGIC2_SIZE; - sig_xstate_size = sizeof(struct _fpstate) + size_extended; - -#ifdef CONFIG_IA32_EMULATION - sig_xstate_ia32_size = sizeof(struct _fpstate_ia32) + size_extended; -#endif - - memset(&fx_sw_reserved, 0, sizeof(fx_sw_reserved)); + if (config_enabled(CONFIG_X86_32)) + size += fsave_header_size; fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1; - fx_sw_reserved.extended_size = sig_xstate_size; + fx_sw_reserved.extended_size = size; fx_sw_reserved.xstate_bv = pcntxt_mask; fx_sw_reserved.xstate_size = xstate_size; -#ifdef CONFIG_IA32_EMULATION - memcpy(&fx_sw_reserved_ia32, &fx_sw_reserved, - sizeof(struct _fpx_sw_bytes)); - fx_sw_reserved_ia32.extended_size = sig_xstate_ia32_size; -#endif -} -#ifdef CONFIG_X86_64 -unsigned int sig_xstate_size = sizeof(struct _fpstate); -#endif + if (config_enabled(CONFIG_IA32_EMULATION)) { + fx_sw_reserved_ia32 = fx_sw_reserved; + fx_sw_reserved_ia32.extended_size += fsave_header_size; + } +} /* * Enable the extended processor state save/restore feature @@ -384,19 +482,21 @@ static void __init setup_xstate_features(void) /* * setup the xstate image representing the init state */ -static void __init setup_xstate_init(void) +static void __init setup_init_fpu_buf(void) { - setup_xstate_features(); - /* * Setup init_xstate_buf to represent the init state of * all the features managed by the xsave */ init_xstate_buf = alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct)); - init_xstate_buf->i387.mxcsr = MXCSR_DEFAULT; + fx_finit(&init_xstate_buf->i387); + + if (!cpu_has_xsave) + return; + + setup_xstate_features(); - clts(); /* * Init all the features state with header_bv being 0x0 */ @@ -406,9 +506,21 @@ static void __init setup_xstate_init(void) * of any feature which is not represented by all zero's. */ xsave_state(init_xstate_buf, -1); - stts(); } +static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO; +static int __init eager_fpu_setup(char *s) +{ + if (!strcmp(s, "on")) + eagerfpu = ENABLE; + else if (!strcmp(s, "off")) + eagerfpu = DISABLE; + else if (!strcmp(s, "auto")) + eagerfpu = AUTO; + return 1; +} +__setup("eagerfpu=", eager_fpu_setup); + /* * Enable and initialize the xsave feature. 
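prepare_fx_sw_frame() records pcntxt_mask, the set of xstate features the kernel enabled in XCR0, in the sw_reserved block, and setup_init_fpu_buf() builds the init image for exactly those features. A small sketch of reading the same feature mask from user space; it assumes an x86 toolchain whose assembler accepts the xgetbv mnemonic:

#include <stdint.h>
#include <stdio.h>

static uint64_t read_xcr0(void)
{
	uint32_t lo, hi;

	/* XGETBV with ECX = 0 returns XCR0, the enabled xstate features. */
	__asm__ volatile ("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint32_t eax, ebx, ecx, edx;

	/* CPUID.1:ECX bit 27 (OSXSAVE) tells us the OS set CR4.OSXSAVE. */
	__asm__ volatile ("cpuid"
			  : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
			  : "a"(1), "c"(0));
	if (!(ecx & (1u << 27))) {
		puts("xsave not enabled by the kernel");
		return 0;
	}

	uint64_t xcr0 = read_xcr0();
	printf("XCR0 = %#llx (x87=%d, SSE=%d, AVX=%d)\n",
	       (unsigned long long)xcr0,
	       !!(xcr0 & 1), !!(xcr0 & 2), !!(xcr0 & 4));
	return 0;
}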
*/ @@ -445,8 +557,11 @@ static void __init xstate_enable_boot_cpu(void) update_regset_xstate_info(xstate_size, pcntxt_mask); prepare_fx_sw_frame(); + setup_init_fpu_buf(); - setup_xstate_init(); + /* Auto enable eagerfpu for xsaveopt */ + if (cpu_has_xsaveopt && eagerfpu != DISABLE) + eagerfpu = ENABLE; pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x\n", pcntxt_mask, xstate_size); @@ -471,3 +586,43 @@ void __cpuinit xsave_init(void) next_func = xstate_enable; this_func(); } + +static inline void __init eager_fpu_init_bp(void) +{ + current->thread.fpu.state = + alloc_bootmem_align(xstate_size, __alignof__(struct xsave_struct)); + if (!init_xstate_buf) + setup_init_fpu_buf(); +} + +void __cpuinit eager_fpu_init(void) +{ + static __refdata void (*boot_func)(void) = eager_fpu_init_bp; + + clear_used_math(); + current_thread_info()->status = 0; + + if (eagerfpu == ENABLE) + setup_force_cpu_cap(X86_FEATURE_EAGER_FPU); + + if (!cpu_has_eager_fpu) { + stts(); + return; + } + + if (boot_func) { + boot_func(); + boot_func = NULL; + } + + /* + * This is same as math_state_restore(). But use_xsave() is + * not yet patched to use math_state_restore(). + */ + init_fpu(current); + __thread_fpu_begin(current); + if (cpu_has_xsave) + xrstor_state(init_xstate_buf, -1); + else + fxrstor_checking(&init_xstate_buf->i387); +} diff --git a/trunk/arch/x86/kvm/emulate.c b/trunk/arch/x86/kvm/emulate.c index 97d9a9914ba8..a3b57a27be88 100644 --- a/trunk/arch/x86/kvm/emulate.c +++ b/trunk/arch/x86/kvm/emulate.c @@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg) return address_mask(ctxt, reg); } +static void masked_increment(ulong *reg, ulong mask, int inc) +{ + assign_masked(reg, *reg + inc, mask); +} + static inline void register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc) { + ulong mask; + if (ctxt->ad_bytes == sizeof(unsigned long)) - *reg += inc; + mask = ~0UL; else - *reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt)); + mask = ad_mask(ctxt); + masked_increment(reg, mask, inc); +} + +static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc) +{ + masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc); } static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) @@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes) { struct segmented_address addr; - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes); - addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); + rsp_increment(ctxt, -bytes); + addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; return segmented_write(ctxt, addr, data, bytes); @@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, int rc; struct segmented_address addr; - addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]); + addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt); addr.seg = VCPU_SREG_SS; rc = segmented_read(ctxt, addr, dest, len); if (rc != X86EMUL_CONTINUE) return rc; - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len); + rsp_increment(ctxt, len); return rc; } @@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt) while (reg >= VCPU_REGS_RAX) { if (reg == VCPU_REGS_RSP) { - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], - ctxt->op_bytes); + rsp_increment(ctxt, ctxt->op_bytes); --reg; } @@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) rc = emulate_pop(ctxt, &ctxt->dst.val, 
ctxt->op_bytes); if (rc != X86EMUL_CONTINUE) return rc; - register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val); + rsp_increment(ctxt, ctxt->src.val); return X86EMUL_CONTINUE; } diff --git a/trunk/arch/x86/kvm/i8259.c b/trunk/arch/x86/kvm/i8259.c index 1df8fb9e1d5d..9fc9aa7ac703 100644 --- a/trunk/arch/x86/kvm/i8259.c +++ b/trunk/arch/x86/kvm/i8259.c @@ -316,6 +316,11 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) addr &= 1; if (addr == 0) { if (val & 0x10) { + u8 edge_irr = s->irr & ~s->elcr; + int i; + bool found = false; + struct kvm_vcpu *vcpu; + s->init4 = val & 1; s->last_irr = 0; s->irr &= s->elcr; @@ -333,6 +338,18 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val) if (val & 0x08) pr_pic_unimpl( "level sensitive irq not supported"); + + kvm_for_each_vcpu(i, vcpu, s->pics_state->kvm) + if (kvm_apic_accept_pic_intr(vcpu)) { + found = true; + break; + } + + + if (found) + for (irq = 0; irq < PIC_NUM_PINS/2; irq++) + if (edge_irr & (1 << irq)) + pic_clear_isr(s, irq); } else if (val & 0x08) { if (val & 0x04) s->poll = 1; diff --git a/trunk/arch/x86/kvm/mmu.c b/trunk/arch/x86/kvm/mmu.c index 01ca00423938..7fbd0d273ea8 100644 --- a/trunk/arch/x86/kvm/mmu.c +++ b/trunk/arch/x86/kvm/mmu.c @@ -4112,17 +4112,22 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc) int idx; LIST_HEAD(invalid_list); + /* + * Never scan more than sc->nr_to_scan VM instances. + * Will not hit this condition practically since we do not try + * to shrink more than one VM and it is very unlikely to see + * !n_used_mmu_pages so many times. + */ + if (!nr_to_scan--) + break; /* * n_used_mmu_pages is accessed without holding kvm->mmu_lock * here. We may skip a VM instance errorneosly, but we do not * want to shrink a VM that only started to populate its MMU * anyway. 
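The emulator's new rsp_increment()/masked_increment() helpers above adjust RSP while honouring the stack address size: only the bits covered by the stack mask change, the rest of the register is preserved. A stand-alone sketch of the same arithmetic, with the value of stack_mask(ctxt) modelled as an explicit parameter:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the emulator's assign_masked(): update only the masked bits. */
static void masked_increment(uint64_t *reg, uint64_t mask, int64_t inc)
{
	*reg = (*reg & ~mask) | ((*reg + inc) & mask);
}

int main(void)
{
	/* A 16-bit stack: only SP, the low 16 bits, may change and wrap. */
	uint64_t rsp = 0x0000123400000002ull;

	masked_increment(&rsp, 0xffffull, -8);		/* push of 8 bytes */
	printf("after 16-bit push: rsp=%#llx\n", (unsigned long long)rsp);

	/* A 64-bit stack: the full register is used. */
	uint64_t rsp64 = 0x00007fffffffe000ull;
	masked_increment(&rsp64, ~0ull, -8);
	printf("after 64-bit push: rsp64=%#llx\n", (unsigned long long)rsp64);
	return 0;
}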
*/ - if (kvm->arch.n_used_mmu_pages > 0) { - if (!nr_to_scan--) - break; + if (!kvm->arch.n_used_mmu_pages) continue; - } idx = srcu_read_lock(&kvm->srcu); spin_lock(&kvm->mmu_lock); diff --git a/trunk/arch/x86/kvm/trace.h b/trunk/arch/x86/kvm/trace.h index a71faf727ff3..bca63f04dccb 100644 --- a/trunk/arch/x86/kvm/trace.h +++ b/trunk/arch/x86/kvm/trace.h @@ -183,95 +183,6 @@ TRACE_EVENT(kvm_apic, #define KVM_ISA_VMX 1 #define KVM_ISA_SVM 2 -#define VMX_EXIT_REASONS \ - { EXIT_REASON_EXCEPTION_NMI, "EXCEPTION_NMI" }, \ - { EXIT_REASON_EXTERNAL_INTERRUPT, "EXTERNAL_INTERRUPT" }, \ - { EXIT_REASON_TRIPLE_FAULT, "TRIPLE_FAULT" }, \ - { EXIT_REASON_PENDING_INTERRUPT, "PENDING_INTERRUPT" }, \ - { EXIT_REASON_NMI_WINDOW, "NMI_WINDOW" }, \ - { EXIT_REASON_TASK_SWITCH, "TASK_SWITCH" }, \ - { EXIT_REASON_CPUID, "CPUID" }, \ - { EXIT_REASON_HLT, "HLT" }, \ - { EXIT_REASON_INVLPG, "INVLPG" }, \ - { EXIT_REASON_RDPMC, "RDPMC" }, \ - { EXIT_REASON_RDTSC, "RDTSC" }, \ - { EXIT_REASON_VMCALL, "VMCALL" }, \ - { EXIT_REASON_VMCLEAR, "VMCLEAR" }, \ - { EXIT_REASON_VMLAUNCH, "VMLAUNCH" }, \ - { EXIT_REASON_VMPTRLD, "VMPTRLD" }, \ - { EXIT_REASON_VMPTRST, "VMPTRST" }, \ - { EXIT_REASON_VMREAD, "VMREAD" }, \ - { EXIT_REASON_VMRESUME, "VMRESUME" }, \ - { EXIT_REASON_VMWRITE, "VMWRITE" }, \ - { EXIT_REASON_VMOFF, "VMOFF" }, \ - { EXIT_REASON_VMON, "VMON" }, \ - { EXIT_REASON_CR_ACCESS, "CR_ACCESS" }, \ - { EXIT_REASON_DR_ACCESS, "DR_ACCESS" }, \ - { EXIT_REASON_IO_INSTRUCTION, "IO_INSTRUCTION" }, \ - { EXIT_REASON_MSR_READ, "MSR_READ" }, \ - { EXIT_REASON_MSR_WRITE, "MSR_WRITE" }, \ - { EXIT_REASON_MWAIT_INSTRUCTION, "MWAIT_INSTRUCTION" }, \ - { EXIT_REASON_MONITOR_INSTRUCTION, "MONITOR_INSTRUCTION" }, \ - { EXIT_REASON_PAUSE_INSTRUCTION, "PAUSE_INSTRUCTION" }, \ - { EXIT_REASON_MCE_DURING_VMENTRY, "MCE_DURING_VMENTRY" }, \ - { EXIT_REASON_TPR_BELOW_THRESHOLD, "TPR_BELOW_THRESHOLD" }, \ - { EXIT_REASON_APIC_ACCESS, "APIC_ACCESS" }, \ - { EXIT_REASON_EPT_VIOLATION, "EPT_VIOLATION" }, \ - { EXIT_REASON_EPT_MISCONFIG, "EPT_MISCONFIG" }, \ - { EXIT_REASON_WBINVD, "WBINVD" } - -#define SVM_EXIT_REASONS \ - { SVM_EXIT_READ_CR0, "read_cr0" }, \ - { SVM_EXIT_READ_CR3, "read_cr3" }, \ - { SVM_EXIT_READ_CR4, "read_cr4" }, \ - { SVM_EXIT_READ_CR8, "read_cr8" }, \ - { SVM_EXIT_WRITE_CR0, "write_cr0" }, \ - { SVM_EXIT_WRITE_CR3, "write_cr3" }, \ - { SVM_EXIT_WRITE_CR4, "write_cr4" }, \ - { SVM_EXIT_WRITE_CR8, "write_cr8" }, \ - { SVM_EXIT_READ_DR0, "read_dr0" }, \ - { SVM_EXIT_READ_DR1, "read_dr1" }, \ - { SVM_EXIT_READ_DR2, "read_dr2" }, \ - { SVM_EXIT_READ_DR3, "read_dr3" }, \ - { SVM_EXIT_WRITE_DR0, "write_dr0" }, \ - { SVM_EXIT_WRITE_DR1, "write_dr1" }, \ - { SVM_EXIT_WRITE_DR2, "write_dr2" }, \ - { SVM_EXIT_WRITE_DR3, "write_dr3" }, \ - { SVM_EXIT_WRITE_DR5, "write_dr5" }, \ - { SVM_EXIT_WRITE_DR7, "write_dr7" }, \ - { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" }, \ - { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" }, \ - { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" }, \ - { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" }, \ - { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" }, \ - { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" }, \ - { SVM_EXIT_INTR, "interrupt" }, \ - { SVM_EXIT_NMI, "nmi" }, \ - { SVM_EXIT_SMI, "smi" }, \ - { SVM_EXIT_INIT, "init" }, \ - { SVM_EXIT_VINTR, "vintr" }, \ - { SVM_EXIT_CPUID, "cpuid" }, \ - { SVM_EXIT_INVD, "invd" }, \ - { SVM_EXIT_HLT, "hlt" }, \ - { SVM_EXIT_INVLPG, "invlpg" }, \ - { SVM_EXIT_INVLPGA, "invlpga" }, \ - { SVM_EXIT_IOIO, "io" }, \ - { SVM_EXIT_MSR, "msr" }, \ - { SVM_EXIT_TASK_SWITCH, 
"task_switch" }, \ - { SVM_EXIT_SHUTDOWN, "shutdown" }, \ - { SVM_EXIT_VMRUN, "vmrun" }, \ - { SVM_EXIT_VMMCALL, "hypercall" }, \ - { SVM_EXIT_VMLOAD, "vmload" }, \ - { SVM_EXIT_VMSAVE, "vmsave" }, \ - { SVM_EXIT_STGI, "stgi" }, \ - { SVM_EXIT_CLGI, "clgi" }, \ - { SVM_EXIT_SKINIT, "skinit" }, \ - { SVM_EXIT_WBINVD, "wbinvd" }, \ - { SVM_EXIT_MONITOR, "monitor" }, \ - { SVM_EXIT_MWAIT, "mwait" }, \ - { SVM_EXIT_XSETBV, "xsetbv" }, \ - { SVM_EXIT_NPF, "npf" } - /* * Tracepoint for kvm guest exit: */ diff --git a/trunk/arch/x86/kvm/vmx.c b/trunk/arch/x86/kvm/vmx.c index c39b60707e02..851aa7c3b890 100644 --- a/trunk/arch/x86/kvm/vmx.c +++ b/trunk/arch/x86/kvm/vmx.c @@ -1488,20 +1488,17 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx) loadsegment(ds, vmx->host_state.ds_sel); loadsegment(es, vmx->host_state.es_sel); } -#else - /* - * The sysexit path does not restore ds/es, so we must set them to - * a reasonable value ourselves. - */ - loadsegment(ds, __USER_DS); - loadsegment(es, __USER_DS); #endif reload_tss(); #ifdef CONFIG_X86_64 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); #endif - if (user_has_fpu()) - clts(); + /* + * If the FPU is not active (through the host task or + * the guest vcpu), then restore the cr0.TS bit. + */ + if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded) + stts(); load_gdt(&__get_cpu_var(host_gdt)); } @@ -3626,6 +3623,7 @@ static void seg_setup(int seg) static int alloc_apic_access_page(struct kvm *kvm) { + struct page *page; struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; @@ -3640,7 +3638,13 @@ static int alloc_apic_access_page(struct kvm *kvm) if (r) goto out; - kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00); + page = gfn_to_page(kvm, 0xfee00); + if (is_error_page(page)) { + r = -EFAULT; + goto out; + } + + kvm->arch.apic_access_page = page; out: mutex_unlock(&kvm->slots_lock); return r; @@ -3648,6 +3652,7 @@ static int alloc_apic_access_page(struct kvm *kvm) static int alloc_identity_pagetable(struct kvm *kvm) { + struct page *page; struct kvm_userspace_memory_region kvm_userspace_mem; int r = 0; @@ -3663,8 +3668,13 @@ static int alloc_identity_pagetable(struct kvm *kvm) if (r) goto out; - kvm->arch.ept_identity_pagetable = gfn_to_page(kvm, - kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); + page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT); + if (is_error_page(page)) { + r = -EFAULT; + goto out; + } + + kvm->arch.ept_identity_pagetable = page; out: mutex_unlock(&kvm->slots_lock); return r; @@ -3737,7 +3747,7 @@ static void vmx_set_constant_host_state(void) unsigned long tmpl; struct desc_ptr dt; - vmcs_writel(HOST_CR0, read_cr0() | X86_CR0_TS); /* 22.2.3 */ + vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */ vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ @@ -4537,7 +4547,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) vcpu->run->exit_reason = KVM_EXIT_SET_TPR; return 0; } - }; + } break; case 2: /* clts */ handle_clts(vcpu); @@ -6370,6 +6380,19 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) #endif ); +#ifndef CONFIG_X86_64 + /* + * The sysexit path does not restore ds/es, so we must set them to + * a reasonable value ourselves. + * + * We can't defer this to vmx_load_host_state() since that function + * may be executed in interrupt context, which saves and restore segments + * around it, nullifying its effect. 
+ */ + loadsegment(ds, __USER_DS); + loadsegment(es, __USER_DS); +#endif + vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP) | (1 << VCPU_EXREG_RFLAGS) | (1 << VCPU_EXREG_CPL) @@ -6569,7 +6592,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) /* Exposing INVPCID only when PCID is exposed */ best = kvm_find_cpuid_entry(vcpu, 0x7, 0); if (vmx_invpcid_supported() && - best && (best->ecx & bit(X86_FEATURE_INVPCID)) && + best && (best->ebx & bit(X86_FEATURE_INVPCID)) && guest_cpuid_has_pcid(vcpu)) { exec_control |= SECONDARY_EXEC_ENABLE_INVPCID; vmcs_write32(SECONDARY_VM_EXEC_CONTROL, @@ -6579,7 +6602,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu) vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); if (best) - best->ecx &= ~bit(X86_FEATURE_INVPCID); + best->ebx &= ~bit(X86_FEATURE_INVPCID); } } diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c index 59b59508ff07..1f09552572fa 100644 --- a/trunk/arch/x86/kvm/x86.c +++ b/trunk/arch/x86/kvm/x86.c @@ -806,7 +806,7 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc); * kvm-specific. Those are put in the beginning of the list. */ -#define KVM_SAVE_MSRS_BEGIN 9 +#define KVM_SAVE_MSRS_BEGIN 10 static u32 msrs_to_save[] = { MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK, MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW, @@ -925,6 +925,10 @@ static void kvm_write_wall_clock(struct kvm *kvm, gpa_t wall_clock) */ getboottime(&boot); + if (kvm->arch.kvmclock_offset) { + struct timespec ts = ns_to_timespec(kvm->arch.kvmclock_offset); + boot = timespec_sub(boot, ts); + } wc.sec = boot.tv_sec; wc.nsec = boot.tv_nsec; wc.version = version; @@ -1996,6 +2000,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata) case MSR_KVM_STEAL_TIME: data = vcpu->arch.st.msr_val; break; + case MSR_KVM_PV_EOI_EN: + data = vcpu->arch.pv_eoi.msr_val; + break; case MSR_IA32_P5_MC_ADDR: case MSR_IA32_P5_MC_TYPE: case MSR_IA32_MCG_CAP: @@ -5106,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) !kvm_event_needs_reinjection(vcpu); } -static void vapic_enter(struct kvm_vcpu *vcpu) +static int vapic_enter(struct kvm_vcpu *vcpu) { struct kvm_lapic *apic = vcpu->arch.apic; struct page *page; if (!apic || !apic->vapic_addr) - return; + return 0; page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); + if (is_error_page(page)) + return -EFAULT; vcpu->arch.apic->vapic_page = page; + return 0; } static void vapic_exit(struct kvm_vcpu *vcpu) @@ -5423,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) } vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); - vapic_enter(vcpu); + r = vapic_enter(vcpu); + if (r) { + srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + return r; + } r = 1; while (r > 0) { @@ -5965,7 +5979,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu) */ kvm_put_guest_xcr0(vcpu); vcpu->guest_fpu_loaded = 1; - unlazy_fpu(current); + __kernel_fpu_begin(); fpu_restore_checking(&vcpu->arch.guest_fpu); trace_kvm_fpu(1); } @@ -5979,6 +5993,7 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu) vcpu->guest_fpu_loaded = 0; fpu_save_init(&vcpu->arch.guest_fpu); + __kernel_fpu_end(); ++vcpu->stat.fpu_reload; kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu); trace_kvm_fpu(0); diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c index 76dcd9d8e0bc..7dde46d68a25 100644 --- a/trunk/arch/x86/mm/fault.c +++ b/trunk/arch/x86/mm/fault.c @@ -18,6 +18,7 @@ #include /* pgd_*(), ... */ #include /* kmemcheck_*(), ... */ #include /* VSYSCALL_START */ +#include /* exception_enter(), ... 
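The vmx_cpuid_update() change matters because INVPCID is advertised in CPUID.(EAX=07H,ECX=0):EBX bit 10, not in ECX, so testing and clearing ECX was looking at the wrong register. A quick sketch of querying the bit the way a guest would:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t eax, ebx, ecx, edx;

	/* CPUID leaf 7, subleaf 0: structured extended feature flags. */
	__asm__ volatile ("cpuid"
			  : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
			  : "a"(7), "c"(0));
	printf("INVPCID (EBX bit 10): %s\n",
	       (ebx & (1u << 10)) ? "supported" : "not supported");
	return 0;
}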
*/ /* * Page fault error code bits: @@ -1000,8 +1001,8 @@ static int fault_in_kernel_space(unsigned long address) * and the problem, and then passes it off to one of the appropriate * routines. */ -dotraplinkage void __kprobes -do_page_fault(struct pt_regs *regs, unsigned long error_code) +static void __kprobes +__do_page_fault(struct pt_regs *regs, unsigned long error_code) { struct vm_area_struct *vma; struct task_struct *tsk; @@ -1209,3 +1210,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) up_read(&mm->mmap_sem); } + +dotraplinkage void __kprobes +do_page_fault(struct pt_regs *regs, unsigned long error_code) +{ + exception_enter(regs); + __do_page_fault(regs, error_code); + exception_exit(regs); +} diff --git a/trunk/arch/x86/mm/hugetlbpage.c b/trunk/arch/x86/mm/hugetlbpage.c index f6679a7fb8ca..b91e48512425 100644 --- a/trunk/arch/x86/mm/hugetlbpage.c +++ b/trunk/arch/x86/mm/hugetlbpage.c @@ -56,9 +56,16 @@ static int vma_shareable(struct vm_area_struct *vma, unsigned long addr) } /* - * search for a shareable pmd page for hugetlb. + * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc() + * and returns the corresponding pte. While this is not necessary for the + * !shared pmd case because we can allocate the pmd later as well, it makes the + * code much cleaner. pmd allocation is essential for the shared case because + * pud has to be populated inside the same i_mmap_mutex section - otherwise + * racing tasks could either miss the sharing (see huge_pte_offset) or select a + * bad pmd for sharing. */ -static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) +static pte_t * +huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) { struct vm_area_struct *vma = find_vma(mm, addr); struct address_space *mapping = vma->vm_file->f_mapping; @@ -68,9 +75,10 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) struct vm_area_struct *svma; unsigned long saddr; pte_t *spte = NULL; + pte_t *pte; if (!vma_shareable(vma, addr)) - return; + return (pte_t *)pmd_alloc(mm, pud, addr); mutex_lock(&mapping->i_mmap_mutex); vma_prio_tree_foreach(svma, &iter, &mapping->i_mmap, idx, idx) { @@ -97,7 +105,9 @@ static void huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud) put_page(virt_to_page(spte)); spin_unlock(&mm->page_table_lock); out: + pte = (pte_t *)pmd_alloc(mm, pud, addr); mutex_unlock(&mapping->i_mmap_mutex); + return pte; } /* @@ -142,8 +152,9 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, } else { BUG_ON(sz != PMD_SIZE); if (pud_none(*pud)) - huge_pmd_share(mm, addr, pud); - pte = (pte_t *) pmd_alloc(mm, pud, addr); + pte = huge_pmd_share(mm, addr, pud); + else + pte = (pte_t *)pmd_alloc(mm, pud, addr); } } BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte)); diff --git a/trunk/arch/x86/mm/init.c b/trunk/arch/x86/mm/init.c index e0e6990723e9..ab1f6a93b527 100644 --- a/trunk/arch/x86/mm/init.c +++ b/trunk/arch/x86/mm/init.c @@ -319,7 +319,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, */ int devmem_is_allowed(unsigned long pagenr) { - if (pagenr <= 256) + if (pagenr < 256) return 1; if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c index 931930a96160..a718e0d23503 100644 --- a/trunk/arch/x86/mm/pageattr.c +++ b/trunk/arch/x86/mm/pageattr.c @@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages, /* * On success we use clflush, when the 
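huge_pmd_share() now always returns the pte via pmd_alloc(), keeping the shared and non-shared paths symmetric; this code sits behind ordinary hugetlb mappings, and pmd sharing itself applies to shared mappings of the same hugetlbfs object. A minimal sketch of creating such a mapping, assuming huge pages have been reserved (e.g. via /proc/sys/vm/nr_hugepages) and a 2 MB default huge page size; the MAP_HUGETLB fallback value is the x86 one:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#ifndef MAP_HUGETLB
#define MAP_HUGETLB 0x40000	/* from the kernel's mman headers */
#endif

#define LEN (2UL * 1024 * 1024)	/* one 2 MB huge page */

int main(void)
{
	void *p = mmap(NULL, LEN, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");
		return 1;
	}
	memset(p, 0, LEN);	/* fault in the huge page */
	printf("mapped a huge page at %p\n", p);
	munmap(p, LEN);
	return 0;
}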
CPU supports it to - * avoid the wbindv. If the CPU does not support it, in the - * error case, and during early boot (for EFI) we fall back - * to cpa_flush_all (which uses wbinvd): + * avoid the wbindv. If the CPU does not support it and in the + * error case we fall back to cpa_flush_all (which uses + * wbindv): */ - if (early_boot_irqs_disabled) - __cpa_flush_all((void *)(long)cache); - else if (!ret && cpu_has_clflush) { + if (!ret && cpu_has_clflush) { if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { cpa_flush_array(addr, numpages, cache, cpa.flags, pages); diff --git a/trunk/arch/x86/mm/srat.c b/trunk/arch/x86/mm/srat.c index 4599c3e8bcb6..4ddf497ca65b 100644 --- a/trunk/arch/x86/mm/srat.c +++ b/trunk/arch/x86/mm/srat.c @@ -142,23 +142,23 @@ static inline int save_add_info(void) {return 0;} #endif /* Callback for parsing of the Proximity Domain <-> Memory Area mappings */ -void __init +int __init acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) { u64 start, end; int node, pxm; if (srat_disabled()) - return; + return -1; if (ma->header.length != sizeof(struct acpi_srat_mem_affinity)) { bad_srat(); - return; + return -1; } if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) - return; + return -1; if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) - return; + return -1; start = ma->base_address; end = start + ma->length; pxm = ma->proximity_domain; @@ -168,12 +168,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) if (node < 0) { printk(KERN_ERR "SRAT: Too many proximity domains.\n"); bad_srat(); - return; + return -1; } if (numa_add_memblk(node, start, end) < 0) { bad_srat(); - return; + return -1; } node_set(node, numa_nodes_parsed); @@ -181,6 +181,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", node, pxm, (unsigned long long) start, (unsigned long long) end - 1); + return 0; } void __init acpi_numa_arch_fixup(void) {} diff --git a/trunk/arch/x86/pci/mmconfig-shared.c b/trunk/arch/x86/pci/mmconfig-shared.c index 937bcece7006..704b9ec043d7 100644 --- a/trunk/arch/x86/pci/mmconfig-shared.c +++ b/trunk/arch/x86/pci/mmconfig-shared.c @@ -585,7 +585,7 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header) while (i >= sizeof(struct acpi_mcfg_allocation)) { entries++; i -= sizeof(struct acpi_mcfg_allocation); - }; + } if (entries == 0) { pr_err(PREFIX "MMCONFIG has no entries\n"); return -ENODEV; diff --git a/trunk/arch/x86/platform/efi/Makefile b/trunk/arch/x86/platform/efi/Makefile index 73b8be0f3675..6db1cc4c7534 100644 --- a/trunk/arch/x86/platform/efi/Makefile +++ b/trunk/arch/x86/platform/efi/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o +obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o diff --git a/trunk/arch/x86/platform/efi/efi-bgrt.c b/trunk/arch/x86/platform/efi/efi-bgrt.c new file mode 100644 index 000000000000..f6a0c1b8e518 --- /dev/null +++ b/trunk/arch/x86/platform/efi/efi-bgrt.c @@ -0,0 +1,76 @@ +/* + * Copyright 2012 Intel Corporation + * Author: Josh Triplett + * + * Based on the bgrt driver: + * Copyright 2012 Red Hat, Inc + * Author: Matthew Garrett + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include + +struct acpi_table_bgrt *bgrt_tab; +void *bgrt_image; +size_t bgrt_image_size; + +struct bmp_header { + u16 id; + u32 size; +} __packed; + +void efi_bgrt_init(void) +{ + acpi_status status; + void __iomem *image; + bool ioremapped = false; + struct bmp_header bmp_header; + + if (acpi_disabled) + return; + + status = acpi_get_table("BGRT", 0, + (struct acpi_table_header **)&bgrt_tab); + if (ACPI_FAILURE(status)) + return; + + if (bgrt_tab->version != 1) + return; + if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) + return; + + image = efi_lookup_mapped_addr(bgrt_tab->image_address); + if (!image) { + image = ioremap(bgrt_tab->image_address, sizeof(bmp_header)); + ioremapped = true; + if (!image) + return; + } + + memcpy_fromio(&bmp_header, image, sizeof(bmp_header)); + if (ioremapped) + iounmap(image); + bgrt_image_size = bmp_header.size; + + bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL); + if (!bgrt_image) + return; + + if (ioremapped) { + image = ioremap(bgrt_tab->image_address, bmp_header.size); + if (!image) { + kfree(bgrt_image); + bgrt_image = NULL; + return; + } + } + + memcpy_fromio(bgrt_image, image, bgrt_image_size); + if (ioremapped) + iounmap(image); +} diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c index 2dc29f51e75a..aded2a91162a 100644 --- a/trunk/arch/x86/platform/efi/efi.c +++ b/trunk/arch/x86/platform/efi/efi.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include @@ -234,7 +235,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map( return status; } -static int efi_set_rtc_mmss(unsigned long nowtime) +static efi_status_t __init phys_efi_get_time(efi_time_t *tm, + efi_time_cap_t *tc) +{ + unsigned long flags; + efi_status_t status; + + spin_lock_irqsave(&rtc_lock, flags); + efi_call_phys_prelog(); + status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm), + virt_to_phys(tc)); + efi_call_phys_epilog(); + spin_unlock_irqrestore(&rtc_lock, flags); + return status; +} + +int efi_set_rtc_mmss(unsigned long nowtime) { int real_seconds, real_minutes; efi_status_t status; @@ -263,7 +279,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime) return 0; } -static unsigned long efi_get_time(void) +unsigned long efi_get_time(void) { efi_status_t status; efi_time_t eft; @@ -404,10 +420,21 @@ void __init efi_reserve_boot_services(void) } } -static void __init efi_free_boot_services(void) +static void __init efi_unmap_memmap(void) +{ + if (memmap.map) { + early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); + memmap.map = NULL; + } +} + +void __init efi_free_boot_services(void) { void *p; + if (!efi_native) + return; + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { efi_memory_desc_t *md = p; unsigned long long start = md->phys_addr; @@ -423,6 +450,8 @@ static void __init efi_free_boot_services(void) free_bootmem_late(start, size); } + + efi_unmap_memmap(); } static int __init efi_systab_init(void *phys) @@ -606,13 +635,18 @@ static int __init efi_runtime_init(void) } /* * We will only need *early* access to the following - * EFI runtime service before set_virtual_address_map + * two EFI runtime services before set_virtual_address_map * is invoked. */ + efi_phys.get_time = (efi_get_time_t *)runtime->get_time; efi_phys.set_virtual_address_map = (efi_set_virtual_address_map_t *) runtime->set_virtual_address_map; - + /* + * Make efi_get_time can be called before entering + * virtual mode. 
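efi_bgrt_init() above copies the firmware's BGRT boot logo (a BMP image) out of boot-services memory so it survives efi_free_boot_services() and can be exposed by the bgrt driver. A small sketch of inspecting it from user space, assuming the driver's usual /sys/firmware/acpi/bgrt/image sysfs path:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/firmware/acpi/bgrt/image", "rb");
	unsigned char hdr[6];

	if (!f) {
		perror("open bgrt image");
		return 1;
	}
	if (fread(hdr, 1, sizeof(hdr), f) == sizeof(hdr))
		/* Bytes 0-1: "BM"; bytes 2-5: little-endian BMP file size. */
		printf("BMP magic %c%c, %u bytes\n", hdr[0], hdr[1],
		       hdr[2] | hdr[3] << 8 | hdr[4] << 16 |
		       (unsigned)hdr[5] << 24);
	fclose(f);
	return 0;
}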
+ */ + efi.get_time = phys_efi_get_time; early_iounmap(runtime, sizeof(efi_runtime_services_t)); return 0; @@ -700,16 +734,23 @@ void __init efi_init(void) efi_enabled = 0; return; } +#ifdef CONFIG_X86_32 if (efi_native) { x86_platform.get_wallclock = efi_get_time; x86_platform.set_wallclock = efi_set_rtc_mmss; } +#endif #if EFI_DEBUG print_efi_memmap(); #endif } +void __init efi_late_init(void) +{ + efi_bgrt_init(); +} + void __init efi_set_executable(efi_memory_desc_t *md, bool executable) { u64 addr, npages; @@ -741,6 +782,34 @@ static void __init runtime_code_page_mkexec(void) } } +/* + * We can't ioremap data in EFI boot services RAM, because we've already mapped + * it as RAM. So, look it up in the existing EFI memory map instead. Only + * callable after efi_enter_virtual_mode and before efi_free_boot_services. + */ +void __iomem *efi_lookup_mapped_addr(u64 phys_addr) +{ + void *p; + if (WARN_ON(!memmap.map)) + return NULL; + for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { + efi_memory_desc_t *md = p; + u64 size = md->num_pages << EFI_PAGE_SHIFT; + u64 end = md->phys_addr + size; + if (!(md->attribute & EFI_MEMORY_RUNTIME) && + md->type != EFI_BOOT_SERVICES_CODE && + md->type != EFI_BOOT_SERVICES_DATA) + continue; + if (!md->virt_addr) + continue; + if (phys_addr >= md->phys_addr && phys_addr < end) { + phys_addr += md->virt_addr - md->phys_addr; + return (__force void __iomem *)(unsigned long)phys_addr; + } + } + return NULL; +} + /* * This function will switch the EFI runtime services to virtual mode. * Essentially, look through the EFI memmap and map every region that @@ -765,8 +834,10 @@ void __init efi_enter_virtual_mode(void) * non-native EFI */ - if (!efi_native) - goto out; + if (!efi_native) { + efi_unmap_memmap(); + return; + } /* Merge contiguous regions of the same type and attribute */ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { @@ -855,19 +926,13 @@ void __init efi_enter_virtual_mode(void) panic("EFI call to SetVirtualAddressMap() failed!"); } - /* - * Thankfully, it does seem that no runtime services other than - * SetVirtualAddressMap() will touch boot services code, so we can - * get rid of it all at this point - */ - efi_free_boot_services(); - /* * Now that EFI is in virtual mode, update the function * pointers in the runtime service table to the new virtual addresses. * * Call EFI services through wrapper functions. 
*/ + efi.runtime_version = efi_systab.fw_revision; efi.get_time = virt_efi_get_time; efi.set_time = virt_efi_set_time; efi.get_wakeup_time = virt_efi_get_wakeup_time; @@ -884,9 +949,6 @@ void __init efi_enter_virtual_mode(void) if (__supported_pte_mask & _PAGE_NX) runtime_code_page_mkexec(); -out: - early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); - memmap.map = NULL; kfree(new_memmap); } diff --git a/trunk/arch/x86/realmode/rm/Makefile b/trunk/arch/x86/realmode/rm/Makefile index b2d534cab25f..88692871823f 100644 --- a/trunk/arch/x86/realmode/rm/Makefile +++ b/trunk/arch/x86/realmode/rm/Makefile @@ -72,7 +72,7 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \ -Wall -Wstrict-prototypes \ -march=i386 -mregparm=3 \ -include $(srctree)/$(src)/../../boot/code16gcc.h \ - -fno-strict-aliasing -fomit-frame-pointer \ + -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ $(call cc-option, -ffreestanding) \ $(call cc-option, -fno-toplevel-reorder,\ $(call cc-option, -fno-unit-at-a-time)) \ diff --git a/trunk/arch/x86/syscalls/syscall_64.tbl b/trunk/arch/x86/syscalls/syscall_64.tbl index 51171aeff0dc..a582bfed95bb 100644 --- a/trunk/arch/x86/syscalls/syscall_64.tbl +++ b/trunk/arch/x86/syscalls/syscall_64.tbl @@ -60,8 +60,8 @@ 51 common getsockname sys_getsockname 52 common getpeername sys_getpeername 53 common socketpair sys_socketpair -54 common setsockopt sys_setsockopt -55 common getsockopt sys_getsockopt +54 64 setsockopt sys_setsockopt +55 64 getsockopt sys_getsockopt 56 common clone stub_clone 57 common fork stub_fork 58 common vfork stub_vfork @@ -318,7 +318,7 @@ 309 common getcpu sys_getcpu 310 64 process_vm_readv sys_process_vm_readv 311 64 process_vm_writev sys_process_vm_writev -312 64 kcmp sys_kcmp +312 common kcmp sys_kcmp # # x32-specific system call numbers start at 512 to avoid cache impact @@ -353,3 +353,5 @@ 538 x32 sendmmsg compat_sys_sendmmsg 539 x32 process_vm_readv compat_sys_process_vm_readv 540 x32 process_vm_writev compat_sys_process_vm_writev +541 x32 setsockopt compat_sys_setsockopt +542 x32 getsockopt compat_sys_getsockopt diff --git a/trunk/arch/x86/um/Kconfig b/trunk/arch/x86/um/Kconfig index 9926e11a772d..aeaff8bef2f1 100644 --- a/trunk/arch/x86/um/Kconfig +++ b/trunk/arch/x86/um/Kconfig @@ -21,6 +21,7 @@ config 64BIT config X86_32 def_bool !64BIT select HAVE_AOUT + select ARCH_WANT_IPC_PARSE_VERSION config X86_64 def_bool 64BIT diff --git a/trunk/arch/x86/um/shared/sysdep/kernel-offsets.h b/trunk/arch/x86/um/shared/sysdep/kernel-offsets.h index 5868526b5eef..46a9df99f3c5 100644 --- a/trunk/arch/x86/um/shared/sysdep/kernel-offsets.h +++ b/trunk/arch/x86/um/shared/sysdep/kernel-offsets.h @@ -7,9 +7,6 @@ #define DEFINE(sym, val) \ asm volatile("\n->" #sym " %0 " #val : : "i" (val)) -#define STR(x) #x -#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : ) - #define BLANK() asm volatile("\n->" : : ) #define OFFSET(sym, str, mem) \ diff --git a/trunk/arch/x86/um/shared/sysdep/syscalls.h b/trunk/arch/x86/um/shared/sysdep/syscalls.h index bd9a89b67e41..ca255a805ed9 100644 --- a/trunk/arch/x86/um/shared/sysdep/syscalls.h +++ b/trunk/arch/x86/um/shared/sysdep/syscalls.h @@ -1,3 +1,5 @@ +extern long sys_clone(unsigned long clone_flags, unsigned long newsp, + void __user *parent_tid, void __user *child_tid); #ifdef __i386__ #include "syscalls_32.h" #else diff --git a/trunk/arch/x86/um/signal.c b/trunk/arch/x86/um/signal.c index a508cea13503..ba7363ecf896 100644 --- a/trunk/arch/x86/um/signal.c +++ 
b/trunk/arch/x86/um/signal.c @@ -416,9 +416,6 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig, PT_REGS_AX(regs) = (unsigned long) sig; PT_REGS_DX(regs) = (unsigned long) 0; PT_REGS_CX(regs) = (unsigned long) 0; - - if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) - ptrace_notify(SIGTRAP); return 0; } @@ -466,9 +463,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig, PT_REGS_AX(regs) = (unsigned long) sig; PT_REGS_DX(regs) = (unsigned long) &frame->info; PT_REGS_CX(regs) = (unsigned long) &frame->uc; - - if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) - ptrace_notify(SIGTRAP); return 0; } diff --git a/trunk/arch/x86/um/sys_call_table_32.c b/trunk/arch/x86/um/sys_call_table_32.c index 68d1dc91b37b..b5408cecac6c 100644 --- a/trunk/arch/x86/um/sys_call_table_32.c +++ b/trunk/arch/x86/um/sys_call_table_32.c @@ -28,7 +28,7 @@ #define ptregs_execve sys_execve #define ptregs_iopl sys_iopl #define ptregs_vm86old sys_vm86old -#define ptregs_clone sys_clone +#define ptregs_clone i386_clone #define ptregs_vm86 sys_vm86 #define ptregs_sigaltstack sys_sigaltstack #define ptregs_vfork sys_vfork diff --git a/trunk/arch/x86/um/syscalls_32.c b/trunk/arch/x86/um/syscalls_32.c index b853e8600b9d..db444c7218fe 100644 --- a/trunk/arch/x86/um/syscalls_32.c +++ b/trunk/arch/x86/um/syscalls_32.c @@ -3,37 +3,24 @@ * Licensed under the GPL */ -#include "linux/sched.h" -#include "linux/shm.h" -#include "linux/ipc.h" -#include "linux/syscalls.h" -#include "asm/mman.h" -#include "asm/uaccess.h" -#include "asm/unistd.h" +#include +#include /* * The prototype on i386 is: * - * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr) + * int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls * * and the "newtls" arg. on i386 is read by copy_thread directly from the * register saved on the stack. 
*/ -long sys_clone(unsigned long clone_flags, unsigned long newsp, - int __user *parent_tid, void *newtls, int __user *child_tid) +long i386_clone(unsigned long clone_flags, unsigned long newsp, + int __user *parent_tid, void *newtls, int __user *child_tid) { - long ret; - - if (!newsp) - newsp = UPT_SP(¤t->thread.regs.regs); - - current->thread.forking = 1; - ret = do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, - child_tid); - current->thread.forking = 0; - return ret; + return sys_clone(clone_flags, newsp, parent_tid, child_tid); } + long sys_sigaction(int sig, const struct old_sigaction __user *act, struct old_sigaction __user *oact) { diff --git a/trunk/arch/x86/um/syscalls_64.c b/trunk/arch/x86/um/syscalls_64.c index f3d82bb6e15a..adb08eb5c22a 100644 --- a/trunk/arch/x86/um/syscalls_64.c +++ b/trunk/arch/x86/um/syscalls_64.c @@ -5,12 +5,9 @@ * Licensed under the GPL */ -#include "linux/linkage.h" -#include "linux/personality.h" -#include "linux/utsname.h" -#include "asm/prctl.h" /* XXX This should get the constants from libc */ -#include "asm/uaccess.h" -#include "os.h" +#include +#include /* XXX This should get the constants from libc */ +#include long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr) { @@ -79,20 +76,6 @@ long sys_arch_prctl(int code, unsigned long addr) return arch_prctl(current, code, (unsigned long __user *) addr); } -long sys_clone(unsigned long clone_flags, unsigned long newsp, - void __user *parent_tid, void __user *child_tid) -{ - long ret; - - if (!newsp) - newsp = UPT_SP(¤t->thread.regs.regs); - current->thread.forking = 1; - ret = do_fork(clone_flags, newsp, ¤t->thread.regs, 0, parent_tid, - child_tid); - current->thread.forking = 0; - return ret; -} - void arch_switch_to(struct task_struct *to) { if ((to->thread.arch.fs == 0) || (to->mm == NULL)) diff --git a/trunk/arch/x86/xen/enlighten.c b/trunk/arch/x86/xen/enlighten.c index bf4bda6d3e9a..1fbe75a95f15 100644 --- a/trunk/arch/x86/xen/enlighten.c +++ b/trunk/arch/x86/xen/enlighten.c @@ -31,7 +31,6 @@ #include #include #include -#include #include #include @@ -1453,6 +1452,10 @@ asmlinkage void __init xen_start_kernel(void) pci_request_acs(); xen_acpi_sleep_register(); + + /* Avoid searching for BIOS MP tables */ + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.mpparse.get_smp_config = x86_init_uint_noop; } #ifdef CONFIG_PCI /* PCI BIOS service won't work from a PV guest. */ @@ -1470,130 +1473,38 @@ asmlinkage void __init xen_start_kernel(void) #endif } -#ifdef CONFIG_XEN_PVHVM -/* - * The pfn containing the shared_info is located somewhere in RAM. This - * will cause trouble if the current kernel is doing a kexec boot into a - * new kernel. The new kernel (and its startup code) can not know where - * the pfn is, so it can not reserve the page. The hypervisor will - * continue to update the pfn, and as a result memory corruption occours - * in the new kernel. - * - * One way to work around this issue is to allocate a page in the - * xen-platform pci device's BAR memory range. But pci init is done very - * late and the shared_info page is already in use very early to read - * the pvclock. So moving the pfn from RAM to MMIO is racy because some - * code paths on other vcpus could access the pfn during the small - * window when the old pfn is moved to the new pfn. There is even a - * small window were the old pfn is not backed by a mfn, and during that - * time all reads return -1. 
- * - * Because it is not known upfront where the MMIO region is located it - * can not be used right from the start in xen_hvm_init_shared_info. - * - * To minimise trouble the move of the pfn is done shortly before kexec. - * This does not eliminate the race because all vcpus are still online - * when the syscore_ops will be called. But hopefully there is no work - * pending at this point in time. Also the syscore_op is run last which - * reduces the risk further. - */ - -static struct shared_info *xen_hvm_shared_info; - -static void xen_hvm_connect_shared_info(unsigned long pfn) +void __ref xen_hvm_init_shared_info(void) { + int cpu; struct xen_add_to_physmap xatp; + static struct shared_info *shared_info_page = 0; + if (!shared_info_page) + shared_info_page = (struct shared_info *) + extend_brk(PAGE_SIZE, PAGE_SIZE); xatp.domid = DOMID_SELF; xatp.idx = 0; xatp.space = XENMAPSPACE_shared_info; - xatp.gpfn = pfn; + xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT; if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) BUG(); -} -static void xen_hvm_set_shared_info(struct shared_info *sip) -{ - int cpu; - - HYPERVISOR_shared_info = sip; + HYPERVISOR_shared_info = (struct shared_info *)shared_info_page; /* xen_vcpu is a pointer to the vcpu_info struct in the shared_info * page, we use it in the event channel upcall and in some pvclock * related functions. We don't need the vcpu_info placement * optimizations because we don't use any pv_mmu or pv_irq op on * HVM. - * When xen_hvm_set_shared_info is run at boot time only vcpu 0 is - * online but xen_hvm_set_shared_info is run at resume time too and + * When xen_hvm_init_shared_info is run at boot time only vcpu 0 is + * online but xen_hvm_init_shared_info is run at resume time too and * in that case multiple vcpus might be online. */ for_each_online_cpu(cpu) { per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; } } -/* Reconnect the shared_info pfn to a mfn */ -void xen_hvm_resume_shared_info(void) -{ - xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT); -} - -#ifdef CONFIG_KEXEC -static struct shared_info *xen_hvm_shared_info_kexec; -static unsigned long xen_hvm_shared_info_pfn_kexec; - -/* Remember a pfn in MMIO space for kexec reboot */ -void __devinit xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn) -{ - xen_hvm_shared_info_kexec = sip; - xen_hvm_shared_info_pfn_kexec = pfn; -} - -static void xen_hvm_syscore_shutdown(void) -{ - struct xen_memory_reservation reservation = { - .domid = DOMID_SELF, - .nr_extents = 1, - }; - unsigned long prev_pfn; - int rc; - - if (!xen_hvm_shared_info_kexec) - return; - - prev_pfn = __pa(xen_hvm_shared_info) >> PAGE_SHIFT; - set_xen_guest_handle(reservation.extent_start, &prev_pfn); - - /* Move pfn to MMIO, disconnects previous pfn from mfn */ - xen_hvm_connect_shared_info(xen_hvm_shared_info_pfn_kexec); - - /* Update pointers, following hypercall is also a memory barrier */ - xen_hvm_set_shared_info(xen_hvm_shared_info_kexec); - - /* Allocate new mfn for previous pfn */ - do { - rc = HYPERVISOR_memory_op(XENMEM_populate_physmap, &reservation); - if (rc == 0) - msleep(123); - } while (rc == 0); - - /* Make sure the previous pfn is really connected to a (new) mfn */ - BUG_ON(rc != 1); -} - -static struct syscore_ops xen_hvm_syscore_ops = { - .shutdown = xen_hvm_syscore_shutdown, -}; -#endif - -/* Use a pfn in RAM, may move to MMIO before kexec. 
*/ -static void __init xen_hvm_init_shared_info(void) -{ - /* Remember pointer for resume */ - xen_hvm_shared_info = extend_brk(PAGE_SIZE, PAGE_SIZE); - xen_hvm_connect_shared_info(__pa(xen_hvm_shared_info) >> PAGE_SHIFT); - xen_hvm_set_shared_info(xen_hvm_shared_info); -} - +#ifdef CONFIG_XEN_PVHVM static void __init init_hvm_pv_info(void) { int major, minor; @@ -1644,9 +1555,6 @@ static void __init xen_hvm_guest_init(void) init_hvm_pv_info(); xen_hvm_init_shared_info(); -#ifdef CONFIG_KEXEC - register_syscore_ops(&xen_hvm_syscore_ops); -#endif if (xen_feature(XENFEAT_hvm_callback_vector)) xen_have_vector_callback = 1; diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c index b65a76133f4f..5141d808e751 100644 --- a/trunk/arch/x86/xen/mmu.c +++ b/trunk/arch/x86/xen/mmu.c @@ -1283,7 +1283,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus, cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; - if (start != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { + if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) { args->op.cmd = MMUEXT_INVLPG_MULTI; args->op.arg1.linear_addr = start; } diff --git a/trunk/arch/x86/xen/p2m.c b/trunk/arch/x86/xen/p2m.c index 64effdc6da94..72213da605f5 100644 --- a/trunk/arch/x86/xen/p2m.c +++ b/trunk/arch/x86/xen/p2m.c @@ -194,6 +194,13 @@ RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID * boundary violation will require three middle nodes. */ RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); +/* When we populate back during bootup, the amount of pages can vary. The + * max we have is seen is 395979, but that does not mean it can't be more. + * Some machines can have 3GB I/O holes even. With early_can_reuse_p2m_middle + * it can re-use Xen provided mfn_list array, so we only need to allocate at + * most three P2M top nodes. */ +RESERVE_BRK(p2m_populated, PAGE_SIZE * 3); + static inline unsigned p2m_top_index(unsigned long pfn) { BUG_ON(pfn >= MAX_P2M_PFN); @@ -570,12 +577,99 @@ static bool __init early_alloc_p2m(unsigned long pfn) } return true; } + +/* + * Skim over the P2M tree looking at pages that are either filled with + * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and + * replace the P2M leaf with a p2m_missing or p2m_identity. + * Stick the old page in the new P2M tree location. 
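/*
 * Editor's note: the per-leaf classification used by
 * early_can_reuse_p2m_middle() just below, reduced to a self-contained check
 * over one page of entries. A leaf is reclaimable when every slot is either
 * the 1:1 identity frame for its pfn or INVALID_P2M_ENTRY. The constants and
 * the identity-bit encoding here are illustrative stand-ins for the real
 * IDENTITY_FRAME()/INVALID_P2M_ENTRY definitions.
 */
#include <stdbool.h>
#include <stdint.h>

#define P2M_PER_PAGE_SKETCH  512
#define INVALID_ENTRY_SKETCH UINT64_MAX
#define IDENTITY_BIT_SKETCH  (UINT64_C(1) << 63)   /* assumed type bit */

static inline uint64_t identity_frame(uint64_t pfn)
{
        return pfn | IDENTITY_BIT_SKETCH;
}

/* True if the leaf can be replaced by p2m_identity or p2m_missing. */
static bool leaf_is_reclaimable(const uint64_t *p2m, uint64_t base_pfn)
{
        unsigned ident = 0, inval = 0;

        for (unsigned i = 0; i < P2M_PER_PAGE_SKETCH; i++) {
                if (p2m[i] == identity_frame(base_pfn + i))
                        ident++;
                else if (p2m[i] == INVALID_ENTRY_SKETCH)
                        inval++;
                else
                        return false;
        }
        return ident == P2M_PER_PAGE_SKETCH || inval == P2M_PER_PAGE_SKETCH;
}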
+ */ +bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn) +{ + unsigned topidx; + unsigned mididx; + unsigned ident_pfns; + unsigned inv_pfns; + unsigned long *p2m; + unsigned long *mid_mfn_p; + unsigned idx; + unsigned long pfn; + + /* We only look when this entails a P2M middle layer */ + if (p2m_index(set_pfn)) + return false; + + for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) { + topidx = p2m_top_index(pfn); + + if (!p2m_top[topidx]) + continue; + + if (p2m_top[topidx] == p2m_mid_missing) + continue; + + mididx = p2m_mid_index(pfn); + p2m = p2m_top[topidx][mididx]; + if (!p2m) + continue; + + if ((p2m == p2m_missing) || (p2m == p2m_identity)) + continue; + + if ((unsigned long)p2m == INVALID_P2M_ENTRY) + continue; + + ident_pfns = 0; + inv_pfns = 0; + for (idx = 0; idx < P2M_PER_PAGE; idx++) { + /* IDENTITY_PFNs are 1:1 */ + if (p2m[idx] == IDENTITY_FRAME(pfn + idx)) + ident_pfns++; + else if (p2m[idx] == INVALID_P2M_ENTRY) + inv_pfns++; + else + break; + } + if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE)) + goto found; + } + return false; +found: + /* Found one, replace old with p2m_identity or p2m_missing */ + p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); + /* And the other for save/restore.. */ + mid_mfn_p = p2m_top_mfn_p[topidx]; + /* NOTE: Even if it is a p2m_identity it should still be point to + * a page filled with INVALID_P2M_ENTRY entries. */ + mid_mfn_p[mididx] = virt_to_mfn(p2m_missing); + + /* Reset where we want to stick the old page in. */ + topidx = p2m_top_index(set_pfn); + mididx = p2m_mid_index(set_pfn); + + /* This shouldn't happen */ + if (WARN_ON(p2m_top[topidx] == p2m_mid_missing)) + early_alloc_p2m(set_pfn); + + if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing)) + return false; + + p2m_init(p2m); + p2m_top[topidx][mididx] = p2m; + mid_mfn_p = p2m_top_mfn_p[topidx]; + mid_mfn_p[mididx] = virt_to_mfn(p2m); + + return true; +} bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (unlikely(!__set_phys_to_machine(pfn, mfn))) { if (!early_alloc_p2m(pfn)) return false; + if (early_can_reuse_p2m_middle(pfn, mfn)) + return __set_phys_to_machine(pfn, mfn); + if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/)) return false; @@ -734,9 +828,6 @@ int m2p_add_override(unsigned long mfn, struct page *page, xen_mc_issue(PARAVIRT_LAZY_MMU); } - /* let's use dev_bus_addr to record the old mfn instead */ - kmap_op->dev_bus_addr = page->index; - page->index = (unsigned long) kmap_op; } spin_lock_irqsave(&m2p_override_lock, flags); list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); @@ -763,7 +854,8 @@ int m2p_add_override(unsigned long mfn, struct page *page, return 0; } EXPORT_SYMBOL_GPL(m2p_add_override); -int m2p_remove_override(struct page *page, bool clear_pte) +int m2p_remove_override(struct page *page, + struct gnttab_map_grant_ref *kmap_op) { unsigned long flags; unsigned long mfn; @@ -793,10 +885,8 @@ int m2p_remove_override(struct page *page, bool clear_pte) WARN_ON(!PagePrivate(page)); ClearPagePrivate(page); - if (clear_pte) { - struct gnttab_map_grant_ref *map_op = - (struct gnttab_map_grant_ref *) page->index; - set_phys_to_machine(pfn, map_op->dev_bus_addr); + set_phys_to_machine(pfn, page->index); + if (kmap_op != NULL) { if (!PageHighMem(page)) { struct multicall_space mcs; struct gnttab_unmap_grant_ref *unmap_op; @@ -808,13 +898,13 @@ int m2p_remove_override(struct page *page, bool clear_pte) * issued. 
In this case handle is going to -1 because * it hasn't been modified yet. */ - if (map_op->handle == -1) + if (kmap_op->handle == -1) xen_mc_flush(); /* - * Now if map_op->handle is negative it means that the + * Now if kmap_op->handle is negative it means that the * hypercall actually returned an error. */ - if (map_op->handle == GNTST_general_error) { + if (kmap_op->handle == GNTST_general_error) { printk(KERN_WARNING "m2p_remove_override: " "pfn %lx mfn %lx, failed to modify kernel mappings", pfn, mfn); @@ -824,8 +914,8 @@ int m2p_remove_override(struct page *page, bool clear_pte) mcs = xen_mc_entry( sizeof(struct gnttab_unmap_grant_ref)); unmap_op = mcs.args; - unmap_op->host_addr = map_op->host_addr; - unmap_op->handle = map_op->handle; + unmap_op->host_addr = kmap_op->host_addr; + unmap_op->handle = kmap_op->handle; unmap_op->dev_bus_addr = 0; MULTI_grant_table_op(mcs.mc, @@ -836,10 +926,9 @@ int m2p_remove_override(struct page *page, bool clear_pte) set_pte_at(&init_mm, address, ptep, pfn_pte(pfn, PAGE_KERNEL)); __flush_tlb_single(address); - map_op->host_addr = 0; + kmap_op->host_addr = 0; } - } else - set_phys_to_machine(pfn, page->index); + } /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present * somewhere in this domain, even before being added to the diff --git a/trunk/arch/x86/xen/setup.c b/trunk/arch/x86/xen/setup.c index ead85576d54a..e2d62d697b5d 100644 --- a/trunk/arch/x86/xen/setup.c +++ b/trunk/arch/x86/xen/setup.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -78,9 +79,16 @@ static void __init xen_add_extra_mem(u64 start, u64 size) memblock_reserve(start, size); xen_max_p2m_pfn = PFN_DOWN(start + size); + for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) { + unsigned long mfn = pfn_to_mfn(pfn); + + if (WARN(mfn == pfn, "Trying to over-write 1-1 mapping (pfn: %lx)\n", pfn)) + continue; + WARN(mfn != INVALID_P2M_ENTRY, "Trying to remove %lx which has %lx mfn!\n", + pfn, mfn); - for (pfn = PFN_DOWN(start); pfn <= xen_max_p2m_pfn; pfn++) __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + } } static unsigned long __init xen_do_chunk(unsigned long start, @@ -537,4 +545,7 @@ void __init xen_arch_setup(void) disable_cpufreq(); WARN_ON(set_pm_idle_to_default()); fiddle_vdso(); +#ifdef CONFIG_NUMA + numa_off = 1; +#endif } diff --git a/trunk/arch/x86/xen/smp.c b/trunk/arch/x86/xen/smp.c index f58dca7a6e52..353c50f18702 100644 --- a/trunk/arch/x86/xen/smp.c +++ b/trunk/arch/x86/xen/smp.c @@ -377,7 +377,8 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle) return rc; if (num_online_cpus() == 1) - alternatives_smp_switch(1); + /* Just in case we booted with a single CPU. 
*/ + alternatives_enable_smp(); rc = xen_smp_intr_init(cpu); if (rc) @@ -424,9 +425,6 @@ static void xen_cpu_die(unsigned int cpu) unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL); xen_uninit_lock_cpu(cpu); xen_teardown_timer(cpu); - - if (num_online_cpus() == 1) - alternatives_smp_switch(0); } static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ diff --git a/trunk/arch/x86/xen/suspend.c b/trunk/arch/x86/xen/suspend.c index ae8a00c39de4..45329c8c226e 100644 --- a/trunk/arch/x86/xen/suspend.c +++ b/trunk/arch/x86/xen/suspend.c @@ -30,7 +30,7 @@ void xen_arch_hvm_post_suspend(int suspend_cancelled) { #ifdef CONFIG_XEN_PVHVM int cpu; - xen_hvm_resume_shared_info(); + xen_hvm_init_shared_info(); xen_callback_vector(); xen_unplug_emulated_devices(); if (xen_feature(XENFEAT_hvm_safe_pvclock)) { diff --git a/trunk/arch/x86/xen/xen-ops.h b/trunk/arch/x86/xen/xen-ops.h index 1e4329e04e0f..202d4c150154 100644 --- a/trunk/arch/x86/xen/xen-ops.h +++ b/trunk/arch/x86/xen/xen-ops.h @@ -41,7 +41,7 @@ void xen_enable_syscall(void); void xen_vcpu_restore(void); void xen_callback_vector(void); -void xen_hvm_resume_shared_info(void); +void xen_hvm_init_shared_info(void); void xen_unplug_emulated_devices(void); void __init xen_build_dynamic_phys_to_machine(void); diff --git a/trunk/arch/xtensa/kernel/process.c b/trunk/arch/xtensa/kernel/process.c index 2c8d6a3d250a..bc44311aa18c 100644 --- a/trunk/arch/xtensa/kernel/process.c +++ b/trunk/arch/xtensa/kernel/process.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -110,8 +111,10 @@ void cpu_idle(void) /* endless idle loop with no priority at all */ while (1) { + rcu_idle_enter(); while (!need_resched()) platform_idle(); + rcu_idle_exit(); schedule_preempt_disabled(); } } diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c index 4b4dbdfbca89..ee3cb3a5e278 100644 --- a/trunk/block/blk-core.c +++ b/trunk/block/blk-core.c @@ -2254,9 +2254,11 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) error_type = "I/O"; break; } - printk(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", - error_type, req->rq_disk ? req->rq_disk->disk_name : "?", - (unsigned long long)blk_rq_pos(req)); + printk_ratelimited(KERN_ERR "end_request: %s error, dev %s, sector %llu\n", + error_type, req->rq_disk ? + req->rq_disk->disk_name : "?", + (unsigned long long)blk_rq_pos(req)); + } blk_account_io_completion(req, nr_bytes); diff --git a/trunk/block/blk-lib.c b/trunk/block/blk-lib.c index 2b461b496a78..19cc761cacb2 100644 --- a/trunk/block/blk-lib.c +++ b/trunk/block/blk-lib.c @@ -44,6 +44,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, struct request_queue *q = bdev_get_queue(bdev); int type = REQ_WRITE | REQ_DISCARD; unsigned int max_discard_sectors; + unsigned int granularity, alignment, mask; struct bio_batch bb; struct bio *bio; int ret = 0; @@ -54,18 +55,20 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, if (!blk_queue_discard(q)) return -EOPNOTSUPP; + /* Zero-sector (unknown) and one-sector granularities are the same. */ + granularity = max(q->limits.discard_granularity >> 9, 1U); + mask = granularity - 1; + alignment = (bdev_discard_alignment(bdev) >> 9) & mask; + /* * Ensure that max_discard_sectors is of the proper - * granularity + * granularity, so that requests stay aligned after a split. 
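/*
 * Editor's note: the alignment arithmetic in the blkdev_issue_discard() hunk
 * that follows, pulled out as a stand-alone sketch. Inputs are in 512-byte
 * sectors; granularity is assumed to be a power of two, matching the
 * kernel's mask arithmetic. The function and variable names are
 * illustrative.
 */
#include <stdint.h>

static uint64_t round_down_sketch(uint64_t v, uint64_t g)
{
        return v - (v % g);
}

/* How many sectors to discard in this iteration, starting at 'sector'. */
static uint64_t clamp_discard(uint64_t sector, uint64_t nr_sects,
                              uint64_t max_sects, uint32_t granularity,
                              uint32_t alignment)
{
        uint64_t req  = nr_sects < max_sects ? nr_sects : max_sects;
        uint64_t end  = sector + req;
        uint64_t mask = granularity - 1;

        /* Splitting, and the next request would start misaligned: stop early. */
        if (req < nr_sects && (end & mask) != alignment)
                end = round_down_sketch(end - alignment, granularity) + alignment;

        return end - sector;
}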
*/ max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); + max_discard_sectors = round_down(max_discard_sectors, granularity); if (unlikely(!max_discard_sectors)) { /* Avoid infinite loop below. Being cautious never hurts. */ return -EOPNOTSUPP; - } else if (q->limits.discard_granularity) { - unsigned int disc_sects = q->limits.discard_granularity >> 9; - - max_discard_sectors &= ~(disc_sects - 1); } if (flags & BLKDEV_DISCARD_SECURE) { @@ -79,25 +82,37 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, bb.wait = &wait; while (nr_sects) { + unsigned int req_sects; + sector_t end_sect; + bio = bio_alloc(gfp_mask, 1); if (!bio) { ret = -ENOMEM; break; } + req_sects = min_t(sector_t, nr_sects, max_discard_sectors); + + /* + * If splitting a request, and the next starting sector would be + * misaligned, stop the discard at the previous aligned sector. + */ + end_sect = sector + req_sects; + if (req_sects < nr_sects && (end_sect & mask) != alignment) { + end_sect = + round_down(end_sect - alignment, granularity) + + alignment; + req_sects = end_sect - sector; + } + bio->bi_sector = sector; bio->bi_end_io = bio_batch_end_io; bio->bi_bdev = bdev; bio->bi_private = &bb; - if (nr_sects > max_discard_sectors) { - bio->bi_size = max_discard_sectors << 9; - nr_sects -= max_discard_sectors; - sector += max_discard_sectors; - } else { - bio->bi_size = nr_sects << 9; - nr_sects = 0; - } + bio->bi_size = req_sects << 9; + nr_sects -= req_sects; + sector = end_sect; atomic_inc(&bb.done); submit_bio(type, bio); diff --git a/trunk/block/blk-merge.c b/trunk/block/blk-merge.c index 160035f54882..e76279e41162 100644 --- a/trunk/block/blk-merge.c +++ b/trunk/block/blk-merge.c @@ -110,6 +110,49 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, return 0; } +static void +__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, + struct scatterlist *sglist, struct bio_vec **bvprv, + struct scatterlist **sg, int *nsegs, int *cluster) +{ + + int nbytes = bvec->bv_len; + + if (*bvprv && *cluster) { + if ((*sg)->length + nbytes > queue_max_segment_size(q)) + goto new_segment; + + if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec)) + goto new_segment; + if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) + goto new_segment; + + (*sg)->length += nbytes; + } else { +new_segment: + if (!*sg) + *sg = sglist; + else { + /* + * If the driver previously mapped a shorter + * list, we could see a termination bit + * prematurely unless it fully inits the sg + * table on each mapping. We KNOW that there + * must be more entries here or the driver + * would be buggy, so force clear the + * termination bit to avoid doing a full + * sg_init_table() in drivers for each command. + */ + (*sg)->page_link &= ~0x02; + *sg = sg_next(*sg); + } + + sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); + (*nsegs)++; + } + *bvprv = bvec; +} + /* * map a request to scatterlist, return number of sg entries setup. 
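/*
 * Editor's note: the coalescing rule factored into __blk_segment_map_sg()
 * above, reduced to plain (address, length) runs. Two adjacent chunks merge
 * when they are physically contiguous and the merged run stays within the
 * queue's maximum segment size; the segment-boundary check is omitted for
 * brevity. Types and names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>

struct chunk_sketch {
        uint64_t addr;
        uint32_t len;
};

/* Coalesce in place; returns the number of resulting segments. */
static size_t map_segments(struct chunk_sketch *c, size_t n, uint32_t max_seg)
{
        size_t out = 0;

        for (size_t i = 0; i < n; i++) {
                if (out &&
                    c[out - 1].addr + c[out - 1].len == c[i].addr &&
                    c[out - 1].len + c[i].len <= max_seg) {
                        c[out - 1].len += c[i].len;     /* extend previous segment */
                } else {
                        c[out++] = c[i];                /* start a new segment */
                }
        }
        return out;
}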
Caller * must make sure sg can hold rq->nr_phys_segments entries @@ -131,41 +174,8 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, bvprv = NULL; sg = NULL; rq_for_each_segment(bvec, rq, iter) { - int nbytes = bvec->bv_len; - - if (bvprv && cluster) { - if (sg->length + nbytes > queue_max_segment_size(q)) - goto new_segment; - - if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) - goto new_segment; - if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec)) - goto new_segment; - - sg->length += nbytes; - } else { -new_segment: - if (!sg) - sg = sglist; - else { - /* - * If the driver previously mapped a shorter - * list, we could see a termination bit - * prematurely unless it fully inits the sg - * table on each mapping. We KNOW that there - * must be more entries here or the driver - * would be buggy, so force clear the - * termination bit to avoid doing a full - * sg_init_table() in drivers for each command. - */ - sg->page_link &= ~0x02; - sg = sg_next(sg); - } - - sg_set_page(sg, bvec->bv_page, nbytes, bvec->bv_offset); - nsegs++; - } - bvprv = bvec; + __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, + &nsegs, &cluster); } /* segments in rq */ @@ -199,6 +209,43 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq, } EXPORT_SYMBOL(blk_rq_map_sg); +/** + * blk_bio_map_sg - map a bio to a scatterlist + * @q: request_queue in question + * @bio: bio being mapped + * @sglist: scatterlist being mapped + * + * Note: + * Caller must make sure sg can hold bio->bi_phys_segments entries + * + * Will return the number of sg entries setup + */ +int blk_bio_map_sg(struct request_queue *q, struct bio *bio, + struct scatterlist *sglist) +{ + struct bio_vec *bvec, *bvprv; + struct scatterlist *sg; + int nsegs, cluster; + unsigned long i; + + nsegs = 0; + cluster = blk_queue_cluster(q); + + bvprv = NULL; + sg = NULL; + bio_for_each_segment(bvec, bio, i) { + __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, + &nsegs, &cluster); + } /* segments in bio */ + + if (sg) + sg_mark_end(sg); + + BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments); + return nsegs; +} +EXPORT_SYMBOL(blk_bio_map_sg); + static inline int ll_new_hw_segment(struct request_queue *q, struct request *req, struct bio *bio) diff --git a/trunk/block/genhd.c b/trunk/block/genhd.c index cac7366957c3..d839723303c8 100644 --- a/trunk/block/genhd.c +++ b/trunk/block/genhd.c @@ -835,7 +835,7 @@ static void disk_seqf_stop(struct seq_file *seqf, void *v) static void *show_partition_start(struct seq_file *seqf, loff_t *pos) { - static void *p; + void *p; p = disk_seqf_start(seqf, pos); if (!IS_ERR_OR_NULL(p) && !*pos) diff --git a/trunk/block/ioctl.c b/trunk/block/ioctl.c index 4476e0e85d16..4a85096f5410 100644 --- a/trunk/block/ioctl.c +++ b/trunk/block/ioctl.c @@ -41,7 +41,7 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user sizeof(long long) > sizeof(long)) { long pstart = start, plength = length; if (pstart != start || plength != length - || pstart < 0 || plength < 0) + || pstart < 0 || plength < 0 || partno > 65535) return -EINVAL; } diff --git a/trunk/crypto/authenc.c b/trunk/crypto/authenc.c index 5ef7ba6b6a76..d0583a4489e6 100644 --- a/trunk/crypto/authenc.c +++ b/trunk/crypto/authenc.c @@ -336,7 +336,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, cryptlen += ivsize; } - if (sg_is_last(assoc)) { + if (req->assoclen && sg_is_last(assoc)) { authenc_ahash_fn = crypto_authenc_ahash; sg_init_table(asg, 2); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); @@ 
-490,7 +490,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, cryptlen += ivsize; } - if (sg_is_last(assoc)) { + if (req->assoclen && sg_is_last(assoc)) { authenc_ahash_fn = crypto_authenc_ahash; sg_init_table(asg, 2); sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset); diff --git a/trunk/drivers/acpi/Kconfig b/trunk/drivers/acpi/Kconfig index 80998958cf45..119d58db8342 100644 --- a/trunk/drivers/acpi/Kconfig +++ b/trunk/drivers/acpi/Kconfig @@ -385,8 +385,8 @@ config ACPI_CUSTOM_METHOD to override that restriction). config ACPI_BGRT - tristate "Boottime Graphics Resource Table support" - default n + bool "Boottime Graphics Resource Table support" + depends on EFI help This driver adds support for exposing the ACPI Boottime Graphics Resource Table, which allows the operating system to obtain diff --git a/trunk/drivers/acpi/ac.c b/trunk/drivers/acpi/ac.c index ac7034129f3f..d5fdd36190cc 100644 --- a/trunk/drivers/acpi/ac.c +++ b/trunk/drivers/acpi/ac.c @@ -69,7 +69,9 @@ static const struct acpi_device_id ac_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ac_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); static struct acpi_driver acpi_ac_driver = { @@ -313,6 +315,7 @@ static int acpi_ac_add(struct acpi_device *device) return result; } +#ifdef CONFIG_PM_SLEEP static int acpi_ac_resume(struct device *dev) { struct acpi_ac *ac; @@ -332,6 +335,7 @@ static int acpi_ac_resume(struct device *dev) kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); return 0; } +#endif static int acpi_ac_remove(struct acpi_device *device, int type) { diff --git a/trunk/drivers/acpi/acpica/achware.h b/trunk/drivers/acpi/acpica/achware.h index 5ccb99ae3a6f..5de4ec72766d 100644 --- a/trunk/drivers/acpi/acpica/achware.h +++ b/trunk/drivers/acpi/acpica/achware.h @@ -83,22 +83,22 @@ acpi_status acpi_hw_clear_acpi_status(void); /* * hwsleep - sleep/wake support (Legacy sleep registers) */ -acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_legacy_sleep(u8 sleep_state); -acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state); -acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags); +acpi_status acpi_hw_legacy_wake(u8 sleep_state); /* * hwesleep - sleep/wake support (Extended FADT-V5 sleep registers) */ void acpi_hw_execute_sleep_method(char *method_name, u32 integer_argument); -acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_sleep(u8 sleep_state); -acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_wake_prep(u8 sleep_state); -acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags); +acpi_status acpi_hw_extended_wake(u8 sleep_state); /* * hwvalid - Port I/O with validation diff --git a/trunk/drivers/acpi/acpica/hwesleep.c b/trunk/drivers/acpi/acpica/hwesleep.c index 48518dac5342..94996f9ae3ad 100644 --- a/trunk/drivers/acpi/acpica/hwesleep.c +++ b/trunk/drivers/acpi/acpica/hwesleep.c @@ -90,7 +90,6 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument) * FUNCTION: acpi_hw_extended_sleep * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -100,7 +99,7 @@ void acpi_hw_execute_sleep_method(char *method_pathname, u32 integer_argument) * 
******************************************************************************/ -acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_sleep(u8 sleep_state) { acpi_status status; u8 sleep_type_value; @@ -125,12 +124,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) acpi_gbl_system_awake_and_running = FALSE; - /* Optionally execute _GTS (Going To Sleep) */ - - if (flags & ACPI_EXECUTE_GTS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state); - } - /* Flush caches, as per ACPI specification */ ACPI_FLUSH_CPU_CACHE(); @@ -172,7 +165,6 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_extended_wake_prep * * PARAMETERS: sleep_state - Which sleep state we just exited - * flags - ACPI_EXECUTE_BFS to run optional method * * RETURN: Status * @@ -181,7 +173,7 @@ acpi_status acpi_hw_extended_sleep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_wake_prep(u8 sleep_state) { acpi_status status; u8 sleep_type_value; @@ -200,11 +192,6 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) &acpi_gbl_FADT.sleep_control); } - /* Optionally execute _BFS (Back From Sleep) */ - - if (flags & ACPI_EXECUTE_BFS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state); - } return_ACPI_STATUS(AE_OK); } @@ -222,7 +209,7 @@ acpi_status acpi_hw_extended_wake_prep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_extended_wake(u8 sleep_state, u8 flags) +acpi_status acpi_hw_extended_wake(u8 sleep_state) { ACPI_FUNCTION_TRACE(hw_extended_wake); diff --git a/trunk/drivers/acpi/acpica/hwsleep.c b/trunk/drivers/acpi/acpica/hwsleep.c index 9960fe9ef533..3fddde056a5e 100644 --- a/trunk/drivers/acpi/acpica/hwsleep.c +++ b/trunk/drivers/acpi/acpica/hwsleep.c @@ -56,7 +56,6 @@ ACPI_MODULE_NAME("hwsleep") * FUNCTION: acpi_hw_legacy_sleep * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -64,7 +63,7 @@ ACPI_MODULE_NAME("hwsleep") * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED * ******************************************************************************/ -acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_sleep(u8 sleep_state) { struct acpi_bit_register_info *sleep_type_reg_info; struct acpi_bit_register_info *sleep_enable_reg_info; @@ -110,12 +109,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) return_ACPI_STATUS(status); } - /* Optionally execute _GTS (Going To Sleep) */ - - if (flags & ACPI_EXECUTE_GTS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__GTS, sleep_state); - } - /* Get current value of PM1A control */ status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, @@ -214,7 +207,6 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_legacy_wake_prep * * PARAMETERS: sleep_state - Which sleep state we just exited - * flags - ACPI_EXECUTE_BFS to run optional method * * RETURN: Status * @@ -224,7 +216,7 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state) { acpi_status status; struct 
acpi_bit_register_info *sleep_type_reg_info; @@ -275,11 +267,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) } } - /* Optionally execute _BFS (Back From Sleep) */ - - if (flags & ACPI_EXECUTE_BFS) { - acpi_hw_execute_sleep_method(METHOD_PATHNAME__BFS, sleep_state); - } return_ACPI_STATUS(status); } @@ -288,7 +275,6 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) * FUNCTION: acpi_hw_legacy_wake * * PARAMETERS: sleep_state - Which sleep state we just exited - * flags - Reserved, set to zero * * RETURN: Status * @@ -297,7 +283,7 @@ acpi_status acpi_hw_legacy_wake_prep(u8 sleep_state, u8 flags) * ******************************************************************************/ -acpi_status acpi_hw_legacy_wake(u8 sleep_state, u8 flags) +acpi_status acpi_hw_legacy_wake(u8 sleep_state) { acpi_status status; diff --git a/trunk/drivers/acpi/acpica/hwxfsleep.c b/trunk/drivers/acpi/acpica/hwxfsleep.c index f8684bfe7907..1f165a750ae2 100644 --- a/trunk/drivers/acpi/acpica/hwxfsleep.c +++ b/trunk/drivers/acpi/acpica/hwxfsleep.c @@ -50,7 +50,7 @@ ACPI_MODULE_NAME("hwxfsleep") /* Local prototypes */ static acpi_status -acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id); +acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id); /* * Dispatch table used to efficiently branch to the various sleep @@ -235,7 +235,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_s4bios) * ******************************************************************************/ static acpi_status -acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) +acpi_hw_sleep_dispatch(u8 sleep_state, u32 function_id) { acpi_status status; struct acpi_sleep_functions *sleep_functions = @@ -248,11 +248,11 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) * use the extended sleep registers */ if (acpi_gbl_reduced_hardware || acpi_gbl_FADT.sleep_control.address) { - status = sleep_functions->extended_function(sleep_state, flags); + status = sleep_functions->extended_function(sleep_state); } else { /* Legacy sleep */ - status = sleep_functions->legacy_function(sleep_state, flags); + status = sleep_functions->legacy_function(sleep_state); } return (status); @@ -262,7 +262,7 @@ acpi_hw_sleep_dispatch(u8 sleep_state, u8 flags, u32 function_id) * For the case where reduced-hardware-only code is being generated, * we know that only the extended sleep registers are available */ - status = sleep_functions->extended_function(sleep_state, flags); + status = sleep_functions->extended_function(sleep_state); return (status); #endif /* !ACPI_REDUCED_HARDWARE */ @@ -349,7 +349,6 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep) * FUNCTION: acpi_enter_sleep_state * * PARAMETERS: sleep_state - Which sleep state to enter - * flags - ACPI_EXECUTE_GTS to run optional method * * RETURN: Status * @@ -357,7 +356,7 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep) * THIS FUNCTION MUST BE CALLED WITH INTERRUPTS DISABLED * ******************************************************************************/ -acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) +acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state) { acpi_status status; @@ -371,7 +370,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags) } status = - acpi_hw_sleep_dispatch(sleep_state, flags, ACPI_SLEEP_FUNCTION_ID); + acpi_hw_sleep_dispatch(sleep_state, ACPI_SLEEP_FUNCTION_ID); return_ACPI_STATUS(status); } @@ -391,14 +390,14 @@ ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state) * Called with 
interrupts DISABLED. * ******************************************************************************/ -acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags) +acpi_status acpi_leave_sleep_state_prep(u8 sleep_state) { acpi_status status; ACPI_FUNCTION_TRACE(acpi_leave_sleep_state_prep); status = - acpi_hw_sleep_dispatch(sleep_state, flags, + acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_PREP_FUNCTION_ID); return_ACPI_STATUS(status); } @@ -423,8 +422,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state) ACPI_FUNCTION_TRACE(acpi_leave_sleep_state); - - status = acpi_hw_sleep_dispatch(sleep_state, 0, ACPI_WAKE_FUNCTION_ID); + status = acpi_hw_sleep_dispatch(sleep_state, ACPI_WAKE_FUNCTION_ID); return_ACPI_STATUS(status); } diff --git a/trunk/drivers/acpi/acpica/tbxface.c b/trunk/drivers/acpi/acpica/tbxface.c index ea4c6d52605a..29e51bc01383 100644 --- a/trunk/drivers/acpi/acpica/tbxface.c +++ b/trunk/drivers/acpi/acpica/tbxface.c @@ -387,6 +387,7 @@ acpi_get_table_with_size(char *signature, return (AE_NOT_FOUND); } +ACPI_EXPORT_SYMBOL(acpi_get_table_with_size) acpi_status acpi_get_table(char *signature, diff --git a/trunk/drivers/acpi/battery.c b/trunk/drivers/acpi/battery.c index ff2c876ec412..45e3e1759fb8 100644 --- a/trunk/drivers/acpi/battery.c +++ b/trunk/drivers/acpi/battery.c @@ -1052,6 +1052,7 @@ static int acpi_battery_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP /* this is needed to learn about changes made in suspended state */ static int acpi_battery_resume(struct device *dev) { @@ -1068,6 +1069,7 @@ static int acpi_battery_resume(struct device *dev) acpi_battery_update(battery); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume); diff --git a/trunk/drivers/acpi/bgrt.c b/trunk/drivers/acpi/bgrt.c index 6680df36b963..be6039958545 100644 --- a/trunk/drivers/acpi/bgrt.c +++ b/trunk/drivers/acpi/bgrt.c @@ -1,5 +1,6 @@ /* * Copyright 2012 Red Hat, Inc + * Copyright 2012 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -11,20 +12,10 @@ #include #include #include -#include -#include -#include +#include -static struct acpi_table_bgrt *bgrt_tab; static struct kobject *bgrt_kobj; -struct bmp_header { - u16 id; - u32 size; -} __attribute ((packed)); - -static struct bmp_header bmp_header; - static ssize_t show_version(struct device *dev, struct device_attribute *attr, char *buf) { @@ -63,18 +54,7 @@ static DEVICE_ATTR(yoffset, S_IRUGO, show_yoffset, NULL); static ssize_t show_image(struct file *file, struct kobject *kobj, struct bin_attribute *attr, char *buf, loff_t off, size_t count) { - int size = attr->size; - void __iomem *image = attr->private; - - if (off >= size) { - count = 0; - } else { - if (off + count > size) - count = size - off; - - memcpy_fromio(buf, image+off, count); - } - + memcpy(buf, attr->private + off, count); return count; } @@ -101,45 +81,18 @@ static struct attribute_group bgrt_attribute_group = { static int __init bgrt_init(void) { - acpi_status status; int ret; - void __iomem *bgrt; - if (acpi_disabled) - return -ENODEV; - - status = acpi_get_table("BGRT", 0, - (struct acpi_table_header **)&bgrt_tab); - - if (ACPI_FAILURE(status)) + if (!bgrt_image) return -ENODEV; sysfs_bin_attr_init(&image_attr); - - bgrt = ioremap(bgrt_tab->image_address, sizeof(struct bmp_header)); - - if (!bgrt) { - ret = -EINVAL; - goto out_err; - } - - memcpy_fromio(&bmp_header, bgrt, 
sizeof(bmp_header)); - image_attr.size = bmp_header.size; - iounmap(bgrt); - - image_attr.private = ioremap(bgrt_tab->image_address, image_attr.size); - - if (!image_attr.private) { - ret = -EINVAL; - goto out_err; - } - + image_attr.private = bgrt_image; + image_attr.size = bgrt_image_size; bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj); - if (!bgrt_kobj) { - ret = -EINVAL; - goto out_iounmap; - } + if (!bgrt_kobj) + return -EINVAL; ret = sysfs_create_group(bgrt_kobj, &bgrt_attribute_group); if (ret) @@ -155,22 +108,11 @@ static int __init bgrt_init(void) sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group); out_kobject: kobject_put(bgrt_kobj); -out_iounmap: - iounmap(image_attr.private); -out_err: return ret; } -static void __exit bgrt_exit(void) -{ - iounmap(image_attr.private); - sysfs_remove_group(bgrt_kobj, &bgrt_attribute_group); - sysfs_remove_bin_file(bgrt_kobj, &image_attr); -} - module_init(bgrt_init); -module_exit(bgrt_exit); -MODULE_AUTHOR("Matthew Garrett"); +MODULE_AUTHOR("Matthew Garrett, Josh Triplett "); MODULE_DESCRIPTION("BGRT boot graphic support"); MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/acpi/bus.c b/trunk/drivers/acpi/bus.c index 9628652e080c..e0596954290b 100644 --- a/trunk/drivers/acpi/bus.c +++ b/trunk/drivers/acpi/bus.c @@ -237,6 +237,16 @@ static int __acpi_bus_get_power(struct acpi_device *device, int *state) } else if (result == ACPI_STATE_D3_HOT) { result = ACPI_STATE_D3; } + + /* + * If we were unsure about the device parent's power state up to this + * point, the fact that the device is in D0 implies that the parent has + * to be in D0 too. + */ + if (device->parent && device->parent->power.state == ACPI_STATE_UNKNOWN + && result == ACPI_STATE_D0) + device->parent->power.state = ACPI_STATE_D0; + *state = result; out: diff --git a/trunk/drivers/acpi/button.c b/trunk/drivers/acpi/button.c index 79d4c22f7a6d..314a3b84bbc7 100644 --- a/trunk/drivers/acpi/button.c +++ b/trunk/drivers/acpi/button.c @@ -78,7 +78,9 @@ static int acpi_button_add(struct acpi_device *device); static int acpi_button_remove(struct acpi_device *device, int type); static void acpi_button_notify(struct acpi_device *device, u32 event); +#ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume); static struct acpi_driver acpi_button_driver = { @@ -310,6 +312,7 @@ static void acpi_button_notify(struct acpi_device *device, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int acpi_button_resume(struct device *dev) { struct acpi_device *device = to_acpi_device(dev); @@ -319,6 +322,7 @@ static int acpi_button_resume(struct device *dev) return acpi_lid_send_state(device); return 0; } +#endif static int acpi_button_add(struct acpi_device *device) { diff --git a/trunk/drivers/acpi/fan.c b/trunk/drivers/acpi/fan.c index 669d9ee80d16..bc36a476f1ab 100644 --- a/trunk/drivers/acpi/fan.c +++ b/trunk/drivers/acpi/fan.c @@ -53,8 +53,10 @@ static const struct acpi_device_id fan_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, fan_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev); static int acpi_fan_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume); static struct acpi_driver acpi_fan_driver = { @@ -184,6 +186,7 @@ static int acpi_fan_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_fan_suspend(struct device *dev) { if (!dev) @@ -207,6 +210,7 @@ static int 
acpi_fan_resume(struct device *dev) return result; } +#endif static int __init acpi_fan_init(void) { diff --git a/trunk/drivers/acpi/numa.c b/trunk/drivers/acpi/numa.c index e56f3be7b07d..cb31298ca684 100644 --- a/trunk/drivers/acpi/numa.c +++ b/trunk/drivers/acpi/numa.c @@ -237,6 +237,8 @@ acpi_parse_processor_affinity(struct acpi_subtable_header *header, return 0; } +static int __initdata parsed_numa_memblks; + static int __init acpi_parse_memory_affinity(struct acpi_subtable_header * header, const unsigned long end) @@ -250,8 +252,8 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header, acpi_table_print_srat_entry(header); /* let architecture-dependent part to do it */ - acpi_numa_memory_affinity_init(memory_affinity); - + if (!acpi_numa_memory_affinity_init(memory_affinity)) + parsed_numa_memblks++; return 0; } @@ -304,8 +306,10 @@ int __init acpi_numa_init(void) acpi_numa_arch_fixup(); - if (cnt <= 0) - return cnt ?: -ENOENT; + if (cnt < 0) + return cnt; + else if (!parsed_numa_memblks) + return -ENOENT; return 0; } diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c index ec54014c321c..72a2c98bc429 100644 --- a/trunk/drivers/acpi/pci_root.c +++ b/trunk/drivers/acpi/pci_root.c @@ -573,8 +573,15 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device) OSC_CLOCK_PWR_CAPABILITY_SUPPORT; if (pci_msi_enabled()) flags |= OSC_MSI_SUPPORT; - if (flags != base_flags) - acpi_pci_osc_support(root, flags); + if (flags != base_flags) { + status = acpi_pci_osc_support(root, flags); + if (ACPI_FAILURE(status)) { + dev_info(root->bus->bridge, "ACPI _OSC support " + "notification failed, disabling PCIe ASPM\n"); + pcie_no_aspm(); + flags = base_flags; + } + } if (!pcie_ports_disabled && (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) { diff --git a/trunk/drivers/acpi/power.c b/trunk/drivers/acpi/power.c index 215ecd097408..40e38a06ba85 100644 --- a/trunk/drivers/acpi/power.c +++ b/trunk/drivers/acpi/power.c @@ -67,7 +67,9 @@ static const struct acpi_device_id power_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, power_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_power_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_power_pm, NULL, acpi_power_resume); static struct acpi_driver acpi_power_driver = { @@ -105,6 +107,7 @@ struct acpi_power_resource { /* List of devices relying on this power resource */ struct acpi_power_resource_device *devices; + struct mutex devices_lock; }; static struct list_head acpi_power_resource_list; @@ -223,7 +226,6 @@ static void acpi_power_on_device(struct acpi_power_managed_device *device) static int __acpi_power_on(struct acpi_power_resource *resource) { - struct acpi_power_resource_device *device_list = resource->devices; acpi_status status = AE_OK; status = acpi_evaluate_object(resource->device->handle, "_ON", NULL, NULL); @@ -236,19 +238,15 @@ static int __acpi_power_on(struct acpi_power_resource *resource) ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Power resource [%s] turned on\n", resource->name)); - while (device_list) { - acpi_power_on_device(device_list->device); - - device_list = device_list->next; - } - return 0; } static int acpi_power_on(acpi_handle handle) { int result = 0; + bool resume_device = false; struct acpi_power_resource *resource = NULL; + struct acpi_power_resource_device *device_list; result = acpi_power_get_context(handle, &resource); if (result) @@ -264,10 +262,25 @@ static int acpi_power_on(acpi_handle handle) result = __acpi_power_on(resource); if (result) 
resource->ref_count--; + else + resume_device = true; } mutex_unlock(&resource->resource_lock); + if (!resume_device) + return result; + + mutex_lock(&resource->devices_lock); + + device_list = resource->devices; + while (device_list) { + acpi_power_on_device(device_list->device); + device_list = device_list->next; + } + + mutex_unlock(&resource->devices_lock); + return result; } @@ -353,7 +366,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev, if (acpi_power_get_context(res_handle, &resource)) return; - mutex_lock(&resource->resource_lock); + mutex_lock(&resource->devices_lock); prev = NULL; curr = resource->devices; while (curr) { @@ -370,7 +383,7 @@ static void __acpi_power_resource_unregister_device(struct device *dev, prev = curr; curr = curr->next; } - mutex_unlock(&resource->resource_lock); + mutex_unlock(&resource->devices_lock); } /* Unlink dev from all power resources in _PR0 */ @@ -412,10 +425,10 @@ static int __acpi_power_resource_register_device( power_resource_device->device = powered_device; - mutex_lock(&resource->resource_lock); + mutex_lock(&resource->devices_lock); power_resource_device->next = resource->devices; resource->devices = power_resource_device; - mutex_unlock(&resource->resource_lock); + mutex_unlock(&resource->devices_lock); return 0; } @@ -460,7 +473,7 @@ int acpi_power_resource_register_device(struct device *dev, acpi_handle handle) return ret; no_power_resource: - printk(KERN_WARNING PREFIX "Invalid Power Resource to register!"); + printk(KERN_DEBUG PREFIX "Invalid Power Resource to register!"); return -ENODEV; } EXPORT_SYMBOL_GPL(acpi_power_resource_register_device); @@ -719,6 +732,7 @@ static int acpi_power_add(struct acpi_device *device) resource->device = device; mutex_init(&resource->resource_lock); + mutex_init(&resource->devices_lock); strcpy(resource->name, device->pnp.bus_id); strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_POWER_CLASS); @@ -775,6 +789,7 @@ static int acpi_power_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_power_resume(struct device *dev) { int result = 0, state; @@ -803,6 +818,7 @@ static int acpi_power_resume(struct device *dev) return result; } +#endif int __init acpi_power_init(void) { diff --git a/trunk/drivers/acpi/processor_driver.c b/trunk/drivers/acpi/processor_driver.c index ff8e04f2fab4..bfc31cb0dd3e 100644 --- a/trunk/drivers/acpi/processor_driver.c +++ b/trunk/drivers/acpi/processor_driver.c @@ -437,7 +437,7 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, /* Normal CPU soft online event */ } else { acpi_processor_ppc_has_changed(pr, 0); - acpi_processor_cst_has_changed(pr); + acpi_processor_hotplug(pr); acpi_processor_reevaluate_tstate(pr, action); acpi_processor_tstate_has_changed(pr); } diff --git a/trunk/drivers/acpi/sbs.c b/trunk/drivers/acpi/sbs.c index c0b9aa5faf4c..ff0740e0a9c2 100644 --- a/trunk/drivers/acpi/sbs.c +++ b/trunk/drivers/acpi/sbs.c @@ -988,6 +988,7 @@ static void acpi_sbs_rmdirs(void) #endif } +#ifdef CONFIG_PM_SLEEP static int acpi_sbs_resume(struct device *dev) { struct acpi_sbs *sbs; @@ -997,6 +998,7 @@ static int acpi_sbs_resume(struct device *dev) acpi_sbs_callback(sbs); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume); diff --git a/trunk/drivers/acpi/sleep.c b/trunk/drivers/acpi/sleep.c index 7a7a9c929247..fdcdbb652915 100644 --- a/trunk/drivers/acpi/sleep.c +++ b/trunk/drivers/acpi/sleep.c @@ -28,36 +28,7 @@ #include 
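/*
 * Editor's note: the locking split introduced in the power.c hunks above
 * (resource_lock guards the reference count, the new devices_lock guards the
 * dependent-device list), sketched with pthreads. The point is that the
 * device list is only walked after resource_lock has been dropped, so
 * resuming a device cannot deadlock against the reference counting. All
 * names are illustrative; the real code also evaluates the _ON method and
 * only resumes devices when that succeeds.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct dep_device_sketch {
        struct dep_device_sketch *next;
        void (*resume)(void *dev);
        void *dev;
};

struct power_resource_sketch {
        pthread_mutex_t resource_lock;  /* guards ref_count */
        pthread_mutex_t devices_lock;   /* guards the devices list */
        int ref_count;
        struct dep_device_sketch *devices;
};

static void power_on_sketch(struct power_resource_sketch *res)
{
        bool resume = false;

        pthread_mutex_lock(&res->resource_lock);
        if (res->ref_count++ == 0)
                resume = true;          /* first user: resource was just turned on */
        pthread_mutex_unlock(&res->resource_lock);

        if (!resume)
                return;

        pthread_mutex_lock(&res->devices_lock);
        for (struct dep_device_sketch *d = res->devices; d; d = d->next)
                d->resume(d->dev);
        pthread_mutex_unlock(&res->devices_lock);
}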
"internal.h" #include "sleep.h" -u8 wake_sleep_flags = ACPI_NO_OPTIONAL_METHODS; -static unsigned int gts, bfs; -static int set_param_wake_flag(const char *val, struct kernel_param *kp) -{ - int ret = param_set_int(val, kp); - - if (ret) - return ret; - - if (kp->arg == (const char *)>s) { - if (gts) - wake_sleep_flags |= ACPI_EXECUTE_GTS; - else - wake_sleep_flags &= ~ACPI_EXECUTE_GTS; - } - if (kp->arg == (const char *)&bfs) { - if (bfs) - wake_sleep_flags |= ACPI_EXECUTE_BFS; - else - wake_sleep_flags &= ~ACPI_EXECUTE_BFS; - } - return ret; -} -module_param_call(gts, set_param_wake_flag, param_get_int, >s, 0644); -module_param_call(bfs, set_param_wake_flag, param_get_int, &bfs, 0644); -MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend."); -MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); - static u8 sleep_states[ACPI_S_STATE_COUNT]; -static bool pwr_btn_event_pending; static void acpi_sleep_tts_switch(u32 acpi_state) { @@ -110,6 +81,7 @@ static int acpi_sleep_prepare(u32 acpi_state) #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; +static bool pwr_btn_event_pending; /* * The ACPI specification wants us to save NVS memory regions during hibernation @@ -305,7 +277,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state) switch (acpi_state) { case ACPI_STATE_S1: barrier(); - status = acpi_enter_sleep_state(acpi_state, wake_sleep_flags); + status = acpi_enter_sleep_state(acpi_state); break; case ACPI_STATE_S3: @@ -319,8 +291,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state) /* This violates the spec but is required for bug compatibility. */ acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(acpi_state, wake_sleep_flags); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(acpi_state); /* ACPI 3.0 specs (P62) says that it's the responsibility * of the OSPM to clear the status bit [ implying that the @@ -603,9 +575,9 @@ static int acpi_hibernation_enter(void) ACPI_FLUSH_CPU_CACHE(); /* This shouldn't return. If it returns, we have a problem */ - status = acpi_enter_sleep_state(ACPI_STATE_S4, wake_sleep_flags); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); + status = acpi_enter_sleep_state(ACPI_STATE_S4); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(ACPI_STATE_S4); return ACPI_SUCCESS(status) ? 0 : -EFAULT; } @@ -617,8 +589,8 @@ static void acpi_hibernation_leave(void) * enable it here. */ acpi_enable(); - /* Reprogram control registers and execute _BFS */ - acpi_leave_sleep_state_prep(ACPI_STATE_S4, wake_sleep_flags); + /* Reprogram control registers */ + acpi_leave_sleep_state_prep(ACPI_STATE_S4); /* Check the hardware signature */ if (facs && s4_hardware_signature != facs->hardware_signature) { printk(KERN_EMERG "ACPI: Hardware changed while hibernated, " @@ -892,33 +864,7 @@ static void acpi_power_off(void) /* acpi_sleep_prepare(ACPI_STATE_S5) should have already been called */ printk(KERN_DEBUG "%s called\n", __func__); local_irq_disable(); - acpi_enter_sleep_state(ACPI_STATE_S5, wake_sleep_flags); -} - -/* - * ACPI 2.0 created the optional _GTS and _BFS, - * but industry adoption has been neither rapid nor broad. - * - * Linux gets into trouble when it executes poorly validated - * paths through the BIOS, so disable _GTS and _BFS by default, - * but do speak up and offer the option to enable them. 
- */ -static void __init acpi_gts_bfs_check(void) -{ - acpi_handle dummy; - - if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__GTS, &dummy))) - { - printk(KERN_NOTICE PREFIX "BIOS offers _GTS\n"); - printk(KERN_NOTICE PREFIX "If \"acpi.gts=1\" improves suspend, " - "please notify linux-acpi@vger.kernel.org\n"); - } - if (ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT, METHOD_PATHNAME__BFS, &dummy))) - { - printk(KERN_NOTICE PREFIX "BIOS offers _BFS\n"); - printk(KERN_NOTICE PREFIX "If \"acpi.bfs=1\" improves resume, " - "please notify linux-acpi@vger.kernel.org\n"); - } + acpi_enter_sleep_state(ACPI_STATE_S5); } int __init acpi_sleep_init(void) @@ -979,6 +925,5 @@ int __init acpi_sleep_init(void) * object can also be evaluated when the system enters S5. */ register_reboot_notifier(&tts_notifier); - acpi_gts_bfs_check(); return 0; } diff --git a/trunk/drivers/acpi/sysfs.c b/trunk/drivers/acpi/sysfs.c index 240a24400976..7c3f98ba4afe 100644 --- a/trunk/drivers/acpi/sysfs.c +++ b/trunk/drivers/acpi/sysfs.c @@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) { int result = 0; - if (!strncmp(val, "enable", strlen("enable"))) { + if (!strncmp(val, "enable", sizeof("enable") - 1)) { result = acpi_debug_trace(trace_method_name, trace_debug_level, trace_debug_layer, 0); if (result) @@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) goto exit; } - if (!strncmp(val, "disable", strlen("disable"))) { + if (!strncmp(val, "disable", sizeof("disable") - 1)) { int name = 0; result = acpi_debug_trace((char *)&name, trace_debug_level, trace_debug_layer, 0); diff --git a/trunk/drivers/acpi/thermal.c b/trunk/drivers/acpi/thermal.c index 9fe90e9fecb5..edda74a43406 100644 --- a/trunk/drivers/acpi/thermal.c +++ b/trunk/drivers/acpi/thermal.c @@ -106,7 +106,9 @@ static const struct acpi_device_id thermal_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, thermal_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_thermal_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume); static struct acpi_driver acpi_thermal_driver = { @@ -1041,6 +1043,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_thermal_resume(struct device *dev) { struct acpi_thermal *tz; @@ -1075,6 +1078,7 @@ static int acpi_thermal_resume(struct device *dev) return AE_OK; } +#endif static int thermal_act(const struct dmi_system_id *d) { diff --git a/trunk/drivers/ata/Kconfig b/trunk/drivers/ata/Kconfig index 2be8ef1d3093..27cecd313e75 100644 --- a/trunk/drivers/ata/Kconfig +++ b/trunk/drivers/ata/Kconfig @@ -115,7 +115,7 @@ config SATA_SIL24 If unsure, say N. 
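The sysfs.c hunk above swaps strlen("enable") for sizeof("enable") - 1 when matching the parameter value. Both expressions give the length of the string literal (sizeof counts the terminating NUL, hence the -1), but sizeof is folded at compile time while strlen is a library call. A small standalone illustration of the equivalence:

#include <assert.h>
#include <string.h>

int main(void)
{
        const char *val = "enable tracing";

        /* sizeof includes the trailing '\0', so subtract one. */
        assert(sizeof("enable") - 1 == strlen("enable"));

        /* Either length works as the strncmp() bound for a prefix match. */
        assert(strncmp(val, "enable", sizeof("enable") - 1) == 0);
        return 0;
}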
config ATA_SFF - bool "ATA SFF support" + bool "ATA SFF support (for legacy IDE and PATA)" default y help This option adds support for ATA controllers with SFF diff --git a/trunk/drivers/ata/ahci.c b/trunk/drivers/ata/ahci.c index 062e6a1a248f..7862d17976b7 100644 --- a/trunk/drivers/ata/ahci.c +++ b/trunk/drivers/ata/ahci.c @@ -256,10 +256,21 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x8c07), board_ahci }, /* Lynx Point RAID */ { PCI_VDEVICE(INTEL, 0x8c0e), board_ahci }, /* Lynx Point RAID */ { PCI_VDEVICE(INTEL, 0x8c0f), board_ahci }, /* Lynx Point RAID */ + { PCI_VDEVICE(INTEL, 0x9c02), board_ahci }, /* Lynx Point-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c03), board_ahci }, /* Lynx Point-LP AHCI */ + { PCI_VDEVICE(INTEL, 0x9c04), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c05), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c06), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c07), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c0e), board_ahci }, /* Lynx Point-LP RAID */ + { PCI_VDEVICE(INTEL, 0x9c0f), board_ahci }, /* Lynx Point-LP RAID */ /* JMicron 360/1/3/5/6, match class to avoid IDE function */ { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci_ign_iferr }, + /* JMicron 362B and 362C have an AHCI function with IDE class code */ + { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr }, + { PCI_VDEVICE(JMICRON, 0x236f), board_ahci_ign_iferr }, /* ATI */ { PCI_VDEVICE(ATI, 0x4380), board_ahci_sb600 }, /* ATI SB600 */ @@ -385,6 +396,8 @@ static const struct pci_device_id ahci_pci_tbl[] = { .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ { PCI_DEVICE(0x1b4b, 0x917a), .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ + { PCI_DEVICE(0x1b4b, 0x9192), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ { PCI_DEVICE(0x1b4b, 0x91a3), .driver_data = board_ahci_yes_fbs }, @@ -392,7 +405,10 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci }, /* PDC42819 */ /* Asmedia */ - { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci }, /* ASM1060 */ + { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */ + { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ /* Generic, PCI class code for AHCI */ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, diff --git a/trunk/drivers/ata/ahci.h b/trunk/drivers/ata/ahci.h index c2594ddf25b0..57eb1c212a4c 100644 --- a/trunk/drivers/ata/ahci.h +++ b/trunk/drivers/ata/ahci.h @@ -320,6 +320,7 @@ extern struct device_attribute *ahci_sdev_attrs[]; extern struct ata_port_operations ahci_ops; extern struct ata_port_operations ahci_pmp_retry_srst_ops; +unsigned int ahci_dev_classify(struct ata_port *ap); void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, u32 opts); void ahci_save_initial_config(struct device *dev, diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c index 3c809bfbccf5..ef773e12af79 100644 --- a/trunk/drivers/ata/ata_piix.c +++ b/trunk/drivers/ata/ata_piix.c @@ -329,6 +329,14 @@ static const struct pci_device_id piix_pci_tbl[] = { { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (Lynx Point) */ { 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 
0x9c00, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (Lynx Point-LP) */ + { 0x8086, 0x9c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, /* SATA Controller IDE (DH89xxCC) */ { 0x8086, 0x2326, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, { } /* terminate list */ diff --git a/trunk/drivers/ata/libahci.c b/trunk/drivers/ata/libahci.c index f9eaa82311a9..555c07afa05b 100644 --- a/trunk/drivers/ata/libahci.c +++ b/trunk/drivers/ata/libahci.c @@ -1139,7 +1139,7 @@ static void ahci_dev_config(struct ata_device *dev) } } -static unsigned int ahci_dev_classify(struct ata_port *ap) +unsigned int ahci_dev_classify(struct ata_port *ap) { void __iomem *port_mmio = ahci_port_base(ap); struct ata_taskfile tf; @@ -1153,6 +1153,7 @@ static unsigned int ahci_dev_classify(struct ata_port *ap) return ata_dev_classify(&tf); } +EXPORT_SYMBOL_GPL(ahci_dev_classify); void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag, u32 opts) diff --git a/trunk/drivers/ata/libata-acpi.c b/trunk/drivers/ata/libata-acpi.c index 902b5a457170..fd9ecf74e631 100644 --- a/trunk/drivers/ata/libata-acpi.c +++ b/trunk/drivers/ata/libata-acpi.c @@ -60,17 +60,7 @@ acpi_handle ata_ap_acpi_handle(struct ata_port *ap) if (ap->flags & ATA_FLAG_ACPI_SATA) return NULL; - /* - * If acpi bind operation has already happened, we can get the handle - * for the port by checking the corresponding scsi_host device's - * firmware node, otherwise we will need to find out the handle from - * its parent's acpi node. - */ - if (ap->scsi_host) - return DEVICE_ACPI_HANDLE(&ap->scsi_host->shost_gendev); - else - return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), - ap->port_no); + return acpi_get_child(DEVICE_ACPI_HANDLE(ap->host->dev), ap->port_no); } EXPORT_SYMBOL(ata_ap_acpi_handle); @@ -1101,6 +1091,9 @@ static int ata_acpi_bind_host(struct ata_port *ap, acpi_handle *handle) if (!*handle) return -ENODEV; + if (ata_acpi_gtm(ap, &ap->__acpi_init_gtm) == 0) + ap->pflags |= ATA_PFLAG_INIT_GTM_VALID; + return 0; } diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c index fadd5866d40f..8e1039c8e159 100644 --- a/trunk/drivers/ata/libata-core.c +++ b/trunk/drivers/ata/libata-core.c @@ -4062,7 +4062,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, - { "2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, + { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, /* Odd clown on sil3726/4726 PMPs */ { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, @@ -4128,6 +4128,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Devices that do not need bridging limits applied */ { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, + { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, /* Devices which aren't very happy with higher link speeds */ { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, diff --git a/trunk/drivers/ata/pata_atiixp.c b/trunk/drivers/ata/pata_atiixp.c index 361c75cea57b..24e51056ac26 100644 --- a/trunk/drivers/ata/pata_atiixp.c +++ b/trunk/drivers/ata/pata_atiixp.c @@ -20,6 +20,7 @@ #include #include #include +#include #define DRV_NAME "pata_atiixp" #define 
DRV_VERSION "0.4.6" @@ -33,11 +34,26 @@ enum { ATIIXP_IDE_UDMA_MODE = 0x56 }; +static const struct dmi_system_id attixp_cable_override_dmi_table[] = { + { + /* Board has onboard PATA<->SATA converters */ + .ident = "MSI E350DM-E33", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "MSI"), + DMI_MATCH(DMI_BOARD_NAME, "E350DM-E33(MS-7720)"), + }, + }, + { } +}; + static int atiixp_cable_detect(struct ata_port *ap) { struct pci_dev *pdev = to_pci_dev(ap->host->dev); u8 udma; + if (dmi_check_system(attixp_cable_override_dmi_table)) + return ATA_CBL_PATA40_SHORT; + /* Hack from drivers/ide/pci. Really we want to know how to do the raw detection not play follow the bios mode guess */ pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ap->port_no, &udma); diff --git a/trunk/drivers/atm/iphase.c b/trunk/drivers/atm/iphase.c index d4386019af5d..96cce6d53195 100644 --- a/trunk/drivers/atm/iphase.c +++ b/trunk/drivers/atm/iphase.c @@ -2362,7 +2362,7 @@ static int __devinit ia_init(struct atm_dev *dev) { printk(DEV_LABEL " (itf %d): can't set up page mapping\n", dev->number); - return error; + return -ENOMEM; } IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n", dev->number, iadev->pci->revision, base, iadev->irq);) diff --git a/trunk/drivers/base/core.c b/trunk/drivers/base/core.c index f338037a4f3d..5e6e00bc1652 100644 --- a/trunk/drivers/base/core.c +++ b/trunk/drivers/base/core.c @@ -1865,6 +1865,7 @@ int __dev_printk(const char *level, const struct device *dev, struct va_format *vaf) { char dict[128]; + const char *level_extra = ""; size_t dictlen = 0; const char *subsys; @@ -1911,10 +1912,14 @@ int __dev_printk(const char *level, const struct device *dev, "DEVICE=+%s:%s", subsys, dev_name(dev)); } skip: + if (level[2]) + level_extra = &level[2]; /* skip past KERN_SOH "L" */ + return printk_emit(0, level[1] - '0', dictlen ? dict : NULL, dictlen, - "%s %s: %pV", - dev_driver_string(dev), dev_name(dev), vaf); + "%s %s: %s%pV", + dev_driver_string(dev), dev_name(dev), + level_extra, vaf); } EXPORT_SYMBOL(__dev_printk); diff --git a/trunk/drivers/base/dma-contiguous.c b/trunk/drivers/base/dma-contiguous.c index 78efb0306a44..34d94c762a1e 100644 --- a/trunk/drivers/base/dma-contiguous.c +++ b/trunk/drivers/base/dma-contiguous.c @@ -250,7 +250,7 @@ int __init dma_declare_contiguous(struct device *dev, unsigned long size, return -EINVAL; /* Sanitise input arguments */ - alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order); + alignment = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order); base = ALIGN(base, alignment); size = ALIGN(size, alignment); limit &= ~(alignment - 1); diff --git a/trunk/drivers/base/power/clock_ops.c b/trunk/drivers/base/power/clock_ops.c index 869d7ff2227f..eb78e9640c4a 100644 --- a/trunk/drivers/base/power/clock_ops.c +++ b/trunk/drivers/base/power/clock_ops.c @@ -169,8 +169,7 @@ void pm_clk_init(struct device *dev) */ int pm_clk_create(struct device *dev) { - int ret = dev_pm_get_subsys_data(dev); - return ret < 0 ? 
ret : 0; + return dev_pm_get_subsys_data(dev); } /** diff --git a/trunk/drivers/base/power/common.c b/trunk/drivers/base/power/common.c index a14085cc613f..39c32529b833 100644 --- a/trunk/drivers/base/power/common.c +++ b/trunk/drivers/base/power/common.c @@ -24,7 +24,6 @@ int dev_pm_get_subsys_data(struct device *dev) { struct pm_subsys_data *psd; - int ret = 0; psd = kzalloc(sizeof(*psd), GFP_KERNEL); if (!psd) @@ -40,7 +39,6 @@ int dev_pm_get_subsys_data(struct device *dev) dev->power.subsys_data = psd; pm_clk_init(dev); psd = NULL; - ret = 1; } spin_unlock_irq(&dev->power.lock); @@ -48,7 +46,7 @@ int dev_pm_get_subsys_data(struct device *dev) /* kfree() verifies that its argument is nonzero. */ kfree(psd); - return ret; + return 0; } EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data); diff --git a/trunk/drivers/base/power/runtime.c b/trunk/drivers/base/power/runtime.c index 59894873a3b3..7d9c1cb1c39a 100644 --- a/trunk/drivers/base/power/runtime.c +++ b/trunk/drivers/base/power/runtime.c @@ -147,6 +147,8 @@ static int rpm_check_suspend_allowed(struct device *dev) || (dev->power.request_pending && dev->power.request == RPM_REQ_RESUME)) retval = -EAGAIN; + else if (__dev_pm_qos_read_value(dev) < 0) + retval = -EPERM; else if (dev->power.runtime_status == RPM_SUSPENDED) retval = 1; @@ -388,7 +390,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto repeat; } - dev->power.deferred_resume = false; if (dev->power.no_callbacks) goto no_callback; /* Assume success. */ @@ -403,12 +404,6 @@ static int rpm_suspend(struct device *dev, int rpmflags) goto out; } - if (__dev_pm_qos_read_value(dev) < 0) { - /* Negative PM QoS constraint means "never suspend". */ - retval = -EPERM; - goto out; - } - __update_runtime_status(dev, RPM_SUSPENDING); if (dev->pm_domain) @@ -440,6 +435,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) wake_up_all(&dev->power.wait_queue); if (dev->power.deferred_resume) { + dev->power.deferred_resume = false; rpm_resume(dev, 0); retval = -EAGAIN; goto out; @@ -584,6 +580,7 @@ static int rpm_resume(struct device *dev, int rpmflags) || dev->parent->power.runtime_status == RPM_ACTIVE) { atomic_inc(&dev->parent->power.child_count); spin_unlock(&dev->parent->power.lock); + retval = 1; goto no_callback; /* Assume success. */ } spin_unlock(&dev->parent->power.lock); @@ -664,7 +661,7 @@ static int rpm_resume(struct device *dev, int rpmflags) } wake_up_all(&dev->power.wait_queue); - if (!retval) + if (retval >= 0) rpm_idle(dev, RPM_ASYNC); out: diff --git a/trunk/drivers/base/regmap/regmap-irq.c b/trunk/drivers/base/regmap/regmap-irq.c index a89734621e51..5b6b1d8e6cc0 100644 --- a/trunk/drivers/base/regmap/regmap-irq.c +++ b/trunk/drivers/base/regmap/regmap-irq.c @@ -16,12 +16,14 @@ #include #include #include +#include #include #include "internal.h" struct regmap_irq_chip_data { struct mutex lock; + struct irq_chip irq_chip; struct regmap *map; const struct regmap_irq_chip *chip; @@ -59,6 +61,14 @@ static void regmap_irq_sync_unlock(struct irq_data *data) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; int i, ret; + u32 reg; + + if (d->chip->runtime_pm) { + ret = pm_runtime_get_sync(map->dev); + if (ret < 0) + dev_err(map->dev, "IRQ sync failed to resume: %d\n", + ret); + } /* * If there's been a change in the mask write it back to the @@ -66,15 +76,22 @@ static void regmap_irq_sync_unlock(struct irq_data *data) * suppress pointless writes. 
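The regmap-irq changes in this hunk bracket the register traffic with runtime PM when the chip sets the new runtime_pm flag: pm_runtime_get_sync() resumes the underlying device before the mask/ack registers are touched, and pm_runtime_put() drops the reference afterwards. A minimal sketch of that bracketing; the register access itself is elided and the function name is illustrative:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int guarded_register_io(struct device *dev, bool runtime_pm)
{
        int ret;

        if (runtime_pm) {
                /* Returns a negative errno if the resume fails. */
                ret = pm_runtime_get_sync(dev);
                if (ret < 0) {
                        dev_err(dev, "failed to resume: %d\n", ret);
                        return ret;
                }
        }

        /* ... read/modify the interrupt registers here ... */

        if (runtime_pm)
                pm_runtime_put(dev);

        return 0;
}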
*/ for (i = 0; i < d->chip->num_regs; i++) { - ret = regmap_update_bits(d->map, d->chip->mask_base + - (i * map->reg_stride * - d->irq_reg_stride), + reg = d->chip->mask_base + + (i * map->reg_stride * d->irq_reg_stride); + if (d->chip->mask_invert) + ret = regmap_update_bits(d->map, reg, + d->mask_buf_def[i], ~d->mask_buf[i]); + else + ret = regmap_update_bits(d->map, reg, d->mask_buf_def[i], d->mask_buf[i]); if (ret != 0) dev_err(d->map->dev, "Failed to sync masks in %x\n", - d->chip->mask_base + (i * map->reg_stride)); + reg); } + if (d->chip->runtime_pm) + pm_runtime_put(map->dev); + /* If we've changed our wakeup count propagate it to the parent */ if (d->wake_count < 0) for (i = d->wake_count; i < 0; i++) @@ -128,8 +145,7 @@ static int regmap_irq_set_wake(struct irq_data *data, unsigned int on) return 0; } -static struct irq_chip regmap_irq_chip = { - .name = "regmap", +static const struct irq_chip regmap_irq_chip = { .irq_bus_lock = regmap_irq_lock, .irq_bus_sync_unlock = regmap_irq_sync_unlock, .irq_disable = regmap_irq_disable, @@ -144,6 +160,16 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) struct regmap *map = data->map; int ret, i; bool handled = false; + u32 reg; + + if (chip->runtime_pm) { + ret = pm_runtime_get_sync(map->dev); + if (ret < 0) { + dev_err(map->dev, "IRQ thread failed to resume: %d\n", + ret); + return IRQ_NONE; + } + } /* * Ignore masked IRQs and ack if we need to; we ack early so @@ -160,20 +186,20 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) if (ret != 0) { dev_err(map->dev, "Failed to read IRQ status: %d\n", ret); + if (chip->runtime_pm) + pm_runtime_put(map->dev); return IRQ_NONE; } data->status_buf[i] &= ~data->mask_buf[i]; if (data->status_buf[i] && chip->ack_base) { - ret = regmap_write(map, chip->ack_base + - (i * map->reg_stride * - data->irq_reg_stride), - data->status_buf[i]); + reg = chip->ack_base + + (i * map->reg_stride * data->irq_reg_stride); + ret = regmap_write(map, reg, data->status_buf[i]); if (ret != 0) dev_err(map->dev, "Failed to ack 0x%x: %d\n", - chip->ack_base + (i * map->reg_stride), - ret); + reg, ret); } } @@ -185,6 +211,9 @@ static irqreturn_t regmap_irq_thread(int irq, void *d) } } + if (chip->runtime_pm) + pm_runtime_put(map->dev); + if (handled) return IRQ_HANDLED; else @@ -197,7 +226,7 @@ static int regmap_irq_map(struct irq_domain *h, unsigned int virq, struct regmap_irq_chip_data *data = h->host_data; irq_set_chip_data(virq, data); - irq_set_chip_and_handler(virq, ®map_irq_chip, handle_edge_irq); + irq_set_chip(virq, &data->irq_chip); irq_set_nested_thread(virq, 1); /* ARM needs us to explicitly flag the IRQ as valid @@ -238,6 +267,7 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, struct regmap_irq_chip_data *d; int i; int ret = -ENOMEM; + u32 reg; for (i = 0; i < chip->num_irqs; i++) { if (chip->irqs[i].reg_offset % map->reg_stride) @@ -284,6 +314,13 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, goto err_alloc; } + d->irq_chip = regmap_irq_chip; + d->irq_chip.name = chip->name; + if (!chip->wake_base) { + d->irq_chip.irq_set_wake = NULL; + d->irq_chip.flags |= IRQCHIP_MASK_ON_SUSPEND | + IRQCHIP_SKIP_SET_WAKE; + } d->irq = irq; d->map = map; d->chip = chip; @@ -303,16 +340,37 @@ int regmap_add_irq_chip(struct regmap *map, int irq, int irq_flags, /* Mask all the interrupts by default */ for (i = 0; i < chip->num_regs; i++) { d->mask_buf[i] = d->mask_buf_def[i]; - ret = regmap_write(map, chip->mask_base + (i * map->reg_stride - * d->irq_reg_stride), - 
d->mask_buf[i]); + reg = chip->mask_base + + (i * map->reg_stride * d->irq_reg_stride); + if (chip->mask_invert) + ret = regmap_update_bits(map, reg, + d->mask_buf[i], ~d->mask_buf[i]); + else + ret = regmap_update_bits(map, reg, + d->mask_buf[i], d->mask_buf[i]); if (ret != 0) { dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", - chip->mask_base + (i * map->reg_stride), ret); + reg, ret); goto err_alloc; } } + /* Wake is disabled by default */ + if (d->wake_buf) { + for (i = 0; i < chip->num_regs; i++) { + d->wake_buf[i] = d->mask_buf_def[i]; + reg = chip->wake_base + + (i * map->reg_stride * d->irq_reg_stride); + ret = regmap_update_bits(map, reg, d->wake_buf[i], + d->wake_buf[i]); + if (ret != 0) { + dev_err(map->dev, "Failed to set masks in 0x%x: %d\n", + reg, ret); + goto err_alloc; + } + } + } + if (irq_base) d->domain = irq_domain_add_legacy(map->dev->of_node, chip->num_irqs, irq_base, 0, diff --git a/trunk/drivers/base/regmap/regmap.c b/trunk/drivers/base/regmap/regmap.c index c241ae2f2f10..52069d29ff12 100644 --- a/trunk/drivers/base/regmap/regmap.c +++ b/trunk/drivers/base/regmap/regmap.c @@ -659,13 +659,12 @@ EXPORT_SYMBOL_GPL(devm_regmap_init); * new cache. This can be used to restore the cache to defaults or to * update the cache configuration to reflect runtime discovery of the * hardware. + * + * No explicit locking is done here, the user needs to ensure that + * this function will not race with other calls to regmap. */ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) { - int ret; - - map->lock(map); - regcache_exit(map); regmap_debugfs_exit(map); @@ -681,11 +680,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) map->cache_bypass = false; map->cache_only = false; - ret = regcache_init(map, config); - - map->unlock(map); - - return ret; + return regcache_init(map, config); } EXPORT_SYMBOL_GPL(regmap_reinit_cache); diff --git a/trunk/drivers/bcma/host_pci.c b/trunk/drivers/bcma/host_pci.c index 11b32d2642df..a6e5672c67e7 100644 --- a/trunk/drivers/bcma/host_pci.c +++ b/trunk/drivers/bcma/host_pci.c @@ -272,6 +272,7 @@ static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, + { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4359) }, { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, { 0, }, }; diff --git a/trunk/drivers/bcma/sprom.c b/trunk/drivers/bcma/sprom.c index 26823d97fd9f..9ea4627dc0c2 100644 --- a/trunk/drivers/bcma/sprom.c +++ b/trunk/drivers/bcma/sprom.c @@ -507,7 +507,9 @@ static bool bcma_sprom_onchip_available(struct bcma_bus *bus) /* for these chips OTP is always available */ present = true; break; - + case BCMA_CHIP_ID_BCM43228: + present = chip_status & BCMA_CC_CHIPST_43228_OTP_PRESENT; + break; default: present = false; break; diff --git a/trunk/drivers/block/aoe/aoecmd.c b/trunk/drivers/block/aoe/aoecmd.c index de0435e63b02..887f68f6d79a 100644 --- a/trunk/drivers/block/aoe/aoecmd.c +++ b/trunk/drivers/block/aoe/aoecmd.c @@ -35,6 +35,7 @@ new_skb(ulong len) skb_reset_mac_header(skb); skb_reset_network_header(skb); skb->protocol = __constant_htons(ETH_P_AOE); + skb_checksum_none_assert(skb); } return skb; } diff --git a/trunk/drivers/block/cciss_scsi.c b/trunk/drivers/block/cciss_scsi.c index acda773b3720..da3311129a0c 100644 --- a/trunk/drivers/block/cciss_scsi.c +++ b/trunk/drivers/block/cciss_scsi.c @@ -763,16 +763,7 @@ static void 
complete_scsi_command(CommandList_struct *c, int timeout, { case CMD_TARGET_STATUS: /* Pass it up to the upper layers... */ - if( ei->ScsiStatus) - { -#if 0 - printk(KERN_WARNING "cciss: cmd %p " - "has SCSI Status = %x\n", - c, ei->ScsiStatus); -#endif - cmd->result |= (ei->ScsiStatus << 1); - } - else { /* scsi status is zero??? How??? */ + if (!ei->ScsiStatus) { /* Ordinarily, this case should never happen, but there is a bug in some released firmware revisions that allows it to happen @@ -804,6 +795,7 @@ static void complete_scsi_command(CommandList_struct *c, int timeout, } break; case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "%p has protocol error\n", c); break; diff --git a/trunk/drivers/block/drbd/drbd_bitmap.c b/trunk/drivers/block/drbd/drbd_bitmap.c index ba91b408abad..d84566496746 100644 --- a/trunk/drivers/block/drbd/drbd_bitmap.c +++ b/trunk/drivers/block/drbd/drbd_bitmap.c @@ -889,6 +889,7 @@ struct bm_aio_ctx { unsigned int done; unsigned flags; #define BM_AIO_COPY_PAGES 1 +#define BM_WRITE_ALL_PAGES 2 int error; struct kref kref; }; @@ -1059,7 +1060,8 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w if (lazy_writeout_upper_idx && i == lazy_writeout_upper_idx) break; if (rw & WRITE) { - if (bm_test_page_unchanged(b->bm_pages[i])) { + if (!(flags & BM_WRITE_ALL_PAGES) && + bm_test_page_unchanged(b->bm_pages[i])) { dynamic_dev_dbg(DEV, "skipped bm write for idx %u\n", i); continue; } @@ -1140,6 +1142,17 @@ int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local) return bm_rw(mdev, WRITE, 0, 0); } +/** + * drbd_bm_write_all() - Write the whole bitmap to its on disk location. + * @mdev: DRBD device. + * + * Will write all pages. + */ +int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local) +{ + return bm_rw(mdev, WRITE, BM_WRITE_ALL_PAGES, 0); +} + /** * drbd_bm_lazy_write_out() - Write bitmap pages 0 to @upper_idx-1, if they have changed. * @mdev: DRBD device. 
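The drbd_bitmap change above adds a BM_WRITE_ALL_PAGES flag so the new drbd_bm_write_all() can bypass the "page unchanged, skip it" shortcut that normal lazy writeout relies on. Reduced to its core, the pattern is a flag-gated skip; the sketch below uses illustrative names rather than the DRBD structures:

#include <stdbool.h>
#include <stdio.h>

#define WR_ALL_PAGES 0x2   /* force every page out, even unchanged ones */

/* Stand-ins for bm_test_page_unchanged() and the real page writeout. */
static bool page_unchanged(int idx) { return idx % 2 == 0; }
static void write_page(int idx) { printf("writing page %d\n", idx); }

static void write_bitmap(unsigned flags, int nr_pages)
{
        int i;

        for (i = 0; i < nr_pages; i++) {
                /* Skip clean pages only when the caller did not ask for all. */
                if (!(flags & WR_ALL_PAGES) && page_unchanged(i))
                        continue;
                write_page(i);
        }
}

int main(void)
{
        write_bitmap(0, 4);            /* lazy writeout: skips unchanged pages */
        write_bitmap(WR_ALL_PAGES, 4); /* write-all: touches every page */
        return 0;
}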
diff --git a/trunk/drivers/block/drbd/drbd_int.h b/trunk/drivers/block/drbd/drbd_int.h index b2ca143d0053..b953cc7c9c00 100644 --- a/trunk/drivers/block/drbd/drbd_int.h +++ b/trunk/drivers/block/drbd/drbd_int.h @@ -1469,6 +1469,7 @@ extern int drbd_bm_e_weight(struct drbd_conf *mdev, unsigned long enr); extern int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(local); extern int drbd_bm_read(struct drbd_conf *mdev) __must_hold(local); extern int drbd_bm_write(struct drbd_conf *mdev) __must_hold(local); +extern int drbd_bm_write_all(struct drbd_conf *mdev) __must_hold(local); extern int drbd_bm_write_copy_pages(struct drbd_conf *mdev) __must_hold(local); extern unsigned long drbd_bm_ALe_set_all(struct drbd_conf *mdev, unsigned long al_enr); diff --git a/trunk/drivers/block/drbd/drbd_main.c b/trunk/drivers/block/drbd/drbd_main.c index 2e0e7fc1dbba..f93a0320e952 100644 --- a/trunk/drivers/block/drbd/drbd_main.c +++ b/trunk/drivers/block/drbd/drbd_main.c @@ -79,6 +79,7 @@ static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused); static void md_sync_timer_fn(unsigned long data); static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused); static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused); +static void _tl_clear(struct drbd_conf *mdev); MODULE_AUTHOR("Philipp Reisner , " "Lars Ellenberg "); @@ -432,19 +433,10 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) /* Actions operating on the disk state, also want to work on requests that got barrier acked. */ - switch (what) { - case fail_frozen_disk_io: - case restart_frozen_disk_io: - list_for_each_safe(le, tle, &mdev->barrier_acked_requests) { - req = list_entry(le, struct drbd_request, tl_requests); - _req_mod(req, what); - } - case connection_lost_while_pending: - case resend: - break; - default: - dev_err(DEV, "what = %d in _tl_restart()\n", what); + list_for_each_safe(le, tle, &mdev->barrier_acked_requests) { + req = list_entry(le, struct drbd_request, tl_requests); + _req_mod(req, what); } } @@ -458,12 +450,17 @@ static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) * receiver thread and the worker thread. */ void tl_clear(struct drbd_conf *mdev) +{ + spin_lock_irq(&mdev->req_lock); + _tl_clear(mdev); + spin_unlock_irq(&mdev->req_lock); +} + +static void _tl_clear(struct drbd_conf *mdev) { struct list_head *le, *tle; struct drbd_request *r; - spin_lock_irq(&mdev->req_lock); - _tl_restart(mdev, connection_lost_while_pending); /* we expect this list to be empty. 
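Note the shape of the tl_clear() change above: the function is split into a thin wrapper that takes req_lock and an _tl_clear() helper for call sites that already hold it, which lets the after_state_ch() hunk that follows clear the transfer log and update the suspend state in one critical section. A generic sketch of that wrapper/helper convention, with illustrative names:

#include <linux/spinlock.h>

struct table {
        spinlock_t lock;
        int nr_entries;
};

/* Core helper: the caller must already hold tbl->lock. */
static void _table_clear(struct table *tbl)
{
        tbl->nr_entries = 0;
}

/* Wrapper for callers that do not hold the lock. */
static void table_clear(struct table *tbl)
{
        spin_lock_irq(&tbl->lock);
        _table_clear(tbl);
        spin_unlock_irq(&tbl->lock);
}

/* A caller that must clear the table and update other state atomically
 * takes the lock once and uses the helper directly. */
static void table_clear_and_mark(struct table *tbl, int *flag)
{
        spin_lock_irq(&tbl->lock);
        _table_clear(tbl);
        *flag = 1;
        spin_unlock_irq(&tbl->lock);
}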
*/ @@ -482,7 +479,6 @@ void tl_clear(struct drbd_conf *mdev) memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *)); - spin_unlock_irq(&mdev->req_lock); } void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what) @@ -1476,12 +1472,12 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os, if (ns.susp_fen) { /* case1: The outdate peer handler is successful: */ if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) { - tl_clear(mdev); if (test_bit(NEW_CUR_UUID, &mdev->flags)) { drbd_uuid_new_current(mdev); clear_bit(NEW_CUR_UUID, &mdev->flags); } spin_lock_irq(&mdev->req_lock); + _tl_clear(mdev); _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL); spin_unlock_irq(&mdev->req_lock); } @@ -3537,9 +3533,9 @@ static void drbd_cleanup(void) } /** - * drbd_congested() - Callback for pdflush + * drbd_congested() - Callback for the flusher thread * @congested_data: User data - * @bdi_bits: Bits pdflush is currently interested in + * @bdi_bits: Bits the BDI flusher thread is currently interested in * * Returns 1<rq_state & RQ_WRITE) && !req->w.cb) { + _req_may_be_done(req, m); + break; + } + /* If RQ_NET_OK is already set, we got a P_WRITE_ACK or P_RECV_ACK before the connection loss (B&C only); only P_BARRIER_ACK was missing. Trowing them out of the TL here by pretending we got a BARRIER_ACK @@ -834,7 +840,15 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns req->private_bio = NULL; } if (rw == WRITE) { - remote = 1; + /* Need to replicate writes. Unless it is an empty flush, + * which is better mapped to a DRBD P_BARRIER packet, + * also for drbd wire protocol compatibility reasons. */ + if (unlikely(size == 0)) { + /* The only size==0 bios we expect are empty flushes. */ + D_ASSERT(bio->bi_rw & REQ_FLUSH); + remote = 0; + } else + remote = 1; } else { /* READ || READA */ if (local) { @@ -870,8 +884,11 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns * extent. This waits for any resync activity in the corresponding * resync extent to finish, and, if necessary, pulls in the target * extent into the activity log, which involves further disk io because - * of transactional on-disk meta data updates. */ - if (rw == WRITE && local && !test_bit(AL_SUSPENDED, &mdev->flags)) { + * of transactional on-disk meta data updates. + * Empty flushes don't need to go into the activity log, they can only + * flush data for pending writes which are already in there. */ + if (rw == WRITE && local && size + && !test_bit(AL_SUSPENDED, &mdev->flags)) { req->rq_state |= RQ_IN_ACT_LOG; drbd_al_begin_io(mdev, sector); } @@ -994,7 +1011,10 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns if (rw == WRITE && _req_conflicts(req)) goto fail_conflicting; - list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); + /* no point in adding empty flushes to the transfer log, + * they are mapped to drbd barriers already. */ + if (likely(size!=0)) + list_add_tail(&req->tl_requests, &mdev->newest_tle->requests); /* NOTE remote first: to get the concurrent write detection right, * we must register the request before start of local IO. */ @@ -1014,6 +1034,14 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, uns mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) maybe_pull_ahead(mdev); + /* If this was a flush, queue a drbd barrier/start a new epoch. 
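In the drbd_req.c hunk here, a zero-size REQ_FLUSH bio is treated as an "empty flush": it is not replicated as a write, does not enter the activity log or the transfer log, and instead queues a DRBD barrier for the current epoch. The classification itself is just a flag-plus-size test; a sketch against the block-layer fields of this kernel generation (bi_rw, bi_size):

#include <linux/bio.h>
#include <linux/blk_types.h>

/*
 * An empty flush carries no payload and only requests a cache flush;
 * in DRBD terms it maps to a protocol barrier rather than to a
 * replicated write.
 */
static bool bio_is_empty_flush(struct bio *bio)
{
        return (bio->bi_rw & REQ_FLUSH) && bio->bi_size == 0;
}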
+ * Unless the current epoch was empty anyways, or we are not currently + * replicating, in which case there is no point. */ + if (unlikely(bio->bi_rw & REQ_FLUSH) + && mdev->newest_tle->n_writes + && drbd_should_do_remote(mdev->state)) + queue_barrier(mdev); + spin_unlock_irq(&mdev->req_lock); kfree(b); /* if someone else has beaten us to it... */ diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c index a8fddeb3d638..f946d31d6917 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.c +++ b/trunk/drivers/block/mtip32xx/mtip32xx.c @@ -1148,11 +1148,15 @@ static bool mtip_pause_ncq(struct mtip_port *port, reply = port->rxfis + RX_FIS_D2H_REG; task_file_data = readl(port->mmio+PORT_TFDATA); - if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT)) + if (fis->command == ATA_CMD_SEC_ERASE_UNIT) + clear_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); + + if ((task_file_data & 1)) return false; if (fis->command == ATA_CMD_SEC_ERASE_PREP) { set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags); + set_bit(MTIP_DDF_SEC_LOCK_BIT, &port->dd->dd_flag); port->ic_pause_timer = jiffies; return true; } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) && @@ -1900,7 +1904,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command, int rv = 0, xfer_sz = command[3]; if (xfer_sz) { - if (user_buffer) + if (!user_buffer) return -EFAULT; buf = dmam_alloc_coherent(&port->dd->pdev->dev, @@ -2043,7 +2047,7 @@ static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout) *timeout = 240000; /* 4 minutes */ break; case ATA_CMD_STANDBYNOW1: - *timeout = 10000; /* 10 seconds */ + *timeout = 120000; /* 2 minutes */ break; case 0xF7: case 0xFA: @@ -2588,9 +2592,6 @@ static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, if (!len || size) return 0; - if (size < 0) - return -EINVAL; - size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) @@ -2660,9 +2661,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, if (!len || size) return 0; - if (size < 0) - return -EINVAL; - size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", dd->port->flags); size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", @@ -3214,8 +3212,8 @@ static int mtip_hw_init(struct driver_data *dd) "Unable to check write protect progress\n"); else dev_info(&dd->pdev->dev, - "Write protect progress: %d%% (%d blocks)\n", - attr242.cur, attr242.data); + "Write protect progress: %u%% (%u blocks)\n", + attr242.cur, le32_to_cpu(attr242.data)); return rv; out3: @@ -3619,6 +3617,10 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio) bio_endio(bio, -ENODATA); return; } + if (unlikely(test_bit(MTIP_DDF_SEC_LOCK_BIT, &dd->dd_flag))) { + bio_endio(bio, -ENODATA); + return; + } } if (unlikely(!bio_has_data(bio))) { @@ -4168,7 +4170,13 @@ static void mtip_pci_shutdown(struct pci_dev *pdev) /* Table of device ids supported by this driver. 
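The mtip32xx table that follows replaces the single P320 entry with the whole Micron P320/P420 family. The shape of such a table is standard: one PCI_DEVICE() entry per vendor/device pair, a zero terminator, and MODULE_DEVICE_TABLE() so module autoloading can match on the IDs. A sketch in the same macro style as the driver; the vendor and device values mirror two of the IDs added below:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID   0x1344      /* Micron, as in the hunk below */
#define EXAMPLE_DEVICE_A    0x5150      /* P320H */
#define EXAMPLE_DEVICE_B    0x5151      /* P320M */

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
        { PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_A) },
        { PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_B) },
        { 0 }   /* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);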
*/ static DEFINE_PCI_DEVICE_TABLE(mtip_pci_tbl) = { - { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320H_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P320S_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P325M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420H_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P420M_DEVICE_ID) }, + { PCI_DEVICE(PCI_VENDOR_ID_MICRON, P425M_DEVICE_ID) }, { 0 } }; @@ -4199,12 +4207,12 @@ static int __init mtip_init(void) { int error; - printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); + pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); /* Allocate a major block device number to use with this driver. */ error = register_blkdev(0, MTIP_DRV_NAME); if (error <= 0) { - printk(KERN_ERR "Unable to register block device (%d)\n", + pr_err("Unable to register block device (%d)\n", error); return -EBUSY; } @@ -4213,7 +4221,7 @@ static int __init mtip_init(void) if (!dfs_parent) { dfs_parent = debugfs_create_dir("rssd", NULL); if (IS_ERR_OR_NULL(dfs_parent)) { - printk(KERN_WARNING "Error creating debugfs parent\n"); + pr_warn("Error creating debugfs parent\n"); dfs_parent = NULL; } } diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.h b/trunk/drivers/block/mtip32xx/mtip32xx.h index f51fc23d17bb..18627a1d04c5 100644 --- a/trunk/drivers/block/mtip32xx/mtip32xx.h +++ b/trunk/drivers/block/mtip32xx/mtip32xx.h @@ -76,7 +76,13 @@ /* Micron Vendor ID & P320x SSD Device ID */ #define PCI_VENDOR_ID_MICRON 0x1344 -#define P320_DEVICE_ID 0x5150 +#define P320H_DEVICE_ID 0x5150 +#define P320M_DEVICE_ID 0x5151 +#define P320S_DEVICE_ID 0x5152 +#define P325M_DEVICE_ID 0x5153 +#define P420H_DEVICE_ID 0x5160 +#define P420M_DEVICE_ID 0x5161 +#define P425M_DEVICE_ID 0x5163 /* Driver name and version strings */ #define MTIP_DRV_NAME "mtip32xx" @@ -131,10 +137,12 @@ enum { MTIP_PF_SVC_THD_STOP_BIT = 8, /* below are bit numbers in 'dd_flag' defined in driver_data */ + MTIP_DDF_SEC_LOCK_BIT = 0, MTIP_DDF_REMOVE_PENDING_BIT = 1, MTIP_DDF_OVER_TEMP_BIT = 2, MTIP_DDF_WRITE_PROTECT_BIT = 3, MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ + (1 << MTIP_DDF_SEC_LOCK_BIT) | \ (1 << MTIP_DDF_OVER_TEMP_BIT) | \ (1 << MTIP_DDF_WRITE_PROTECT_BIT)), diff --git a/trunk/drivers/block/nbd.c b/trunk/drivers/block/nbd.c index d07c9f7fded6..0c03411c59eb 100644 --- a/trunk/drivers/block/nbd.c +++ b/trunk/drivers/block/nbd.c @@ -449,6 +449,14 @@ static void nbd_clear_que(struct nbd_device *nbd) req->errors++; nbd_end_request(req); } + + while (!list_empty(&nbd->waiting_queue)) { + req = list_entry(nbd->waiting_queue.next, struct request, + queuelist); + list_del_init(&req->queuelist); + req->errors++; + nbd_end_request(req); + } } @@ -598,6 +606,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd->file = NULL; nbd_clear_que(nbd); BUG_ON(!list_empty(&nbd->queue_head)); + BUG_ON(!list_empty(&nbd->waiting_queue)); if (file) fput(file); return 0; diff --git a/trunk/drivers/block/nvme.c b/trunk/drivers/block/nvme.c index 38a2d0631882..ad16c68c8645 100644 --- a/trunk/drivers/block/nvme.c +++ b/trunk/drivers/block/nvme.c @@ -79,6 +79,7 @@ struct nvme_dev { char serial[20]; char model[40]; char firmware_rev[8]; + u32 max_hw_sectors; }; /* @@ -835,15 +836,15 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns, } static int nvme_get_features(struct nvme_dev *dev, unsigned fid, - unsigned dword11, dma_addr_t 
dma_addr) + unsigned nsid, dma_addr_t dma_addr) { struct nvme_command c; memset(&c, 0, sizeof(c)); c.features.opcode = nvme_admin_get_features; + c.features.nsid = cpu_to_le32(nsid); c.features.prp1 = cpu_to_le64(dma_addr); c.features.fid = cpu_to_le32(fid); - c.features.dword11 = cpu_to_le32(dword11); return nvme_submit_admin_cmd(dev, &c, NULL); } @@ -862,11 +863,51 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid, return nvme_submit_admin_cmd(dev, &c, result); } +/** + * nvme_cancel_ios - Cancel outstanding I/Os + * @queue: The queue to cancel I/Os on + * @timeout: True to only cancel I/Os which have timed out + */ +static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout) +{ + int depth = nvmeq->q_depth - 1; + struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); + unsigned long now = jiffies; + int cmdid; + + for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { + void *ctx; + nvme_completion_fn fn; + static struct nvme_completion cqe = { + .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, + }; + + if (timeout && !time_after(now, info[cmdid].timeout)) + continue; + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid); + ctx = cancel_cmdid(nvmeq, cmdid, &fn); + fn(nvmeq->dev, ctx, &cqe); + } +} + +static void nvme_free_queue_mem(struct nvme_queue *nvmeq) +{ + dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), + (void *)nvmeq->cqes, nvmeq->cq_dma_addr); + dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), + nvmeq->sq_cmds, nvmeq->sq_dma_addr); + kfree(nvmeq); +} + static void nvme_free_queue(struct nvme_dev *dev, int qid) { struct nvme_queue *nvmeq = dev->queues[qid]; int vector = dev->entry[nvmeq->cq_vector].vector; + spin_lock_irq(&nvmeq->q_lock); + nvme_cancel_ios(nvmeq, false); + spin_unlock_irq(&nvmeq->q_lock); + irq_set_affinity_hint(vector, NULL); free_irq(vector, nvmeq); @@ -876,18 +917,15 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid) adapter_delete_cq(dev, qid); } - dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth), - (void *)nvmeq->cqes, nvmeq->cq_dma_addr); - dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth), - nvmeq->sq_cmds, nvmeq->sq_dma_addr); - kfree(nvmeq); + nvme_free_queue_mem(nvmeq); } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, int depth, int vector) { struct device *dmadev = &dev->pci_dev->dev; - unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info)); + unsigned extra = DIV_ROUND_UP(depth, 8) + (depth * + sizeof(struct nvme_cmd_info)); struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL); if (!nvmeq) return NULL; @@ -975,7 +1013,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev, static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev) { - int result; + int result = 0; u32 aqa; u64 cap; unsigned long timeout; @@ -1005,17 +1043,22 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev) timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies; dev->db_stride = NVME_CAP_STRIDE(cap); - while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { + while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) { msleep(100); if (fatal_signal_pending(current)) - return -EINTR; + result = -EINTR; if (time_after(jiffies, timeout)) { dev_err(&dev->pci_dev->dev, "Device not ready; aborting initialisation\n"); - return -ENODEV; + result = -ENODEV; } } + if (result) { + nvme_free_queue_mem(nvmeq); + return result; + } + result = queue_request_irq(dev, nvmeq, "nvme admin"); dev->queues[0] = 
nvmeq; return result; @@ -1037,6 +1080,8 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write, offset = offset_in_page(addr); count = DIV_ROUND_UP(offset + length, PAGE_SIZE); pages = kcalloc(count, sizeof(*pages), GFP_KERNEL); + if (!pages) + return ERR_PTR(-ENOMEM); err = get_user_pages_fast(addr, count, 1, pages); if (err < count) { @@ -1146,14 +1191,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) return status; } -static int nvme_user_admin_cmd(struct nvme_ns *ns, +static int nvme_user_admin_cmd(struct nvme_dev *dev, struct nvme_admin_cmd __user *ucmd) { - struct nvme_dev *dev = ns->dev; struct nvme_admin_cmd cmd; struct nvme_command c; int status, length; - struct nvme_iod *iod; + struct nvme_iod *uninitialized_var(iod); if (!capable(CAP_SYS_ADMIN)) return -EACCES; @@ -1204,7 +1248,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, case NVME_IOCTL_ID: return ns->ns_id; case NVME_IOCTL_ADMIN_CMD: - return nvme_user_admin_cmd(ns, (void __user *)arg); + return nvme_user_admin_cmd(ns->dev, (void __user *)arg); case NVME_IOCTL_SUBMIT_IO: return nvme_submit_io(ns, (void __user *)arg); default: @@ -1218,26 +1262,6 @@ static const struct block_device_operations nvme_fops = { .compat_ioctl = nvme_ioctl, }; -static void nvme_timeout_ios(struct nvme_queue *nvmeq) -{ - int depth = nvmeq->q_depth - 1; - struct nvme_cmd_info *info = nvme_cmd_info(nvmeq); - unsigned long now = jiffies; - int cmdid; - - for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) { - void *ctx; - nvme_completion_fn fn; - static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, }; - - if (!time_after(now, info[cmdid].timeout)) - continue; - dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid); - ctx = cancel_cmdid(nvmeq, cmdid, &fn); - fn(nvmeq->dev, ctx, &cqe); - } -} - static void nvme_resubmit_bios(struct nvme_queue *nvmeq) { while (bio_list_peek(&nvmeq->sq_cong)) { @@ -1269,7 +1293,7 @@ static int nvme_kthread(void *data) spin_lock_irq(&nvmeq->q_lock); if (nvme_process_cq(nvmeq)) printk("process_cq did something\n"); - nvme_timeout_ios(nvmeq); + nvme_cancel_ios(nvmeq, true); nvme_resubmit_bios(nvmeq); spin_unlock_irq(&nvmeq->q_lock); } @@ -1339,6 +1363,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid, ns->disk = disk; lbaf = id->flbas & 0xf; ns->lba_shift = id->lbaf[lbaf].ds; + blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); + if (dev->max_hw_sectors) + blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); disk->major = nvme_major; disk->minors = NVME_MINORS; @@ -1383,7 +1410,7 @@ static int set_queue_count(struct nvme_dev *dev, int count) static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) { - int result, cpu, i, nr_io_queues, db_bar_size; + int result, cpu, i, nr_io_queues, db_bar_size, q_depth; nr_io_queues = num_online_cpus(); result = set_queue_count(dev, nr_io_queues); @@ -1429,9 +1456,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev) cpu = cpumask_next(cpu, cpu_online_mask); } + q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1, + NVME_Q_DEPTH); for (i = 0; i < nr_io_queues; i++) { - dev->queues[i + 1] = nvme_create_queue(dev, i + 1, - NVME_Q_DEPTH, i); + dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i); if (IS_ERR(dev->queues[i + 1])) return PTR_ERR(dev->queues[i + 1]); dev->queue_count++; @@ -1480,6 +1508,10 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev) memcpy(dev->serial, 
ctrl->sn, sizeof(ctrl->sn)); memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn)); memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr)); + if (ctrl->mdts) { + int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; + dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9); + } id_ns = mem; for (i = 1; i <= nn; i++) { @@ -1523,8 +1555,6 @@ static int nvme_dev_remove(struct nvme_dev *dev) list_del(&dev->node); spin_unlock(&dev_list_lock); - /* TODO: wait all I/O finished or cancel them */ - list_for_each_entry_safe(ns, next, &dev->namespaces, list) { list_del(&ns->list); del_gendisk(ns->disk); @@ -1560,15 +1590,33 @@ static void nvme_release_prp_pools(struct nvme_dev *dev) dma_pool_destroy(dev->prp_small_pool); } -/* XXX: Use an ida or something to let remove / add work correctly */ -static void nvme_set_instance(struct nvme_dev *dev) +static DEFINE_IDA(nvme_instance_ida); + +static int nvme_set_instance(struct nvme_dev *dev) { - static int instance; - dev->instance = instance++; + int instance, error; + + do { + if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL)) + return -ENODEV; + + spin_lock(&dev_list_lock); + error = ida_get_new(&nvme_instance_ida, &instance); + spin_unlock(&dev_list_lock); + } while (error == -EAGAIN); + + if (error) + return -ENODEV; + + dev->instance = instance; + return 0; } static void nvme_release_instance(struct nvme_dev *dev) { + spin_lock(&dev_list_lock); + ida_remove(&nvme_instance_ida, dev->instance); + spin_unlock(&dev_list_lock); } static int __devinit nvme_probe(struct pci_dev *pdev, @@ -1601,7 +1649,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - nvme_set_instance(dev); + result = nvme_set_instance(dev); + if (result) + goto disable; + dev->entry[0].vector = pdev->irq; result = nvme_setup_prp_pools(dev); @@ -1704,15 +1755,17 @@ static struct pci_driver nvme_driver = { static int __init nvme_init(void) { - int result = -EBUSY; + int result; nvme_thread = kthread_run(nvme_kthread, NULL, "nvme"); if (IS_ERR(nvme_thread)) return PTR_ERR(nvme_thread); - nvme_major = register_blkdev(nvme_major, "nvme"); - if (nvme_major <= 0) + result = register_blkdev(nvme_major, "nvme"); + if (result < 0) goto kill_kthread; + else if (result > 0) + nvme_major = result; result = pci_register_driver(&nvme_driver); if (result) diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c index 9917943a3572..54a55f03115d 100644 --- a/trunk/drivers/block/rbd.c +++ b/trunk/drivers/block/rbd.c @@ -246,13 +246,12 @@ static int rbd_open(struct block_device *bdev, fmode_t mode) { struct rbd_device *rbd_dev = bdev->bd_disk->private_data; - rbd_get_dev(rbd_dev); - - set_device_ro(bdev, rbd_dev->read_only); - if ((mode & FMODE_WRITE) && rbd_dev->read_only) return -EROFS; + rbd_get_dev(rbd_dev); + set_device_ro(bdev, rbd_dev->read_only); + return 0; } diff --git a/trunk/drivers/block/xen-blkback/blkback.c b/trunk/drivers/block/xen-blkback/blkback.c index 73f196ca713f..c6decb901e5e 100644 --- a/trunk/drivers/block/xen-blkback/blkback.c +++ b/trunk/drivers/block/xen-blkback/blkback.c @@ -337,7 +337,7 @@ static void xen_blkbk_unmap(struct pending_req *req) invcount++; } - ret = gnttab_unmap_refs(unmap, pages, invcount, false); + ret = gnttab_unmap_refs(unmap, NULL, pages, invcount); BUG_ON(ret); } diff --git a/trunk/drivers/bluetooth/ath3k.c b/trunk/drivers/bluetooth/ath3k.c index 10308cd8a7ed..fc2de5528dcc 100644 --- a/trunk/drivers/bluetooth/ath3k.c +++ 
b/trunk/drivers/bluetooth/ath3k.c @@ -79,12 +79,14 @@ static struct usb_device_id ath3k_table[] = { { USB_DEVICE(0x13d3, 0x3362) }, { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0930, 0x0219) }, + { USB_DEVICE(0x0489, 0xe057) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C) }, + { USB_DEVICE(0x0489, 0xE036) }, { } /* Terminating entry */ }; @@ -104,9 +106,11 @@ static struct usb_device_id ath3k_blist_tbl[] = { { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; diff --git a/trunk/drivers/bluetooth/btusb.c b/trunk/drivers/bluetooth/btusb.c index e27221411036..654e248763ef 100644 --- a/trunk/drivers/bluetooth/btusb.c +++ b/trunk/drivers/bluetooth/btusb.c @@ -52,6 +52,9 @@ static struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, + /* Apple-specific (Broadcom) devices */ + { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01) }, + /* Broadcom SoftSailing reporting vendor specific */ { USB_DEVICE(0x0a5c, 0x21e1) }, @@ -94,15 +97,14 @@ static struct usb_device_id btusb_table[] = { /* Broadcom BCM20702A0 */ { USB_DEVICE(0x0489, 0xe042) }, - { USB_DEVICE(0x0a5c, 0x21e3) }, - { USB_DEVICE(0x0a5c, 0x21e6) }, - { USB_DEVICE(0x0a5c, 0x21e8) }, - { USB_DEVICE(0x0a5c, 0x21f3) }, { USB_DEVICE(0x413c, 0x8197) }, /* Foxconn - Hon Hai */ { USB_DEVICE(0x0489, 0xe033) }, + /*Broadcom devices with vendor specific id */ + { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01) }, + { } /* Terminating entry */ }; @@ -133,12 +135,14 @@ static struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, + { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, diff --git a/trunk/drivers/char/agp/intel-agp.h b/trunk/drivers/char/agp/intel-agp.h index 57226424690c..6ec0fff79bc2 100644 --- a/trunk/drivers/char/agp/intel-agp.h +++ b/trunk/drivers/char/agp/intel-agp.h @@ -64,6 +64,7 @@ #define I830_PTE_SYSTEM_CACHED 0x00000006 /* GT PTE cache control fields */ #define GEN6_PTE_UNCACHED 0x00000002 +#define HSW_PTE_UNCACHED 0x00000000 #define GEN6_PTE_LLC 0x00000004 #define GEN6_PTE_LLC_MLC 0x00000006 #define GEN6_PTE_GFDT 0x00000008 @@ -239,16 +240,45 @@ #define PCI_DEVICE_ID_INTEL_IVYBRIDGE_S_GT2_IG 0x016A #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_HB 0x0F00 /* VLV1 */ #define PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG 0x0F30 -#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ +#define PCI_DEVICE_ID_INTEL_HASWELL_HB 0x0400 /* Desktop */ #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG 0x0402 #define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG 
0x0412 -#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ +#define PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG 0x0422 +#define PCI_DEVICE_ID_INTEL_HASWELL_M_HB 0x0404 /* Mobile */ #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG 0x0406 #define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG 0x0416 -#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ +#define PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG 0x0426 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_HB 0x0408 /* Server */ #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG 0x040a #define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG 0x041a -#define PCI_DEVICE_ID_INTEL_HASWELL_SDV 0x0c16 /* SDV */ -#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG 0x042a +#define PCI_DEVICE_ID_INTEL_HASWELL_E_HB 0x0c04 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG 0x0C02 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG 0x0C12 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG 0x0C22 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG 0x0C06 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG 0x0C16 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG 0x0C26 +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG 0x0C0A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG 0x0C1A +#define PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG 0x0C2A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG 0x0A02 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG 0x0A12 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG 0x0A22 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG 0x0A06 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG 0x0A16 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG 0x0A26 +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG 0x0A0A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG 0x0A1A +#define PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG 0x0A2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG 0x0D12 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG 0x0D22 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG 0x0D32 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG 0x0D16 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG 0x0D26 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG 0x0D36 +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG 0x0D1A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG 0x0D2A +#define PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG 0x0D3A #endif diff --git a/trunk/drivers/char/agp/intel-gtt.c b/trunk/drivers/char/agp/intel-gtt.c index 9ed92ef5829b..58e32f7c3229 100644 --- a/trunk/drivers/char/agp/intel-gtt.c +++ b/trunk/drivers/char/agp/intel-gtt.c @@ -1156,6 +1156,30 @@ static bool gen6_check_flags(unsigned int flags) return true; } +static void haswell_write_entry(dma_addr_t addr, unsigned int entry, + unsigned int flags) +{ + unsigned int type_mask = flags & ~AGP_USER_CACHED_MEMORY_GFDT; + unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT; + u32 pte_flags; + + if (type_mask == AGP_USER_MEMORY) + pte_flags = HSW_PTE_UNCACHED | I810_PTE_VALID; + else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) { + pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } else { /* set 'normal'/'cached' to LLC by default */ + pte_flags = GEN6_PTE_LLC | I810_PTE_VALID; + if (gfdt) + pte_flags |= GEN6_PTE_GFDT; + } + + /* gen6 has bit11-4 for physical addr bit39-32 */ + addr |= (addr >> 28) & 0xff0; + writel(addr | pte_flags, intel_private.gtt + entry); +} + static void gen6_write_entry(dma_addr_t addr, unsigned int entry, unsigned int flags) { @@ -1382,6 +1406,15 @@ 
static const struct intel_gtt_driver sandybridge_gtt_driver = { .check_flags = gen6_check_flags, .chipset_flush = i9xx_chipset_flush, }; +static const struct intel_gtt_driver haswell_gtt_driver = { + .gen = 6, + .setup = i9xx_setup, + .cleanup = gen6_cleanup, + .write_entry = haswell_write_entry, + .dma_mask_size = 40, + .check_flags = gen6_check_flags, + .chipset_flush = i9xx_chipset_flush, +}; static const struct intel_gtt_driver valleyview_gtt_driver = { .gen = 7, .setup = i9xx_setup, @@ -1499,19 +1532,77 @@ static const struct intel_gtt_driver_description { { PCI_DEVICE_ID_INTEL_VALLEYVIEW_IG, "ValleyView", &valleyview_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_D_GT1_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_M_GT1_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_S_GT1_IG, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_IG, - "Haswell", &sandybridge_gtt_driver }, - { PCI_DEVICE_ID_INTEL_HASWELL_SDV, - "Haswell", &sandybridge_gtt_driver }, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_SDV_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_ULT_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_D_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_IG, + 
"Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_M_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT1_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_IG, + "Haswell", &haswell_gtt_driver }, + { PCI_DEVICE_ID_INTEL_HASWELL_CRW_S_GT2_PLUS_IG, + "Haswell", &haswell_gtt_driver }, { 0, NULL, NULL } }; diff --git a/trunk/drivers/char/hw_random/omap-rng.c b/trunk/drivers/char/hw_random/omap-rng.c index d706bd0e9e80..4fbdceb6f773 100644 --- a/trunk/drivers/char/hw_random/omap-rng.c +++ b/trunk/drivers/char/hw_random/omap-rng.c @@ -160,7 +160,7 @@ static int __exit omap_rng_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int omap_rng_suspend(struct device *dev) { diff --git a/trunk/drivers/char/tpm/tpm_tis.c b/trunk/drivers/char/tpm/tpm_tis.c index 89682fa8801e..c4be3519a587 100644 --- a/trunk/drivers/char/tpm/tpm_tis.c +++ b/trunk/drivers/char/tpm/tpm_tis.c @@ -807,6 +807,7 @@ module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); #endif +#ifdef CONFIG_PM_SLEEP static int tpm_tis_resume(struct device *dev) { struct tpm_chip *chip = dev_get_drvdata(dev); @@ -816,6 +817,7 @@ static int tpm_tis_resume(struct device *dev) return tpm_pm_resume(dev); } +#endif static SIMPLE_DEV_PM_OPS(tpm_tis_pm, tpm_pm_suspend, tpm_tis_resume); diff --git a/trunk/drivers/clk/Makefile b/trunk/drivers/clk/Makefile index 5869ea387054..72ce247a0e8d 100644 --- a/trunk/drivers/clk/Makefile +++ b/trunk/drivers/clk/Makefile @@ -1,4 +1,5 @@ # common clock types +obj-$(CONFIG_HAVE_CLK) += clk-devres.o obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \ clk-mux.o clk-divider.o clk-fixed-factor.o diff --git a/trunk/drivers/clk/clk-devres.c b/trunk/drivers/clk/clk-devres.c new file mode 100644 index 000000000000..8f571548870f --- /dev/null +++ b/trunk/drivers/clk/clk-devres.c @@ -0,0 +1,55 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include + +static void devm_clk_release(struct device *dev, void *res) +{ + clk_put(*(struct clk **)res); +} + +struct clk *devm_clk_get(struct device *dev, const char *id) +{ + struct clk **ptr, *clk; + + ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + clk = clk_get(dev, id); + if (!IS_ERR(clk)) { + *ptr = clk; + devres_add(dev, ptr); + } else { + devres_free(ptr); + } + + return clk; +} +EXPORT_SYMBOL(devm_clk_get); + +static int devm_clk_match(struct device *dev, void *res, void *data) +{ + struct clk **c = res; + if (!c || !*c) { + WARN_ON(!c || !*c); + return 0; + } + return *c == data; +} + +void devm_clk_put(struct device *dev, struct clk *clk) +{ + int ret; + + ret = devres_release(dev, devm_clk_release, devm_clk_match, clk); + + WARN_ON(ret); +} +EXPORT_SYMBOL(devm_clk_put); diff --git a/trunk/drivers/clk/clkdev.c b/trunk/drivers/clk/clkdev.c index d423c9bdd71a..442a31363873 100644 --- a/trunk/drivers/clk/clkdev.c +++ b/trunk/drivers/clk/clkdev.c @@ -171,51 +171,6 @@ void clk_put(struct clk *clk) } EXPORT_SYMBOL(clk_put); -static void devm_clk_release(struct device *dev, void *res) -{ - clk_put(*(struct clk **)res); -} - -struct clk *devm_clk_get(struct device *dev, const char *id) -{ - struct clk **ptr, *clk; - - ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL); - if (!ptr) - return ERR_PTR(-ENOMEM); - - clk = clk_get(dev, id); - if (!IS_ERR(clk)) { - *ptr = clk; - devres_add(dev, ptr); - } else { - devres_free(ptr); - } - - return clk; -} -EXPORT_SYMBOL(devm_clk_get); - -static int devm_clk_match(struct device *dev, void *res, void *data) -{ - struct clk **c = res; - if (!c || !*c) { - WARN_ON(!c || !*c); - return 0; - } - return *c == data; -} - -void devm_clk_put(struct device *dev, struct clk *clk) -{ - int ret; - - ret = devres_destroy(dev, devm_clk_release, devm_clk_match, clk); - - WARN_ON(ret); -} -EXPORT_SYMBOL(devm_clk_put); - void clkdev_add(struct clk_lookup *cl) { mutex_lock(&clocks_mutex); diff --git a/trunk/drivers/clocksource/cs5535-clockevt.c b/trunk/drivers/clocksource/cs5535-clockevt.c index 540795cd0760..d9279385304d 100644 --- a/trunk/drivers/clocksource/cs5535-clockevt.c +++ b/trunk/drivers/clocksource/cs5535-clockevt.c @@ -53,7 +53,7 @@ static struct cs5535_mfgpt_timer *cs5535_event_clock; #define MFGPT_PERIODIC (MFGPT_HZ / HZ) /* - * The MFPGT timers on the CS5536 provide us with suitable timers to use + * The MFGPT timers on the CS5536 provide us with suitable timers to use * as clock event sources - not as good as a HPET or APIC, but certainly * better than the PIT. This isn't a general purpose MFGPT driver, but * a simplified one designed specifically to act as a clock event source. 
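Note on the clk-devres.c hunk above: it moves the devm_clk_get()/devm_clk_put() helpers out of clkdev.c so they are built whenever HAVE_CLK is set. The point of devres is that a resource is registered together with a release callback and is torn down automatically when the device is unbound, so drivers lose their explicit clk_put() calls. A rough, self-contained user-space analogue of that bookkeeping follows; toy_device, devres_add() and devres_release_all() are made-up stand-ins, not the kernel's devres API.

#include <stdio.h>
#include <stdlib.h>

/* Toy device-managed resource list: each entry pairs data with a release hook. */
struct res {
	void (*release)(void *data);
	void *data;
	struct res *next;
};

struct toy_device { struct res *resources; };

static int devres_add(struct toy_device *dev, void (*release)(void *), void *data)
{
	struct res *r = malloc(sizeof(*r));
	if (!r)
		return -1;
	r->release = release;
	r->data = data;
	r->next = dev->resources;
	dev->resources = r;
	return 0;
}

/* Called once on "driver detach": every registered release hook runs. */
static void devres_release_all(struct toy_device *dev)
{
	struct res *r = dev->resources;
	while (r) {
		struct res *next = r->next;
		r->release(r->data);
		free(r);
		r = next;
	}
	dev->resources = NULL;
}

static void release_buffer(void *data)
{
	printf("releasing %p\n", data);
	free(data);
}

int main(void)
{
	struct toy_device dev = { NULL };
	void *buf = malloc(64);

	if (buf && devres_add(&dev, release_buffer, buf) < 0)
		free(buf);
	devres_release_all(&dev);	/* buffer freed here, no explicit cleanup path */
	return 0;
}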
@@ -144,7 +144,7 @@ static int __init cs5535_mfgpt_init(void) timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING); if (!timer) { - printk(KERN_ERR DRV_NAME ": Could not allocate MFPGT timer\n"); + printk(KERN_ERR DRV_NAME ": Could not allocate MFGPT timer\n"); return -ENODEV; } cs5535_event_clock = timer; diff --git a/trunk/drivers/cpufreq/omap-cpufreq.c b/trunk/drivers/cpufreq/omap-cpufreq.c index 17fa04d08be9..b47034e650a5 100644 --- a/trunk/drivers/cpufreq/omap-cpufreq.c +++ b/trunk/drivers/cpufreq/omap-cpufreq.c @@ -218,7 +218,7 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu); - if (atomic_inc_return(&freq_table_users) == 1) + if (!freq_table) result = opp_init_cpufreq_table(mpu_dev, &freq_table); if (result) { @@ -227,6 +227,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) goto fail_ck; } + atomic_inc_return(&freq_table_users); + result = cpufreq_frequency_table_cpuinfo(policy, freq_table); if (result) goto fail_table; diff --git a/trunk/drivers/cpufreq/pcc-cpufreq.c b/trunk/drivers/cpufreq/pcc-cpufreq.c index cdc02ac8f41a..503996a94a6a 100644 --- a/trunk/drivers/cpufreq/pcc-cpufreq.c +++ b/trunk/drivers/cpufreq/pcc-cpufreq.c @@ -454,6 +454,7 @@ static int __init pcc_cpufreq_probe(void) mem_resource->address_length); if (pcch_virt_addr == NULL) { pr_debug("probe: could not map shared mem region\n"); + ret = -ENOMEM; goto out_free; } pcch_hdr = pcch_virt_addr; diff --git a/trunk/drivers/cpufreq/powernow-k8.c b/trunk/drivers/cpufreq/powernow-k8.c index c0e816468e30..1a40935c85fd 100644 --- a/trunk/drivers/cpufreq/powernow-k8.c +++ b/trunk/drivers/cpufreq/powernow-k8.c @@ -35,7 +35,6 @@ #include #include #include -#include /* for current / set_cpus_allowed() */ #include #include @@ -1139,16 +1138,23 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, return res; } -/* Driver entry point to switch to the target frequency */ -static int powernowk8_target(struct cpufreq_policy *pol, - unsigned targfreq, unsigned relation) +struct powernowk8_target_arg { + struct cpufreq_policy *pol; + unsigned targfreq; + unsigned relation; +}; + +static long powernowk8_target_fn(void *arg) { - cpumask_var_t oldmask; + struct powernowk8_target_arg *pta = arg; + struct cpufreq_policy *pol = pta->pol; + unsigned targfreq = pta->targfreq; + unsigned relation = pta->relation; struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); u32 checkfid; u32 checkvid; unsigned int newstate; - int ret = -EIO; + int ret; if (!data) return -EINVAL; @@ -1156,29 +1162,16 @@ static int powernowk8_target(struct cpufreq_policy *pol, checkfid = data->currfid; checkvid = data->currvid; - /* only run on specific CPU from here on. 
*/ - /* This is poor form: use a workqueue or smp_call_function_single */ - if (!alloc_cpumask_var(&oldmask, GFP_KERNEL)) - return -ENOMEM; - - cpumask_copy(oldmask, tsk_cpus_allowed(current)); - set_cpus_allowed_ptr(current, cpumask_of(pol->cpu)); - - if (smp_processor_id() != pol->cpu) { - printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); - goto err_out; - } - if (pending_bit_stuck()) { printk(KERN_ERR PFX "failing targ, change pending bit set\n"); - goto err_out; + return -EIO; } pr_debug("targ: cpu %d, %d kHz, min %d, max %d, relation %d\n", pol->cpu, targfreq, pol->min, pol->max, relation); if (query_current_values_with_pending_wait(data)) - goto err_out; + return -EIO; if (cpu_family != CPU_HW_PSTATE) { pr_debug("targ: curr fid 0x%x, vid 0x%x\n", @@ -1196,7 +1189,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) - goto err_out; + return -EIO; mutex_lock(&fidvid_mutex); @@ -1209,9 +1202,8 @@ static int powernowk8_target(struct cpufreq_policy *pol, ret = transition_frequency_fidvid(data, newstate); if (ret) { printk(KERN_ERR PFX "transition frequency failed\n"); - ret = 1; mutex_unlock(&fidvid_mutex); - goto err_out; + return 1; } mutex_unlock(&fidvid_mutex); @@ -1220,12 +1212,25 @@ static int powernowk8_target(struct cpufreq_policy *pol, data->powernow_table[newstate].index); else pol->cur = find_khz_freq_from_fid(data->currfid); - ret = 0; -err_out: - set_cpus_allowed_ptr(current, oldmask); - free_cpumask_var(oldmask); - return ret; + return 0; +} + +/* Driver entry point to switch to the target frequency */ +static int powernowk8_target(struct cpufreq_policy *pol, + unsigned targfreq, unsigned relation) +{ + struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, + .relation = relation }; + + /* + * Must run on @pol->cpu. cpufreq core is responsible for ensuring + * that we're bound to the current CPU and pol->cpu stays online. 
+ */ + if (smp_processor_id() == pol->cpu) + return powernowk8_target_fn(&pta); + else + return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); } /* Driver entry point to verify the policy and range of frequencies */ diff --git a/trunk/drivers/cpuidle/coupled.c b/trunk/drivers/cpuidle/coupled.c index 2c9bf2692232..3265844839bf 100644 --- a/trunk/drivers/cpuidle/coupled.c +++ b/trunk/drivers/cpuidle/coupled.c @@ -678,10 +678,22 @@ static int cpuidle_coupled_cpu_notify(struct notifier_block *nb, int cpu = (unsigned long)hcpu; struct cpuidle_device *dev; + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_UP_PREPARE: + case CPU_DOWN_PREPARE: + case CPU_ONLINE: + case CPU_DEAD: + case CPU_UP_CANCELED: + case CPU_DOWN_FAILED: + break; + default: + return NOTIFY_OK; + } + mutex_lock(&cpuidle_lock); dev = per_cpu(cpuidle_devices, cpu); - if (!dev->coupled) + if (!dev || !dev->coupled) goto out; switch (action & ~CPU_TASKS_FROZEN) { diff --git a/trunk/drivers/crypto/caam/jr.c b/trunk/drivers/crypto/caam/jr.c index 53c8c51d5881..93d14070141a 100644 --- a/trunk/drivers/crypto/caam/jr.c +++ b/trunk/drivers/crypto/caam/jr.c @@ -63,7 +63,7 @@ static void caam_jr_dequeue(unsigned long devarg) head = ACCESS_ONCE(jrp->head); - spin_lock_bh(&jrp->outlock); + spin_lock(&jrp->outlock); sw_idx = tail = jrp->tail; hw_idx = jrp->out_ring_read_index; @@ -115,7 +115,7 @@ static void caam_jr_dequeue(unsigned long devarg) jrp->tail = tail; } - spin_unlock_bh(&jrp->outlock); + spin_unlock(&jrp->outlock); /* Finally, execute user's callback */ usercall(dev, userdesc, userstatus, userarg); @@ -236,14 +236,14 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, return -EIO; } - spin_lock(&jrp->inplock); + spin_lock_bh(&jrp->inplock); head = jrp->head; tail = ACCESS_ONCE(jrp->tail); if (!rd_reg32(&jrp->rregs->inpring_avail) || CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { - spin_unlock(&jrp->inplock); + spin_unlock_bh(&jrp->inplock); dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE); return -EBUSY; } @@ -265,7 +265,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, wr_reg32(&jrp->rregs->inpring_jobadd, 1); - spin_unlock(&jrp->inplock); + spin_unlock_bh(&jrp->inplock); return 0; } diff --git a/trunk/drivers/crypto/caam/key_gen.c b/trunk/drivers/crypto/caam/key_gen.c index 002888185f17..d216cd3cc569 100644 --- a/trunk/drivers/crypto/caam/key_gen.c +++ b/trunk/drivers/crypto/caam/key_gen.c @@ -120,3 +120,4 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len, return ret; } +EXPORT_SYMBOL(gen_split_key); diff --git a/trunk/drivers/crypto/hifn_795x.c b/trunk/drivers/crypto/hifn_795x.c index c9c4befb5a8d..df14358d7fa1 100644 --- a/trunk/drivers/crypto/hifn_795x.c +++ b/trunk/drivers/crypto/hifn_795x.c @@ -821,8 +821,8 @@ static int hifn_register_rng(struct hifn_device *dev) /* * We must wait at least 256 Pk_clk cycles between two reads of the rng. 
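Note on the powernow-k8 conversion that ends above: the old set_cpus_allowed_ptr() juggling is replaced by work_on_cpu(), the three parameters are packed into a struct so they fit the single void * argument the callback receives, and the direct call is kept for the common case where we are already on pol->cpu. A small stand-alone sketch of that argument-packing pattern; run_elsewhere() is an invented stand-in for work_on_cpu(), and the numbers are illustrative only.

#include <stdio.h>

struct target_arg {		/* mirrors the role of powernowk8_target_arg */
	int cpu;
	unsigned int target_khz;
	unsigned int relation;
};

static long do_target(void *data)
{
	struct target_arg *arg = data;

	printf("switching cpu %d to %u kHz (relation %u)\n",
	       arg->cpu, arg->target_khz, arg->relation);
	return 0;
}

/* Stand-in for work_on_cpu(): here it just invokes the callback in place. */
static long run_elsewhere(int cpu, long (*fn)(void *), void *data)
{
	(void)cpu;
	return fn(data);
}

int main(void)
{
	struct target_arg arg = { .cpu = 1, .target_khz = 1800000, .relation = 0 };
	int current_cpu = 0;

	/* Call directly when already on the right CPU, otherwise hand off. */
	long ret = (current_cpu == arg.cpu) ? do_target(&arg)
					    : run_elsewhere(arg.cpu, do_target, &arg);
	return (int)ret;
}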
*/ - dev->rng_wait_time = DIV_ROUND_UP(NSEC_PER_SEC, dev->pk_clk_freq) * - 256; + dev->rng_wait_time = DIV_ROUND_UP_ULL(NSEC_PER_SEC, + dev->pk_clk_freq) * 256; dev->rng.name = dev->name; dev->rng.data_present = hifn_rng_data_present, diff --git a/trunk/drivers/dma/at_hdmac.c b/trunk/drivers/dma/at_hdmac.c index 3934fcc4e00b..17d6958342e7 100644 --- a/trunk/drivers/dma/at_hdmac.c +++ b/trunk/drivers/dma/at_hdmac.c @@ -168,9 +168,9 @@ static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) } /** - * atc_desc_chain - build chain adding a descripor - * @first: address of first descripor of the chain - * @prev: address of previous descripor of the chain + * atc_desc_chain - build chain adding a descriptor + * @first: address of first descriptor of the chain + * @prev: address of previous descriptor of the chain * @desc: descriptor to queue * * Called from prep_* functions @@ -661,7 +661,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, flags); if (unlikely(!atslave || !sg_len)) { - dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); + dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n"); return NULL; } @@ -689,6 +689,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), + "prep_slave_sg: sg(%d) data length is zero\n", i); + goto err; + } mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; @@ -724,6 +729,11 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, mem = sg_dma_address(sg); len = sg_dma_len(sg); + if (unlikely(!len)) { + dev_dbg(chan2dev(chan), + "prep_slave_sg: sg(%d) data length is zero\n", i); + goto err; + } mem_width = 2; if (unlikely(mem & 3 || len & 3)) mem_width = 0; @@ -757,6 +767,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, err_desc_get: dev_err(chan2dev(chan), "not enough descriptors available\n"); +err: atc_desc_put(atchan, first); return NULL; } @@ -785,7 +796,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr, } /** - * atc_dma_cyclic_fill_desc - Fill one period decriptor + * atc_dma_cyclic_fill_desc - Fill one period descriptor */ static int atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, diff --git a/trunk/drivers/dma/ep93xx_dma.c b/trunk/drivers/dma/ep93xx_dma.c index c64917ec313d..bb02fd981afb 100644 --- a/trunk/drivers/dma/ep93xx_dma.c +++ b/trunk/drivers/dma/ep93xx_dma.c @@ -1118,7 +1118,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, * @chan: channel * @dma_addr: DMA mapped address of the buffer * @buf_len: length of the buffer (in bytes) - * @period_len: lenght of a single period + * @period_len: length of a single period * @dir: direction of the operation * @context: operation context (ignored) * diff --git a/trunk/drivers/dma/fsldma.c b/trunk/drivers/dma/fsldma.c index 8f84761f98ba..094437b9d823 100644 --- a/trunk/drivers/dma/fsldma.c +++ b/trunk/drivers/dma/fsldma.c @@ -1015,7 +1015,7 @@ static irqreturn_t fsldma_chan_irq(int irq, void *data) /* * Programming Error * The DMA_INTERRUPT async_tx is a NULL transfer, which will - * triger a PE interrupt. + * trigger a PE interrupt. 
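Note on the at_hdmac change above: prep_slave_sg() now rejects a scatterlist containing a zero-length entry instead of programming a bogus descriptor. The check is just an early validation pass over the list; a minimal sketch with a plain array standing in for the real scatterlist types follows.

#include <stdio.h>
#include <stddef.h>

struct sg_entry {		/* stand-in for one scatterlist element */
	unsigned long addr;
	unsigned int len;
};

/* Returns 0 if every entry is usable, -1 (with the bad index) otherwise. */
static int validate_sg(const struct sg_entry *sg, size_t n, size_t *bad)
{
	for (size_t i = 0; i < n; i++) {
		if (sg[i].len == 0) {
			*bad = i;
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct sg_entry sg[] = { { 0x1000, 512 }, { 0x2000, 0 }, { 0x3000, 256 } };
	size_t bad;

	if (validate_sg(sg, 3, &bad))
		printf("sg(%zu) data length is zero, rejecting transfer\n", bad);
	return 0;
}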
*/ if (stat & FSL_DMA_SR_PE) { chan_dbg(chan, "irq: Programming Error INT\n"); diff --git a/trunk/drivers/dma/imx-dma.c b/trunk/drivers/dma/imx-dma.c index fcfeb3cd8d31..54f580bb993c 100644 --- a/trunk/drivers/dma/imx-dma.c +++ b/trunk/drivers/dma/imx-dma.c @@ -172,7 +172,8 @@ struct imxdma_engine { struct device_dma_parameters dma_parms; struct dma_device dma_device; void __iomem *base; - struct clk *dma_clk; + struct clk *dma_ahb; + struct clk *dma_ipg; spinlock_t lock; struct imx_dma_2d_config slots_2d[IMX_DMA_2D_SLOTS]; struct imxdma_channel channel[IMX_DMA_CHANNELS]; @@ -571,8 +572,8 @@ static void imxdma_tasklet(unsigned long data) if (desc->desc.callback) desc->desc.callback(desc->desc.callback_param); - /* If we are dealing with a cyclic descriptor keep it on ld_active - * and dont mark the descripor as complete. + /* If we are dealing with a cyclic descriptor, keep it on ld_active + * and dont mark the descriptor as complete. * Only in non-cyclic cases it would be marked as complete */ if (imxdma_chan_is_doing_cyclic(imxdmac)) @@ -976,10 +977,20 @@ static int __init imxdma_probe(struct platform_device *pdev) return 0; } - imxdma->dma_clk = clk_get(NULL, "dma"); - if (IS_ERR(imxdma->dma_clk)) - return PTR_ERR(imxdma->dma_clk); - clk_enable(imxdma->dma_clk); + imxdma->dma_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(imxdma->dma_ipg)) { + ret = PTR_ERR(imxdma->dma_ipg); + goto err_clk; + } + + imxdma->dma_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(imxdma->dma_ahb)) { + ret = PTR_ERR(imxdma->dma_ahb); + goto err_clk; + } + + clk_prepare_enable(imxdma->dma_ipg); + clk_prepare_enable(imxdma->dma_ahb); /* reset DMA module */ imx_dmav1_writel(imxdma, DCR_DRST, DMA_DCR); @@ -988,16 +999,14 @@ static int __init imxdma_probe(struct platform_device *pdev) ret = request_irq(MX1_DMA_INT, dma_irq_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register IRQ for DMA\n"); - kfree(imxdma); - return ret; + goto err_enable; } ret = request_irq(MX1_DMA_ERR, imxdma_err_handler, 0, "DMA", imxdma); if (ret) { dev_warn(imxdma->dev, "Can't register ERRIRQ for DMA\n"); free_irq(MX1_DMA_INT, NULL); - kfree(imxdma); - return ret; + goto err_enable; } } @@ -1094,7 +1103,10 @@ static int __init imxdma_probe(struct platform_device *pdev) free_irq(MX1_DMA_INT, NULL); free_irq(MX1_DMA_ERR, NULL); } - +err_enable: + clk_disable_unprepare(imxdma->dma_ipg); + clk_disable_unprepare(imxdma->dma_ahb); +err_clk: kfree(imxdma); return ret; } @@ -1114,7 +1126,9 @@ static int __exit imxdma_remove(struct platform_device *pdev) free_irq(MX1_DMA_ERR, NULL); } - kfree(imxdma); + clk_disable_unprepare(imxdma->dma_ipg); + clk_disable_unprepare(imxdma->dma_ahb); + kfree(imxdma); return 0; } diff --git a/trunk/drivers/dma/intel_mid_dma.c b/trunk/drivers/dma/intel_mid_dma.c index 222e907bfaaa..02b21d7d38e5 100644 --- a/trunk/drivers/dma/intel_mid_dma.c +++ b/trunk/drivers/dma/intel_mid_dma.c @@ -427,7 +427,7 @@ DMA engine callback Functions*/ * intel_mid_dma_tx_submit - callback to submit DMA transaction * @tx: dma engine descriptor * - * Submit the DMA trasaction for this descriptor, start if ch idle + * Submit the DMA transaction for this descriptor, start if ch idle */ static dma_cookie_t intel_mid_dma_tx_submit(struct dma_async_tx_descriptor *tx) { diff --git a/trunk/drivers/dma/intel_mid_dma_regs.h b/trunk/drivers/dma/intel_mid_dma_regs.h index 1bfa9268feaf..17b42192ea58 100644 --- a/trunk/drivers/dma/intel_mid_dma_regs.h +++ b/trunk/drivers/dma/intel_mid_dma_regs.h @@ -168,9 +168,9 @@ union 
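Note on the imx-dma probe rework above: the driver now takes the "ipg" and "ahb" clocks via devm_clk_get(), enables them with clk_prepare_enable(), and routes every later failure through labels so clk_disable_unprepare() is always paired with the enable. The general shape of that goto-based unwinding, sketched with dummy enable/disable helpers rather than the real clk API:

#include <stdio.h>

static int enable_clock(const char *name) { printf("enable %s\n", name); return 0; }
static void disable_clock(const char *name) { printf("disable %s\n", name); }
static int request_irq_stub(void) { return -1; }	/* pretend the IRQ request fails */

static int probe(void)
{
	int ret;

	ret = enable_clock("ipg");
	if (ret)
		goto err_out;
	ret = enable_clock("ahb");
	if (ret)
		goto err_ipg;

	ret = request_irq_stub();
	if (ret)
		goto err_ahb;		/* later failures unwind everything before them */

	return 0;

err_ahb:
	disable_clock("ahb");
err_ipg:
	disable_clock("ipg");
err_out:
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}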
intel_mid_dma_cfg_hi { * @active_list: current active descriptors * @queue: current queued up descriptors * @free_list: current free descriptors - * @slave: dma slave struture - * @descs_allocated: total number of decsiptors allocated - * @dma: dma device struture pointer + * @slave: dma slave structure + * @descs_allocated: total number of descriptors allocated + * @dma: dma device structure pointer * @busy: bool representing if ch is busy (active txn) or not * @in_use: bool representing if ch is in use or not * @raw_tfr: raw trf interrupt received diff --git a/trunk/drivers/dma/ioat/hw.h b/trunk/drivers/dma/ioat/hw.h index 60e675455b6a..d2ff3fda0b18 100644 --- a/trunk/drivers/dma/ioat/hw.h +++ b/trunk/drivers/dma/ioat/hw.h @@ -22,7 +22,6 @@ #define _IOAT_HW_H_ /* PCI Configuration Space Values */ -#define IOAT_PCI_VID 0x8086 #define IOAT_MMIO_BAR 0 /* CB device ID's */ @@ -31,9 +30,6 @@ #define IOAT_PCI_DID_SCNB 0x65FF #define IOAT_PCI_DID_SNB 0x402F -#define IOAT_PCI_RID 0x00 -#define IOAT_PCI_SVID 0x8086 -#define IOAT_PCI_SID 0x8086 #define IOAT_VER_1_2 0x12 /* Version 1.2 */ #define IOAT_VER_2_0 0x20 /* Version 2.0 */ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ diff --git a/trunk/drivers/dma/pl330.c b/trunk/drivers/dma/pl330.c index e4feba6b03c0..5d3bbcd279b4 100644 --- a/trunk/drivers/dma/pl330.c +++ b/trunk/drivers/dma/pl330.c @@ -522,7 +522,7 @@ enum desc_status { /* In the DMAC pool */ FREE, /* - * Allocted to some channel during prep_xxx + * Allocated to some channel during prep_xxx * Also may be sitting on the work_list. */ PREP, @@ -1567,17 +1567,19 @@ static int pl330_submit_req(void *ch_id, struct pl330_req *r) goto xfer_exit; } - /* Prefer Secure Channel */ - if (!_manager_ns(thrd)) - r->cfg->nonsecure = 0; - else - r->cfg->nonsecure = 1; /* Use last settings, if not provided */ - if (r->cfg) + if (r->cfg) { + /* Prefer Secure Channel */ + if (!_manager_ns(thrd)) + r->cfg->nonsecure = 0; + else + r->cfg->nonsecure = 1; + ccr = _prepare_ccr(r->cfg); - else + } else { ccr = readl(regs + CC(thrd->id)); + } /* If this req doesn't have valid xfer settings */ if (!_is_valid(ccr)) { @@ -2928,6 +2930,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); + if (!pdmac->peripherals) { + ret = -ENOMEM; + dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); + goto probe_err5; + } for (i = 0; i < num_chan; i++) { pch = &pdmac->peripherals[i]; diff --git a/trunk/drivers/dma/ppc4xx/adma.c b/trunk/drivers/dma/ppc4xx/adma.c index ced98826684a..f72348d0bc41 100644 --- a/trunk/drivers/dma/ppc4xx/adma.c +++ b/trunk/drivers/dma/ppc4xx/adma.c @@ -4446,7 +4446,7 @@ static int __devinit ppc440spe_adma_probe(struct platform_device *ofdev) ret = -ENOMEM; goto err_dma_alloc; } - dev_dbg(&ofdev->dev, "allocted descriptor pool virt 0x%p phys 0x%llx\n", + dev_dbg(&ofdev->dev, "allocated descriptor pool virt 0x%p phys 0x%llx\n", adev->dma_desc_pool_virt, (u64)adev->dma_desc_pool); regs = ioremap(res.start, resource_size(&res)); diff --git a/trunk/drivers/dma/ste_dma40_ll.h b/trunk/drivers/dma/ste_dma40_ll.h index 51e8e5396e9b..6d47373f3f58 100644 --- a/trunk/drivers/dma/ste_dma40_ll.h +++ b/trunk/drivers/dma/ste_dma40_ll.h @@ -202,7 +202,7 @@ /* LLI related structures */ /** - * struct d40_phy_lli - The basic configration register for each physical + * struct d40_phy_lli - The basic configuration register for each physical * channel. 
* * @reg_cfg: The configuration register. diff --git a/trunk/drivers/dma/tegra20-apb-dma.c b/trunk/drivers/dma/tegra20-apb-dma.c index d52dbc6c54ab..24acd711e032 100644 --- a/trunk/drivers/dma/tegra20-apb-dma.c +++ b/trunk/drivers/dma/tegra20-apb-dma.c @@ -1119,15 +1119,21 @@ struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma *tdma = tdc->tdma; + int ret; dma_cookie_init(&tdc->dma_chan); tdc->config_init = false; - return 0; + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) + dev_err(tdc2dev(tdc), "clk_prepare_enable failed: %d\n", ret); + return ret; } static void tegra_dma_free_chan_resources(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); + struct tegra_dma *tdma = tdc->tdma; struct tegra_dma_desc *dma_desc; struct tegra_dma_sg_req *sg_req; @@ -1163,6 +1169,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) list_del(&sg_req->node); kfree(sg_req); } + clk_disable_unprepare(tdma->dma_clk); } /* Tegra20 specific DMA controller information */ @@ -1255,6 +1262,13 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) } } + /* Enable clock before accessing registers */ + ret = clk_prepare_enable(tdma->dma_clk); + if (ret < 0) { + dev_err(&pdev->dev, "clk_prepare_enable failed: %d\n", ret); + goto err_pm_disable; + } + /* Reset DMA controller */ tegra_periph_reset_assert(tdma->dma_clk); udelay(2); @@ -1265,6 +1279,8 @@ static int __devinit tegra_dma_probe(struct platform_device *pdev) tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); + clk_disable_unprepare(tdma->dma_clk); + INIT_LIST_HEAD(&tdma->dma_dev.channels); for (i = 0; i < cdata->nr_channels; i++) { struct tegra_dma_channel *tdc = &tdma->channels[i]; diff --git a/trunk/drivers/edac/edac_mc.c b/trunk/drivers/edac/edac_mc.c index 616d90bcb3a4..d5dc9da7f99f 100644 --- a/trunk/drivers/edac/edac_mc.c +++ b/trunk/drivers/edac/edac_mc.c @@ -199,6 +199,36 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems) return (void *)(((unsigned long)ptr) + align - r); } +static void _edac_mc_free(struct mem_ctl_info *mci) +{ + int i, chn, row; + struct csrow_info *csr; + const unsigned int tot_dimms = mci->tot_dimms; + const unsigned int tot_channels = mci->num_cschannel; + const unsigned int tot_csrows = mci->nr_csrows; + + if (mci->dimms) { + for (i = 0; i < tot_dimms; i++) + kfree(mci->dimms[i]); + kfree(mci->dimms); + } + if (mci->csrows) { + for (row = 0; row < tot_csrows; row++) { + csr = mci->csrows[row]; + if (csr) { + if (csr->channels) { + for (chn = 0; chn < tot_channels; chn++) + kfree(csr->channels[chn]); + kfree(csr->channels); + } + kfree(csr); + } + } + kfree(mci->csrows); + } + kfree(mci); +} + /** * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure * @mc_num: Memory controller number @@ -413,24 +443,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, return mci; error: - if (mci->dimms) { - for (i = 0; i < tot_dimms; i++) - kfree(mci->dimms[i]); - kfree(mci->dimms); - } - if (mci->csrows) { - for (chn = 0; chn < tot_channels; chn++) { - csr = mci->csrows[chn]; - if (csr) { - for (chn = 0; chn < tot_channels; chn++) - kfree(csr->channels[chn]); - kfree(csr); - } - kfree(mci->csrows[i]); - } - kfree(mci->csrows); - } - kfree(mci); + _edac_mc_free(mci); return NULL; } @@ -445,6 +458,14 @@ void edac_mc_free(struct mem_ctl_info *mci) { 
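Note on the new _edac_mc_free() above: it lets both the edac_mc_alloc() error path and (below) edac_mc_free() before sysfs registration tear down the same partially built structure, and it replaces the old inline error loop whose indices were confused. Every level of the dimms/csrows/channels hierarchy is checked before being freed. A self-contained sketch of that "free whatever actually got allocated" pattern, with simplified stand-in types:

#include <stdlib.h>

struct row {
	char **channels;	/* array of per-channel buffers, may be partial */
};

struct ctl {
	struct row **rows;
	int nr_rows;
	int nr_channels;
};

/* Safe on a partially constructed ctl: NULL pointers are simply skipped. */
static void ctl_free(struct ctl *ctl)
{
	if (!ctl)
		return;
	if (ctl->rows) {
		for (int r = 0; r < ctl->nr_rows; r++) {
			struct row *row = ctl->rows[r];

			if (!row)
				continue;
			if (row->channels) {
				for (int c = 0; c < ctl->nr_channels; c++)
					free(row->channels[c]);
				free(row->channels);
			}
			free(row);
		}
		free(ctl->rows);
	}
	free(ctl);
}

int main(void)
{
	struct ctl *ctl = calloc(1, sizeof(*ctl));

	if (!ctl)
		return 1;
	ctl->nr_rows = 2;
	ctl->nr_channels = 2;
	ctl->rows = calloc(ctl->nr_rows, sizeof(*ctl->rows));	/* rows left NULL on purpose */
	ctl_free(ctl);						/* must not crash or leak */
	return 0;
}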
edac_dbg(1, "\n"); + /* If we're not yet registered with sysfs free only what was allocated + * in edac_mc_alloc(). + */ + if (!device_is_registered(&mci->dev)) { + _edac_mc_free(mci); + return; + } + /* the mci instance is freed here, when the sysfs object is dropped */ edac_unregister_sysfs(mci); } diff --git a/trunk/drivers/edac/i3200_edac.c b/trunk/drivers/edac/i3200_edac.c index 47180a08edad..b6653a6fc5d5 100644 --- a/trunk/drivers/edac/i3200_edac.c +++ b/trunk/drivers/edac/i3200_edac.c @@ -391,7 +391,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx) for (j = 0; j < nr_channels; j++) { struct dimm_info *dimm = csrow->channels[j]->dimm; - dimm->nr_pages = nr_pages / nr_channels; + dimm->nr_pages = nr_pages; dimm->grain = nr_pages << PAGE_SHIFT; dimm->mtype = MEM_DDR2; dimm->dtype = DEV_UNKNOWN; diff --git a/trunk/drivers/edac/i5000_edac.c b/trunk/drivers/edac/i5000_edac.c index 39c63757c2a1..6a49dd00b81b 100644 --- a/trunk/drivers/edac/i5000_edac.c +++ b/trunk/drivers/edac/i5000_edac.c @@ -1012,6 +1012,10 @@ static void handle_channel(struct i5000_pvt *pvt, int slot, int channel, /* add the number of COLUMN bits */ addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); + /* Dual-rank memories have twice the size */ + if (dinfo->dual_rank) + addrBits++; + addrBits += 6; /* add 64 bits per DIMM */ addrBits -= 20; /* divide by 2^^20 */ addrBits -= 3; /* 8 bits per bytes */ diff --git a/trunk/drivers/edac/sb_edac.c b/trunk/drivers/edac/sb_edac.c index f3b1f9fafa4b..5715b7c2c517 100644 --- a/trunk/drivers/edac/sb_edac.c +++ b/trunk/drivers/edac/sb_edac.c @@ -513,7 +513,8 @@ static int get_dimm_config(struct mem_ctl_info *mci) { struct sbridge_pvt *pvt = mci->pvt_info; struct dimm_info *dimm; - int i, j, banks, ranks, rows, cols, size, npages; + unsigned i, j, banks, ranks, rows, cols, npages; + u64 size; u32 reg; enum edac_type mode; enum mem_type mtype; @@ -585,10 +586,10 @@ static int get_dimm_config(struct mem_ctl_info *mci) cols = numcol(mtr); /* DDR3 has 8 I/O banks */ - size = (rows * cols * banks * ranks) >> (20 - 3); + size = ((u64)rows * cols * banks * ranks) >> (20 - 3); npages = MiB_TO_PAGES(size); - edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", + edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", pvt->sbridge_dev->mc, i, j, size, npages, banks, ranks, rows, cols); diff --git a/trunk/drivers/extcon/extcon-arizona.c b/trunk/drivers/extcon/extcon-arizona.c index 427a289f32a5..6c19833ed2d0 100644 --- a/trunk/drivers/extcon/extcon-arizona.c +++ b/trunk/drivers/extcon/extcon-arizona.c @@ -434,6 +434,11 @@ static int __devinit arizona_extcon_probe(struct platform_device *pdev) regmap_update_bits(arizona->regmap, ARIZONA_JACK_DETECT_ANALOGUE, ARIZONA_JD1_ENA, ARIZONA_JD1_ENA); + ret = regulator_allow_bypass(info->micvdd, true); + if (ret != 0) + dev_warn(arizona->dev, "Failed to set MICVDD to bypass: %d\n", + ret); + pm_runtime_put(&pdev->dev); return 0; diff --git a/trunk/drivers/extcon/extcon-max77693.c b/trunk/drivers/extcon/extcon-max77693.c index 920a609b2c35..38f9e52f358b 100644 --- a/trunk/drivers/extcon/extcon-max77693.c +++ b/trunk/drivers/extcon/extcon-max77693.c @@ -669,13 +669,18 @@ static int __devinit max77693_muic_probe(struct platform_device *pdev) } info->dev = &pdev->dev; info->max77693 = max77693; - info->max77693->regmap_muic = regmap_init_i2c(info->max77693->muic, - &max77693_muic_regmap_config); - if (IS_ERR(info->max77693->regmap_muic)) { - ret = 
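Note on the sb_edac change above: the size computation is widened to u64 and the first operand is cast, because rows * cols * banks * ranks is otherwise evaluated in 32-bit arithmetic and can wrap for large DIMMs before the >> (20 - 3) shift converts 8-byte locations into MiB. A quick worked illustration; the geometry below (16 row bits, 11 column bits, 8 banks, 4 ranks, i.e. a 32 GiB DIMM) is chosen only to force the wraparound.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rows = 65536, cols = 2048, banks = 8, ranks = 4;

	/* 32-bit product: 2^32 wraps to 0, so the DIMM appears to be 0 MiB. */
	uint32_t wrapped = (rows * cols * banks * ranks) >> (20 - 3);

	/* Widening the first operand keeps the whole product in 64 bits. */
	uint64_t widened = ((uint64_t)rows * cols * banks * ranks) >> (20 - 3);

	printf("32-bit: %u MiB, 64-bit: %llu MiB\n",
	       wrapped, (unsigned long long)widened);
	return 0;
}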
PTR_ERR(info->max77693->regmap_muic); - dev_err(max77693->dev, - "failed to allocate register map: %d\n", ret); - goto err_regmap; + if (info->max77693->regmap_muic) + dev_dbg(&pdev->dev, "allocate register map\n"); + else { + info->max77693->regmap_muic = devm_regmap_init_i2c( + info->max77693->muic, + &max77693_muic_regmap_config); + if (IS_ERR(info->max77693->regmap_muic)) { + ret = PTR_ERR(info->max77693->regmap_muic); + dev_err(max77693->dev, + "failed to allocate register map: %d\n", ret); + goto err_regmap; + } } platform_set_drvdata(pdev, info); mutex_init(&info->mutex); diff --git a/trunk/drivers/extcon/extcon_gpio.c b/trunk/drivers/extcon/extcon_gpio.c index fe3db45fa83c..3cc152e690b0 100644 --- a/trunk/drivers/extcon/extcon_gpio.c +++ b/trunk/drivers/extcon/extcon_gpio.c @@ -107,7 +107,8 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev) if (ret < 0) return ret; - ret = gpio_request_one(extcon_data->gpio, GPIOF_DIR_IN, pdev->name); + ret = devm_gpio_request_one(&pdev->dev, extcon_data->gpio, GPIOF_DIR_IN, + pdev->name); if (ret < 0) goto err; diff --git a/trunk/drivers/gpio/Kconfig b/trunk/drivers/gpio/Kconfig index b16c8a72a2e2..ba7926f5c099 100644 --- a/trunk/drivers/gpio/Kconfig +++ b/trunk/drivers/gpio/Kconfig @@ -294,7 +294,7 @@ config GPIO_MAX732X_IRQ config GPIO_MC9S08DZ60 bool "MX35 3DS BOARD MC9S08DZ60 GPIO functions" - depends on I2C && MACH_MX35_3DS + depends on I2C=y && MACH_MX35_3DS help Select this to enable the MC9S08DZ60 GPIO driver diff --git a/trunk/drivers/gpio/gpio-em.c b/trunk/drivers/gpio/gpio-em.c index 150d9768811d..ec48ed512628 100644 --- a/trunk/drivers/gpio/gpio-em.c +++ b/trunk/drivers/gpio/gpio-em.c @@ -247,9 +247,9 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) p->irq_base = irq_alloc_descs(pdata->irq_base, 0, pdata->number_of_pins, numa_node_id()); - if (IS_ERR_VALUE(p->irq_base)) { + if (p->irq_base < 0) { dev_err(&pdev->dev, "cannot get irq_desc\n"); - return -ENXIO; + return p->irq_base; } pr_debug("gio: hw base = %d, nr = %d, sw base = %d\n", pdata->gpio_base, pdata->number_of_pins, p->irq_base); @@ -266,7 +266,7 @@ static int __devinit em_gio_irq_domain_init(struct em_gio_priv *p) return 0; } -static void __devexit em_gio_irq_domain_cleanup(struct em_gio_priv *p) +static void em_gio_irq_domain_cleanup(struct em_gio_priv *p) { struct gpio_em_config *pdata = p->pdev->dev.platform_data; diff --git a/trunk/drivers/gpio/gpio-langwell.c b/trunk/drivers/gpio/gpio-langwell.c index a1c8754f52cf..202a99207b7d 100644 --- a/trunk/drivers/gpio/gpio-langwell.c +++ b/trunk/drivers/gpio/gpio-langwell.c @@ -339,7 +339,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, resource_size_t start, len; struct lnw_gpio *lnw; u32 gpio_base; - int retval = 0; + int retval; int ngpio = id->driver_data; retval = pci_enable_device(pdev); @@ -357,6 +357,7 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, base = ioremap_nocache(start, len); if (!base) { dev_err(&pdev->dev, "error mapping bar1\n"); + retval = -EFAULT; goto err3; } gpio_base = *((u32 *)base + 1); @@ -381,8 +382,10 @@ static int __devinit lnw_gpio_probe(struct pci_dev *pdev, lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio, &lnw_gpio_irq_ops, lnw); - if (!lnw->domain) + if (!lnw->domain) { + retval = -ENOMEM; goto err3; + } lnw->reg_base = base; lnw->chip.label = dev_name(&pdev->dev); diff --git a/trunk/drivers/gpio/gpio-lpc32xx.c b/trunk/drivers/gpio/gpio-lpc32xx.c index 8a420f13905e..ed94b4ea72e9 100644 --- 
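Note on the gpio-em hunk above: irq_alloc_descs() returns a negative errno on failure, so returning p->irq_base instead of a hard-coded -ENXIO preserves the real error code for callers. A tiny sketch of that propagate-don't-replace pattern, with alloc_descs() as an invented stand-in allocator:

#include <stdio.h>
#include <errno.h>

/* Stand-in allocator: returns a base index >= 0, or a negative errno. */
static int alloc_descs(int want)
{
	return want > 0 ? -ENOSPC : 0;	/* pretend the range is exhausted */
}

static int setup(void)
{
	int base = alloc_descs(16);

	if (base < 0) {
		fprintf(stderr, "cannot get irq_desc\n");
		return base;		/* propagate the real errno, not -ENXIO */
	}
	return 0;
}

int main(void)
{
	return setup() ? 1 : 0;
}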
a/trunk/drivers/gpio/gpio-lpc32xx.c +++ b/trunk/drivers/gpio/gpio-lpc32xx.c @@ -308,6 +308,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin, { struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + __set_gpio_level_p012(group, pin, value); __set_gpio_dir_p012(group, pin, 0); return 0; @@ -318,6 +319,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin, { struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + __set_gpio_level_p3(group, pin, value); __set_gpio_dir_p3(group, pin, 0); return 0; @@ -326,6 +328,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin, static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin, int value) { + struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip); + + __set_gpo_level_p3(group, pin, value); return 0; } diff --git a/trunk/drivers/gpio/gpio-msic.c b/trunk/drivers/gpio/gpio-msic.c index 71a838f44501..b38986285868 100644 --- a/trunk/drivers/gpio/gpio-msic.c +++ b/trunk/drivers/gpio/gpio-msic.c @@ -99,7 +99,7 @@ static int msic_gpio_to_oreg(unsigned offset) if (offset < 20) return INTEL_MSIC_GPIO0HV0CTLO - offset + 16; - return INTEL_MSIC_GPIO1HV0CTLO + offset + 20; + return INTEL_MSIC_GPIO1HV0CTLO - offset + 20; } static int msic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) diff --git a/trunk/drivers/gpio/gpio-mxc.c b/trunk/drivers/gpio/gpio-mxc.c index 4db460b6ecf7..80f44bb64a87 100644 --- a/trunk/drivers/gpio/gpio-mxc.c +++ b/trunk/drivers/gpio/gpio-mxc.c @@ -465,9 +465,8 @@ static int __devinit mxc_gpio_probe(struct platform_device *pdev) goto out_iounmap; port->bgc.gc.to_irq = mxc_gpio_to_irq; - port->bgc.gc.base = pdev->id * 32; - port->bgc.dir = port->bgc.read_reg(port->bgc.reg_dir); - port->bgc.data = port->bgc.read_reg(port->bgc.reg_set); + port->bgc.gc.base = (pdev->id < 0) ? of_alias_get_id(np, "gpio") * 32 : + pdev->id * 32; err = gpiochip_add(&port->bgc.gc); if (err) diff --git a/trunk/drivers/gpio/gpio-pxa.c b/trunk/drivers/gpio/gpio-pxa.c index 58a6a63a6ece..9cac88a65f78 100644 --- a/trunk/drivers/gpio/gpio-pxa.c +++ b/trunk/drivers/gpio/gpio-pxa.c @@ -62,6 +62,7 @@ int pxa_last_gpio; #ifdef CONFIG_OF static struct irq_domain *domain; +static struct device_node *pxa_gpio_of_node; #endif struct pxa_gpio_chip { @@ -277,6 +278,24 @@ static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value) (value ? GPSR_OFFSET : GPCR_OFFSET)); } +#ifdef CONFIG_OF_GPIO +static int pxa_gpio_of_xlate(struct gpio_chip *gc, + const struct of_phandle_args *gpiospec, + u32 *flags) +{ + if (gpiospec->args[0] > pxa_last_gpio) + return -EINVAL; + + if (gc != &pxa_gpio_chips[gpiospec->args[0] / 32].chip) + return -EINVAL; + + if (flags) + *flags = gpiospec->args[1]; + + return gpiospec->args[0] % 32; +} +#endif + static int __devinit pxa_init_gpio_chip(int gpio_end, int (*set_wake)(unsigned int, unsigned int)) { @@ -304,6 +323,11 @@ static int __devinit pxa_init_gpio_chip(int gpio_end, c->get = pxa_gpio_get; c->set = pxa_gpio_set; c->to_irq = pxa_gpio_to_irq; +#ifdef CONFIG_OF_GPIO + c->of_node = pxa_gpio_of_node; + c->of_xlate = pxa_gpio_of_xlate; + c->of_gpio_n_cells = 2; +#endif /* number of GPIOs on last bank may be less than 32 */ c->ngpio = (gpio + 31 > gpio_end) ? 
(gpio_end - gpio + 1) : 32; @@ -488,6 +512,7 @@ static int pxa_gpio_nums(void) return count; } +#ifdef CONFIG_OF static struct of_device_id pxa_gpio_dt_ids[] = { { .compatible = "mrvl,pxa-gpio" }, { .compatible = "mrvl,mmp-gpio", .data = (void *)MMP_GPIO }, @@ -505,9 +530,9 @@ static int pxa_irq_domain_map(struct irq_domain *d, unsigned int irq, const struct irq_domain_ops pxa_irq_domain_ops = { .map = pxa_irq_domain_map, + .xlate = irq_domain_xlate_twocell, }; -#ifdef CONFIG_OF static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) { int ret, nr_banks, nr_gpios, irq_base; @@ -545,6 +570,7 @@ static int __devinit pxa_gpio_probe_dt(struct platform_device *pdev) } domain = irq_domain_add_legacy(np, nr_gpios, irq_base, 0, &pxa_irq_domain_ops, NULL); + pxa_gpio_of_node = np; return 0; err: iounmap(gpio_reg_base); @@ -653,7 +679,7 @@ static struct platform_driver pxa_gpio_driver = { .probe = pxa_gpio_probe, .driver = { .name = "pxa-gpio", - .of_match_table = pxa_gpio_dt_ids, + .of_match_table = of_match_ptr(pxa_gpio_dt_ids), }, }; diff --git a/trunk/drivers/gpio/gpio-rdc321x.c b/trunk/drivers/gpio/gpio-rdc321x.c index e97016af6443..b62d443e9a59 100644 --- a/trunk/drivers/gpio/gpio-rdc321x.c +++ b/trunk/drivers/gpio/gpio-rdc321x.c @@ -170,6 +170,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) rdc321x_gpio_dev->reg2_data_base = r->start + 0x4; rdc321x_gpio_dev->chip.label = "rdc321x-gpio"; + rdc321x_gpio_dev->chip.owner = THIS_MODULE; rdc321x_gpio_dev->chip.direction_input = rdc_gpio_direction_input; rdc321x_gpio_dev->chip.direction_output = rdc_gpio_config; rdc321x_gpio_dev->chip.get = rdc_gpio_get_value; diff --git a/trunk/drivers/gpio/gpio-samsung.c b/trunk/drivers/gpio/gpio-samsung.c index 92f7b2bb79d4..ba126cc04073 100644 --- a/trunk/drivers/gpio/gpio-samsung.c +++ b/trunk/drivers/gpio/gpio-samsung.c @@ -2452,12 +2452,6 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { .ngpio = EXYNOS5_GPIO_C3_NR, .label = "GPC3", }, - }, { - .chip = { - .base = EXYNOS5_GPC4(0), - .ngpio = EXYNOS5_GPIO_C4_NR, - .label = "GPC4", - }, }, { .chip = { .base = EXYNOS5_GPD0(0), @@ -2512,6 +2506,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = { .ngpio = EXYNOS5_GPIO_Y6_NR, .label = "GPY6", }, + }, { + .chip = { + .base = EXYNOS5_GPC4(0), + .ngpio = EXYNOS5_GPIO_C4_NR, + .label = "GPC4", + }, }, { .config = &samsung_gpio_cfgs[9], .irq_base = IRQ_EINT(0), @@ -2836,7 +2836,7 @@ static __init void exynos5_gpiolib_init(void) } /* need to set base address for gpc4 */ - exynos5_gpios_1[11].base = gpio_base1 + 0x2E0; + exynos5_gpios_1[20].base = gpio_base1 + 0x2E0; /* need to set base address for gpx */ chip = &exynos5_gpios_1[21]; diff --git a/trunk/drivers/gpio/gpio-sch.c b/trunk/drivers/gpio/gpio-sch.c index 424dce8e3f30..8707d4572a06 100644 --- a/trunk/drivers/gpio/gpio-sch.c +++ b/trunk/drivers/gpio/gpio-sch.c @@ -241,7 +241,8 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev) break; default: - return -ENODEV; + err = -ENODEV; + goto err_sch_gpio_core; } sch_gpio_core.dev = &pdev->dev; diff --git a/trunk/drivers/gpio/gpiolib-of.c b/trunk/drivers/gpio/gpiolib-of.c index a18c4aa68b1e..f1a45997aea8 100644 --- a/trunk/drivers/gpio/gpiolib-of.c +++ b/trunk/drivers/gpio/gpiolib-of.c @@ -82,7 +82,7 @@ int of_get_named_gpio_flags(struct device_node *np, const char *propname, gpiochip_find(&gg_data, of_gpiochip_find_and_xlate); of_node_put(gg_data.gpiospec.np); - pr_debug("%s exited with status %d\n", __func__, ret); + pr_debug("%s exited with status 
%d\n", __func__, gg_data.out_gpio); return gg_data.out_gpio; } EXPORT_SYMBOL(of_get_named_gpio_flags); diff --git a/trunk/drivers/gpu/drm/Kconfig b/trunk/drivers/gpu/drm/Kconfig index 23120c00a881..90e28081712d 100644 --- a/trunk/drivers/gpu/drm/Kconfig +++ b/trunk/drivers/gpu/drm/Kconfig @@ -22,6 +22,7 @@ menuconfig DRM config DRM_USB tristate depends on DRM + depends on USB_ARCH_HAS_HCD select USB config DRM_KMS_HELPER diff --git a/trunk/drivers/gpu/drm/ast/ast_drv.c b/trunk/drivers/gpu/drm/ast/ast_drv.c index d0c4574ef49c..36164806b9d4 100644 --- a/trunk/drivers/gpu/drm/ast/ast_drv.c +++ b/trunk/drivers/gpu/drm/ast/ast_drv.c @@ -193,6 +193,9 @@ static const struct file_operations ast_fops = { .mmap = ast_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .read = drm_read, }; diff --git a/trunk/drivers/gpu/drm/ast/ast_mode.c b/trunk/drivers/gpu/drm/ast/ast_mode.c index 7282c081fb53..a712cafcfa1d 100644 --- a/trunk/drivers/gpu/drm/ast/ast_mode.c +++ b/trunk/drivers/gpu/drm/ast/ast_mode.c @@ -841,7 +841,7 @@ int ast_cursor_init(struct drm_device *dev) ast->cursor_cache = obj; ast->cursor_cache_gpu_addr = gpu_addr; - DRM_ERROR("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr); + DRM_DEBUG_KMS("pinned cursor cache at %llx\n", ast->cursor_cache_gpu_addr); return 0; fail: return ret; diff --git a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c index 7053140c6596..b83a2d7ddd1a 100644 --- a/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c +++ b/trunk/drivers/gpu/drm/cirrus/cirrus_drv.c @@ -74,6 +74,9 @@ static const struct file_operations cirrus_driver_fops = { .unlocked_ioctl = drm_ioctl, .mmap = cirrus_mmap, .poll = drm_poll, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .fasync = drm_fasync, }; static struct drm_driver driver = { diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c index 08a7aa722d6b..6fbfc244748f 100644 --- a/trunk/drivers/gpu/drm/drm_crtc.c +++ b/trunk/drivers/gpu/drm/drm_crtc.c @@ -1981,7 +1981,7 @@ int drm_mode_cursor_ioctl(struct drm_device *dev, if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - if (!req->flags) + if (!req->flags || (~DRM_MODE_CURSOR_FLAGS & req->flags)) return -EINVAL; mutex_lock(&dev->mode_config.mutex); diff --git a/trunk/drivers/gpu/drm/drm_edid.c b/trunk/drivers/gpu/drm/drm_edid.c index a8743c399e83..b7ee230572b7 100644 --- a/trunk/drivers/gpu/drm/drm_edid.c +++ b/trunk/drivers/gpu/drm/drm_edid.c @@ -87,6 +87,9 @@ static struct edid_quirk { int product_id; u32 quirks; } edid_quirk_list[] = { + /* ASUS VW222S */ + { "ACI", 0x22a2, EDID_QUIRK_FORCE_REDUCED_BLANKING }, + /* Acer AL1706 */ { "ACR", 44358, EDID_QUIRK_PREFER_LARGE_60 }, /* Acer F51 */ diff --git a/trunk/drivers/gpu/drm/drm_edid_load.c b/trunk/drivers/gpu/drm/drm_edid_load.c index 66d4a28ad5a2..0303935d10e2 100644 --- a/trunk/drivers/gpu/drm/drm_edid_load.c +++ b/trunk/drivers/gpu/drm/drm_edid_load.c @@ -119,7 +119,7 @@ static int edid_load(struct drm_connector *connector, char *name, { const struct firmware *fw; struct platform_device *pdev; - u8 *fwdata = NULL, *edid; + u8 *fwdata = NULL, *edid, *new_edid; int fwsize, expected; int builtin = 0, err = 0; int i, valid_extensions = 0; @@ -195,12 +195,14 @@ static int edid_load(struct drm_connector *connector, char *name, "\"%s\" for connector \"%s\"\n", valid_extensions, edid[0x7e], name, connector_name); edid[0x7e] = valid_extensions; - edid = krealloc(edid, 
(valid_extensions + 1) * EDID_LENGTH, + new_edid = krealloc(edid, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL); - if (edid == NULL) { + if (new_edid == NULL) { err = -ENOMEM; + kfree(edid); goto relfw_out; } + edid = new_edid; } connector->display_info.raw_edid = edid; diff --git a/trunk/drivers/gpu/drm/drm_modes.c b/trunk/drivers/gpu/drm/drm_modes.c index b7adb4a967fd..28637c181b15 100644 --- a/trunk/drivers/gpu/drm/drm_modes.c +++ b/trunk/drivers/gpu/drm/drm_modes.c @@ -706,9 +706,6 @@ void drm_mode_set_crtcinfo(struct drm_display_mode *p, int adjust_flags) p->crtc_vblank_end = max(p->crtc_vsync_end, p->crtc_vtotal); p->crtc_hblank_start = min(p->crtc_hsync_start, p->crtc_hdisplay); p->crtc_hblank_end = max(p->crtc_hsync_end, p->crtc_htotal); - - p->crtc_hadjusted = false; - p->crtc_vadjusted = false; } EXPORT_SYMBOL(drm_mode_set_crtcinfo); diff --git a/trunk/drivers/gpu/drm/drm_proc.c b/trunk/drivers/gpu/drm/drm_proc.c index 371c695322d9..da457b18eaaf 100644 --- a/trunk/drivers/gpu/drm/drm_proc.c +++ b/trunk/drivers/gpu/drm/drm_proc.c @@ -89,7 +89,7 @@ static const struct file_operations drm_proc_fops = { * Create a given set of proc files represented by an array of * gdm_proc_lists in the given root directory. */ -int drm_proc_create_files(struct drm_info_list *files, int count, +static int drm_proc_create_files(struct drm_info_list *files, int count, struct proc_dir_entry *root, struct drm_minor *minor) { struct drm_device *dev = minor->dev; @@ -172,7 +172,7 @@ int drm_proc_init(struct drm_minor *minor, int minor_id, return 0; } -int drm_proc_remove_files(struct drm_info_list *files, int count, +static int drm_proc_remove_files(struct drm_info_list *files, int count, struct drm_minor *minor) { struct list_head *pos, *q; diff --git a/trunk/drivers/gpu/drm/exynos/Kconfig b/trunk/drivers/gpu/drm/exynos/Kconfig index 7f5096763b7d..59a26e577b57 100644 --- a/trunk/drivers/gpu/drm/exynos/Kconfig +++ b/trunk/drivers/gpu/drm/exynos/Kconfig @@ -36,6 +36,6 @@ config DRM_EXYNOS_VIDI config DRM_EXYNOS_G2D bool "Exynos DRM G2D" - depends on DRM_EXYNOS + depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_G2D help Choose this option if you want to use Exynos G2D for DRM. 
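Note on the drm_edid_load fix that ends above: assigning the krealloc() result straight back to edid leaks the original buffer when the reallocation fails, since krealloc(), like realloc(3), leaves the old block untouched on failure. Using the new_edid temporary lets the error path free the old block explicitly. The same rule applies to plain realloc(); a minimal illustration:

#include <stdlib.h>
#include <string.h>

/* Grow buf to new_size; on failure the original buffer is freed, not leaked. */
static char *grow_or_free(char *buf, size_t new_size)
{
	char *tmp = realloc(buf, new_size);

	if (!tmp) {
		free(buf);	/* realloc(3) keeps the old block valid on failure */
		return NULL;
	}
	return tmp;
}

int main(void)
{
	char *edid = malloc(128);

	if (!edid)
		return 1;
	memset(edid, 0, 128);

	edid = grow_or_free(edid, 256);	/* safe: no stale pointer, no leak */
	free(edid);
	return 0;
}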
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c index 613bf8a5d9b2..ae13febe0eaa 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_dmabuf.c @@ -163,6 +163,12 @@ static void exynos_gem_dmabuf_kunmap(struct dma_buf *dma_buf, /* TODO */ } +static int exynos_gem_dmabuf_mmap(struct dma_buf *dma_buf, + struct vm_area_struct *vma) +{ + return -ENOTTY; +} + static struct dma_buf_ops exynos_dmabuf_ops = { .map_dma_buf = exynos_gem_map_dma_buf, .unmap_dma_buf = exynos_gem_unmap_dma_buf, @@ -170,6 +176,7 @@ static struct dma_buf_ops exynos_dmabuf_ops = { .kmap_atomic = exynos_gem_dmabuf_kmap_atomic, .kunmap = exynos_gem_dmabuf_kunmap, .kunmap_atomic = exynos_gem_dmabuf_kunmap_atomic, + .mmap = exynos_gem_dmabuf_mmap, .release = exynos_dmabuf_release, }; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c index ebacec6f1e48..d07071937453 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -160,7 +160,6 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file) if (!file_priv) return -ENOMEM; - drm_prime_init_file_private(&file->prime); file->driver_priv = file_priv; return exynos_drm_subdrv_open(dev, file); @@ -184,7 +183,6 @@ static void exynos_drm_preclose(struct drm_device *dev, e->base.destroy(&e->base); } } - drm_prime_destroy_file_private(&file->prime); spin_unlock_irqrestore(&dev->event_lock, flags); exynos_drm_subdrv_close(dev, file); @@ -241,6 +239,9 @@ static const struct file_operations exynos_drm_driver_fops = { .poll = drm_poll, .read = drm_read, .unlocked_ioctl = drm_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .release = drm_release, }; diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c index a68d2b313f03..b19cd93e7047 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -831,11 +831,6 @@ static int __devinit fimd_probe(struct platform_device *pdev) } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "failed to find registers\n"); - ret = -ENOENT; - goto err_clk; - } ctx->regs = devm_request_and_ioremap(&pdev->dev, res); if (!ctx->regs) { diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c index d2d88f22a037..1065e90d0919 100644 --- a/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -129,7 +129,6 @@ struct g2d_runqueue_node { struct g2d_data { struct device *dev; struct clk *gate_clk; - struct resource *regs_res; void __iomem *regs; int irq; struct workqueue_struct *g2d_workq; @@ -751,7 +750,7 @@ static int __devinit g2d_probe(struct platform_device *pdev) struct exynos_drm_subdrv *subdrv; int ret; - g2d = kzalloc(sizeof(*g2d), GFP_KERNEL); + g2d = devm_kzalloc(&pdev->dev, sizeof(*g2d), GFP_KERNEL); if (!g2d) { dev_err(dev, "failed to allocate driver data\n"); return -ENOMEM; @@ -759,10 +758,8 @@ static int __devinit g2d_probe(struct platform_device *pdev) g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab", sizeof(struct g2d_runqueue_node), 0, 0, NULL); - if (!g2d->runqueue_slab) { - ret = -ENOMEM; - goto err_free_mem; - } + if (!g2d->runqueue_slab) + return -ENOMEM; g2d->dev = dev; @@ -794,38 +791,26 @@ static int __devinit g2d_probe(struct 
platform_device *pdev)
 pm_runtime_enable(dev);
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "failed to get I/O memory\n");
- ret = -ENOENT;
- goto err_put_clk;
- }
- g2d->regs_res = request_mem_region(res->start, resource_size(res),
- dev_name(dev));
- if (!g2d->regs_res) {
- dev_err(dev, "failed to request I/O memory\n");
- ret = -ENOENT;
- goto err_put_clk;
- }
-
- g2d->regs = ioremap(res->start, resource_size(res));
+ g2d->regs = devm_request_and_ioremap(&pdev->dev, res);
 if (!g2d->regs) {
 dev_err(dev, "failed to remap I/O memory\n");
 ret = -ENXIO;
- goto err_release_res;
+ goto err_put_clk;
 }
 g2d->irq = platform_get_irq(pdev, 0);
 if (g2d->irq < 0) {
 dev_err(dev, "failed to get irq\n");
 ret = g2d->irq;
- goto err_unmap_base;
+ goto err_put_clk;
 }
- ret = request_irq(g2d->irq, g2d_irq_handler, 0, "drm_g2d", g2d);
+ ret = devm_request_irq(&pdev->dev, g2d->irq, g2d_irq_handler, 0,
+ "drm_g2d", g2d);
 if (ret < 0) {
 dev_err(dev, "irq request failed\n");
- goto err_unmap_base;
+ goto err_put_clk;
 }
 platform_set_drvdata(pdev, g2d);
@@ -838,7 +823,7 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 ret = exynos_drm_subdrv_register(subdrv);
 if (ret < 0) {
 dev_err(dev, "failed to register drm g2d device\n");
- goto err_free_irq;
+ goto err_put_clk;
 }
 dev_info(dev, "The exynos g2d(ver %d.%d) successfully probed\n",
@@ -846,13 +831,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 return 0;
-err_free_irq:
- free_irq(g2d->irq, g2d);
-err_unmap_base:
- iounmap(g2d->regs);
-err_release_res:
- release_resource(g2d->regs_res);
- kfree(g2d->regs_res);
 err_put_clk:
 pm_runtime_disable(dev);
 clk_put(g2d->gate_clk);
@@ -862,8 +840,6 @@ static int __devinit g2d_probe(struct platform_device *pdev)
 destroy_workqueue(g2d->g2d_workq);
 err_destroy_slab:
 kmem_cache_destroy(g2d->runqueue_slab);
-err_free_mem:
- kfree(g2d);
 return ret;
 }
@@ -873,24 +849,18 @@ static int __devexit g2d_remove(struct platform_device *pdev)
 cancel_work_sync(&g2d->runqueue_work);
 exynos_drm_subdrv_unregister(&g2d->subdrv);
- free_irq(g2d->irq, g2d);
 while (g2d->runqueue_node) {
 g2d_free_runqueue_node(g2d, g2d->runqueue_node);
 g2d->runqueue_node = g2d_get_runqueue_node(g2d);
 }
- iounmap(g2d->regs);
- release_resource(g2d->regs_res);
- kfree(g2d->regs_res);
-
 pm_runtime_disable(&pdev->dev);
 clk_put(g2d->gate_clk);
 g2d_fini_cmdlist(g2d);
 destroy_workqueue(g2d->g2d_workq);
 kmem_cache_destroy(g2d->runqueue_slab);
- kfree(g2d);
 return 0;
 }
@@ -924,7 +894,7 @@ static int g2d_resume(struct device *dev)
 }
 #endif
-SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
+static SIMPLE_DEV_PM_OPS(g2d_pm_ops, g2d_suspend, g2d_resume);
 struct platform_driver g2d_driver = {
 .probe = g2d_probe,
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c
index f9efde40c097..a38051c95ec4 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -122,7 +122,7 @@ struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
 __free_page(pages[i]);
 drm_free_large(pages);
- return ERR_PTR(PTR_ERR(p));
+ return ERR_CAST(p);
 }
 static void exynos_gem_put_pages(struct drm_gem_object *obj,
@@ -662,7 +662,7 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
 */
 args->pitch = args->width * ((args->bpp + 7) / 8);
- args->size = PAGE_ALIGN(args->pitch * args->height);
+ args->size = args->pitch * args->height;
 exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
 if (IS_ERR(exynos_gem_obj))
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 8ffcdf8b9e22..3fdf0b65f47e 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -345,7 +345,7 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
 DRM_DEBUG_KMS("%s\n", __FILE__);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
 if (!ctx) {
 DRM_LOG_KMS("failed to alloc common hdmi context.\n");
 return -ENOMEM;
@@ -371,7 +371,6 @@ static int __devexit exynos_drm_hdmi_remove(struct platform_device *pdev)
 DRM_DEBUG_KMS("%s\n", __FILE__);
 exynos_drm_subdrv_unregister(&ctx->subdrv);
- kfree(ctx);
 return 0;
 }
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_plane.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_plane.c
index b89829e5043a..e1f94b746bd7 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -29,7 +29,6 @@ static const uint32_t formats[] = {
 DRM_FORMAT_XRGB8888,
 DRM_FORMAT_ARGB8888,
 DRM_FORMAT_NV12,
- DRM_FORMAT_NV12M,
 DRM_FORMAT_NV12MT,
 };
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index bb1550c4dd57..537027a74fd5 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -633,7 +633,7 @@ static int __devinit vidi_probe(struct platform_device *pdev)
 DRM_DEBUG_KMS("%s\n", __FILE__);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
 if (!ctx)
 return -ENOMEM;
@@ -673,8 +673,6 @@ static int __devexit vidi_remove(struct platform_device *pdev)
 ctx->raw_edid = NULL;
 }
- kfree(ctx);
-
 return 0;
 }
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c
index 409e2ec1207c..a6aea6f3ea1a 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2172,7 +2172,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
 DRM_DEBUG_KMS("HDMI resource init\n");
- memset(res, 0, sizeof *res);
+ memset(res, 0, sizeof(*res));
 /* get clocks, power */
 res->hdmi = clk_get(dev, "hdmi");
@@ -2204,7 +2204,7 @@ static int __devinit hdmi_resources_init(struct hdmi_context *hdata)
 clk_set_parent(res->sclk_hdmi, res->sclk_pixel);
 res->regul_bulk = kzalloc(ARRAY_SIZE(supply) *
- sizeof res->regul_bulk[0], GFP_KERNEL);
+ sizeof(res->regul_bulk[0]), GFP_KERNEL);
 if (!res->regul_bulk) {
 DRM_ERROR("failed to get memory for regulators\n");
 goto fail;
@@ -2243,7 +2243,7 @@ static int hdmi_resources_cleanup(struct hdmi_context *hdata)
 clk_put(res->sclk_hdmi);
 if (!IS_ERR_OR_NULL(res->hdmi))
 clk_put(res->hdmi);
- memset(res, 0, sizeof *res);
+ memset(res, 0, sizeof(*res));
 return 0;
 }
@@ -2312,11 +2312,6 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
 }
 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- DRM_ERROR("failed to find registers\n");
- ret = -ENOENT;
- goto err_resource;
- }
 hdata->regs = devm_request_and_ioremap(&pdev->dev, res);
 if (!hdata->regs) {
diff --git a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c
index 30fcc12f81dd..25b97d5e5fcb 100644
--- a/trunk/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/trunk/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -236,11 +236,11 @@ static inline void vp_filter_set(struct mixer_resources *res,
 static void vp_default_filter(struct mixer_resources *res)
 {
 vp_filter_set(res, VP_POLY8_Y0_LL,
- filter_y_horiz_tap8, sizeof filter_y_horiz_tap8);
+ filter_y_horiz_tap8, sizeof(filter_y_horiz_tap8));
 vp_filter_set(res, VP_POLY4_Y0_LL,
- filter_y_vert_tap4, sizeof filter_y_vert_tap4);
+ filter_y_vert_tap4, sizeof(filter_y_vert_tap4));
 vp_filter_set(res, VP_POLY4_C0_LL,
- filter_cr_horiz_tap4, sizeof filter_cr_horiz_tap4);
+ filter_cr_horiz_tap4, sizeof(filter_cr_horiz_tap4));
 }
 static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable)
diff --git a/trunk/drivers/gpu/drm/gma500/oaktrail_device.c b/trunk/drivers/gpu/drm/gma500/oaktrail_device.c
index 0f9b7db80f6b..cf49ba5a54bf 100644
--- a/trunk/drivers/gpu/drm/gma500/oaktrail_device.c
+++ b/trunk/drivers/gpu/drm/gma500/oaktrail_device.c
@@ -476,6 +476,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
 .pos = DSPAPOS,
 .surf = DSPASURF,
 .addr = MRST_DSPABASE,
+ .base = MRST_DSPABASE,
 .status = PIPEASTAT,
 .linoff = DSPALINOFF,
 .tileoff = DSPATILEOFF,
@@ -499,6 +500,7 @@ static const struct psb_offset oaktrail_regmap[2] = {
 .pos = DSPBPOS,
 .surf = DSPBSURF,
 .addr = DSPBBASE,
+ .base = DSPBBASE,
 .status = PIPEBSTAT,
 .linoff = DSPBLINOFF,
 .tileoff = DSPBTILEOFF,
diff --git a/trunk/drivers/gpu/drm/gma500/psb_intel_display.c b/trunk/drivers/gpu/drm/gma500/psb_intel_display.c
index 30dc22a7156c..8033526bb53b 100644
--- a/trunk/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/trunk/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -1362,6 +1362,9 @@ void psb_intel_crtc_init(struct drm_device *dev, int pipe,
 (struct drm_connector **) (psb_intel_crtc + 1);
 psb_intel_crtc->mode_set.num_connectors = 0;
 psb_intel_cursor_init(dev, psb_intel_crtc);
+
+ /* Set to true so that the pipe is forced off on initial config.
*/ + psb_intel_crtc->active = true; } int psb_intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, diff --git a/trunk/drivers/gpu/drm/i810/i810_dma.c b/trunk/drivers/gpu/drm/i810/i810_dma.c index 57d892eaaa6e..463ec6871fe9 100644 --- a/trunk/drivers/gpu/drm/i810/i810_dma.c +++ b/trunk/drivers/gpu/drm/i810/i810_dma.c @@ -115,6 +115,9 @@ static const struct file_operations i810_buffer_fops = { .unlocked_ioctl = drm_ioctl, .mmap = i810_mmap_buffers, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/i810/i810_drv.c b/trunk/drivers/gpu/drm/i810/i810_drv.c index f9924ad04d09..48cfcca2b350 100644 --- a/trunk/drivers/gpu/drm/i810/i810_drv.c +++ b/trunk/drivers/gpu/drm/i810/i810_drv.c @@ -51,6 +51,9 @@ static const struct file_operations i810_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/i915/i915_dma.c b/trunk/drivers/gpu/drm/i915/i915_dma.c index 9cf7dfe022b9..914c0dfabe60 100644 --- a/trunk/drivers/gpu/drm/i915/i915_dma.c +++ b/trunk/drivers/gpu/drm/i915/i915_dma.c @@ -1587,6 +1587,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) spin_lock_init(&dev_priv->irq_lock); spin_lock_init(&dev_priv->error_lock); spin_lock_init(&dev_priv->rps_lock); + spin_lock_init(&dev_priv->dpio_lock); if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) dev_priv->num_pipe = 3; diff --git a/trunk/drivers/gpu/drm/i915/i915_drv.c b/trunk/drivers/gpu/drm/i915/i915_drv.c index ed22612bc847..a24ffbe97c01 100644 --- a/trunk/drivers/gpu/drm/i915/i915_drv.c +++ b/trunk/drivers/gpu/drm/i915/i915_drv.c @@ -346,11 +346,40 @@ static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ + INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ + INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT2 server */ INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ - INTEL_VGA_DEVICE(0x0c16, &intel_haswell_d_info), /* SDV */ + INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ + INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ + INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT2 desktop */ + INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ + INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT2 server */ + INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ + INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT2 mobile */ + INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ + INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT2 desktop */ + INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ + INTEL_VGA_DEVICE(0x0A1A, 
&intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT2 server */ + INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ + INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT2 mobile */ + INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT1 desktop */ + INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D32, &intel_haswell_d_info), /* CRW GT2 desktop */ + INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT1 server */ + INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D3A, &intel_haswell_d_info), /* CRW GT2 server */ + INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT1 mobile */ + INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT2 mobile */ + INTEL_VGA_DEVICE(0x0D36, &intel_haswell_m_info), /* CRW GT2 mobile */ INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), diff --git a/trunk/drivers/gpu/drm/i915/i915_gem.c b/trunk/drivers/gpu/drm/i915/i915_gem.c index 5c4657a54f97..274d25de521e 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem.c @@ -2365,6 +2365,10 @@ int i915_gpu_idle(struct drm_device *dev) /* Flush everything onto the inactive list. */ for_each_ring(ring, dev_priv, i) { + ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); + if (ret) + return ret; + ret = i915_ring_idle(ring); if (ret) return ret; @@ -2372,10 +2376,6 @@ int i915_gpu_idle(struct drm_device *dev) /* Is the device fubar? */ if (WARN_ON(!list_empty(&ring->gpu_write_list))) return -EBUSY; - - ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID); - if (ret) - return ret; } return 0; @@ -3242,7 +3242,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, { int ret; - BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT); + if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT)) + return -EBUSY; if (obj->gtt_space != NULL) { if ((alignment && obj->gtt_offset & (alignment - 1)) || diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_context.c b/trunk/drivers/gpu/drm/i915/i915_gem_context.c index da8b01fb1bf8..a9d58d72bb4d 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_context.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_context.c @@ -451,7 +451,6 @@ int i915_switch_context(struct intel_ring_buffer *ring, struct drm_i915_file_private *file_priv = NULL; struct i915_hw_context *to; struct drm_i915_gem_object *from_obj = ring->last_context_obj; - int ret; if (dev_priv->hw_contexts_disabled) return 0; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 5af631e788c8..ff2819ea0813 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -291,6 +291,16 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, target_i915_obj = to_intel_bo(target_obj); target_offset = target_i915_obj->gtt_offset; + /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and + * pipe_control writes because the gpu doesn't properly redirect them + * through the ppgtt for non_secure batchbuffers. 
*/ + if (unlikely(IS_GEN6(dev) && + reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && + !target_i915_obj->has_global_gtt_mapping)) { + i915_gem_gtt_bind_object(target_i915_obj, + target_i915_obj->cache_level); + } + /* The target buffer should have appeared before us in the * exec_object list, so it should have a GTT space bound by now. */ @@ -399,16 +409,6 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj, io_mapping_unmap_atomic(reloc_page); } - /* Sandybridge PPGTT errata: We need a global gtt mapping for MI and - * pipe_control writes because the gpu doesn't properly redirect them - * through the ppgtt for non_secure batchbuffers. */ - if (unlikely(IS_GEN6(dev) && - reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && - !target_i915_obj->has_global_gtt_mapping)) { - i915_gem_gtt_bind_object(target_i915_obj, - target_i915_obj->cache_level); - } - /* and update the user's relocation entry */ reloc->presumed_offset = target_offset; diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c index 9fd25a435536..60815b861ec2 100644 --- a/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/trunk/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -72,7 +72,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev) /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024 * entries. For aliasing ppgtt support we just steal them at the end for * now. */ - first_pd_entry_in_global_pt = 512*1024 - I915_PPGTT_PD_ENTRIES; + first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES; ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); if (!ppgtt) @@ -261,7 +261,10 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt, pte_flags |= GEN6_PTE_CACHE_LLC; break; case I915_CACHE_NONE: - pte_flags |= GEN6_PTE_UNCACHED; + if (IS_HASWELL(dev)) + pte_flags |= HSW_PTE_UNCACHED; + else + pte_flags |= GEN6_PTE_UNCACHED; break; default: BUG(); @@ -361,7 +364,8 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj) struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; - if (dev_priv->mm.gtt->needs_dmar) + /* don't map imported dma buf objects */ + if (dev_priv->mm.gtt->needs_dmar && !obj->sg_table) return intel_gtt_map_memory(obj->pages, obj->base.size >> PAGE_SHIFT, &obj->sg_list, diff --git a/trunk/drivers/gpu/drm/i915/i915_irq.c b/trunk/drivers/gpu/drm/i915/i915_irq.c index 8a3828528b9d..5249640cce13 100644 --- a/trunk/drivers/gpu/drm/i915/i915_irq.c +++ b/trunk/drivers/gpu/drm/i915/i915_irq.c @@ -2700,9 +2700,6 @@ void intel_irq_init(struct drm_device *dev) dev->driver->irq_handler = i8xx_irq_handler; dev->driver->irq_uninstall = i8xx_irq_uninstall; } else if (INTEL_INFO(dev)->gen == 3) { - /* IIR "flip pending" means done if this bit is set */ - I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); - dev->driver->irq_preinstall = i915_irq_preinstall; dev->driver->irq_postinstall = i915_irq_postinstall; dev->driver->irq_uninstall = i915_irq_uninstall; diff --git a/trunk/drivers/gpu/drm/i915/i915_reg.h b/trunk/drivers/gpu/drm/i915/i915_reg.h index acc99b21e0b6..28725ce5b82c 100644 --- a/trunk/drivers/gpu/drm/i915/i915_reg.h +++ b/trunk/drivers/gpu/drm/i915/i915_reg.h @@ -115,6 +115,7 @@ #define GEN6_PTE_VALID (1 << 0) #define GEN6_PTE_UNCACHED (1 << 1) +#define HSW_PTE_UNCACHED (0) #define GEN6_PTE_CACHE_LLC (2 << 1) #define GEN6_PTE_CACHE_LLC_MLC (3 << 1) #define GEN6_PTE_CACHE_BITS (3 << 1) diff --git a/trunk/drivers/gpu/drm/i915/i915_sysfs.c 
b/trunk/drivers/gpu/drm/i915/i915_sysfs.c index 2f5388af8df9..7631807a2788 100644 --- a/trunk/drivers/gpu/drm/i915/i915_sysfs.c +++ b/trunk/drivers/gpu/drm/i915/i915_sysfs.c @@ -32,6 +32,7 @@ #include "intel_drv.h" #include "i915_drv.h" +#ifdef CONFIG_PM static u32 calc_residency(struct drm_device *dev, const u32 reg) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -224,3 +225,14 @@ void i915_teardown_sysfs(struct drm_device *dev) device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group); } +#else +void i915_setup_sysfs(struct drm_device *dev) +{ + return; +} + +void i915_teardown_sysfs(struct drm_device *dev) +{ + return; +} +#endif /* CONFIG_PM */ diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c index 7ed4a41c3965..23bdc8cd1458 100644 --- a/trunk/drivers/gpu/drm/i915/intel_crt.c +++ b/trunk/drivers/gpu/drm/i915/intel_crt.c @@ -326,6 +326,36 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector) return ret; } +static struct edid *intel_crt_get_edid(struct drm_connector *connector, + struct i2c_adapter *i2c) +{ + struct edid *edid; + + edid = drm_get_edid(connector, i2c); + + if (!edid && !intel_gmbus_is_forced_bit(i2c)) { + DRM_DEBUG_KMS("CRT GMBUS EDID read failed, retry using GPIO bit-banging\n"); + intel_gmbus_force_bit(i2c, true); + edid = drm_get_edid(connector, i2c); + intel_gmbus_force_bit(i2c, false); + } + + return edid; +} + +/* local version of intel_ddc_get_modes() to use intel_crt_get_edid() */ +static int intel_crt_ddc_get_modes(struct drm_connector *connector, + struct i2c_adapter *adapter) +{ + struct edid *edid; + + edid = intel_crt_get_edid(connector, adapter); + if (!edid) + return 0; + + return intel_connector_update_modes(connector, edid); +} + static bool intel_crt_detect_ddc(struct drm_connector *connector) { struct intel_crt *crt = intel_attached_crt(connector); @@ -336,7 +366,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector) BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG); i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); - edid = drm_get_edid(connector, i2c); + edid = intel_crt_get_edid(connector, i2c); if (edid) { bool is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; @@ -544,13 +574,13 @@ static int intel_crt_get_modes(struct drm_connector *connector) struct i2c_adapter *i2c; i2c = intel_gmbus_get_adapter(dev_priv, dev_priv->crt_ddc_pin); - ret = intel_ddc_get_modes(connector, i2c); + ret = intel_crt_ddc_get_modes(connector, i2c); if (ret || !IS_G4X(dev)) return ret; /* Try to probe digital port for output in DVI-I -> VGA mode. 
*/ i2c = intel_gmbus_get_adapter(dev_priv, GMBUS_PORT_DPB); - return intel_ddc_get_modes(connector, i2c); + return intel_crt_ddc_get_modes(connector, i2c); } static int intel_crt_set_property(struct drm_connector *connector, diff --git a/trunk/drivers/gpu/drm/i915/intel_display.c b/trunk/drivers/gpu/drm/i915/intel_display.c index f6159765f1eb..c040aee1341c 100644 --- a/trunk/drivers/gpu/drm/i915/intel_display.c +++ b/trunk/drivers/gpu/drm/i915/intel_display.c @@ -869,6 +869,7 @@ intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc, unsigned long bestppm, ppm, absppm; int dotclk, flag; + flag = 0; dotclk = target * 1000; bestppm = 1000000; ppm = absppm = 0; @@ -1375,7 +1376,8 @@ static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); - WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT), + WARN(HAS_PCH_IBX(dev_priv->dev) && (val & DP_PORT_EN) == 0 + && (val & DP_PIPEB_SELECT), "IBX PCH dp port still using transcoder B\n"); } @@ -1383,11 +1385,12 @@ static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - WARN(hdmi_pipe_enabled(dev_priv, val, pipe), + WARN(hdmi_pipe_enabled(dev_priv, pipe, val), "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n", reg, pipe_name(pipe)); - WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT), + WARN(HAS_PCH_IBX(dev_priv->dev) && (val & PORT_ENABLE) == 0 + && (val & SDVO_PIPE_B_SELECT), "IBX PCH hdmi port still using transcoder B\n"); } @@ -1403,13 +1406,13 @@ static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - WARN(adpa_pipe_enabled(dev_priv, val, pipe), + WARN(adpa_pipe_enabled(dev_priv, pipe, val), "PCH VGA enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); reg = PCH_LVDS; val = I915_READ(reg); - WARN(lvds_pipe_enabled(dev_priv, val, pipe), + WARN(lvds_pipe_enabled(dev_priv, pipe, val), "PCH LVDS enabled on transcoder %c, should be disabled\n", pipe_name(pipe)); @@ -1871,7 +1874,7 @@ static void disable_pch_hdmi(struct drm_i915_private *dev_priv, enum pipe pipe, int reg) { u32 val = I915_READ(reg); - if (hdmi_pipe_enabled(dev_priv, val, pipe)) { + if (hdmi_pipe_enabled(dev_priv, pipe, val)) { DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", reg, pipe); I915_WRITE(reg, val & ~PORT_ENABLE); @@ -1893,12 +1896,12 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, reg = PCH_ADPA; val = I915_READ(reg); - if (adpa_pipe_enabled(dev_priv, val, pipe)) + if (adpa_pipe_enabled(dev_priv, pipe, val)) I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); reg = PCH_LVDS; val = I915_READ(reg); - if (lvds_pipe_enabled(dev_priv, val, pipe)) { + if (lvds_pipe_enabled(dev_priv, pipe, val)) { DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); I915_WRITE(reg, val & ~LVDS_PORT_EN); POSTING_READ(reg); @@ -3753,17 +3756,6 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, continue; } - if (intel_encoder->type == INTEL_OUTPUT_EDP) { - /* Use VBT settings if we have an eDP panel */ - unsigned int edp_bpc = dev_priv->edp.bpp / 3; - - if (edp_bpc < display_bpc) { - DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); - display_bpc = edp_bpc; - } - continue; - } - /* Not one of the known troublemakers, check the EDID */ list_for_each_entry(connector, &dev->mode_config.connector_list, head) { @@ -4199,12 +4191,6 @@ 
static void i8xx_update_pll(struct drm_crtc *crtc, POSTING_READ(DPLL(pipe)); udelay(150); - I915_WRITE(DPLL(pipe), dpll); - - /* Wait for the clocks to stabilize. */ - POSTING_READ(DPLL(pipe)); - udelay(150); - /* The LVDS pin pair needs to be on before the DPLLs are enabled. * This is an exception to the general rule that mode_set doesn't turn * things on. @@ -4212,6 +4198,12 @@ static void i8xx_update_pll(struct drm_crtc *crtc, if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) intel_update_lvds(crtc, clock, adjusted_mode); + I915_WRITE(DPLL(pipe), dpll); + + /* Wait for the clocks to stabilize. */ + POSTING_READ(DPLL(pipe)); + udelay(150); + /* The pixel multiplier can only be updated once the * DPLL is enabled and the clocks are stable. * diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c index 0a56b9ab0f58..ace757af9133 100644 --- a/trunk/drivers/gpu/drm/i915/intel_dp.c +++ b/trunk/drivers/gpu/drm/i915/intel_dp.c @@ -1174,10 +1174,14 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp) WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n"); pp = ironlake_get_pp_control(dev_priv); - pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE); + /* We need to switch off panel power _and_ force vdd, for otherwise some + * panels get very unhappy and cease to work. */ + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); I915_WRITE(PCH_PP_CONTROL, pp); POSTING_READ(PCH_PP_CONTROL); + intel_dp->want_panel_vdd = false; + ironlake_wait_panel_off(intel_dp); } @@ -1287,11 +1291,9 @@ static void intel_dp_prepare(struct drm_encoder *encoder) * ensure that we have vdd while we switch off the panel. */ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - ironlake_edp_panel_off(intel_dp); - intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + ironlake_edp_panel_off(intel_dp); intel_dp_link_down(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); } static void intel_dp_commit(struct drm_encoder *encoder) @@ -1326,11 +1328,9 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) /* Switching the panel off requires vdd. 
*/ ironlake_edp_panel_vdd_on(intel_dp); ironlake_edp_backlight_off(intel_dp); - ironlake_edp_panel_off(intel_dp); - intel_dp_sink_dpms(intel_dp, mode); + ironlake_edp_panel_off(intel_dp); intel_dp_link_down(intel_dp); - ironlake_edp_panel_vdd_off(intel_dp, false); if (is_cpu_edp(intel_dp)) ironlake_edp_pll_off(encoder); @@ -2533,14 +2533,10 @@ intel_dp_init(struct drm_device *dev, int output_reg) break; } - intel_dp_i2c_init(intel_dp, intel_connector, name); - /* Cache some DPCD data in the eDP case */ if (is_edp(intel_dp)) { - bool ret; struct edp_power_seq cur, vbt; u32 pp_on, pp_off, pp_div; - struct edid *edid; pp_on = I915_READ(PCH_PP_ON_DELAYS); pp_off = I915_READ(PCH_PP_OFF_DELAYS); @@ -2591,6 +2587,13 @@ intel_dp_init(struct drm_device *dev, int output_reg) DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); + } + + intel_dp_i2c_init(intel_dp, intel_connector, name); + + if (is_edp(intel_dp)) { + bool ret; + struct edid *edid; ironlake_edp_panel_vdd_on(intel_dp); ret = intel_dp_get_dpcd(intel_dp); diff --git a/trunk/drivers/gpu/drm/i915/intel_drv.h b/trunk/drivers/gpu/drm/i915/intel_drv.h index 84353559441c..cd54cf88a28f 100644 --- a/trunk/drivers/gpu/drm/i915/intel_drv.h +++ b/trunk/drivers/gpu/drm/i915/intel_drv.h @@ -46,15 +46,16 @@ }) #define wait_for_atomic_us(COND, US) ({ \ - int i, ret__ = -ETIMEDOUT; \ - for (i = 0; i < (US); i++) { \ - if ((COND)) { \ - ret__ = 0; \ - break; \ - } \ - udelay(1); \ - } \ - ret__; \ + unsigned long timeout__ = jiffies + usecs_to_jiffies(US); \ + int ret__ = 0; \ + while (!(COND)) { \ + if (time_after(jiffies, timeout__)) { \ + ret__ = -ETIMEDOUT; \ + break; \ + } \ + cpu_relax(); \ + } \ + ret__; \ }) #define wait_for(COND, MS) _wait_for(COND, MS, 1) @@ -341,6 +342,8 @@ struct intel_fbc_work { int interval; }; +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid); int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); extern void intel_attach_force_audio_property(struct drm_connector *connector); @@ -380,7 +383,6 @@ extern void intel_pch_panel_fitting(struct drm_device *dev, const struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode); extern u32 intel_panel_get_max_backlight(struct drm_device *dev); -extern u32 intel_panel_get_backlight(struct drm_device *dev); extern void intel_panel_set_backlight(struct drm_device *dev, u32 level); extern int intel_panel_setup_backlight(struct drm_device *dev); extern void intel_panel_enable_backlight(struct drm_device *dev, diff --git a/trunk/drivers/gpu/drm/i915/intel_hdmi.c b/trunk/drivers/gpu/drm/i915/intel_hdmi.c index 98f602427eb8..12dc3308ab8c 100644 --- a/trunk/drivers/gpu/drm/i915/intel_hdmi.c +++ b/trunk/drivers/gpu/drm/i915/intel_hdmi.c @@ -609,7 +609,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode) u32 temp; u32 enable_bits = SDVO_ENABLE; - if (intel_hdmi->has_audio) + if (intel_hdmi->has_audio || mode != DRM_MODE_DPMS_ON) enable_bits |= SDVO_AUDIO_ENABLE; temp = I915_READ(intel_hdmi->sdvox_reg); diff --git a/trunk/drivers/gpu/drm/i915/intel_i2c.c b/trunk/drivers/gpu/drm/i915/intel_i2c.c index 1991a4408cf9..b9755f6378d8 100644 --- a/trunk/drivers/gpu/drm/i915/intel_i2c.c +++ b/trunk/drivers/gpu/drm/i915/intel_i2c.c @@ -486,9 +486,6 @@ int intel_setup_gmbus(struct drm_device *dev) bus->dev_priv = dev_priv; bus->adapter.algo = &gmbus_algorithm; - ret = i2c_add_adapter(&bus->adapter); - if (ret) - goto err; /* By default use a 
conservative clock rate */ bus->reg0 = port | GMBUS_RATE_100KHZ; @@ -498,6 +495,10 @@ int intel_setup_gmbus(struct drm_device *dev) bus->force_bit = true; intel_gpio_setup(bus, port); + + ret = i2c_add_adapter(&bus->adapter); + if (ret) + goto err; } intel_i2c_reset(dev_priv->dev); @@ -540,9 +541,6 @@ void intel_teardown_gmbus(struct drm_device *dev) struct drm_i915_private *dev_priv = dev->dev_private; int i; - if (dev_priv->gmbus == NULL) - return; - for (i = 0; i < GMBUS_NUM_PORTS; i++) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; i2c_del_adapter(&bus->adapter); diff --git a/trunk/drivers/gpu/drm/i915/intel_lvds.c b/trunk/drivers/gpu/drm/i915/intel_lvds.c index e05c0d3e3440..e9a6f6aaed85 100644 --- a/trunk/drivers/gpu/drm/i915/intel_lvds.c +++ b/trunk/drivers/gpu/drm/i915/intel_lvds.c @@ -780,6 +780,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Gigabyte GA-D525TUD", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."), + DMI_MATCH(DMI_BOARD_NAME, "D525TUD"), + }, + }, { } /* terminating entry */ }; diff --git a/trunk/drivers/gpu/drm/i915/intel_modes.c b/trunk/drivers/gpu/drm/i915/intel_modes.c index 45848b9b670b..29b72593fbb2 100644 --- a/trunk/drivers/gpu/drm/i915/intel_modes.c +++ b/trunk/drivers/gpu/drm/i915/intel_modes.c @@ -32,6 +32,25 @@ #include "intel_drv.h" #include "i915_drv.h" +/** + * intel_connector_update_modes - update connector from edid + * @connector: DRM connector device to use + * @edid: previously read EDID information + */ +int intel_connector_update_modes(struct drm_connector *connector, + struct edid *edid) +{ + int ret; + + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + drm_edid_to_eld(connector, edid); + connector->display_info.raw_edid = NULL; + kfree(edid); + + return ret; +} + /** * intel_ddc_get_modes - get modelist from monitor * @connector: DRM connector device to use @@ -43,18 +62,12 @@ int intel_ddc_get_modes(struct drm_connector *connector, struct i2c_adapter *adapter) { struct edid *edid; - int ret = 0; edid = drm_get_edid(connector, adapter); - if (edid) { - drm_mode_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - drm_edid_to_eld(connector, edid); - connector->display_info.raw_edid = NULL; - kfree(edid); - } + if (!edid) + return 0; - return ret; + return intel_connector_update_modes(connector, edid); } static const struct drm_prop_enum_list force_audio_names[] = { diff --git a/trunk/drivers/gpu/drm/i915/intel_panel.c b/trunk/drivers/gpu/drm/i915/intel_panel.c index 10c7d39034e1..e019b2369861 100644 --- a/trunk/drivers/gpu/drm/i915/intel_panel.c +++ b/trunk/drivers/gpu/drm/i915/intel_panel.c @@ -162,19 +162,12 @@ static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) return val; } -u32 intel_panel_get_max_backlight(struct drm_device *dev) +static u32 _intel_panel_get_max_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 max; max = i915_read_blc_pwm_ctl(dev_priv); - if (max == 0) { - /* XXX add code here to query mode clock or hardware clock - * and program max PWM appropriately. 
- */ - pr_warn_once("fixme: max PWM is zero\n"); - return 1; - } if (HAS_PCH_SPLIT(dev)) { max >>= 16; @@ -188,6 +181,22 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) max *= 0xff; } + return max; +} + +u32 intel_panel_get_max_backlight(struct drm_device *dev) +{ + u32 max; + + max = _intel_panel_get_max_backlight(dev); + if (max == 0) { + /* XXX add code here to query mode clock or hardware clock + * and program max PWM appropriately. + */ + pr_warn_once("fixme: max PWM is zero\n"); + return 1; + } + DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); return max; } @@ -213,7 +222,7 @@ static u32 intel_panel_compute_brightness(struct drm_device *dev, u32 val) return val; } -u32 intel_panel_get_backlight(struct drm_device *dev) +static u32 intel_panel_get_backlight(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; u32 val; @@ -311,9 +320,6 @@ void intel_panel_enable_backlight(struct drm_device *dev, if (dev_priv->backlight_level == 0) dev_priv->backlight_level = intel_panel_get_max_backlight(dev); - dev_priv->backlight_enabled = true; - intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); - if (INTEL_INFO(dev)->gen >= 4) { uint32_t reg, tmp; @@ -326,7 +332,7 @@ void intel_panel_enable_backlight(struct drm_device *dev, * we don't track the backlight dpms state, hence check whether * we have to do anything first. */ if (tmp & BLM_PWM_ENABLE) - return; + goto set_level; if (dev_priv->num_pipe == 3) tmp &= ~BLM_PIPE_SELECT_IVB; @@ -347,6 +353,14 @@ void intel_panel_enable_backlight(struct drm_device *dev, I915_WRITE(BLC_PWM_PCH_CTL1, tmp); } } + +set_level: + /* Call below after setting BLC_PWM_CPU_CTL2 and BLC_PWM_PCH_CTL1. + * BLC_PWM_CPU_CTL may be cleared to zero automatically when these + * registers are set. 
+ */ + dev_priv->backlight_enabled = true; + intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); } static void intel_panel_init_backlight(struct drm_device *dev) @@ -419,7 +433,11 @@ int intel_panel_setup_backlight(struct drm_device *dev) memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_RAW; - props.max_brightness = intel_panel_get_max_backlight(dev); + props.max_brightness = _intel_panel_get_max_backlight(dev); + if (props.max_brightness == 0) { + DRM_ERROR("Failed to get maximum backlight value\n"); + return -ENODEV; + } dev_priv->backlight = backlight_device_register("intel_backlight", &connector->kdev, dev, diff --git a/trunk/drivers/gpu/drm/i915/intel_pm.c b/trunk/drivers/gpu/drm/i915/intel_pm.c index 94aabcaa3a67..ba8a27b1757a 100644 --- a/trunk/drivers/gpu/drm/i915/intel_pm.c +++ b/trunk/drivers/gpu/drm/i915/intel_pm.c @@ -2441,17 +2441,10 @@ static void gen6_enable_rps(struct drm_device *dev) dev_priv->max_delay << 24 | dev_priv->min_delay << 16); - if (IS_HASWELL(dev)) { - I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); - I915_WRITE(GEN6_RP_UP_EI, 66000); - I915_WRITE(GEN6_RP_DOWN_EI, 350000); - } else { - I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); - I915_WRITE(GEN6_RP_UP_EI, 100000); - I915_WRITE(GEN6_RP_DOWN_EI, 5000000); - } + I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); + I915_WRITE(GEN6_RP_UP_EI, 66000); + I915_WRITE(GEN6_RP_DOWN_EI, 350000); I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); I915_WRITE(GEN6_RP_CONTROL, @@ -3679,6 +3672,9 @@ static void gen3_init_clock_gating(struct drm_device *dev) if (IS_PINEVIEW(dev)) I915_WRITE(ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); + + /* IIR "flip pending" means done if this bit is set */ + I915_WRITE(ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); } static void i85x_init_clock_gating(struct drm_device *dev) @@ -3963,6 +3959,7 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE, 1); + POSTING_READ(FORCEWAKE); if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); @@ -3983,6 +3980,7 @@ static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) DRM_ERROR("Force wake wait timed out\n"); I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(1)); + POSTING_READ(FORCEWAKE_MT); if (wait_for_atomic_us((I915_READ_NOTRACE(forcewake_ack) & 1), 500)) DRM_ERROR("Force wake wait timed out\n"); @@ -4018,14 +4016,14 @@ void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv) static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE, 0); - /* The below doubles as a POSTING_READ */ + POSTING_READ(FORCEWAKE); gen6_gt_check_fifodbg(dev_priv); } static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv) { I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(1)); - /* The below doubles as a POSTING_READ */ + POSTING_READ(FORCEWAKE_MT); gen6_gt_check_fifodbg(dev_priv); } diff --git a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c index bf0195a96d53..e2a73b38abe9 100644 --- a/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/trunk/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -227,31 +227,36 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring, * number of bits based on the write domains has little performance * impact. 
*/ - flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; - flags |= PIPE_CONTROL_TLB_INVALIDATE; - flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; - flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; - flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; - /* - * Ensure that any following seqno writes only happen when the render - * cache is indeed flushed (but only if the caller actually wants that). - */ - if (flush_domains) + if (flush_domains) { + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* + * Ensure that any following seqno writes only happen + * when the render cache is indeed flushed. + */ flags |= PIPE_CONTROL_CS_STALL; + } + if (invalidate_domains) { + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + /* + * TLB invalidate requires a post-sync write. + */ + flags |= PIPE_CONTROL_QW_WRITE; + } - ret = intel_ring_begin(ring, 6); + ret = intel_ring_begin(ring, 4); if (ret) return ret; - intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); + intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4)); intel_ring_emit(ring, flags); intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); - intel_ring_emit(ring, 0); /* lower dword */ - intel_ring_emit(ring, 0); /* uppwer dword */ - intel_ring_emit(ring, MI_NOOP); + intel_ring_emit(ring, 0); intel_ring_advance(ring); return 0; @@ -289,8 +294,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) I915_WRITE_HEAD(ring, 0); ring->write_tail(ring, 0); - /* Initialize the ring. */ - I915_WRITE_START(ring, obj->gtt_offset); head = I915_READ_HEAD(ring) & HEAD_ADDR; /* G45 ring initialization fails to reset head to zero */ @@ -316,6 +319,11 @@ static int init_ring_common(struct intel_ring_buffer *ring) } } + /* Initialize the ring. This must happen _after_ we've cleared the ring + * registers with the above sequence (the readback of the HEAD registers + * also enforces ordering), otherwise the hw might lose the new ring + * register values. */ + I915_WRITE_START(ring, obj->gtt_offset); I915_WRITE_CTL(ring, ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID); diff --git a/trunk/drivers/gpu/drm/i915/intel_sdvo.c b/trunk/drivers/gpu/drm/i915/intel_sdvo.c index 26a6a4d0d078..123afd357611 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sdvo.c +++ b/trunk/drivers/gpu/drm/i915/intel_sdvo.c @@ -444,13 +444,16 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, struct i2c_msg *msgs; int i, ret = true; + /* Would be simpler to allocate both in one go ? */ buf = (u8 *)kzalloc(args_len * 2 + 2, GFP_KERNEL); if (!buf) return false; msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL); - if (!msgs) + if (!msgs) { + kfree(buf); return false; + } intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); @@ -1689,6 +1692,7 @@ static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector) edid = intel_sdvo_get_edid(connector); if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL) has_audio = drm_detect_monitor_audio(edid); + kfree(edid); return has_audio; } @@ -2569,7 +2573,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) hotplug_mask = intel_sdvo->is_sdvob ? 
SDVOB_HOTPLUG_INT_STATUS_I915 : SDVOC_HOTPLUG_INT_STATUS_I915; } - dev_priv->hotplug_supported_mask |= hotplug_mask; drm_encoder_helper_add(&intel_encoder->base, &intel_sdvo_helper_funcs); @@ -2577,14 +2580,6 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) goto err; - /* Set up hotplug command - note paranoia about contents of reply. - * We assume that the hardware is in a sane state, and only touch - * the bits we think we understand. - */ - intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, - &intel_sdvo->hotplug_active, 2); - intel_sdvo->hotplug_active[0] &= ~0x3; - if (intel_sdvo_output_setup(intel_sdvo, intel_sdvo->caps.output_flags) != true) { DRM_DEBUG_KMS("SDVO output failed to setup on %s\n", @@ -2592,6 +2587,12 @@ bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg, bool is_sdvob) goto err; } + /* Only enable the hotplug irq if we need it, to work around noisy + * hotplug lines. + */ + if (intel_sdvo->hotplug_active[0]) + dev_priv->hotplug_supported_mask |= hotplug_mask; + intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo, sdvo_reg); /* Set the input timing to the screen. Assume always input 0. */ diff --git a/trunk/drivers/gpu/drm/i915/intel_sprite.c b/trunk/drivers/gpu/drm/i915/intel_sprite.c index cc8df4de2d92..7644f31a3778 100644 --- a/trunk/drivers/gpu/drm/i915/intel_sprite.c +++ b/trunk/drivers/gpu/drm/i915/intel_sprite.c @@ -60,11 +60,11 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, switch (fb->pixel_format) { case DRM_FORMAT_XBGR8888: - sprctl |= SPRITE_FORMAT_RGBX888; + sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; pixel_size = 4; break; case DRM_FORMAT_XRGB8888: - sprctl |= SPRITE_FORMAT_RGBX888 | SPRITE_RGB_ORDER_RGBX; + sprctl |= SPRITE_FORMAT_RGBX888; pixel_size = 4; break; case DRM_FORMAT_YUYV: diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_drv.c b/trunk/drivers/gpu/drm/mgag200/mgag200_drv.c index ea1024d79974..e5f145d2cb3b 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -84,6 +84,9 @@ static const struct file_operations mgag200_driver_fops = { .mmap = mgag200_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .read = drm_read, }; diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c index a4d7c500c97b..b69642d5d850 100644 --- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -468,10 +468,11 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) { unsigned int vcomax, vcomin, pllreffreq; unsigned int delta, tmpdelta; - unsigned int testr, testn, testm, testo; + int testr, testn, testm, testo; unsigned int p, m, n; - unsigned int computed; + unsigned int computed, vco; int tmp; + const unsigned int m_div_val[] = { 1, 2, 4, 8 }; m = n = p = 0; vcomax = 1488000; @@ -490,12 +491,13 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock) if (delta == 0) break; for (testo = 5; testo < 33; testo++) { - computed = pllreffreq * (testn + 1) / + vco = pllreffreq * (testn + 1) / (testr + 1); - if (computed < vcomin) + if (vco < vcomin) continue; - if (computed > vcomax) + if (vco > vcomax) continue; + computed = vco / (m_div_val[testm] * (testo + 1)); if (computed > clock) tmpdelta = computed - clock; else diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c 
b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c index ff23d88880e5..3ca240b4413d 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -179,7 +179,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS) return 0; } else if (init->class == 0x906e) { - NV_ERROR(dev, "906e not supported yet\n"); + NV_DEBUG(dev, "906e not supported yet\n"); return -EINVAL; } diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c index fc841e87b343..26ebffebe710 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_acpi.c @@ -211,11 +211,6 @@ static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id, return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dhandle, state); } -static int nouveau_dsm_init(void) -{ - return 0; -} - static int nouveau_dsm_get_client_id(struct pci_dev *pdev) { /* easy option one - intel vendor ID means Integrated */ @@ -232,7 +227,6 @@ static int nouveau_dsm_get_client_id(struct pci_dev *pdev) static struct vga_switcheroo_handler nouveau_dsm_handler = { .switchto = nouveau_dsm_switchto, .power_state = nouveau_dsm_power_state, - .init = nouveau_dsm_init, .get_client_id = nouveau_dsm_get_client_id, }; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_display.c b/trunk/drivers/gpu/drm/nouveau/nouveau_display.c index 69688ef5cf46..7e16dc5e6467 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_display.c @@ -598,7 +598,7 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev, args->size = args->pitch * args->height; args->size = roundup(args->size, PAGE_SIZE); - ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo); + ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo); if (ret) return ret; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c b/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c index 77e564667b5c..240cf962c999 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_i2c.c @@ -229,7 +229,7 @@ nouveau_i2c_init(struct drm_device *dev) } break; case 6: /* NV50- DP AUX */ - port->drive = entry[0]; + port->drive = entry[0] & 0x0f; port->sense = port->drive; port->adapter.algo = &nouveau_dp_i2c_algo; break; diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c index 1cdfd6e757ce..c61014442aa9 100644 --- a/trunk/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/trunk/drivers/gpu/drm/nouveau/nouveau_state.c @@ -731,15 +731,16 @@ nouveau_card_init(struct drm_device *dev) case 0xa3: case 0xa5: case 0xa8: - case 0xaf: nva3_copy_create(dev); break; } break; case NV_C0: - nvc0_copy_create(dev, 1); + if (!(nv_rd32(dev, 0x022500) & 0x00000200)) + nvc0_copy_create(dev, 1); case NV_D0: - nvc0_copy_create(dev, 0); + if (!(nv_rd32(dev, 0x022500) & 0x00000100)) + nvc0_copy_create(dev, 0); break; default: break; diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c b/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c index f429e6a8ca7a..c399d510b27a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c +++ b/trunk/drivers/gpu/drm/nouveau/nv50_gpio.c @@ -22,6 +22,7 @@ * Authors: Ben Skeggs */ +#include #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_hw.h" @@ -110,11 +111,26 @@ nv50_gpio_isr(struct drm_device *dev) nv_wr32(dev, 0xe074, intr1); } +static struct dmi_system_id gpio_reset_ids[] = { + { + .ident = "Apple 
Macbook 10,1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"), + } + }, + { } +}; + int nv50_gpio_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; + /* initialise gpios and routing to vbios defaults */ + if (dmi_check_system(gpio_reset_ids)) + nouveau_gpio_reset(dev); + /* disable, and ack any pending gpio interrupts */ nv_wr32(dev, 0xe050, 0x00000000); nv_wr32(dev, 0xe054, 0xffffffff); diff --git a/trunk/drivers/gpu/drm/nouveau/nv84_fifo.c b/trunk/drivers/gpu/drm/nouveau/nv84_fifo.c index cc82d799fc3b..c564c5e4c30a 100644 --- a/trunk/drivers/gpu/drm/nouveau/nv84_fifo.c +++ b/trunk/drivers/gpu/drm/nouveau/nv84_fifo.c @@ -117,17 +117,22 @@ nv84_fifo_context_del(struct nouveau_channel *chan, int engine) struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; unsigned long flags; + u32 save; /* remove channel from playlist, will context switch if active */ spin_lock_irqsave(&dev_priv->context_switch_lock, flags); nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000); nv50_fifo_playlist_update(dev); + save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); + /* tell any engines on this channel to unload their contexts */ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12); if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id); + nv_wr32(dev, 0x002520, save); + nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000); spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags); @@ -184,10 +189,13 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) struct drm_nouveau_private *dev_priv = dev->dev_private; struct nv84_fifo_priv *priv = nv_engine(dev, engine); int i; + u32 save; /* set playlist length to zero, fifo will unload context */ nv_wr32(dev, 0x0032ec, 0); + save = nv_mask(dev, 0x002520, 0x0000003f, 0x15); + /* tell all connected engines to unload their contexts */ for (i = 0; i < priv->base.channels; i++) { struct nouveau_channel *chan = dev_priv->channels.ptr[i]; @@ -199,6 +207,7 @@ nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend) } } + nv_wr32(dev, 0x002520, save); nv_wr32(dev, 0x002140, 0); return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_fb.c b/trunk/drivers/gpu/drm/nouveau/nvc0_fb.c index f704e942372e..f376c39310df 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_fb.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_fb.c @@ -124,6 +124,7 @@ nvc0_fb_init(struct drm_device *dev) priv = dev_priv->engine.fb.priv; nv_wr32(dev, 0x100c10, priv->r100c10 >> 8); + nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */ return 0; } diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c b/trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c index 7d85553d518c..cd39eb99f5b1 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_fifo.c @@ -373,7 +373,8 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) static void nvc0_fifo_isr(struct drm_device *dev) { - u32 stat = nv_rd32(dev, 0x002100); + u32 mask = nv_rd32(dev, 0x002140); + u32 stat = nv_rd32(dev, 0x002100) & mask; if (stat & 0x00000100) { NV_INFO(dev, "PFIFO: unknown status 0x00000100\n"); diff --git a/trunk/drivers/gpu/drm/nouveau/nvc0_pm.c b/trunk/drivers/gpu/drm/nouveau/nvc0_pm.c index 7c95c44e2887..4e712b10ebdb 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvc0_pm.c +++ b/trunk/drivers/gpu/drm/nouveau/nvc0_pm.c @@ -557,7 +557,7 @@ prog_mem(struct drm_device 
*dev, struct nvc0_pm_state *info) nouveau_mem_exec(&exec, info->perflvl); if (dev_priv->chipset < 0xd0) - nv_wr32(dev, 0x611200, 0x00003300); + nv_wr32(dev, 0x611200, 0x00003330); else nv_wr32(dev, 0x62c000, 0x03030300); } diff --git a/trunk/drivers/gpu/drm/nouveau/nvd0_display.c b/trunk/drivers/gpu/drm/nouveau/nvd0_display.c index d0d60e1e7f95..8a2fc89b7763 100644 --- a/trunk/drivers/gpu/drm/nouveau/nvd0_display.c +++ b/trunk/drivers/gpu/drm/nouveau/nvd0_display.c @@ -790,7 +790,7 @@ nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); int ch = EVO_CURS(nv_crtc->index); - evo_piow(crtc->dev, ch, 0x0084, (y << 16) | x); + evo_piow(crtc->dev, ch, 0x0084, (y << 16) | (x & 0xffff)); evo_piow(crtc->dev, ch, 0x0080, 0x00000000); return 0; } @@ -1510,10 +1510,10 @@ nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode, case OUTPUT_DP: if (nv_connector->base.display_info.bpc == 6) { nv_encoder->dp.datarate = mode->clock * 18 / 8; - syncs |= 0x00000140; + syncs |= 0x00000002 << 6; } else { nv_encoder->dp.datarate = mode->clock * 24 / 8; - syncs |= 0x00000180; + syncs |= 0x00000005 << 6; } if (nv_encoder->dcb->sorconf.link & 1) diff --git a/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c b/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c index 1855ecbd843b..281bece751b6 100644 --- a/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c +++ b/trunk/drivers/gpu/drm/nouveau/nve0_fifo.c @@ -294,6 +294,25 @@ nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit) printk(" on channel 0x%010llx\n", (u64)inst << 12); } +static int +nve0_fifo_page_flip(struct drm_device *dev, u32 chid) +{ + struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO); + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_channel *chan = NULL; + unsigned long flags; + int ret = -EINVAL; + + spin_lock_irqsave(&dev_priv->channels.lock, flags); + if (likely(chid >= 0 && chid < priv->base.channels)) { + chan = dev_priv->channels.ptr[chid]; + if (likely(chan)) + ret = nouveau_finish_page_flip(chan, NULL); + } + spin_unlock_irqrestore(&dev_priv->channels.lock, flags); + return ret; +} + static void nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) { @@ -303,11 +322,21 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f; u32 subc = (addr & 0x00070000); u32 mthd = (addr & 0x00003ffc); + u32 show = stat; + + if (stat & 0x00200000) { + if (mthd == 0x0054) { + if (!nve0_fifo_page_flip(dev, chid)) + show &= ~0x00200000; + } + } - NV_INFO(dev, "PSUBFIFO %d:", unit); - nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat); - NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n", - unit, chid, subc, mthd, data); + if (show) { + NV_INFO(dev, "PFIFO%d:", unit); + nouveau_bitfield_print(nve0_fifo_subfifo_intr, show); + NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n", + unit, chid, subc, mthd, data); + } nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008); nv_wr32(dev, 0x040108 + (unit * 0x2000), stat); @@ -316,7 +345,8 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit) static void nve0_fifo_isr(struct drm_device *dev) { - u32 stat = nv_rd32(dev, 0x002100); + u32 mask = nv_rd32(dev, 0x002140); + u32 stat = nv_rd32(dev, 0x002100) & mask; if (stat & 0x00000100) { NV_INFO(dev, "PFIFO: unknown status 0x00000100\n"); diff --git a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c index 
9e6f76fec527..2817101fb167 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_crtc.c @@ -258,8 +258,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) radeon_crtc->enabled = true; /* adjust pm to dpms changes BEFORE enabling crtcs */ radeon_pm_compute_clocks(rdev); - /* disable crtc pair power gating before programming */ - if (ASIC_IS_DCE6(rdev)) + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) atombios_powergate_crtc(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_ENABLE); if (ASIC_IS_DCE3(rdev) && !ASIC_IS_DCE6(rdev)) @@ -278,25 +277,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode) atombios_enable_crtc_memreq(crtc, ATOM_DISABLE); atombios_enable_crtc(crtc, ATOM_DISABLE); radeon_crtc->enabled = false; - /* power gating is per-pair */ - if (ASIC_IS_DCE6(rdev)) { - struct drm_crtc *other_crtc; - struct radeon_crtc *other_radeon_crtc; - list_for_each_entry(other_crtc, &rdev->ddev->mode_config.crtc_list, head) { - other_radeon_crtc = to_radeon_crtc(other_crtc); - if (((radeon_crtc->crtc_id == 0) && (other_radeon_crtc->crtc_id == 1)) || - ((radeon_crtc->crtc_id == 1) && (other_radeon_crtc->crtc_id == 0)) || - ((radeon_crtc->crtc_id == 2) && (other_radeon_crtc->crtc_id == 3)) || - ((radeon_crtc->crtc_id == 3) && (other_radeon_crtc->crtc_id == 2)) || - ((radeon_crtc->crtc_id == 4) && (other_radeon_crtc->crtc_id == 5)) || - ((radeon_crtc->crtc_id == 5) && (other_radeon_crtc->crtc_id == 4))) { - /* if both crtcs in the pair are off, enable power gating */ - if (other_radeon_crtc->enabled == false) - atombios_powergate_crtc(crtc, ATOM_ENABLE); - break; - } - } - } + if (ASIC_IS_DCE6(rdev) && !radeon_crtc->in_mode_set) + atombios_powergate_crtc(crtc, ATOM_ENABLE); /* adjust pm to dpms changes AFTER disabling crtcs */ radeon_pm_compute_clocks(rdev); break; @@ -444,11 +426,28 @@ union atom_enable_ss { static void atombios_crtc_program_ss(struct radeon_device *rdev, int enable, int pll_id, + int crtc_id, struct radeon_atom_ss *ss) { + unsigned i; int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL); union atom_enable_ss args; + if (!enable) { + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i] && + rdev->mode_info.crtcs[i]->enabled && + i != crtc_id && + pll_id == rdev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off spread spectrum as it might turn off + * display on active crtc + */ + return; + } + } + } + memset(&args, 0, sizeof(args)); if (ASIC_IS_DCE5(rdev)) { @@ -1028,7 +1027,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, &ref_div, &post_div); - atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, encoder_mode, radeon_encoder->encoder_id, mode->clock, @@ -1051,7 +1050,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode ss.step = step_size; } - atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss); + atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, radeon_crtc->crtc_id, &ss); } } @@ -1531,12 +1530,12 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) * crtc virtual pixel clock. 
*/ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(test_encoder))) { - if (ASIC_IS_DCE5(rdev)) - return ATOM_DCPLL; + if (rdev->clock.dp_extclk) + return ATOM_PPLL_INVALID; else if (ASIC_IS_DCE6(rdev)) return ATOM_PPLL0; - else if (rdev->clock.dp_extclk) - return ATOM_PPLL_INVALID; + else if (ASIC_IS_DCE5(rdev)) + return ATOM_DCPLL; } } } @@ -1572,11 +1571,11 @@ void radeon_atom_disp_eng_pll_init(struct radeon_device *rdev) ASIC_INTERNAL_SS_ON_DCPLL, rdev->clock.default_dispclk); if (ss_enabled) - atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss); + atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, -1, &ss); /* XXX: DCE5, make sure voltage, dispclk is high enough */ atombios_crtc_set_disp_eng_pll(rdev, rdev->clock.default_dispclk); if (ss_enabled) - atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss); + atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, -1, &ss); } } @@ -1635,18 +1634,28 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc, static void atombios_crtc_prepare(struct drm_crtc *crtc) { struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + struct drm_device *dev = crtc->dev; + struct radeon_device *rdev = dev->dev_private; + radeon_crtc->in_mode_set = true; /* pick pll */ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc); + /* disable crtc pair power gating before programming */ + if (ASIC_IS_DCE6(rdev)) + atombios_powergate_crtc(crtc, ATOM_DISABLE); + atombios_lock_crtc(crtc, ATOM_ENABLE); atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void atombios_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); + atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); atombios_lock_crtc(crtc, ATOM_DISABLE); + radeon_crtc->in_mode_set = false; } static void atombios_crtc_disable(struct drm_crtc *crtc) @@ -1655,9 +1664,22 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_atom_ss ss; + int i; atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); + for (i = 0; i < rdev->num_crtc; i++) { + if (rdev->mode_info.crtcs[i] && + rdev->mode_info.crtcs[i]->enabled && + i != radeon_crtc->crtc_id && + radeon_crtc->pll_id == rdev->mode_info.crtcs[i]->pll_id) { + /* one other crtc is using this pll don't turn + * off the pll + */ + goto done; + } + } + switch (radeon_crtc->pll_id) { case ATOM_PPLL1: case ATOM_PPLL2: @@ -1674,6 +1696,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) default: break; } +done: radeon_crtc->pll_id = -1; } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_dp.c b/trunk/drivers/gpu/drm/radeon/atombios_dp.c index 7712cf5ab33b..3623b98ed3fe 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_dp.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_dp.c @@ -577,30 +577,25 @@ int radeon_dp_get_panel_mode(struct drm_encoder *encoder, struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + u16 dp_bridge = radeon_connector_encoder_get_dp_bridge_encoder_id(connector); + u8 tmp; if (!ASIC_IS_DCE4(rdev)) return panel_mode; - if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_NUTMEG) - panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; - else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) == - ENCODER_OBJECT_ID_TRAVIS) { - u8 id[6]; - int i; - for (i = 0; i < 6; i++) - id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i); - if (id[0] == 
0x73 && - id[1] == 0x69 && - id[2] == 0x76 && - id[3] == 0x61 && - id[4] == 0x72 && - id[5] == 0x54) + if (dp_bridge != ENCODER_OBJECT_ID_NONE) { + /* DP bridge chips */ + tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); + if (tmp & 1) + panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + else if ((dp_bridge == ENCODER_OBJECT_ID_NUTMEG) || + (dp_bridge == ENCODER_OBJECT_ID_TRAVIS)) panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE; else - panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; + panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { - u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); + /* eDP */ + tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP); if (tmp & 1) panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE; } diff --git a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c index f9bc27fe269a..6e8803a1170c 100644 --- a/trunk/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/trunk/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1379,6 +1379,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); struct radeon_connector *radeon_connector = NULL; struct radeon_connector_atom_dig *radeon_dig_connector = NULL; @@ -1390,19 +1392,37 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) switch (mode) { case DRM_MODE_DPMS_ON: - /* some early dce3.2 boards have a bug in their transmitter control table */ - if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) || - ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - if (ASIC_IS_DCE6(rdev)) { - /* It seems we need to call ATOM_ENCODER_CMD_SETUP again - * before reenabling encoder on DPMS ON, otherwise we never - * get picture - */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + if (!connector) + dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; + else + dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); + + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + atombios_dig_encoder_setup(encoder, + ATOM_ENCODER_CMD_SETUP_PANEL_MODE, + dig->panel_mode); + if (ext_encoder) { + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) + atombios_external_encoder_setup(encoder, ext_encoder, + EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); } atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { + } else if (ASIC_IS_DCE4(rdev)) { + /* setup and enable the encoder */ + atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); + /* enable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + } else { + /* setup and enable the encoder and transmitter */ + atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + /* some early dce3.2 boards have a bug 
in their transmitter control table */ + if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { @@ -1420,10 +1440,19 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) case DRM_MODE_DPMS_STANDBY: case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + /* disable the transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else + } else if (ASIC_IS_DCE4(rdev)) { + /* disable the transmitter */ + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + } else { + /* disable the encoder and transmitter */ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); + atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); + } if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { if (ASIC_IS_DCE4(rdev)) atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0); @@ -1740,13 +1769,34 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); struct drm_encoder *test_encoder; - struct radeon_encoder_atom_dig *dig; + struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; uint32_t dig_enc_in_use = 0; - /* DCE4/5 */ - if (ASIC_IS_DCE4(rdev)) { - dig = radeon_encoder->enc_priv; - if (ASIC_IS_DCE41(rdev)) { + if (ASIC_IS_DCE6(rdev)) { + /* DCE6 */ + switch (radeon_encoder->encoder_id) { + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: + if (dig->linkb) + return 1; + else + return 0; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: + if (dig->linkb) + return 3; + else + return 2; + break; + case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: + if (dig->linkb) + return 5; + else + return 4; + break; + } + } else if (ASIC_IS_DCE4(rdev)) { + /* DCE4/5 */ + if (ASIC_IS_DCE41(rdev) && !ASIC_IS_DCE61(rdev)) { /* ontario follows DCE4 */ if (rdev->family == CHIP_PALM) { if (dig->linkb) @@ -1848,10 +1898,12 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct drm_encoder *ext_encoder = radeon_get_external_encoder(encoder); radeon_encoder->pixel_clock = adjusted_mode->clock; + /* need to call this here rather than in prepare() since we need some crtc info */ + radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); + if (ASIC_IS_AVIVO(rdev) && !ASIC_IS_DCE4(rdev)) { if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT)) atombios_yuv_setup(encoder, true); @@ -1870,38 +1922,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { - struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); - struct radeon_encoder_atom_dig *dig = 
radeon_encoder->enc_priv; - - if (!connector) - dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE; - else - dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector); - - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - atombios_dig_encoder_setup(encoder, - ATOM_ENCODER_CMD_SETUP_PANEL_MODE, - dig->panel_mode); - } else if (ASIC_IS_DCE4(rdev)) { - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - /* setup and enable the encoder */ - atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0); - - /* enable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - - /* setup and enable the encoder and transmitter */ - atombios_dig_encoder_setup(encoder, ATOM_ENABLE, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); - } + /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: @@ -1922,14 +1943,6 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, break; } - if (ext_encoder) { - if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev)) - atombios_external_encoder_setup(encoder, ext_encoder, - EXTERNAL_ENCODER_ACTION_V3_ENCODER_SETUP); - else - atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE); - } - atombios_apply_encoder_quirks(encoder, adjusted_mode); if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { @@ -2116,7 +2129,6 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) } radeon_atom_output_lock(encoder, true); - radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF); if (connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); @@ -2137,6 +2149,7 @@ static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) static void radeon_atom_encoder_commit(struct drm_encoder *encoder) { + /* need to call this here as we need the crtc set up */ radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_ON); radeon_atom_output_lock(encoder, false); } @@ -2177,14 +2190,7 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder) case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: - if (ASIC_IS_DCE4(rdev)) - /* disable the transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - else { - /* disable the encoder and transmitter */ - atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0); - atombios_dig_encoder_setup(encoder, ATOM_DISABLE, 0); - } + /* handled in dpms */ break; case ENCODER_OBJECT_ID_INTERNAL_DDI: case ENCODER_OBJECT_ID_INTERNAL_DVO1: diff --git a/trunk/drivers/gpu/drm/radeon/evergreen.c b/trunk/drivers/gpu/drm/radeon/evergreen.c index e585a3b947eb..e93b80a6d4e9 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen.c @@ -1229,24 +1229,8 @@ void evergreen_agp_enable(struct radeon_device *rdev) void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save) { - save->vga_control[0] = RREG32(D1VGA_CONTROL); - save->vga_control[1] = RREG32(D2VGA_CONTROL); save->vga_render_control = 
RREG32(VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); - save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET); - save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (rdev->num_crtc >= 4) { - save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL); - save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL); - save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET); - save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); - } - if (rdev->num_crtc >= 6) { - save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL); - save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL); - save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET); - save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); - } /* Stop all video */ WREG32(VGA_RENDER_CONTROL, 0); @@ -1357,47 +1341,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s /* Unlock host access */ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(D1VGA_CONTROL, save->vga_control[0]); - WREG32(D2VGA_CONTROL, save->vga_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]); - WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]); - WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1); - } - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]); - WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]); - } - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); - if (rdev->num_crtc >= 4) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); - } - if (rdev->num_crtc >= 6) { - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0); - WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); - } WREG32(VGA_RENDER_CONTROL, save->vga_render_control); } @@ -1986,10 +1929,18 @@ static void evergreen_gpu_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) rdev->config.evergreen.tile_config |= 1 << 4; else { - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.evergreen.tile_config |= 1 << 
4; - else + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.evergreen.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.evergreen.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.evergreen.tile_config |= 2 << 4; + break; + } } rdev->config.evergreen.tile_config |= 0 << 8; rdev->config.evergreen.tile_config |= diff --git a/trunk/drivers/gpu/drm/radeon/evergreen_cs.c b/trunk/drivers/gpu/drm/radeon/evergreen_cs.c index c16554122ccd..e44a62a07fe3 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/trunk/drivers/gpu/drm/radeon/evergreen_cs.c @@ -788,6 +788,13 @@ static int evergreen_cs_track_validate_texture(struct radeon_cs_parser *p, case V_030000_SQ_TEX_DIM_1D_ARRAY: case V_030000_SQ_TEX_DIM_2D_ARRAY: depth = 1; + break; + case V_030000_SQ_TEX_DIM_2D_MSAA: + case V_030000_SQ_TEX_DIM_2D_ARRAY_MSAA: + surf.nsamples = 1 << llevel; + llevel = 0; + depth = 1; + break; case V_030000_SQ_TEX_DIM_3D: break; default: @@ -961,13 +968,15 @@ static int evergreen_cs_track_check(struct radeon_cs_parser *p) if (track->db_dirty) { /* Check stencil buffer */ - if (G_028800_STENCIL_ENABLE(track->db_depth_control)) { + if (G_028044_FORMAT(track->db_s_info) != V_028044_STENCIL_INVALID && + G_028800_STENCIL_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_stencil(p); if (r) return r; } /* Check depth buffer */ - if (G_028800_Z_ENABLE(track->db_depth_control)) { + if (G_028040_FORMAT(track->db_z_info) != V_028040_Z_INVALID && + G_028800_Z_ENABLE(track->db_depth_control)) { r = evergreen_cs_track_validate_depth(p); if (r) return r; diff --git a/trunk/drivers/gpu/drm/radeon/evergreend.h b/trunk/drivers/gpu/drm/radeon/evergreend.h index d3bd098e4e19..79347855d9bf 100644 --- a/trunk/drivers/gpu/drm/radeon/evergreend.h +++ b/trunk/drivers/gpu/drm/radeon/evergreend.h @@ -1277,6 +1277,8 @@ #define S_028044_FORMAT(x) (((x) & 0x1) << 0) #define G_028044_FORMAT(x) (((x) >> 0) & 0x1) #define C_028044_FORMAT 0xFFFFFFFE +#define V_028044_STENCIL_INVALID 0 +#define V_028044_STENCIL_8 1 #define G_028044_TILE_SPLIT(x) (((x) >> 8) & 0x7) #define DB_Z_READ_BASE 0x28048 #define DB_STENCIL_READ_BASE 0x2804c diff --git a/trunk/drivers/gpu/drm/radeon/ni.c b/trunk/drivers/gpu/drm/radeon/ni.c index 9945d86d9001..853800e8582f 100644 --- a/trunk/drivers/gpu/drm/radeon/ni.c +++ b/trunk/drivers/gpu/drm/radeon/ni.c @@ -574,10 +574,18 @@ static void cayman_gpu_init(struct radeon_device *rdev) if (rdev->flags & RADEON_IS_IGP) rdev->config.cayman.tile_config |= 1 << 4; else { - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.cayman.tile_config |= 1 << 4; - else + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.cayman.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.cayman.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.cayman.tile_config |= 2 << 4; + break; + } } rdev->config.cayman.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; diff --git a/trunk/drivers/gpu/drm/radeon/r100.c b/trunk/drivers/gpu/drm/radeon/r100.c index 8acb34fd3fd5..8d7e33a0b243 100644 --- a/trunk/drivers/gpu/drm/radeon/r100.c +++ b/trunk/drivers/gpu/drm/radeon/r100.c @@ -1182,7 +1182,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) ring->ready = true; radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); - if 
(radeon_ring_supports_scratch_reg(rdev, ring)) { + if (!ring->rptr_save_reg /* not resuming from suspend */ + && radeon_ring_supports_scratch_reg(rdev, ring)) { r = radeon_scratch_get(rdev, &ring->rptr_save_reg); if (r) { DRM_ERROR("failed to get scratch reg for rptr save (%d).\n", r); diff --git a/trunk/drivers/gpu/drm/radeon/r600.c b/trunk/drivers/gpu/drm/radeon/r600.c index 637280f541a3..d79c639ae739 100644 --- a/trunk/drivers/gpu/drm/radeon/r600.c +++ b/trunk/drivers/gpu/drm/radeon/r600.c @@ -3789,3 +3789,23 @@ static void r600_pcie_gen2_enable(struct radeon_device *rdev) WREG32_PCIE_P(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); } } + +/** + * r600_get_gpu_clock - return GPU clock counter snapshot + * + * @rdev: radeon_device pointer + * + * Fetches a GPU clock counter snapshot (R6xx-cayman). + * Returns the 64 bit clock counter snapshot. + */ +uint64_t r600_get_gpu_clock(struct radeon_device *rdev) +{ + uint64_t clock; + + mutex_lock(&rdev->gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&rdev->gpu_clock_mutex); + return clock; +} diff --git a/trunk/drivers/gpu/drm/radeon/r600_cs.c b/trunk/drivers/gpu/drm/radeon/r600_cs.c index ca87f7afaf23..f37676d7f217 100644 --- a/trunk/drivers/gpu/drm/radeon/r600_cs.c +++ b/trunk/drivers/gpu/drm/radeon/r600_cs.c @@ -47,18 +47,23 @@ struct r600_cs_track { u32 npipes; /* value we track */ u32 sq_config; + u32 log_nsamples; u32 nsamples; u32 cb_color_base_last[8]; struct radeon_bo *cb_color_bo[8]; u64 cb_color_bo_mc[8]; - u32 cb_color_bo_offset[8]; - struct radeon_bo *cb_color_frag_bo[8]; /* unused */ - struct radeon_bo *cb_color_tile_bo[8]; /* unused */ + u64 cb_color_bo_offset[8]; + struct radeon_bo *cb_color_frag_bo[8]; + u64 cb_color_frag_offset[8]; + struct radeon_bo *cb_color_tile_bo[8]; + u64 cb_color_tile_offset[8]; + u32 cb_color_mask[8]; u32 cb_color_info[8]; u32 cb_color_view[8]; u32 cb_color_size_idx[8]; /* unused */ u32 cb_target_mask; u32 cb_shader_mask; /* unused */ + bool is_resolve; u32 cb_color_size[8]; u32 vgt_strmout_en; u32 vgt_strmout_buffer_en; @@ -311,7 +316,15 @@ static void r600_cs_track_init(struct r600_cs_track *track) track->cb_color_bo[i] = NULL; track->cb_color_bo_offset[i] = 0xFFFFFFFF; track->cb_color_bo_mc[i] = 0xFFFFFFFF; - } + track->cb_color_frag_bo[i] = NULL; + track->cb_color_frag_offset[i] = 0xFFFFFFFF; + track->cb_color_tile_bo[i] = NULL; + track->cb_color_tile_offset[i] = 0xFFFFFFFF; + track->cb_color_mask[i] = 0xFFFFFFFF; + } + track->is_resolve = false; + track->nsamples = 16; + track->log_nsamples = 4; track->cb_target_mask = 0xFFFFFFFF; track->cb_shader_mask = 0xFFFFFFFF; track->cb_dirty = true; @@ -348,11 +361,9 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) volatile u32 *ib = p->ib.ptr; unsigned array_mode; u32 format; + /* When resolve is used, the second colorbuffer has always 1 sample. */ + unsigned nsamples = track->is_resolve && i == 1 ? 
1 : track->nsamples; - if (G_0280A0_TILE_MODE(track->cb_color_info[i])) { - dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n"); - return -EINVAL; - } size = radeon_bo_size(track->cb_color_bo[i]) - track->cb_color_bo_offset[i]; format = G_0280A0_FORMAT(track->cb_color_info[i]); if (!r600_fmt_is_valid_color(format)) { @@ -375,7 +386,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) array_check.group_size = track->group_size; array_check.nbanks = track->nbanks; array_check.npipes = track->npipes; - array_check.nsamples = track->nsamples; + array_check.nsamples = nsamples; array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { @@ -420,7 +431,8 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) } /* check offset */ - tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * r600_fmt_get_blocksize(format); + tmp = r600_fmt_get_nblocksy(format, height) * r600_fmt_get_nblocksx(format, pitch) * + r600_fmt_get_blocksize(format) * nsamples; switch (array_mode) { default: case V_0280A0_ARRAY_LINEAR_GENERAL: @@ -441,7 +453,7 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) * broken userspace. */ } else { - dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big (%d %d) (%d %d %d)\n", + dev_warn(p->dev, "%s offset[%d] %d %llu %d %lu too big (%d %d) (%d %d %d)\n", __func__, i, array_mode, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]), @@ -458,6 +470,51 @@ static int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i) tmp = S_028060_PITCH_TILE_MAX((pitch / 8) - 1) | S_028060_SLICE_TILE_MAX(slice_tile_max - 1); ib[track->cb_color_size_idx[i]] = tmp; + + /* FMASK/CMASK */ + switch (G_0280A0_TILE_MODE(track->cb_color_info[i])) { + case V_0280A0_TILE_DISABLE: + break; + case V_0280A0_FRAG_ENABLE: + if (track->nsamples > 1) { + uint32_t tile_max = G_028100_FMASK_TILE_MAX(track->cb_color_mask[i]); + /* the tile size is 8x8, but the size is in units of bits. + * for bytes, do just * 8. */ + uint32_t bytes = track->nsamples * track->log_nsamples * 8 * (tile_max + 1); + + if (bytes + track->cb_color_frag_offset[i] > + radeon_bo_size(track->cb_color_frag_bo[i])) { + dev_warn(p->dev, "%s FMASK_TILE_MAX too large " + "(tile_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", + __func__, tile_max, bytes, + track->cb_color_frag_offset[i], + radeon_bo_size(track->cb_color_frag_bo[i])); + return -EINVAL; + } + } + /* fall through */ + case V_0280A0_CLEAR_ENABLE: + { + uint32_t block_max = G_028100_CMASK_BLOCK_MAX(track->cb_color_mask[i]); + /* One block = 128x128 pixels, one 8x8 tile has 4 bits.. + * (128*128) / (8*8) / 2 = 128 bytes per block. 
*/ + uint32_t bytes = (block_max + 1) * 128; + + if (bytes + track->cb_color_tile_offset[i] > + radeon_bo_size(track->cb_color_tile_bo[i])) { + dev_warn(p->dev, "%s CMASK_BLOCK_MAX too large " + "(block_max=%u, bytes=%u, offset=%llu, bo_size=%lu)\n", + __func__, block_max, bytes, + track->cb_color_tile_offset[i], + radeon_bo_size(track->cb_color_tile_bo[i])); + return -EINVAL; + } + break; + } + default: + dev_warn(p->dev, "%s invalid tile mode\n", __func__); + return -EINVAL; + } return 0; } @@ -566,7 +623,7 @@ static int r600_cs_track_validate_db(struct radeon_cs_parser *p) ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1; nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; - tmp = ntiles * bpe * 64 * nviews; + tmp = ntiles * bpe * 64 * nviews * track->nsamples; if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n", array_mode, @@ -746,6 +803,12 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) */ if (track->cb_dirty) { tmp = track->cb_target_mask; + + /* We must check both colorbuffers for RESOLVE. */ + if (track->is_resolve) { + tmp |= 0xff; + } + for (i = 0; i < 8; i++) { if ((tmp >> (i * 4)) & 0xF) { /* at least one component is enabled */ @@ -764,8 +827,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p) } /* Check depth buffer */ - if (track->db_dirty && (G_028800_STENCIL_ENABLE(track->db_depth_control) || - G_028800_Z_ENABLE(track->db_depth_control))) { + if (track->db_dirty && + G_028010_FORMAT(track->db_depth_info) != V_028010_DEPTH_INVALID && + (G_028800_STENCIL_ENABLE(track->db_depth_control) || + G_028800_Z_ENABLE(track->db_depth_control))) { r = r600_cs_track_validate_db(p); if (r) return r; @@ -1229,9 +1294,15 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) break; case R_028C04_PA_SC_AA_CONFIG: tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx)); + track->log_nsamples = tmp; track->nsamples = 1 << tmp; track->cb_dirty = true; break; + case R_028808_CB_COLOR_CONTROL: + tmp = G_028808_SPECIAL_OP(radeon_get_ib_value(p, idx)); + track->is_resolve = tmp == V_028808_SPECIAL_RESOLVE_BOX; + track->cb_dirty = true; + break; case R_0280A0_CB_COLOR0_INFO: case R_0280A4_CB_COLOR1_INFO: case R_0280A8_CB_COLOR2_INFO: @@ -1310,16 +1381,21 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg); return -EINVAL; } - ib[idx] = track->cb_color_base_last[tmp]; track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp]; + track->cb_color_frag_offset[tmp] = track->cb_color_bo_offset[tmp]; + ib[idx] = track->cb_color_base_last[tmp]; } else { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_frag_bo[tmp] = reloc->robj; + track->cb_color_frag_offset[tmp] = (u64)ib[idx] << 8; + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + } + if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { + track->cb_dirty = true; } break; case R_0280C0_CB_COLOR0_TILE: @@ -1336,16 +1412,35 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx) dev_err(p->dev, "Broken old userspace ? 
no cb_color0_base supplied before trying to write 0x%08X\n", reg); return -EINVAL; } - ib[idx] = track->cb_color_base_last[tmp]; track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp]; + track->cb_color_tile_offset[tmp] = track->cb_color_bo_offset[tmp]; + ib[idx] = track->cb_color_base_last[tmp]; } else { r = r600_cs_packet_next_reloc(p, &reloc); if (r) { dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); return -EINVAL; } - ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); track->cb_color_tile_bo[tmp] = reloc->robj; + track->cb_color_tile_offset[tmp] = (u64)ib[idx] << 8; + ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); + } + if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { + track->cb_dirty = true; + } + break; + case R_028100_CB_COLOR0_MASK: + case R_028104_CB_COLOR1_MASK: + case R_028108_CB_COLOR2_MASK: + case R_02810C_CB_COLOR3_MASK: + case R_028110_CB_COLOR4_MASK: + case R_028114_CB_COLOR5_MASK: + case R_028118_CB_COLOR6_MASK: + case R_02811C_CB_COLOR7_MASK: + tmp = (reg - R_028100_CB_COLOR0_MASK) / 4; + track->cb_color_mask[tmp] = radeon_get_ib_value(p, idx); + if (G_0280A0_TILE_MODE(track->cb_color_info[tmp])) { + track->cb_dirty = true; } break; case CB_COLOR0_BASE: @@ -1490,7 +1585,7 @@ unsigned r600_mip_minify(unsigned size, unsigned level) } static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, - unsigned w0, unsigned h0, unsigned d0, unsigned format, + unsigned w0, unsigned h0, unsigned d0, unsigned nsamples, unsigned format, unsigned block_align, unsigned height_align, unsigned base_align, unsigned *l0_size, unsigned *mipmap_size) { @@ -1518,7 +1613,7 @@ static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned llevel, depth = r600_mip_minify(d0, i); - size = nbx * nby * blocksize; + size = nbx * nby * blocksize * nsamples; if (nfaces) size *= nfaces; else @@ -1557,13 +1652,14 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, u32 tiling_flags) { struct r600_cs_track *track = p->track; - u32 nfaces, llevel, blevel, w0, h0, d0; - u32 word0, word1, l0_size, mipmap_size, word2, word3; + u32 dim, nfaces, llevel, blevel, w0, h0, d0; + u32 word0, word1, l0_size, mipmap_size, word2, word3, word4, word5; u32 height_align, pitch, pitch_align, depth_align; - u32 array, barray, larray; + u32 barray, larray; u64 base_align; struct array_mode_checker array_check; u32 format; + bool is_array; /* on legacy kernel we don't perform advanced check */ if (p->rdev == NULL) @@ -1581,12 +1677,28 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); } word1 = radeon_get_ib_value(p, idx + 1); + word2 = radeon_get_ib_value(p, idx + 2) << 8; + word3 = radeon_get_ib_value(p, idx + 3) << 8; + word4 = radeon_get_ib_value(p, idx + 4); + word5 = radeon_get_ib_value(p, idx + 5); + dim = G_038000_DIM(word0); w0 = G_038000_TEX_WIDTH(word0) + 1; + pitch = (G_038000_PITCH(word0) + 1) * 8; h0 = G_038004_TEX_HEIGHT(word1) + 1; d0 = G_038004_TEX_DEPTH(word1); + format = G_038004_DATA_FORMAT(word1); + blevel = G_038010_BASE_LEVEL(word4); + llevel = G_038014_LAST_LEVEL(word5); + /* pitch in texels */ + array_check.array_mode = G_038000_TILE_MODE(word0); + array_check.group_size = track->group_size; + array_check.nbanks = track->nbanks; + array_check.npipes = track->npipes; + array_check.nsamples = 1; + array_check.blocksize = r600_fmt_get_blocksize(format); nfaces = 1; - array = 0; - switch (G_038000_DIM(word0)) { + is_array = 
false; + switch (dim) { case V_038000_SQ_TEX_DIM_1D: case V_038000_SQ_TEX_DIM_2D: case V_038000_SQ_TEX_DIM_3D: @@ -1599,29 +1711,25 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, break; case V_038000_SQ_TEX_DIM_1D_ARRAY: case V_038000_SQ_TEX_DIM_2D_ARRAY: - array = 1; + is_array = true; break; - case V_038000_SQ_TEX_DIM_2D_MSAA: case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA: + is_array = true; + /* fall through */ + case V_038000_SQ_TEX_DIM_2D_MSAA: + array_check.nsamples = 1 << llevel; + llevel = 0; + break; default: dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0)); return -EINVAL; } - format = G_038004_DATA_FORMAT(word1); if (!r600_fmt_is_valid_texture(format, p->family)) { dev_warn(p->dev, "%s:%d texture invalid format %d\n", __func__, __LINE__, format); return -EINVAL; } - /* pitch in texels */ - pitch = (G_038000_PITCH(word0) + 1) * 8; - array_check.array_mode = G_038000_TILE_MODE(word0); - array_check.group_size = track->group_size; - array_check.nbanks = track->nbanks; - array_check.npipes = track->npipes; - array_check.nsamples = 1; - array_check.blocksize = r600_fmt_get_blocksize(format); if (r600_get_array_mode_alignment(&array_check, &pitch_align, &height_align, &depth_align, &base_align)) { dev_warn(p->dev, "%s:%d tex array mode (%d) invalid\n", @@ -1647,24 +1755,17 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } - word2 = radeon_get_ib_value(p, idx + 2) << 8; - word3 = radeon_get_ib_value(p, idx + 3) << 8; - - word0 = radeon_get_ib_value(p, idx + 4); - word1 = radeon_get_ib_value(p, idx + 5); - blevel = G_038010_BASE_LEVEL(word0); - llevel = G_038014_LAST_LEVEL(word1); if (blevel > llevel) { dev_warn(p->dev, "texture blevel %d > llevel %d\n", blevel, llevel); } - if (array == 1) { - barray = G_038014_BASE_ARRAY(word1); - larray = G_038014_LAST_ARRAY(word1); + if (is_array) { + barray = G_038014_BASE_ARRAY(word5); + larray = G_038014_LAST_ARRAY(word5); nfaces = larray - barray + 1; } - r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, format, + r600_texture_size(nfaces, blevel, llevel, w0, h0, d0, array_check.nsamples, format, pitch_align, height_align, base_align, &l0_size, &mipmap_size); /* using get ib will give us the offset into the texture bo */ @@ -1677,7 +1778,6 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx, return -EINVAL; } /* using get ib will give us the offset into the mipmap bo */ - word3 = radeon_get_ib_value(p, idx + 3) << 8; if ((mipmap_size + word3) > radeon_bo_size(mipmap)) { /*dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n", w0, h0, format, blevel, nlevels, word3, mipmap_size, radeon_bo_size(texture));*/ diff --git a/trunk/drivers/gpu/drm/radeon/r600d.h b/trunk/drivers/gpu/drm/radeon/r600d.h index 4b116ae75fc2..fa6f37099ba9 100644 --- a/trunk/drivers/gpu/drm/radeon/r600d.h +++ b/trunk/drivers/gpu/drm/radeon/r600d.h @@ -66,6 +66,14 @@ #define CC_RB_BACKEND_DISABLE 0x98F4 #define BACKEND_DISABLE(x) ((x) << 16) +#define R_028808_CB_COLOR_CONTROL 0x28808 +#define S_028808_SPECIAL_OP(x) (((x) & 0x7) << 4) +#define G_028808_SPECIAL_OP(x) (((x) >> 4) & 0x7) +#define C_028808_SPECIAL_OP 0xFFFFFF8F +#define V_028808_SPECIAL_NORMAL 0x00 +#define V_028808_SPECIAL_DISABLE 0x01 +#define V_028808_SPECIAL_RESOLVE_BOX 0x07 + #define CB_COLOR0_BASE 0x28040 #define CB_COLOR1_BASE 0x28044 #define CB_COLOR2_BASE 0x28048 @@ -92,6 +100,20 @@ #define R_028094_CB_COLOR5_VIEW 0x028094 #define 
R_028098_CB_COLOR6_VIEW 0x028098 #define R_02809C_CB_COLOR7_VIEW 0x02809C +#define R_028100_CB_COLOR0_MASK 0x028100 +#define S_028100_CMASK_BLOCK_MAX(x) (((x) & 0xFFF) << 0) +#define G_028100_CMASK_BLOCK_MAX(x) (((x) >> 0) & 0xFFF) +#define C_028100_CMASK_BLOCK_MAX 0xFFFFF000 +#define S_028100_FMASK_TILE_MAX(x) (((x) & 0xFFFFF) << 12) +#define G_028100_FMASK_TILE_MAX(x) (((x) >> 12) & 0xFFFFF) +#define C_028100_FMASK_TILE_MAX 0x00000FFF +#define R_028104_CB_COLOR1_MASK 0x028104 +#define R_028108_CB_COLOR2_MASK 0x028108 +#define R_02810C_CB_COLOR3_MASK 0x02810C +#define R_028110_CB_COLOR4_MASK 0x028110 +#define R_028114_CB_COLOR5_MASK 0x028114 +#define R_028118_CB_COLOR6_MASK 0x028118 +#define R_02811C_CB_COLOR7_MASK 0x02811C #define CB_COLOR0_INFO 0x280a0 # define CB_FORMAT(x) ((x) << 2) # define CB_ARRAY_MODE(x) ((x) << 8) @@ -602,6 +624,9 @@ #define RLC_HB_WPTR 0x3f1c #define RLC_HB_WPTR_LSB_ADDR 0x3f14 #define RLC_HB_WPTR_MSB_ADDR 0x3f18 +#define RLC_GPU_CLOCK_COUNT_LSB 0x3f38 +#define RLC_GPU_CLOCK_COUNT_MSB 0x3f3c +#define RLC_CAPTURE_GPU_CLOCK_COUNT 0x3f40 #define RLC_MC_CNTL 0x3f44 #define RLC_UCODE_CNTL 0x3f48 #define RLC_UCODE_ADDR 0x3f2c @@ -1397,6 +1422,9 @@ #define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18) #define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3) #define C_0280A0_TILE_MODE 0xFFF3FFFF +#define V_0280A0_TILE_DISABLE 0 +#define V_0280A0_CLEAR_ENABLE 1 +#define V_0280A0_FRAG_ENABLE 2 #define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20) #define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1) #define C_0280A0_BLEND_CLAMP 0xFFEFFFFF diff --git a/trunk/drivers/gpu/drm/radeon/radeon.h b/trunk/drivers/gpu/drm/radeon/radeon.h index 5431af292408..59a15315ae9f 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon.h +++ b/trunk/drivers/gpu/drm/radeon/radeon.h @@ -142,21 +142,6 @@ struct radeon_device; /* * BIOS. 
*/ -#define ATRM_BIOS_PAGE 4096 - -#if defined(CONFIG_VGA_SWITCHEROO) -bool radeon_atrm_supported(struct pci_dev *pdev); -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len); -#else -static inline bool radeon_atrm_supported(struct pci_dev *pdev) -{ - return false; -} - -static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len){ - return -EINVAL; -} -#endif bool radeon_get_bios(struct radeon_device *rdev); /* @@ -300,6 +285,7 @@ struct radeon_bo_va { uint64_t soffset; uint64_t eoffset; uint32_t flags; + struct radeon_fence *fence; bool valid; }; @@ -1533,6 +1519,7 @@ struct radeon_device { unsigned debugfs_count; /* virtual memory */ struct radeon_vm_manager vm_manager; + struct mutex gpu_clock_mutex; }; int radeon_device_init(struct radeon_device *rdev, @@ -1733,11 +1720,11 @@ void radeon_ring_write(struct radeon_ring *ring, uint32_t v); #define radeon_pm_finish(rdev) (rdev)->asic->pm.finish((rdev)) #define radeon_pm_init_profile(rdev) (rdev)->asic->pm.init_profile((rdev)) #define radeon_pm_get_dynpm_state(rdev) (rdev)->asic->pm.get_dynpm_state((rdev)) -#define radeon_pre_page_flip(rdev, crtc) rdev->asic->pflip.pre_page_flip((rdev), (crtc)) -#define radeon_page_flip(rdev, crtc, base) rdev->asic->pflip.page_flip((rdev), (crtc), (base)) -#define radeon_post_page_flip(rdev, crtc) rdev->asic->pflip.post_page_flip((rdev), (crtc)) -#define radeon_wait_for_vblank(rdev, crtc) rdev->asic->display.wait_for_vblank((rdev), (crtc)) -#define radeon_mc_wait_for_idle(rdev) rdev->asic->mc_wait_for_idle((rdev)) +#define radeon_pre_page_flip(rdev, crtc) (rdev)->asic->pflip.pre_page_flip((rdev), (crtc)) +#define radeon_page_flip(rdev, crtc, base) (rdev)->asic->pflip.page_flip((rdev), (crtc), (base)) +#define radeon_post_page_flip(rdev, crtc) (rdev)->asic->pflip.post_page_flip((rdev), (crtc)) +#define radeon_wait_for_vblank(rdev, crtc) (rdev)->asic->display.wait_for_vblank((rdev), (crtc)) +#define radeon_mc_wait_for_idle(rdev) (rdev)->asic->mc_wait_for_idle((rdev)) /* Common functions */ /* AGP */ diff --git a/trunk/drivers/gpu/drm/radeon/radeon_asic.h b/trunk/drivers/gpu/drm/radeon/radeon_asic.h index f4af24310438..18c38d14c8cd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_asic.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_asic.h @@ -255,13 +255,10 @@ extern int rs690_mc_wait_for_idle(struct radeon_device *rdev); * rv515 */ struct rv515_mc_save { - u32 d1vga_control; - u32 d2vga_control; u32 vga_render_control; u32 vga_hdp_control; - u32 d1crtc_control; - u32 d2crtc_control; }; + int rv515_init(struct radeon_device *rdev); void rv515_fini(struct radeon_device *rdev); uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); @@ -371,6 +368,7 @@ void r600_kms_blit_copy(struct radeon_device *rdev, unsigned num_gpu_pages, struct radeon_sa_bo *vb); int r600_mc_wait_for_idle(struct radeon_device *rdev); +uint64_t r600_get_gpu_clock(struct radeon_device *rdev); /* * rv770,rv730,rv710,rv740 @@ -389,11 +387,10 @@ void r700_cp_fini(struct radeon_device *rdev); * evergreen */ struct evergreen_mc_save { - u32 vga_control[6]; u32 vga_render_control; u32 vga_hdp_control; - u32 crtc_control[6]; }; + void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev); int evergreen_init(struct radeon_device *rdev); void evergreen_fini(struct radeon_device *rdev); @@ -472,5 +469,6 @@ int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id); void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm); void si_vm_tlb_flush(struct radeon_device *rdev, struct 
radeon_vm *vm); int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib); +uint64_t si_get_gpu_clock(struct radeon_device *rdev); #endif diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c index b1e3820df363..d67d4f3eb6f4 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atombios.c @@ -452,7 +452,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev, } /* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */ - if ((dev->pdev->device == 0x9802) && + if (((dev->pdev->device == 0x9802) || (dev->pdev->device == 0x9806)) && (dev->pdev->subsystem_vendor == 0x1734) && (dev->pdev->subsystem_device == 0x11bd)) { if (*connector_type == DRM_MODE_CONNECTOR_VGA) { @@ -1263,6 +1263,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev) union igp_info { struct _ATOM_INTEGRATED_SYSTEM_INFO info; struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; }; bool radeon_atombios_sideport_present(struct radeon_device *rdev) @@ -1390,27 +1392,50 @@ static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev, struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); u16 data_offset, size; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info; + union igp_info *igp_info; u8 frev, crev; u16 percentage = 0, rate = 0; /* get any igp specific overrides */ if (atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { - igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *) + igp_info = (union igp_info *) (mode_info->atom_context->bios + data_offset); - switch (id) { - case ASIC_INTERNAL_SS_ON_TMDS: - percentage = le16_to_cpu(igp_info->usDVISSPercentage); - rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz); + switch (crev) { + case 6: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_HDMI: - percentage = le16_to_cpu(igp_info->usHDMISSPercentage); - rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz); + case 7: + switch (id) { + case ASIC_INTERNAL_SS_ON_TMDS: + percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_HDMI: + percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage); + rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz); + break; + case ASIC_INTERNAL_SS_ON_LVDS: + percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage); + rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz); + break; + } break; - case ASIC_INTERNAL_SS_ON_LVDS: - percentage = le16_to_cpu(igp_info->usLvdsSSPercentage); - rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz); + default: + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); break; } if (percentage) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c 
b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 98724fcb0088..2a2cf0b88a28 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -30,57 +30,8 @@ static struct radeon_atpx_priv { /* handle for device - and atpx */ acpi_handle dhandle; acpi_handle atpx_handle; - acpi_handle atrm_handle; } radeon_atpx_priv; -/* retrieve the ROM in 4k blocks */ -static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, - int offset, int len) -{ - acpi_status status; - union acpi_object atrm_arg_elements[2], *obj; - struct acpi_object_list atrm_arg; - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; - - atrm_arg.count = 2; - atrm_arg.pointer = &atrm_arg_elements[0]; - - atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; - atrm_arg_elements[0].integer.value = offset; - - atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; - atrm_arg_elements[1].integer.value = len; - - status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); - if (ACPI_FAILURE(status)) { - printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); - return -ENODEV; - } - - obj = (union acpi_object *)buffer.pointer; - memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); - len = obj->buffer.length; - kfree(buffer.pointer); - return len; -} - -bool radeon_atrm_supported(struct pci_dev *pdev) -{ - /* get the discrete ROM only via ATRM */ - if (!radeon_atpx_priv.atpx_detected) - return false; - - if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) - return false; - return true; -} - - -int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len) -{ - return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len); -} - static int radeon_atpx_get_version(acpi_handle handle) { acpi_status status; @@ -198,7 +149,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) { - acpi_handle dhandle, atpx_handle, atrm_handle; + acpi_handle dhandle, atpx_handle; acpi_status status; dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); @@ -209,13 +160,8 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) if (ACPI_FAILURE(status)) return false; - status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); - if (ACPI_FAILURE(status)) - return false; - radeon_atpx_priv.dhandle = dhandle; radeon_atpx_priv.atpx_handle = atpx_handle; - radeon_atpx_priv.atrm_handle = atrm_handle; return true; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_bios.c b/trunk/drivers/gpu/drm/radeon/radeon_bios.c index 501f4881e5aa..d306cc8fdeaa 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_bios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_bios.c @@ -32,6 +32,7 @@ #include #include +#include /* * BIOS. */ @@ -98,16 +99,81 @@ static bool radeon_read_bios(struct radeon_device *rdev) return true; } +#ifdef CONFIG_ACPI /* ATRM is used to get the BIOS on the discrete cards in * dual-gpu systems. */ +/* retrieve the ROM in 4k blocks */ +#define ATRM_BIOS_PAGE 4096 +/** + * radeon_atrm_call - fetch a chunk of the vbios + * + * @atrm_handle: acpi ATRM handle + * @bios: vbios image pointer + * @offset: offset of vbios image data to fetch + * @len: length of vbios image data to fetch + * + * Executes ATRM to fetch a chunk of the discrete + * vbios image on PX systems (all asics). + * Returns the length of the buffer fetched. 
+ */ +static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios, + int offset, int len) +{ + acpi_status status; + union acpi_object atrm_arg_elements[2], *obj; + struct acpi_object_list atrm_arg; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL}; + + atrm_arg.count = 2; + atrm_arg.pointer = &atrm_arg_elements[0]; + + atrm_arg_elements[0].type = ACPI_TYPE_INTEGER; + atrm_arg_elements[0].integer.value = offset; + + atrm_arg_elements[1].type = ACPI_TYPE_INTEGER; + atrm_arg_elements[1].integer.value = len; + + status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer); + if (ACPI_FAILURE(status)) { + printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status)); + return -ENODEV; + } + + obj = (union acpi_object *)buffer.pointer; + memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length); + len = obj->buffer.length; + kfree(buffer.pointer); + return len; +} + static bool radeon_atrm_get_bios(struct radeon_device *rdev) { int ret; int size = 256 * 1024; int i; + struct pci_dev *pdev = NULL; + acpi_handle dhandle, atrm_handle; + acpi_status status; + bool found = false; + + /* ATRM is for the discrete card only */ + if (rdev->flags & RADEON_IS_IGP) + return false; + + while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { + dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); + if (!dhandle) + continue; + + status = acpi_get_handle(dhandle, "ATRM", &atrm_handle); + if (!ACPI_FAILURE(status)) { + found = true; + break; + } + } - if (!radeon_atrm_supported(rdev->pdev)) + if (!found) return false; rdev->bios = kmalloc(size, GFP_KERNEL); @@ -117,9 +183,10 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } for (i = 0; i < size / ATRM_BIOS_PAGE; i++) { - ret = radeon_atrm_get_bios_chunk(rdev->bios, - (i * ATRM_BIOS_PAGE), - ATRM_BIOS_PAGE); + ret = radeon_atrm_call(atrm_handle, + rdev->bios, + (i * ATRM_BIOS_PAGE), + ATRM_BIOS_PAGE); if (ret < ATRM_BIOS_PAGE) break; } @@ -130,6 +197,12 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev) } return true; } +#else +static inline bool radeon_atrm_get_bios(struct radeon_device *rdev) +{ + return false; +} +#endif static bool ni_read_disabled_bios(struct radeon_device *rdev) { @@ -476,6 +549,61 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev) return legacy_read_disabled_bios(rdev); } +#ifdef CONFIG_ACPI +static bool radeon_acpi_vfct_bios(struct radeon_device *rdev) +{ + bool ret = false; + struct acpi_table_header *hdr; + acpi_size tbl_size; + UEFI_ACPI_VFCT *vfct; + GOP_VBIOS_CONTENT *vbios; + VFCT_IMAGE_HEADER *vhdr; + + if (!ACPI_SUCCESS(acpi_get_table_with_size("VFCT", 1, &hdr, &tbl_size))) + return false; + if (tbl_size < sizeof(UEFI_ACPI_VFCT)) { + DRM_ERROR("ACPI VFCT table present but broken (too short #1)\n"); + goto out_unmap; + } + + vfct = (UEFI_ACPI_VFCT *)hdr; + if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) > tbl_size) { + DRM_ERROR("ACPI VFCT table present but broken (too short #2)\n"); + goto out_unmap; + } + + vbios = (GOP_VBIOS_CONTENT *)((char *)hdr + vfct->VBIOSImageOffset); + vhdr = &vbios->VbiosHeader; + DRM_INFO("ACPI VFCT contains a BIOS for %02x:%02x.%d %04x:%04x, size %d\n", + vhdr->PCIBus, vhdr->PCIDevice, vhdr->PCIFunction, + vhdr->VendorID, vhdr->DeviceID, vhdr->ImageLength); + + if (vhdr->PCIBus != rdev->pdev->bus->number || + vhdr->PCIDevice != PCI_SLOT(rdev->pdev->devfn) || + vhdr->PCIFunction != PCI_FUNC(rdev->pdev->devfn) || + vhdr->VendorID != rdev->pdev->vendor || + vhdr->DeviceID != rdev->pdev->device) { + 
DRM_INFO("ACPI VFCT table is not for this card\n"); + goto out_unmap; + }; + + if (vfct->VBIOSImageOffset + sizeof(VFCT_IMAGE_HEADER) + vhdr->ImageLength > tbl_size) { + DRM_ERROR("ACPI VFCT image truncated\n"); + goto out_unmap; + } + + rdev->bios = kmemdup(&vbios->VbiosContent, vhdr->ImageLength, GFP_KERNEL); + ret = !!rdev->bios; + +out_unmap: + return ret; +} +#else +static inline bool radeon_acpi_vfct_bios(struct radeon_device *rdev) +{ + return false; +} +#endif bool radeon_get_bios(struct radeon_device *rdev) { @@ -483,6 +611,8 @@ bool radeon_get_bios(struct radeon_device *rdev) uint16_t tmp; r = radeon_atrm_get_bios(rdev); + if (r == false) + r = radeon_acpi_vfct_bios(rdev); if (r == false) r = igp_read_bios_from_vram(rdev); if (r == false) diff --git a/trunk/drivers/gpu/drm/radeon/radeon_combios.c b/trunk/drivers/gpu/drm/radeon/radeon_combios.c index 576f4f6919f2..f75247d42ffd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_combios.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_combios.c @@ -719,6 +719,34 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde return i2c; } +static struct radeon_i2c_bus_rec radeon_combios_get_i2c_info_from_table(struct radeon_device *rdev) +{ + struct drm_device *dev = rdev->ddev; + struct radeon_i2c_bus_rec i2c; + u16 offset; + u8 id, blocks, clk, data; + int i; + + i2c.valid = false; + + offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); + if (offset) { + blocks = RBIOS8(offset + 2); + for (i = 0; i < blocks; i++) { + id = RBIOS8(offset + 3 + (i * 5) + 0); + if (id == 136) { + clk = RBIOS8(offset + 3 + (i * 5) + 3); + data = RBIOS8(offset + 3 + (i * 5) + 4); + /* gpiopad */ + i2c = combios_setup_i2c_bus(rdev, DDC_MONID, + (1 << clk), (1 << data)); + break; + } + } + } + return i2c; +} + void radeon_combios_i2c_init(struct radeon_device *rdev) { struct drm_device *dev = rdev->ddev; @@ -755,30 +783,14 @@ void radeon_combios_i2c_init(struct radeon_device *rdev) } else if (rdev->family == CHIP_RS300 || rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { - u16 offset; - u8 id, blocks, clk, data; - int i; - /* 0x68 */ i2c = combios_setup_i2c_bus(rdev, DDC_CRT2, 0, 0); rdev->i2c_bus[3] = radeon_i2c_create(dev, &i2c, "MONID"); - offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE); - if (offset) { - blocks = RBIOS8(offset + 2); - for (i = 0; i < blocks; i++) { - id = RBIOS8(offset + 3 + (i * 5) + 0); - if (id == 136) { - clk = RBIOS8(offset + 3 + (i * 5) + 3); - data = RBIOS8(offset + 3 + (i * 5) + 4); - /* gpiopad */ - i2c = combios_setup_i2c_bus(rdev, DDC_MONID, - (1 << clk), (1 << data)); - rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); - break; - } - } - } + /* gpiopad */ + i2c = radeon_combios_get_i2c_info_from_table(rdev); + if (i2c.valid) + rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK"); } else if ((rdev->family == CHIP_R200) || (rdev->family >= CHIP_R300)) { /* 0x68 */ @@ -2321,7 +2333,10 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev) connector = (tmp >> 12) & 0xf; ddc_type = (tmp >> 8) & 0xf; - ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); + if (ddc_type == 5) + ddc_i2c = radeon_combios_get_i2c_info_from_table(rdev); + else + ddc_i2c = combios_setup_i2c_bus(rdev, ddc_type, 0, 0); switch (connector) { case CONNECTOR_PROPRIETARY_LEGACY: diff --git a/trunk/drivers/gpu/drm/radeon/radeon_cs.c b/trunk/drivers/gpu/drm/radeon/radeon_cs.c index 8a4c49ef0cc4..b4a0db24f4dd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_cs.c 
+++ b/trunk/drivers/gpu/drm/radeon/radeon_cs.c @@ -278,6 +278,30 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) return 0; } +static void radeon_bo_vm_fence_va(struct radeon_cs_parser *parser, + struct radeon_fence *fence) +{ + struct radeon_fpriv *fpriv = parser->filp->driver_priv; + struct radeon_vm *vm = &fpriv->vm; + struct radeon_bo_list *lobj; + + if (parser->chunk_ib_idx == -1) { + return; + } + if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) { + return; + } + + list_for_each_entry(lobj, &parser->validated, tv.head) { + struct radeon_bo_va *bo_va; + struct radeon_bo *rbo = lobj->bo; + + bo_va = radeon_bo_va(rbo, vm); + radeon_fence_unref(&bo_va->fence); + bo_va->fence = radeon_fence_ref(fence); + } +} + /** * cs_parser_fini() - clean parser states * @parser: parser structure holding parsing context. @@ -290,11 +314,14 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error) { unsigned i; - if (!error) + if (!error) { + /* fence all bo va before ttm_eu_fence_buffer_objects so bo are still reserved */ + radeon_bo_vm_fence_va(parser, parser->ib.fence); ttm_eu_fence_buffer_objects(&parser->validated, parser->ib.fence); - else + } else { ttm_eu_backoff_reservation(&parser->validated); + } if (parser->relocs != NULL) { for (i = 0; i < parser->nrelocs; i++) { @@ -388,7 +415,6 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev, if (parser->chunk_ib_idx == -1) return 0; - if ((parser->cs_flags & RADEON_CS_USE_VM) == 0) return 0; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_cursor.c b/trunk/drivers/gpu/drm/radeon/radeon_cursor.c index 711e95ad39bf..8794744cdf1a 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_cursor.c @@ -67,7 +67,8 @@ static void radeon_hide_cursor(struct drm_crtc *crtc) if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); - WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT)); @@ -94,7 +95,8 @@ static void radeon_show_cursor(struct drm_crtc *crtc) if (ASIC_IS_DCE4(rdev)) { WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN | - EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT)); + EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT) | + EVERGREEN_CURSOR_URGENT_CONTROL(EVERGREEN_CURSOR_URGENT_1_2)); } else if (ASIC_IS_AVIVO(rdev)) { WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset); WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN | diff --git a/trunk/drivers/gpu/drm/radeon/radeon_device.c b/trunk/drivers/gpu/drm/radeon/radeon_device.c index 742af8244e89..7a3daebd732d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_device.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_device.c @@ -1009,6 +1009,7 @@ int radeon_device_init(struct radeon_device *rdev, atomic_set(&rdev->ih.lock, 0); mutex_init(&rdev->gem.mutex); mutex_init(&rdev->pm.mutex); + mutex_init(&rdev->gpu_clock_mutex); init_rwsem(&rdev->pm.mclk_lock); init_rwsem(&rdev->exclusive_lock); init_waitqueue_head(&rdev->irq.vblank_queue); @@ -1050,7 +1051,7 @@ int radeon_device_init(struct radeon_device *rdev, if (rdev->flags & 
RADEON_IS_AGP) rdev->need_dma32 = true; if ((rdev->flags & RADEON_IS_PCI) && - (rdev->family < CHIP_RS400)) + (rdev->family <= CHIP_RS740)) rdev->need_dma32 = true; dma_bits = rdev->need_dma32 ? 32 : 40; @@ -1345,12 +1346,15 @@ int radeon_gpu_reset(struct radeon_device *rdev) for (i = 0; i < RADEON_NUM_RINGS; ++i) { radeon_ring_restore(rdev, &rdev->ring[i], ring_sizes[i], ring_data[i]); + ring_sizes[i] = 0; + ring_data[i] = NULL; } r = radeon_ib_ring_tests(rdev); if (r) { dev_err(rdev->dev, "ib ring test failed (%d).\n", r); if (saved) { + saved = false; radeon_suspend(rdev); goto retry; } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_drv.c b/trunk/drivers/gpu/drm/radeon/radeon_drv.c index dcea6f01ae4e..8c593ea82c41 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_drv.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_drv.c @@ -59,9 +59,14 @@ * 2.15.0 - add max_pipes query * 2.16.0 - fix evergreen 2D tiled surface calculation * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx + * 2.18.0 - r600-eg: allow "invalid" DB formats + * 2.19.0 - r600-eg: MSAA textures + * 2.20.0 - r600-si: RADEON_INFO_TIMESTAMP query + * 2.21.0 - r600-r700: FMASK and CMASK + * 2.22.0 - r600 only: RESOLVE_BOX allowed */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 17 +#define KMS_DRIVER_MINOR 22 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_fence.c b/trunk/drivers/gpu/drm/radeon/radeon_fence.c index 7b737b9339ad..2a59375dbe52 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_fence.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_fence.c @@ -131,7 +131,7 @@ int radeon_fence_emit(struct radeon_device *rdev, */ void radeon_fence_process(struct radeon_device *rdev, int ring) { - uint64_t seq, last_seq; + uint64_t seq, last_seq, last_emitted; unsigned count_loop = 0; bool wake = false; @@ -158,13 +158,15 @@ void radeon_fence_process(struct radeon_device *rdev, int ring) */ last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq); do { + last_emitted = rdev->fence_drv[ring].sync_seq[ring]; seq = radeon_fence_read(rdev, ring); seq |= last_seq & 0xffffffff00000000LL; if (seq < last_seq) { - seq += 0x100000000LL; + seq &= 0xffffffff; + seq |= last_emitted & 0xffffffff00000000LL; } - if (seq == last_seq) { + if (seq <= last_seq || seq > last_emitted) { break; } /* If we loop over we don't want to return without diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gart.c b/trunk/drivers/gpu/drm/radeon/radeon_gart.c index b3720054614d..bb3b7fe05ccd 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gart.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gart.c @@ -814,7 +814,7 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev, return -EINVAL; } - if (bo_va->valid) + if (bo_va->valid && mem) return 0; ngpu_pages = radeon_bo_ngpu_pages(bo); @@ -859,11 +859,27 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev, struct radeon_bo *bo) { struct radeon_bo_va *bo_va; + int r; bo_va = radeon_bo_va(bo, vm); if (bo_va == NULL) return 0; + /* wait for va use to end */ + while (bo_va->fence) { + r = radeon_fence_wait(bo_va->fence, false); + if (r) { + DRM_ERROR("error while waiting for fence: %d\n", r); + } + if (r == -EDEADLK) { + r = radeon_gpu_reset(rdev); + if (!r) + continue; + } + break; + } + radeon_fence_unref(&bo_va->fence); + mutex_lock(&rdev->vm_manager.lock); mutex_lock(&vm->mutex); radeon_vm_bo_update_pte(rdev, vm, bo, NULL); @@ -934,7 +950,7 @@ int radeon_vm_init(struct 
radeon_device *rdev, struct radeon_vm *vm) } /** - * radeon_vm_init - tear down a vm instance + * radeon_vm_fini - tear down a vm instance * * @rdev: radeon_device pointer * @vm: requested vm @@ -952,12 +968,15 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) radeon_vm_unbind_locked(rdev, vm); mutex_unlock(&rdev->vm_manager.lock); - /* remove all bo */ + /* remove all bo at this point non are busy any more because unbind + * waited for the last vm fence to signal + */ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (!r) { bo_va = radeon_bo_va(rdev->ring_tmp_bo.bo, vm); list_del_init(&bo_va->bo_list); list_del_init(&bo_va->vm_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(rdev->ring_tmp_bo.bo); kfree(bo_va); } @@ -969,6 +988,7 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm) r = radeon_bo_reserve(bo_va->bo, false); if (!r) { list_del_init(&bo_va->bo_list); + radeon_fence_unref(&bo_va->fence); radeon_bo_unreserve(bo_va->bo); kfree(bo_va); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_gem.c b/trunk/drivers/gpu/drm/radeon/radeon_gem.c index 84d045245739..1b57b0058ad6 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_gem.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_gem.c @@ -134,25 +134,16 @@ void radeon_gem_object_close(struct drm_gem_object *obj, struct radeon_device *rdev = rbo->rdev; struct radeon_fpriv *fpriv = file_priv->driver_priv; struct radeon_vm *vm = &fpriv->vm; - struct radeon_bo_va *bo_va, *tmp; if (rdev->family < CHIP_CAYMAN) { return; } if (radeon_bo_reserve(rbo, false)) { + dev_err(rdev->dev, "leaking bo va because we fail to reserve bo\n"); return; } - list_for_each_entry_safe(bo_va, tmp, &rbo->va, bo_list) { - if (bo_va->vm == vm) { - /* remove from this vm address space */ - mutex_lock(&vm->mutex); - list_del(&bo_va->vm_list); - mutex_unlock(&vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); - } - } + radeon_vm_bo_rmv(rdev, vm, rbo); radeon_bo_unreserve(rbo); } diff --git a/trunk/drivers/gpu/drm/radeon/radeon_kms.c b/trunk/drivers/gpu/drm/radeon/radeon_kms.c index 1d73f16b5d97..414b4acf6947 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_kms.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_kms.c @@ -29,6 +29,7 @@ #include "drm_sarea.h" #include "radeon.h" #include "radeon_drm.h" +#include "radeon_asic.h" #include #include @@ -167,17 +168,39 @@ static void radeon_set_filp_rights(struct drm_device *dev, int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { struct radeon_device *rdev = dev->dev_private; - struct drm_radeon_info *info; + struct drm_radeon_info *info = data; struct radeon_mode_info *minfo = &rdev->mode_info; - uint32_t *value_ptr; - uint32_t value; + uint32_t value, *value_ptr; + uint64_t value64, *value_ptr64; struct drm_crtc *crtc; int i, found; - info = data; + /* TIMESTAMP is a 64-bit value, needs special handling. 
*/ + if (info->request == RADEON_INFO_TIMESTAMP) { + if (rdev->family >= CHIP_R600) { + value_ptr64 = (uint64_t*)((unsigned long)info->value); + if (rdev->family >= CHIP_TAHITI) { + value64 = si_get_gpu_clock(rdev); + } else { + value64 = r600_get_gpu_clock(rdev); + } + + if (DRM_COPY_TO_USER(value_ptr64, &value64, sizeof(value64))) { + DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); + return -EFAULT; + } + return 0; + } else { + DRM_DEBUG_KMS("timestamp is r6xx+ only!\n"); + return -EINVAL; + } + } + value_ptr = (uint32_t *)((unsigned long)info->value); - if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) + if (DRM_COPY_FROM_USER(&value, value_ptr, sizeof(value))) { + DRM_ERROR("copy_from_user %s:%u\n", __func__, __LINE__); return -EFAULT; + } switch (info->request) { case RADEON_INFO_DEVICE_ID: @@ -337,7 +360,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) return -EINVAL; } if (DRM_COPY_TO_USER(value_ptr, &value, sizeof(uint32_t))) { - DRM_ERROR("copy_to_user\n"); + DRM_ERROR("copy_to_user %s:%u\n", __func__, __LINE__); return -EFAULT; } return 0; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index d5fd615897ec..94b4a1c12893 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_legacy_crtc.c @@ -1025,9 +1025,11 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, static void radeon_crtc_prepare(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; + radeon_crtc->in_mode_set = true; /* * The hardware wedges sometimes if you reconfigure one CRTC * whilst another is running (see fdo bug #24611). 
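
A minimal userspace sketch of exercising the RADEON_INFO_TIMESTAMP request introduced in the hunk above, assuming the radeon_drm.h uapi header that accompanies this series; the device node path, the header location, and the lack of error reporting are illustrative only. The kernel side treats info.value as a user pointer and copies a 64-bit clock snapshot into it, which is why the address of a uint64_t is passed through the value field:

    /* Illustrative sketch, not part of the patch. Header path may differ
     * between kernel uapi and libdrm installs. */
    #include <stdint.h>
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <drm/radeon_drm.h>

    static int radeon_query_timestamp(int fd, uint64_t *ts)
    {
        struct drm_radeon_info info = {0};

        info.request = RADEON_INFO_TIMESTAMP;      /* r6xx+ only, per the hunk above */
        info.value = (uint64_t)(uintptr_t)ts;      /* kernel copies the counter here */
        return ioctl(fd, DRM_IOCTL_RADEON_INFO, &info);
    }

    int main(void)
    {
        uint64_t ts = 0;
        int fd = open("/dev/dri/card0", O_RDWR); /* hypothetical device node */

        if (fd >= 0 && radeon_query_timestamp(fd, &ts) == 0)
            printf("GPU clock counter: %llu\n", (unsigned long long)ts);
        if (fd >= 0)
            close(fd);
        return 0;
    }
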
@@ -1038,6 +1040,7 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc) static void radeon_crtc_commit(struct drm_crtc *crtc) { + struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); struct drm_device *dev = crtc->dev; struct drm_crtc *crtci; @@ -1048,6 +1051,7 @@ static void radeon_crtc_commit(struct drm_crtc *crtc) if (crtci->enabled) radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); } + radeon_crtc->in_mode_set = false; } static const struct drm_crtc_helper_funcs legacy_helper_funcs = { diff --git a/trunk/drivers/gpu/drm/radeon/radeon_mode.h b/trunk/drivers/gpu/drm/radeon/radeon_mode.h index f380d59c5763..d56978949f34 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_mode.h +++ b/trunk/drivers/gpu/drm/radeon/radeon_mode.h @@ -275,6 +275,7 @@ struct radeon_crtc { u16 lut_r[256], lut_g[256], lut_b[256]; bool enabled; bool can_tile; + bool in_mode_set; uint32_t crtc_offset; struct drm_gem_object *cursor_bo; uint64_t cursor_addr; diff --git a/trunk/drivers/gpu/drm/radeon/radeon_object.c b/trunk/drivers/gpu/drm/radeon/radeon_object.c index 1f1a4c803c1d..9024e7222839 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_object.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_object.c @@ -52,11 +52,7 @@ void radeon_bo_clear_va(struct radeon_bo *bo) list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) { /* remove from all vm address space */ - mutex_lock(&bo_va->vm->mutex); - list_del(&bo_va->vm_list); - mutex_unlock(&bo_va->vm->mutex); - list_del(&bo_va->bo_list); - kfree(bo_va); + radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo); } } @@ -136,6 +132,7 @@ int radeon_bo_create(struct radeon_device *rdev, acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size, sizeof(struct radeon_bo)); +retry: bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL); if (bo == NULL) return -ENOMEM; @@ -149,8 +146,6 @@ int radeon_bo_create(struct radeon_device *rdev, bo->surface_reg = -1; INIT_LIST_HEAD(&bo->list); INIT_LIST_HEAD(&bo->va); - -retry: radeon_ttm_placement_from_domain(bo, domain); /* Kernel allocation are uninterruptible */ down_read(&rdev->pm.mclk_lock); diff --git a/trunk/drivers/gpu/drm/radeon/radeon_ring.c b/trunk/drivers/gpu/drm/radeon/radeon_ring.c index ec79b3750430..43c431a2686d 100644 --- a/trunk/drivers/gpu/drm/radeon/radeon_ring.c +++ b/trunk/drivers/gpu/drm/radeon/radeon_ring.c @@ -706,6 +706,7 @@ int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsig if (radeon_debugfs_ring_init(rdev, ring)) { DRM_ERROR("Failed to register debugfs file for rings !\n"); } + radeon_ring_lockup_update(ring); return 0; } diff --git a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 index 5e659b034d9a..20bfbda7b3f1 100644 --- a/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 +++ b/trunk/drivers/gpu/drm/radeon/reg_srcs/r600 @@ -744,15 +744,6 @@ r600 0x9400 0x00028C38 CB_CLRCMP_DST 0x00028C3C CB_CLRCMP_MSK 0x00028C34 CB_CLRCMP_SRC -0x00028100 CB_COLOR0_MASK -0x00028104 CB_COLOR1_MASK -0x00028108 CB_COLOR2_MASK -0x0002810C CB_COLOR3_MASK -0x00028110 CB_COLOR4_MASK -0x00028114 CB_COLOR5_MASK -0x00028118 CB_COLOR6_MASK -0x0002811C CB_COLOR7_MASK -0x00028808 CB_COLOR_CONTROL 0x0002842C CB_FOG_BLUE 0x00028428 CB_FOG_GREEN 0x00028424 CB_FOG_RED diff --git a/trunk/drivers/gpu/drm/radeon/rv515.c b/trunk/drivers/gpu/drm/radeon/rv515.c index a12fbcc8ccb6..aa8ef491ef3c 100644 --- a/trunk/drivers/gpu/drm/radeon/rv515.c +++ b/trunk/drivers/gpu/drm/radeon/rv515.c @@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev) void rv515_mc_stop(struct radeon_device 
*rdev, struct rv515_mc_save *save) { - save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL); - save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL); save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL); save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL); - save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL); - save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL); /* Stop all video */ WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); @@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save) /* Unlock host access */ WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control); mdelay(1); - /* Restore video state */ - WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control); - WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1); - WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control); - WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control); - WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0); - WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0); WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control); } diff --git a/trunk/drivers/gpu/drm/radeon/si.c b/trunk/drivers/gpu/drm/radeon/si.c index c053f8193771..0139e227e3c7 100644 --- a/trunk/drivers/gpu/drm/radeon/si.c +++ b/trunk/drivers/gpu/drm/radeon/si.c @@ -1639,11 +1639,19 @@ static void si_gpu_init(struct radeon_device *rdev) /* XXX what about 12? */ rdev->config.si.tile_config |= (3 << 0); break; - } - if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) - rdev->config.si.tile_config |= 1 << 4; - else + } + switch ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) { + case 0: /* four banks */ rdev->config.si.tile_config |= 0 << 4; + break; + case 1: /* eight banks */ + rdev->config.si.tile_config |= 1 << 4; + break; + case 2: /* sixteen banks */ + default: + rdev->config.si.tile_config |= 2 << 4; + break; + } rdev->config.si.tile_config |= ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; rdev->config.si.tile_config |= @@ -3960,3 +3968,22 @@ void si_fini(struct radeon_device *rdev) rdev->bios = NULL; } +/** + * si_get_gpu_clock - return GPU clock counter snapshot + * + * @rdev: radeon_device pointer + * + * Fetches a GPU clock counter snapshot (SI). + * Returns the 64 bit clock counter snapshot. 
+ */ +uint64_t si_get_gpu_clock(struct radeon_device *rdev) +{ + uint64_t clock; + + mutex_lock(&rdev->gpu_clock_mutex); + WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + mutex_unlock(&rdev->gpu_clock_mutex); + return clock; +} diff --git a/trunk/drivers/gpu/drm/radeon/sid.h b/trunk/drivers/gpu/drm/radeon/sid.h index 7869089e8761..ef4815c27b1c 100644 --- a/trunk/drivers/gpu/drm/radeon/sid.h +++ b/trunk/drivers/gpu/drm/radeon/sid.h @@ -698,6 +698,9 @@ #define RLC_UCODE_ADDR 0xC32C #define RLC_UCODE_DATA 0xC330 +#define RLC_GPU_CLOCK_COUNT_LSB 0xC338 +#define RLC_GPU_CLOCK_COUNT_MSB 0xC33C +#define RLC_CAPTURE_GPU_CLOCK_COUNT 0xC340 #define RLC_MC_CNTL 0xC344 #define RLC_UCODE_CNTL 0xC348 diff --git a/trunk/drivers/gpu/drm/savage/savage_drv.c b/trunk/drivers/gpu/drm/savage/savage_drv.c index d31d4cca9a4c..c5a164337bd5 100644 --- a/trunk/drivers/gpu/drm/savage/savage_drv.c +++ b/trunk/drivers/gpu/drm/savage/savage_drv.c @@ -43,6 +43,9 @@ static const struct file_operations savage_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/sis/sis_drv.c b/trunk/drivers/gpu/drm/sis/sis_drv.c index 7f119870147c..867dc03000e6 100644 --- a/trunk/drivers/gpu/drm/sis/sis_drv.c +++ b/trunk/drivers/gpu/drm/sis/sis_drv.c @@ -74,6 +74,9 @@ static const struct file_operations sis_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/tdfx/tdfx_drv.c b/trunk/drivers/gpu/drm/tdfx/tdfx_drv.c index 90f6b13acfac..a7f4d6bd1330 100644 --- a/trunk/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/trunk/drivers/gpu/drm/tdfx/tdfx_drv.c @@ -49,6 +49,9 @@ static const struct file_operations tdfx_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/udl/Kconfig b/trunk/drivers/gpu/drm/udl/Kconfig index 0b5e096d39a6..56e0bf31d425 100644 --- a/trunk/drivers/gpu/drm/udl/Kconfig +++ b/trunk/drivers/gpu/drm/udl/Kconfig @@ -1,6 +1,7 @@ config DRM_UDL tristate "DisplayLink" depends on DRM && EXPERIMENTAL + depends on USB_ARCH_HAS_HCD select DRM_USB select FB_SYS_FILLRECT select FB_SYS_COPYAREA diff --git a/trunk/drivers/gpu/drm/udl/udl_connector.c b/trunk/drivers/gpu/drm/udl/udl_connector.c index ba055e9ca007..8d9dc44f1f94 100644 --- a/trunk/drivers/gpu/drm/udl/udl_connector.c +++ b/trunk/drivers/gpu/drm/udl/udl_connector.c @@ -69,6 +69,13 @@ static int udl_get_modes(struct drm_connector *connector) static int udl_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct udl_device *udl = connector->dev->dev_private; + if (!udl->sku_pixel_limit) + return 0; + + if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit) + return MODE_VIRTUAL_Y; + return 0; } diff --git a/trunk/drivers/gpu/drm/udl/udl_drv.c b/trunk/drivers/gpu/drm/udl/udl_drv.c index 6e52069894b3..9f84128505bb 100644 --- a/trunk/drivers/gpu/drm/udl/udl_drv.c +++ b/trunk/drivers/gpu/drm/udl/udl_drv.c @@ -66,6 +66,9 @@ static const struct file_operations udl_driver_fops = { .unlocked_ioctl = drm_ioctl, .release = drm_release, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif 
.llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/udl/udl_gem.c b/trunk/drivers/gpu/drm/udl/udl_gem.c index 7bd65bdd15a8..291ecc145585 100644 --- a/trunk/drivers/gpu/drm/udl/udl_gem.c +++ b/trunk/drivers/gpu/drm/udl/udl_gem.c @@ -308,7 +308,7 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev, /* need to attach */ attach = dma_buf_attach(dma_buf, dev->dev); if (IS_ERR(attach)) - return ERR_PTR(PTR_ERR(attach)); + return ERR_CAST(attach); sg = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); if (IS_ERR(sg)) { diff --git a/trunk/drivers/gpu/drm/udl/udl_modeset.c b/trunk/drivers/gpu/drm/udl/udl_modeset.c index f5dd89e891de..9159d48d1dfd 100644 --- a/trunk/drivers/gpu/drm/udl/udl_modeset.c +++ b/trunk/drivers/gpu/drm/udl/udl_modeset.c @@ -354,8 +354,7 @@ static int udl_crtc_mode_set(struct drm_crtc *crtc, static void udl_crtc_disable(struct drm_crtc *crtc) { - - + udl_crtc_dpms(crtc, DRM_MODE_DPMS_OFF); } static void udl_crtc_destroy(struct drm_crtc *crtc) diff --git a/trunk/drivers/gpu/drm/via/via_drv.c b/trunk/drivers/gpu/drm/via/via_drv.c index e927b4c052f5..af1b914b17e3 100644 --- a/trunk/drivers/gpu/drm/via/via_drv.c +++ b/trunk/drivers/gpu/drm/via/via_drv.c @@ -65,6 +65,9 @@ static const struct file_operations via_driver_fops = { .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif .llseek = noop_llseek, }; diff --git a/trunk/drivers/gpu/drm/vmwgfx/Kconfig b/trunk/drivers/gpu/drm/vmwgfx/Kconfig index 794ff67c5701..b71bcd0bfbbf 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/Kconfig +++ b/trunk/drivers/gpu/drm/vmwgfx/Kconfig @@ -12,3 +12,11 @@ config DRM_VMWGFX This is a KMS enabled DRM driver for the VMware SVGA2 virtual hardware. The compiled module will be called "vmwgfx.ko". + +config DRM_VMWGFX_FBCON + depends on DRM_VMWGFX + bool "Enable framebuffer console under vmwgfx by default" + help + Choose this option if you are shipping a new vmwgfx + userspace driver that supports using the kernel driver. 
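
The DRM_VMWGFX_FBCON option added above is consumed in the vmwgfx driver change that follows through IS_ENABLED(), which lets a module parameter default track a Kconfig bool without #ifdef blocks. A minimal sketch of that idiom, with a hypothetical CONFIG_EXAMPLE_FBCON_DEFAULT symbol and parameter name standing in for the real ones:

    /* Illustrative sketch, not part of the patch. CONFIG_EXAMPLE_FBCON_DEFAULT
     * is a hypothetical Kconfig bool; IS_ENABLED() evaluates to 1 when it is
     * set (y/m) and 0 otherwise, so the default follows the kernel config. */
    #include <linux/module.h>
    #include <linux/kconfig.h>

    static int enable_fbcon = IS_ENABLED(CONFIG_EXAMPLE_FBCON_DEFAULT);
    module_param_named(enable_fbcon, enable_fbcon, int, 0600);
    MODULE_PARM_DESC(enable_fbcon,
                     "Enable the framebuffer console by default (0 = off, 1 = on)");
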
+ diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 4d9edead01ac..ba2c35dbf10e 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -182,8 +182,9 @@ static struct pci_device_id vmw_pci_id_list[] = { {0x15ad, 0x0405, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VMWGFX_CHIP_SVGAII}, {0, 0, 0} }; +MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); -static int enable_fbdev; +static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON); static int vmw_probe(struct pci_dev *, const struct pci_device_id *); static void vmw_master_init(struct vmw_master *); @@ -1154,6 +1155,11 @@ static struct drm_driver driver = { .open = vmw_driver_open, .preclose = vmw_preclose, .postclose = vmw_postclose, + + .dumb_create = vmw_dumb_create, + .dumb_map_offset = vmw_dumb_map_offset, + .dumb_destroy = vmw_dumb_destroy, + .fops = &vmwgfx_driver_fops, .name = VMWGFX_DRIVER_NAME, .desc = VMWGFX_DRIVER_DESC, diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index d0f2c079ee27..29c984ff7f23 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -645,6 +645,16 @@ int vmw_kms_readback(struct vmw_private *dev_priv, int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int vmw_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +int vmw_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset); +int vmw_dumb_destroy(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle); /** * Overlay control - vmwgfx_overlay.c */ diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c index f2fb8f15e2f1..7e0743358dff 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c @@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv, } - event = kzalloc(sizeof(event->event), GFP_KERNEL); + event = kzalloc(sizeof(*event), GFP_KERNEL); if (unlikely(event == NULL)) { DRM_ERROR("Failed to allocate an event.\n"); ret = -ENOMEM; diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 6b0078ffa763..c50724bd30f6 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -1688,15 +1688,19 @@ int vmw_du_page_flip(struct drm_crtc *crtc, struct vmw_private *dev_priv = vmw_priv(crtc->dev); struct drm_framebuffer *old_fb = crtc->fb; struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb); - struct drm_file *file_priv = event->base.file_priv; + struct drm_file *file_priv ; struct vmw_fence_obj *fence = NULL; struct drm_clip_rect clips; int ret; + if (event == NULL) + return -EINVAL; + /* require ScreenObject support for page flipping */ if (!dev_priv->sou_priv) return -ENOSYS; + file_priv = event->base.file_priv; if (!vmw_kms_screen_object_flippable(dev_priv, crtc)) return -EINVAL; diff --git a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 22bf9a21ec71..2c6ffe0e2c07 100644 --- a/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/trunk/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1917,3 +1917,76 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv, vmw_resource_unreference(&res); return ret; } + + +int vmw_dumb_create(struct drm_file 
*file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct vmw_private *dev_priv = vmw_priv(dev); + struct vmw_master *vmaster = vmw_master(file_priv->master); + struct vmw_user_dma_buffer *vmw_user_bo; + struct ttm_buffer_object *tmp; + int ret; + + args->pitch = args->width * ((args->bpp + 7) / 8); + args->size = args->pitch * args->height; + + vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL); + if (vmw_user_bo == NULL) + return -ENOMEM; + + ret = ttm_read_lock(&vmaster->lock, true); + if (ret != 0) { + kfree(vmw_user_bo); + return ret; + } + + ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size, + &vmw_vram_sys_placement, true, + &vmw_user_dmabuf_destroy); + if (ret != 0) + goto out_no_dmabuf; + + tmp = ttm_bo_reference(&vmw_user_bo->dma.base); + ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile, + &vmw_user_bo->base, + false, + ttm_buffer_type, + &vmw_user_dmabuf_release, NULL); + if (unlikely(ret != 0)) + goto out_no_base_object; + + args->handle = vmw_user_bo->base.hash.key; + +out_no_base_object: + ttm_bo_unref(&tmp); +out_no_dmabuf: + ttm_read_unlock(&vmaster->lock); + return ret; +} + +int vmw_dumb_map_offset(struct drm_file *file_priv, + struct drm_device *dev, uint32_t handle, + uint64_t *offset) +{ + struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; + struct vmw_dma_buffer *out_buf; + int ret; + + ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf); + if (ret != 0) + return -EINVAL; + + *offset = out_buf->base.addr_space_offset; + vmw_dmabuf_unreference(&out_buf); + return 0; +} + +int vmw_dumb_destroy(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle) +{ + return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, + handle, TTM_REF_USAGE); +} diff --git a/trunk/drivers/gpu/vga/vga_switcheroo.c b/trunk/drivers/gpu/vga/vga_switcheroo.c index 5b3c7d135dc9..e25cf31faab2 100644 --- a/trunk/drivers/gpu/vga/vga_switcheroo.c +++ b/trunk/drivers/gpu/vga/vga_switcheroo.c @@ -70,27 +70,12 @@ static struct vgasr_priv vgasr_priv = { .clients = LIST_HEAD_INIT(vgasr_priv.clients), }; -int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) -{ - mutex_lock(&vgasr_mutex); - if (vgasr_priv.handler) { - mutex_unlock(&vgasr_mutex); - return -EINVAL; - } - - vgasr_priv.handler = handler; - mutex_unlock(&vgasr_mutex); - return 0; -} -EXPORT_SYMBOL(vga_switcheroo_register_handler); - -void vga_switcheroo_unregister_handler(void) +static bool vga_switcheroo_ready(void) { - mutex_lock(&vgasr_mutex); - vgasr_priv.handler = NULL; - mutex_unlock(&vgasr_mutex); + /* we're ready if we get two clients + handler */ + return !vgasr_priv.active && + vgasr_priv.registered_clients == 2 && vgasr_priv.handler; } -EXPORT_SYMBOL(vga_switcheroo_unregister_handler); static void vga_switcheroo_enable(void) { @@ -98,7 +83,8 @@ static void vga_switcheroo_enable(void) struct vga_switcheroo_client *client; /* call the handler to init */ - vgasr_priv.handler->init(); + if (vgasr_priv.handler->init) + vgasr_priv.handler->init(); list_for_each_entry(client, &vgasr_priv.clients, list) { if (client->id != -1) @@ -113,6 +99,37 @@ static void vga_switcheroo_enable(void) vgasr_priv.active = true; } +int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler) +{ + mutex_lock(&vgasr_mutex); + if (vgasr_priv.handler) { + mutex_unlock(&vgasr_mutex); + return -EINVAL; + } + + vgasr_priv.handler = handler; + if (vga_switcheroo_ready()) { + printk(KERN_INFO "vga_switcheroo: enabled\n"); + vga_switcheroo_enable(); + 
} + mutex_unlock(&vgasr_mutex); + return 0; +} +EXPORT_SYMBOL(vga_switcheroo_register_handler); + +void vga_switcheroo_unregister_handler(void) +{ + mutex_lock(&vgasr_mutex); + vgasr_priv.handler = NULL; + if (vgasr_priv.active) { + pr_info("vga_switcheroo: disabled\n"); + vga_switcheroo_debugfs_fini(&vgasr_priv); + vgasr_priv.active = false; + } + mutex_unlock(&vgasr_mutex); +} +EXPORT_SYMBOL(vga_switcheroo_unregister_handler); + static int register_client(struct pci_dev *pdev, const struct vga_switcheroo_client_ops *ops, int id, bool active) @@ -134,9 +151,7 @@ static int register_client(struct pci_dev *pdev, if (client_is_vga(client)) vgasr_priv.registered_clients++; - /* if we get two clients + handler */ - if (!vgasr_priv.active && - vgasr_priv.registered_clients == 2 && vgasr_priv.handler) { + if (vga_switcheroo_ready()) { printk(KERN_INFO "vga_switcheroo: enabled\n"); vga_switcheroo_enable(); } diff --git a/trunk/drivers/hid/Kconfig b/trunk/drivers/hid/Kconfig index fbf49503508d..2af774ad1060 100644 --- a/trunk/drivers/hid/Kconfig +++ b/trunk/drivers/hid/Kconfig @@ -307,7 +307,6 @@ config HID_LOGITECH config HID_LOGITECH_DJ tristate "Logitech Unifying receivers full support" depends on HID_LOGITECH - default m ---help--- Say Y if you want support for Logitech Unifying receivers and devices. Unifying receivers are capable of pairing up to 6 Logitech compliant @@ -527,6 +526,14 @@ config HID_PICOLCD_LEDS ---help--- Provide access to PicoLCD's GPO pins via leds class. +config HID_PICOLCD_CIR + bool "CIR via RC class" if EXPERT + default !EXPERT + depends on HID_PICOLCD + depends on HID_PICOLCD=RC_CORE || RC_CORE=y + ---help--- + Provide access to PicoLCD's CIR interface via remote control (LIRC). + config HID_PRIMAX tristate "Primax non-fully HID-compliant devices" depends on USB_HID @@ -534,6 +541,15 @@ config HID_PRIMAX Support for Primax devices that are not fully compliant with the HID standard. +config HID_PS3REMOTE + tristate "Sony PS3 BD Remote Control" + depends on BT_HIDP + ---help--- + Support for the Sony PS3 Blue-ray Disk Remote Control and Logitech + Harmony Adapter for PS3, which connect over Bluetooth. + + Support for the 6-axis controllers is provided by HID_SONY. + config HID_ROCCAT tristate "Roccat device support" depends on USB_HID @@ -561,7 +577,9 @@ config HID_SONY tristate "Sony PS3 controller" depends on USB_HID ---help--- - Support for Sony PS3 controller. + Support for Sony PS3 6-axis controllers. + + Support for the Sony PS3 BD Remote is provided by HID_PS3REMOTE. 
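
The vga_switcheroo hunk above factors the open-coded "two clients plus a handler" test into vga_switcheroo_ready() and calls it from both the handler and the client registration paths, so switching is enabled regardless of which side registers last. A standalone sketch of that readiness-check pattern, with hypothetical names and the handler collapsed to a single optional init hook for brevity:

    /* Illustrative sketch, not part of the patch. The real code keeps a full
     * handler structure with an optional ->init() callback; a lone function
     * pointer stands in for it here. */
    #include <stdbool.h>
    #include <stddef.h>

    struct switcher_state {
        bool active;
        int registered_clients;
        void (*handler_init)(void);   /* NULL until a handler registers */
    };

    static bool switcher_ready(const struct switcher_state *s)
    {
        /* ready once two clients and a handler are present and we are idle */
        return !s->active && s->registered_clients == 2 && s->handler_init;
    }

    static void switcher_try_enable(struct switcher_state *s)
    {
        if (!switcher_ready(s))
            return;
        s->handler_init();            /* init hook runs exactly once */
        s->active = true;
    }

    /* Called from both the handler- and client-registration paths. */
    static void on_registration(struct switcher_state *s, bool is_client)
    {
        if (is_client)
            s->registered_clients++;
        switcher_try_enable(s);
    }
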
config HID_SPEEDLINK tristate "Speedlink VAD Cezanne mouse support" diff --git a/trunk/drivers/hid/Makefile b/trunk/drivers/hid/Makefile index f975485f88b2..5a3690ff9bf2 100644 --- a/trunk/drivers/hid/Makefile +++ b/trunk/drivers/hid/Makefile @@ -69,7 +69,28 @@ obj-$(CONFIG_HID_PRODIKEYS) += hid-prodikeys.o obj-$(CONFIG_HID_PANTHERLORD) += hid-pl.o obj-$(CONFIG_HID_PETALYNX) += hid-petalynx.o obj-$(CONFIG_HID_PICOLCD) += hid-picolcd.o +hid-picolcd-y += hid-picolcd_core.o +ifdef CONFIG_HID_PICOLCD_FB +hid-picolcd-y += hid-picolcd_fb.o +endif +ifdef CONFIG_HID_PICOLCD_BACKLIGHT +hid-picolcd-y += hid-picolcd_backlight.o +endif +ifdef CONFIG_HID_PICOLCD_LCD +hid-picolcd-y += hid-picolcd_lcd.o +endif +ifdef CONFIG_HID_PICOLCD_LEDS +hid-picolcd-y += hid-picolcd_leds.o +endif +ifdef CONFIG_HID_PICOLCD_CIR +hid-picolcd-y += hid-picolcd_cir.o +endif +ifdef CONFIG_DEBUG_FS +hid-picolcd-y += hid-picolcd_debugfs.o +endif + obj-$(CONFIG_HID_PRIMAX) += hid-primax.o +obj-$(CONFIG_HID_PS3REMOTE) += hid-ps3remote.o obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \ hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \ hid-roccat-koneplus.o hid-roccat-kovaplus.o hid-roccat-pyra.o \ diff --git a/trunk/drivers/hid/hid-a4tech.c b/trunk/drivers/hid/hid-a4tech.c index 902d1dfeb1b5..0a239885e67c 100644 --- a/trunk/drivers/hid/hid-a4tech.c +++ b/trunk/drivers/hid/hid-a4tech.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-apple.c b/trunk/drivers/hid/hid-apple.c index 585344b6d338..06ebdbb6ea02 100644 --- a/trunk/drivers/hid/hid-apple.c +++ b/trunk/drivers/hid/hid-apple.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-aureal.c b/trunk/drivers/hid/hid-aureal.c index ba64b041b8bf..7968187ddf7b 100644 --- a/trunk/drivers/hid/hid-aureal.c +++ b/trunk/drivers/hid/hid-aureal.c @@ -9,7 +9,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ #include diff --git a/trunk/drivers/hid/hid-belkin.c b/trunk/drivers/hid/hid-belkin.c index a1a765a5b08a..a1a5a12c3a6b 100644 --- a/trunk/drivers/hid/hid-belkin.c +++ b/trunk/drivers/hid/hid-belkin.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-cherry.c b/trunk/drivers/hid/hid-cherry.c index 888ece68a47c..af034d3d9256 100644 --- a/trunk/drivers/hid/hid-cherry.c +++ b/trunk/drivers/hid/hid-cherry.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-core.c b/trunk/drivers/hid/hid-core.c index 60ea284407ce..2cd6880b6b17 100644 --- a/trunk/drivers/hid/hid-core.c +++ b/trunk/drivers/hid/hid-core.c @@ -126,7 +126,7 @@ static int open_collection(struct hid_parser *parser, unsigned type) if 
(parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) { hid_err(parser->device, "collection stack overflow\n"); - return -1; + return -EINVAL; } if (parser->device->maxcollection == parser->device->collection_size) { @@ -134,7 +134,7 @@ static int open_collection(struct hid_parser *parser, unsigned type) parser->device->collection_size * 2, GFP_KERNEL); if (collection == NULL) { hid_err(parser->device, "failed to reallocate collection array\n"); - return -1; + return -ENOMEM; } memcpy(collection, parser->device->collection, sizeof(struct hid_collection) * @@ -170,7 +170,7 @@ static int close_collection(struct hid_parser *parser) { if (!parser->collection_stack_ptr) { hid_err(parser->device, "collection stack underflow\n"); - return -1; + return -EINVAL; } parser->collection_stack_ptr--; return 0; @@ -374,7 +374,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: parser->global.report_size = item_udata(item); - if (parser->global.report_size > 96) { + if (parser->global.report_size > 128) { hid_err(parser->device, "invalid report_size %d\n", parser->global.report_size); return -1; @@ -757,6 +757,7 @@ int hid_open_report(struct hid_device *device) struct hid_item item; unsigned int size; __u8 *start; + __u8 *buf; __u8 *end; int ret; static int (*dispatch_type[])(struct hid_parser *parser, @@ -775,12 +776,21 @@ int hid_open_report(struct hid_device *device) return -ENODEV; size = device->dev_rsize; + buf = kmemdup(start, size, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + if (device->driver->report_fixup) - start = device->driver->report_fixup(device, start, &size); + start = device->driver->report_fixup(device, buf, &size); + else + start = buf; - device->rdesc = kmemdup(start, size, GFP_KERNEL); - if (device->rdesc == NULL) + start = kmemdup(start, size, GFP_KERNEL); + kfree(buf); + if (start == NULL) return -ENOMEM; + + device->rdesc = start; device->rsize = size; parser = vzalloc(sizeof(struct hid_parser)); @@ -996,7 +1006,8 @@ static void hid_process_event(struct hid_device *hid, struct hid_field *field, struct hid_driver *hdrv = hid->driver; int ret; - hid_dump_input(hid, usage, value); + if (!list_empty(&hid->debug_list)) + hid_dump_input(hid, usage, value); if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { ret = hdrv->event(hid, field, usage, value); @@ -1447,7 +1458,14 @@ void hid_disconnect(struct hid_device *hdev) } EXPORT_SYMBOL_GPL(hid_disconnect); -/* a list of devices for which there is a specialized driver on HID bus */ +/* + * A list of devices for which there is a specialized driver on HID bus. + * + * Please note that for multitouch devices (driven by hid-multitouch driver), + * there is a proper autodetection and autoloading in place (based on presence + * of HID_DG_CONTACTID), so those devices don't need to be added to this list, + * as we are doing the right thing in hid_scan_usage(). 
+ */ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, @@ -1558,11 +1576,14 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, - { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, +#if IS_ENABLED(CONFIG_HID_LENOVO_TPKBD) + { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, +#endif { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, @@ -1624,7 +1645,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN) }, +#if IS_ENABLED(CONFIG_HID_ROCCAT) { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_KONE) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, @@ -1633,10 +1654,12 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_PYRA_WIRELESS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_SAVU) }, +#endif { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) }, @@ -1661,6 +1684,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, 
USB_DEVICE_ID_DUAL_USB_JOYPAD) }, diff --git a/trunk/drivers/hid/hid-cypress.c b/trunk/drivers/hid/hid-cypress.c index 9e43aaca9774..3e159a50dac7 100644 --- a/trunk/drivers/hid/hid-cypress.c +++ b/trunk/drivers/hid/hid-cypress.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-debug.c b/trunk/drivers/hid/hid-debug.c index 01dd9a7daf7a..933fff0fff1f 100644 --- a/trunk/drivers/hid/hid-debug.c +++ b/trunk/drivers/hid/hid-debug.c @@ -911,15 +911,21 @@ static void hid_dump_input_mapping(struct hid_device *hid, struct seq_file *f) } - static int hid_debug_rdesc_show(struct seq_file *f, void *p) { struct hid_device *hdev = f->private; + const __u8 *rdesc = hdev->rdesc; + unsigned rsize = hdev->rsize; int i; + if (!rdesc) { + rdesc = hdev->dev_rdesc; + rsize = hdev->dev_rsize; + } + /* dump HID report descriptor */ - for (i = 0; i < hdev->rsize; i++) - seq_printf(f, "%02x ", hdev->rdesc[i]); + for (i = 0; i < rsize; i++) + seq_printf(f, "%02x ", rdesc[i]); seq_printf(f, "\n\n"); /* dump parsed data and input mappings */ diff --git a/trunk/drivers/hid/hid-ezkey.c b/trunk/drivers/hid/hid-ezkey.c index ca1163e9d42d..6540af2871a7 100644 --- a/trunk/drivers/hid/hid-ezkey.c +++ b/trunk/drivers/hid/hid-ezkey.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-gyration.c b/trunk/drivers/hid/hid-gyration.c index e88b951cd10d..4442c30ef531 100644 --- a/trunk/drivers/hid/hid-gyration.c +++ b/trunk/drivers/hid/hid-gyration.c @@ -4,7 +4,6 @@ * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2006-2008 Jiri Kosina */ diff --git a/trunk/drivers/hid/hid-holtekff.c b/trunk/drivers/hid/hid-holtekff.c index 4e7542151e22..ff295e60059b 100644 --- a/trunk/drivers/hid/hid-holtekff.c +++ b/trunk/drivers/hid/hid-holtekff.c @@ -100,8 +100,7 @@ static void holtekff_send(struct holtekff_device *holtekff, holtekff->field->value[i] = data[i]; } - dbg_hid("sending %02x %02x %02x %02x %02x %02x %02x\n", data[0], - data[1], data[2], data[3], data[4], data[5], data[6]); + dbg_hid("sending %*ph\n", 7, data); usbhid_submit_report(hid, holtekff->field->report, USB_DIR_OUT); } diff --git a/trunk/drivers/hid/hid-ids.h b/trunk/drivers/hid/hid-ids.h index 1dcb76ff51e3..ca4d83e6e387 100644 --- a/trunk/drivers/hid/hid-ids.h +++ b/trunk/drivers/hid/hid-ids.h @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley */ /* @@ -269,7 +268,11 @@ #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72FA 0x72fa #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7302 0x7302 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349 0x7349 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7 0x73f7 #define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001 0xa001 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224 0x7224 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0 0x72d0 +#define USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4 0x72c4 #define USB_VENDOR_ID_ELECOM 0x056e #define 
USB_DEVICE_ID_ELECOM_BM084 0x0061 @@ -283,6 +286,9 @@ #define USB_VENDOR_ID_EMS 0x2006 #define USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II 0x0118 +#define USB_VENDOR_ID_FLATFROG 0x25b5 +#define USB_DEVICE_ID_MULTITOUCH_3200 0x0002 + #define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 @@ -296,6 +302,9 @@ #define USB_VENDOR_ID_EZKEY 0x0518 #define USB_DEVICE_ID_BTC_8193 0x0002 +#define USB_VENDOR_ID_FREESCALE 0x15A2 +#define USB_DEVICE_ID_FREESCALE_MX28 0x004F + #define USB_VENDOR_ID_FRUCTEL 0x25B6 #define USB_DEVICE_ID_GAMETEL_MT_MODE 0x0002 @@ -305,6 +314,7 @@ #define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc #define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003 +#define USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS 0x0100 #define USB_VENDOR_ID_GLAB 0x06c2 #define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 @@ -496,6 +506,7 @@ #define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 #define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 #define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f +#define USB_DEVICE_ID_LOGITECH_HARMONY_PS3 0x0306 #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD_CORD 0xc20a #define USB_DEVICE_ID_LOGITECH_RUMBLEPAD 0xc211 #define USB_DEVICE_ID_LOGITECH_EXTREME_3D 0xc215 @@ -652,7 +663,6 @@ #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH 0x3000 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001 #define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008 -#define USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN 0x3001 #define USB_VENDOR_ID_ROCCAT 0x1e7d #define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4 @@ -683,6 +693,7 @@ #define USB_VENDOR_ID_SONY 0x054c #define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b +#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306 #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f @@ -758,6 +769,7 @@ #define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005 #define USB_DEVICE_ID_UCLOGIC_TABLET_WP1062 0x0064 #define USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850 0x0522 +#define USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60 0x0781 #define USB_VENDOR_ID_UNITEC 0x227d #define USB_DEVICE_ID_UNITEC_USB_TOUCH_0709 0x0709 diff --git a/trunk/drivers/hid/hid-input.c b/trunk/drivers/hid/hid-input.c index 811bfad64609..d917c0d53685 100644 --- a/trunk/drivers/hid/hid-input.c +++ b/trunk/drivers/hid/hid-input.c @@ -1154,6 +1154,7 @@ static void report_features(struct hid_device *hid) int hidinput_connect(struct hid_device *hid, unsigned int force) { + struct hid_driver *drv = hid->driver; struct hid_report *report; struct hid_input *hidinput = NULL; struct input_dev *input_dev; @@ -1228,6 +1229,8 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) * UGCI) cram a lot of unrelated inputs into the * same interface. 
*/ hidinput->report = report; + if (drv->input_configured) + drv->input_configured(hid, hidinput); if (input_register_device(hidinput->input)) goto out_cleanup; hidinput = NULL; @@ -1235,8 +1238,12 @@ int hidinput_connect(struct hid_device *hid, unsigned int force) } } - if (hidinput && input_register_device(hidinput->input)) - goto out_cleanup; + if (hidinput) { + if (drv->input_configured) + drv->input_configured(hid, hidinput); + if (input_register_device(hidinput->input)) + goto out_cleanup; + } return 0; diff --git a/trunk/drivers/hid/hid-lcpower.c b/trunk/drivers/hid/hid-lcpower.c index c4fe9bd095b7..22bc14abdfa3 100644 --- a/trunk/drivers/hid/hid-lcpower.c +++ b/trunk/drivers/hid/hid-lcpower.c @@ -24,7 +24,7 @@ static int ts_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { - if ((usage->hid & HID_USAGE_PAGE) != 0x0ffbc0000) + if ((usage->hid & HID_USAGE_PAGE) != HID_UP_LOGIVENDOR) return 0; switch (usage->hid & HID_USAGE) { diff --git a/trunk/drivers/hid/hid-lenovo-tpkbd.c b/trunk/drivers/hid/hid-lenovo-tpkbd.c index 77d2df04c97b..cea016e94f43 100644 --- a/trunk/drivers/hid/hid-lenovo-tpkbd.c +++ b/trunk/drivers/hid/hid-lenovo-tpkbd.c @@ -56,9 +56,8 @@ static int tpkbd_input_mapping(struct hid_device *hdev, static int tpkbd_features_set(struct hid_device *hdev) { struct hid_report *report; - struct tpkbd_data_pointer *data_pointer; + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[4]; report->field[0]->value[0] = data_pointer->press_to_select ? 0x01 : 0x02; @@ -77,14 +76,8 @@ static ssize_t pointer_press_to_select_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; - - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_to_select); } @@ -94,16 +87,10 @@ static ssize_t pointer_press_to_select_store(struct device *dev, const char *buf, size_t count) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); int value; - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) @@ -119,14 +106,8 @@ static ssize_t pointer_dragging_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; - - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->dragging); } @@ -136,16 +117,10 @@ static ssize_t pointer_dragging_store(struct device *dev, const char *buf, size_t count) 
{ - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); int value; - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) @@ -161,14 +136,8 @@ static ssize_t pointer_release_to_select_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; - - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->release_to_select); } @@ -178,16 +147,10 @@ static ssize_t pointer_release_to_select_store(struct device *dev, const char *buf, size_t count) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); int value; - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) @@ -203,14 +166,8 @@ static ssize_t pointer_select_right_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; - - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->select_right); } @@ -220,16 +177,10 @@ static ssize_t pointer_select_right_store(struct device *dev, const char *buf, size_t count) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); int value; - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - if (kstrtoint(buf, 10, &value)) return -EINVAL; if (value < 0 || value > 1) @@ -245,14 +196,8 @@ static ssize_t pointer_sensitivity_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; - - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->sensitivity); @@ -263,16 +208,10 @@ static ssize_t pointer_sensitivity_store(struct device *dev, const char *buf, size_t count) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; + struct hid_device *hdev = 
container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); int value; - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - if (kstrtoint(buf, 10, &value) || value < 1 || value > 255) return -EINVAL; @@ -286,14 +225,10 @@ static ssize_t pointer_press_speed_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); + data_pointer = hid_get_drvdata(hdev); return snprintf(buf, PAGE_SIZE, "%u\n", data_pointer->press_speed); @@ -304,16 +239,10 @@ static ssize_t pointer_press_speed_store(struct device *dev, const char *buf, size_t count) { - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); int value; - hdev = container_of(dev, struct hid_device, dev); - if (hdev == NULL) - return -ENODEV; - - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - if (kstrtoint(buf, 10, &value) || value < 1 || value > 255) return -EINVAL; @@ -370,15 +299,11 @@ static const struct attribute_group tpkbd_attr_group_pointer = { static enum led_brightness tpkbd_led_brightness_get( struct led_classdev *led_cdev) { - struct device *dev; - struct hid_device *hdev; - struct tpkbd_data_pointer *data_pointer; + struct device *dev = led_cdev->dev->parent; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); int led_nr = 0; - dev = led_cdev->dev->parent; - hdev = container_of(dev, struct hid_device, dev); - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - if (led_cdev == &data_pointer->led_micmute) led_nr = 1; @@ -390,16 +315,12 @@ static enum led_brightness tpkbd_led_brightness_get( static void tpkbd_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness value) { - struct device *dev; - struct hid_device *hdev; + struct device *dev = led_cdev->dev->parent; + struct hid_device *hdev = container_of(dev, struct hid_device, dev); + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); struct hid_report *report; - struct tpkbd_data_pointer *data_pointer; int led_nr = 0; - dev = led_cdev->dev->parent; - hdev = container_of(dev, struct hid_device, dev); - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - if (led_cdev == &data_pointer->led_micmute) led_nr = 1; @@ -508,17 +429,17 @@ static int tpkbd_probe(struct hid_device *hdev, static void tpkbd_remove_tp(struct hid_device *hdev) { - struct tpkbd_data_pointer *data_pointer; + struct tpkbd_data_pointer *data_pointer = hid_get_drvdata(hdev); sysfs_remove_group(&hdev->dev.kobj, &tpkbd_attr_group_pointer); - data_pointer = (struct tpkbd_data_pointer *) hid_get_drvdata(hdev); - led_classdev_unregister(&data_pointer->led_micmute); led_classdev_unregister(&data_pointer->led_mute); hid_set_drvdata(hdev, NULL); + kfree(data_pointer->led_micmute.name); + kfree(data_pointer->led_mute.name); kfree(data_pointer); } diff --git 
a/trunk/drivers/hid/hid-lg.c b/trunk/drivers/hid/hid-lg.c index fc37ed6b108c..a2f8e88b9fa2 100644 --- a/trunk/drivers/hid/hid-lg.c +++ b/trunk/drivers/hid/hid-lg.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2010 Hendrik Iben */ @@ -109,7 +108,7 @@ static __u8 dfp_rdesc_fixed[] = { static __u8 *lg_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { - struct lg_drv_data *drv_data = (struct lg_drv_data *)hid_get_drvdata(hdev); + struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_RDESC) && *rsize >= 90 && rdesc[83] == 0x26 && rdesc[84] == 0x8c && rdesc[85] == 0x02) { @@ -278,7 +277,7 @@ static int lg_input_mapping(struct hid_device *hdev, struct hid_input *hi, 0, 0, 0, 0, 0,183,184,185,186,187, 188,189,190,191,192,193,194, 0, 0, 0 }; - struct lg_drv_data *drv_data = (struct lg_drv_data *)hid_get_drvdata(hdev); + struct lg_drv_data *drv_data = hid_get_drvdata(hdev); unsigned int hid = usage->hid; if (hdev->product == USB_DEVICE_ID_LOGITECH_RECEIVER && @@ -319,7 +318,7 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { - struct lg_drv_data *drv_data = (struct lg_drv_data *)hid_get_drvdata(hdev); + struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_BAD_RELATIVE_KEYS) && usage->type == EV_KEY && (field->flags & HID_MAIN_ITEM_RELATIVE)) @@ -335,13 +334,16 @@ static int lg_input_mapped(struct hid_device *hdev, struct hid_input *hi, static int lg_event(struct hid_device *hdev, struct hid_field *field, struct hid_usage *usage, __s32 value) { - struct lg_drv_data *drv_data = (struct lg_drv_data *)hid_get_drvdata(hdev); + struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if ((drv_data->quirks & LG_INVERT_HWHEEL) && usage->code == REL_HWHEEL) { input_event(field->hidinput->input, usage->type, usage->code, -value); return 1; } + if (drv_data->quirks & LG_FF4) { + return lg4ff_adjust_input_event(hdev, field, usage, value, drv_data); + } return 0; } @@ -358,7 +360,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) return -ENOMEM; } drv_data->quirks = id->driver_data; - + hid_set_drvdata(hdev, (void *)drv_data); if (drv_data->quirks & LG_NOGET) @@ -380,7 +382,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) } /* Setup wireless link with Logitech Wii wheel */ - if(hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { + if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; ret = hdev->hid_output_raw_report(hdev, buf, sizeof(buf), HID_FEATURE_REPORT); @@ -416,7 +418,7 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) static void lg_remove(struct hid_device *hdev) { - struct lg_drv_data *drv_data = (struct lg_drv_data *)hid_get_drvdata(hdev); + struct lg_drv_data *drv_data = hid_get_drvdata(hdev); if (drv_data->quirks & LG_FF4) lg4ff_deinit(hdev); @@ -476,7 +478,7 @@ static const struct hid_device_id lg_devices[] = { .driver_data = LG_NOGET | LG_FF4 }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL), .driver_data = LG_FF4 }, - { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ), + { 
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG), .driver_data = LG_FF }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2), .driver_data = LG_FF2 }, diff --git a/trunk/drivers/hid/hid-lg.h b/trunk/drivers/hid/hid-lg.h index d64cf8d2751e..142ce3f5f055 100644 --- a/trunk/drivers/hid/hid-lg.h +++ b/trunk/drivers/hid/hid-lg.h @@ -25,9 +25,13 @@ static inline int lg3ff_init(struct hid_device *hdev) { return -1; } #endif #ifdef CONFIG_LOGIWHEELS_FF +int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data); int lg4ff_init(struct hid_device *hdev); int lg4ff_deinit(struct hid_device *hdev); #else +static inline int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data) { return 0; } static inline int lg4ff_init(struct hid_device *hdev) { return -1; } static inline int lg4ff_deinit(struct hid_device *hdev) { return -1; } #endif diff --git a/trunk/drivers/hid/hid-lg4ff.c b/trunk/drivers/hid/hid-lg4ff.c index f3390ee6105c..d7947c701f30 100644 --- a/trunk/drivers/hid/hid-lg4ff.c +++ b/trunk/drivers/hid/hid-lg4ff.c @@ -43,6 +43,11 @@ #define G27_REV_MAJ 0x12 #define G27_REV_MIN 0x38 +#define DFP_X_MIN 0 +#define DFP_X_MAX 16383 +#define DFP_PEDAL_MIN 0 +#define DFP_PEDAL_MAX 255 + #define to_hid_device(pdev) container_of(pdev, struct hid_device, dev) static void hid_lg4ff_set_range_dfp(struct hid_device *hid, u16 range); @@ -53,6 +58,7 @@ static ssize_t lg4ff_range_store(struct device *dev, struct device_attribute *at static DEVICE_ATTR(range, S_IRWXU | S_IRWXG | S_IRWXO, lg4ff_range_show, lg4ff_range_store); struct lg4ff_device_entry { + __u32 product_id; __u16 range; __u16 min_range; __u16 max_range; @@ -129,26 +135,77 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = { {G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */ }; +/* Recalculates X axis value accordingly to currently selected range */ +static __s32 lg4ff_adjust_dfp_x_axis(__s32 value, __u16 range) +{ + __u16 max_range; + __s32 new_value; + + if (range == 900) + return value; + else if (range == 200) + return value; + else if (range < 200) + max_range = 200; + else + max_range = 900; + + new_value = 8192 + mult_frac(value - 8192, max_range, range); + if (new_value < 0) + return 0; + else if (new_value > 16383) + return 16383; + else + return new_value; +} + +int lg4ff_adjust_input_event(struct hid_device *hid, struct hid_field *field, + struct hid_usage *usage, __s32 value, struct lg_drv_data *drv_data) +{ + struct lg4ff_device_entry *entry = drv_data->device_props; + __s32 new_value = 0; + + if (!entry) { + hid_err(hid, "Device properties not found"); + return 0; + } + + switch (entry->product_id) { + case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: + switch (usage->code) { + case ABS_X: + new_value = lg4ff_adjust_dfp_x_axis(value, entry->range); + input_event(field->hidinput->input, usage->type, usage->code, new_value); + return 1; + default: + return 0; + } + default: + return 0; + } +} + static int hid_lg4ff_play(struct input_dev *dev, void *data, struct ff_effect *effect) { struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); + __s32 *value = report->field[0]->value; int x; -#define CLAMP(x) if (x < 0) x = 0; if (x > 0xff) x = 0xff +#define CLAMP(x) do 
{ if (x < 0) x = 0; else if (x > 0xff) x = 0xff; } while (0) switch (effect->type) { case FF_CONSTANT: x = effect->u.ramp.start_level + 0x80; /* 0x80 is no force */ CLAMP(x); - report->field[0]->value[0] = 0x11; /* Slot 1 */ - report->field[0]->value[1] = 0x08; - report->field[0]->value[2] = x; - report->field[0]->value[3] = 0x80; - report->field[0]->value[4] = 0x00; - report->field[0]->value[5] = 0x00; - report->field[0]->value[6] = 0x00; + value[0] = 0x11; /* Slot 1 */ + value[1] = 0x08; + value[2] = x; + value[3] = 0x80; + value[4] = 0x00; + value[5] = 0x00; + value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); break; @@ -163,14 +220,15 @@ static void hid_lg4ff_set_autocenter_default(struct input_dev *dev, u16 magnitud struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); + __s32 *value = report->field[0]->value; - report->field[0]->value[0] = 0xfe; - report->field[0]->value[1] = 0x0d; - report->field[0]->value[2] = magnitude >> 13; - report->field[0]->value[3] = magnitude >> 13; - report->field[0]->value[4] = magnitude >> 8; - report->field[0]->value[5] = 0x00; - report->field[0]->value[6] = 0x00; + value[0] = 0xfe; + value[1] = 0x0d; + value[2] = magnitude >> 13; + value[3] = magnitude >> 13; + value[4] = magnitude >> 8; + value[5] = 0x00; + value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); } @@ -181,16 +239,16 @@ static void hid_lg4ff_set_autocenter_ffex(struct input_dev *dev, u16 magnitude) struct hid_device *hid = input_get_drvdata(dev); struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); + __s32 *value = report->field[0]->value; magnitude = magnitude * 90 / 65535; - - report->field[0]->value[0] = 0xfe; - report->field[0]->value[1] = 0x03; - report->field[0]->value[2] = magnitude >> 14; - report->field[0]->value[3] = magnitude >> 14; - report->field[0]->value[4] = magnitude; - report->field[0]->value[5] = 0x00; - report->field[0]->value[6] = 0x00; + value[0] = 0xfe; + value[1] = 0x03; + value[2] = magnitude >> 14; + value[3] = magnitude >> 14; + value[4] = magnitude; + value[5] = 0x00; + value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); } @@ -200,15 +258,17 @@ static void hid_lg4ff_set_range_g25(struct hid_device *hid, u16 range) { struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); + __s32 *value = report->field[0]->value; + dbg_hid("G25/G27/DFGT: setting range to %u\n", range); - report->field[0]->value[0] = 0xf8; - report->field[0]->value[1] = 0x81; - report->field[0]->value[2] = range & 0x00ff; - report->field[0]->value[3] = (range & 0xff00) >> 8; - report->field[0]->value[4] = 0x00; - report->field[0]->value[5] = 0x00; - report->field[0]->value[6] = 0x00; + value[0] = 0xf8; + value[1] = 0x81; + value[2] = range & 0x00ff; + value[3] = (range & 0xff00) >> 8; + value[4] = 0x00; + value[5] = 0x00; + value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); } @@ -219,16 +279,18 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); int start_left, start_right, 
full_range; + __s32 *value = report->field[0]->value; + dbg_hid("Driving Force Pro: setting range to %u\n", range); /* Prepare "coarse" limit command */ - report->field[0]->value[0] = 0xf8; - report->field[0]->value[1] = 0x00; /* Set later */ - report->field[0]->value[2] = 0x00; - report->field[0]->value[3] = 0x00; - report->field[0]->value[4] = 0x00; - report->field[0]->value[5] = 0x00; - report->field[0]->value[6] = 0x00; + value[0] = 0xf8; + value[1] = 0x00; /* Set later */ + value[2] = 0x00; + value[3] = 0x00; + value[4] = 0x00; + value[5] = 0x00; + value[6] = 0x00; if (range > 200) { report->field[0]->value[1] = 0x03; @@ -240,13 +302,13 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) usbhid_submit_report(hid, report, USB_DIR_OUT); /* Prepare "fine" limit command */ - report->field[0]->value[0] = 0x81; - report->field[0]->value[1] = 0x0b; - report->field[0]->value[2] = 0x00; - report->field[0]->value[3] = 0x00; - report->field[0]->value[4] = 0x00; - report->field[0]->value[5] = 0x00; - report->field[0]->value[6] = 0x00; + value[0] = 0x81; + value[1] = 0x0b; + value[2] = 0x00; + value[3] = 0x00; + value[4] = 0x00; + value[5] = 0x00; + value[6] = 0x00; if (range == 200 || range == 900) { /* Do not apply any fine limit */ usbhid_submit_report(hid, report, USB_DIR_OUT); @@ -257,11 +319,11 @@ static void hid_lg4ff_set_range_dfp(struct hid_device *hid, __u16 range) start_left = (((full_range - range + 1) * 2047) / full_range); start_right = 0xfff - start_left; - report->field[0]->value[2] = start_left >> 4; - report->field[0]->value[3] = start_right >> 4; - report->field[0]->value[4] = 0xff; - report->field[0]->value[5] = (start_right & 0xe) << 4 | (start_left & 0xe); - report->field[0]->value[6] = 0xff; + value[2] = start_left >> 4; + value[3] = start_right >> 4; + value[4] = 0xff; + value[5] = (start_right & 0xe) << 4 | (start_left & 0xe); + value[6] = 0xff; usbhid_submit_report(hid, report, USB_DIR_OUT); } @@ -344,14 +406,15 @@ static void lg4ff_set_leds(struct hid_device *hid, __u8 leds) { struct list_head *report_list = &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct hid_report *report = list_entry(report_list->next, struct hid_report, list); - - report->field[0]->value[0] = 0xf8; - report->field[0]->value[1] = 0x12; - report->field[0]->value[2] = leds; - report->field[0]->value[3] = 0x00; - report->field[0]->value[4] = 0x00; - report->field[0]->value[5] = 0x00; - report->field[0]->value[6] = 0x00; + __s32 *value = report->field[0]->value; + + value[0] = 0xf8; + value[1] = 0x12; + value[2] = leds; + value[3] = 0x00; + value[4] = 0x00; + value[5] = 0x00; + value[6] = 0x00; usbhid_submit_report(hid, report, USB_DIR_OUT); } @@ -360,7 +423,7 @@ static void lg4ff_led_set_brightness(struct led_classdev *led_cdev, { struct device *dev = led_cdev->dev->parent; struct hid_device *hid = container_of(dev, struct hid_device, dev); - struct lg_drv_data *drv_data = (struct lg_drv_data *)hid_get_drvdata(hid); + struct lg_drv_data *drv_data = hid_get_drvdata(hid); struct lg4ff_device_entry *entry; int i, state = 0; @@ -395,7 +458,7 @@ static enum led_brightness lg4ff_led_get_brightness(struct led_classdev *led_cde { struct device *dev = led_cdev->dev->parent; struct hid_device *hid = container_of(dev, struct hid_device, dev); - struct lg_drv_data *drv_data = (struct lg_drv_data *)hid_get_drvdata(hid); + struct lg_drv_data *drv_data = hid_get_drvdata(hid); struct lg4ff_device_entry *entry; int i, value = 0; @@ -501,7 +564,7 @@ int lg4ff_init(struct hid_device *hid) 
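
Illustrative sketch, not part of the applied diff: the lg4ff_init() hunk below records the wheel's product_id and, for the Driving Force Pro, pins ABS_X to 0..16383 (DFP_X_MIN..DFP_X_MAX), while the lg4ff_adjust_input_event()/lg4ff_adjust_dfp_x_axis() additions earlier in this file stretch each report back onto that full scale when a reduced rotation range is selected. The driver does the scaling with mult_frac(); a standalone model of the same arithmetic, with the same clamping, looks roughly like this:

#include <stdio.h>

/* Model of lg4ff_adjust_dfp_x_axis(): 14-bit axis, centre at 8192. */
static int dfp_rescale(int value, int range)
{
	int max_range;
	long scaled;

	if (range == 200 || range == 900)	/* native scales: no adjustment */
		return value;
	max_range = (range < 200) ? 200 : 900;

	/* the kernel code uses mult_frac() here to avoid intermediate overflow */
	scaled = 8192 + (long)(value - 8192) * max_range / range;
	if (scaled < 0)
		return 0;
	if (scaled > 16383)
		return 16383;
	return (int)scaled;
}

int main(void)
{
	/* 270 degree range selected, wheel halfway between centre and right stop */
	printf("%d\n", dfp_rescale(12288, 270));	/* 21845 -> clamped to 16383 */
	/* 450 degree range selected */
	printf("%d\n", dfp_rescale(10000, 450));	/* -> 11808 */
	return 0;
}

Clamping to 0/16383 keeps the stretched value inside the axis range that lg4ff_init() advertises via input_set_abs_params().
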
/* Check if autocentering is available and * set the centering force to zero by default */ if (test_bit(FF_AUTOCENTER, dev->ffbit)) { - if(rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */ + if (rev_maj == FFEX_REV_MAJ && rev_min == FFEX_REV_MIN) /* Formula Force EX expects different autocentering command */ dev->ff->set_autocenter = hid_lg4ff_set_autocenter_ffex; else dev->ff->set_autocenter = hid_lg4ff_set_autocenter_default; @@ -524,6 +587,7 @@ int lg4ff_init(struct hid_device *hid) } drv_data->device_props = entry; + entry->product_id = lg4ff_devices[i].product_id; entry->min_range = lg4ff_devices[i].min_range; entry->max_range = lg4ff_devices[i].max_range; entry->set_range = lg4ff_devices[i].set_range; @@ -534,6 +598,18 @@ int lg4ff_init(struct hid_device *hid) return error; dbg_hid("sysfs interface created\n"); + /* Set default axes parameters */ + switch (lg4ff_devices[i].product_id) { + case USB_DEVICE_ID_LOGITECH_DFP_WHEEL: + dbg_hid("Setting axes parameters for Driving Force Pro\n"); + input_set_abs_params(dev, ABS_X, DFP_X_MIN, DFP_X_MAX, 0, 0); + input_set_abs_params(dev, ABS_Y, DFP_PEDAL_MIN, DFP_PEDAL_MAX, 0, 0); + input_set_abs_params(dev, ABS_RZ, DFP_PEDAL_MIN, DFP_PEDAL_MAX, 0, 0); + break; + default: + break; + } + /* Set the maximum range to start with */ entry->range = entry->max_range; if (entry->set_range != NULL) @@ -594,6 +670,8 @@ int lg4ff_init(struct hid_device *hid) return 0; } + + int lg4ff_deinit(struct hid_device *hid) { struct lg4ff_device_entry *entry; diff --git a/trunk/drivers/hid/hid-logitech-dj.c b/trunk/drivers/hid/hid-logitech-dj.c index 0f9c146fc00d..9500f2f3f8fe 100644 --- a/trunk/drivers/hid/hid-logitech-dj.c +++ b/trunk/drivers/hid/hid-logitech-dj.c @@ -193,6 +193,7 @@ static struct hid_ll_driver logi_dj_ll_driver; static int logi_dj_output_hidraw_report(struct hid_device *hid, u8 * buf, size_t count, unsigned char report_type); +static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev); static void logi_dj_recv_destroy_djhid_device(struct dj_receiver_dev *djrcv_dev, struct dj_report *dj_report) @@ -233,6 +234,7 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, if (dj_report->report_params[DEVICE_PAIRED_PARAM_SPFUNCTION] & SPFUNCTION_DEVICE_LIST_EMPTY) { dbg_hid("%s: device list is empty\n", __func__); + djrcv_dev->querying_devices = false; return; } @@ -243,6 +245,12 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev, return; } + if (djrcv_dev->paired_dj_devices[dj_report->device_index]) { + /* The device is already known. No need to reallocate it. */ + dbg_hid("%s: device is already known\n", __func__); + return; + } + dj_hiddev = hid_allocate_device(); if (IS_ERR(dj_hiddev)) { dev_err(&djrcv_hdev->dev, "%s: hid_allocate_device failed\n", @@ -306,6 +314,7 @@ static void delayedwork_callback(struct work_struct *work) struct dj_report dj_report; unsigned long flags; int count; + int retval; dbg_hid("%s\n", __func__); @@ -338,6 +347,25 @@ static void delayedwork_callback(struct work_struct *work) logi_dj_recv_destroy_djhid_device(djrcv_dev, &dj_report); break; default: + /* A normal report (i. e. not belonging to a pair/unpair notification) + * arriving here, means that the report arrived but we did not have a + * paired dj_device associated to the report's device_index, this + * means that the original "device paired" notification corresponding + * to this dj_device never arrived to this driver. 
The reason is that + * hid-core discards all packets coming from a device while probe() is + * executing. */ + if (!djrcv_dev->paired_dj_devices[dj_report.device_index]) { + /* ok, we don't know the device, just re-ask the + * receiver for the list of connected devices. */ + retval = logi_dj_recv_query_paired_devices(djrcv_dev); + if (!retval) { + /* everything went fine, so just leave */ + break; + } + dev_err(&djrcv_dev->hdev->dev, + "%s:logi_dj_recv_query_paired_devices " + "error:%d\n", __func__, retval); + } dbg_hid("%s: unexpected report type\n", __func__); } } @@ -368,6 +396,12 @@ static void logi_dj_recv_forward_null_report(struct dj_receiver_dev *djrcv_dev, if (!djdev) { dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" " is NULL, index %d\n", dj_report->device_index); + kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); + + if (schedule_work(&djrcv_dev->work) == 0) { + dbg_hid("%s: did not schedule the work item, was already " + "queued\n", __func__); + } return; } @@ -398,6 +432,12 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev, if (dj_device == NULL) { dbg_hid("djrcv_dev->paired_dj_devices[dj_report->device_index]" " is NULL, index %d\n", dj_report->device_index); + kfifo_in(&djrcv_dev->notif_fifo, dj_report, sizeof(struct dj_report)); + + if (schedule_work(&djrcv_dev->work) == 0) { + dbg_hid("%s: did not schedule the work item, was already " + "queued\n", __func__); + } return; } @@ -439,7 +479,11 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) struct dj_report *dj_report; int retval; - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); + /* no need to protect djrcv_dev->querying_devices */ + if (djrcv_dev->querying_devices) + return 0; + + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; @@ -450,13 +494,14 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) return retval; } + static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, unsigned timeout) { struct dj_report *dj_report; int retval; - dj_report = kzalloc(sizeof(dj_report), GFP_KERNEL); + dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); if (!dj_report) return -ENOMEM; dj_report->report_id = REPORT_ID_DJ_SHORT; diff --git a/trunk/drivers/hid/hid-logitech-dj.h b/trunk/drivers/hid/hid-logitech-dj.h index fd28a5e0ca3b..4a4000340ce1 100644 --- a/trunk/drivers/hid/hid-logitech-dj.h +++ b/trunk/drivers/hid/hid-logitech-dj.h @@ -101,6 +101,7 @@ struct dj_receiver_dev { struct work_struct work; struct kfifo notif_fifo; spinlock_t lock; + bool querying_devices; }; struct dj_device { diff --git a/trunk/drivers/hid/hid-magicmouse.c b/trunk/drivers/hid/hid-magicmouse.c index 73647266daad..25ddf3e3aec6 100644 --- a/trunk/drivers/hid/hid-magicmouse.c +++ b/trunk/drivers/hid/hid-magicmouse.c @@ -392,7 +392,7 @@ static int magicmouse_setup_input(struct input_dev *input, struct hid_device *hd __set_bit(EV_ABS, input->evbit); - error = input_mt_init_slots(input, 16); + error = input_mt_init_slots(input, 16, 0); if (error) return error; input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, 255 << 2, diff --git a/trunk/drivers/hid/hid-microsoft.c b/trunk/drivers/hid/hid-microsoft.c index e5c699b6c6f3..3acdcfcc17df 100644 --- a/trunk/drivers/hid/hid-microsoft.c +++ b/trunk/drivers/hid/hid-microsoft.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for 
Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-monterey.c b/trunk/drivers/hid/hid-monterey.c index dedf757781ae..cd3643e06fa6 100644 --- a/trunk/drivers/hid/hid-monterey.c +++ b/trunk/drivers/hid/hid-monterey.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-multitouch.c b/trunk/drivers/hid/hid-multitouch.c index 59c8b5c1d2de..3eb02b94fc87 100644 --- a/trunk/drivers/hid/hid-multitouch.c +++ b/trunk/drivers/hid/hid-multitouch.c @@ -51,12 +51,12 @@ MODULE_LICENSE("GPL"); #define MT_QUIRK_VALID_IS_INRANGE (1 << 5) #define MT_QUIRK_VALID_IS_CONFIDENCE (1 << 6) #define MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE (1 << 8) +#define MT_QUIRK_NO_AREA (1 << 9) struct mt_slot { __s32 x, y, p, w, h; __s32 contactid; /* the device ContactID assigned to this slot */ bool touch_state; /* is the touch valid? */ - bool seen_in_this_frame;/* has this slot been updated */ }; struct mt_class { @@ -92,8 +92,9 @@ struct mt_device { __u8 touches_by_report; /* how many touches are present in one report: * 1 means we should use a serial protocol * > 1 means hybrid (multitouch) protocol */ + bool serial_maybe; /* need to check for serial protocol */ bool curvalid; /* is the current contact valid? */ - struct mt_slot *slots; + unsigned mt_flags; /* flags to pass to input-mt */ }; /* classes of device behavior */ @@ -115,6 +116,9 @@ struct mt_device { #define MT_CLS_EGALAX_SERIAL 0x0104 #define MT_CLS_TOPSEED 0x0105 #define MT_CLS_PANASONIC 0x0106 +#define MT_CLS_FLATFROG 0x0107 +#define MT_CLS_GENERALTOUCH_TWOFINGERS 0x0108 +#define MT_CLS_GENERALTOUCH_PWT_TENFINGERS 0x0109 #define MT_DEFAULT_MAXCONTACT 10 @@ -134,25 +138,6 @@ static int cypress_compute_slot(struct mt_device *td) return -1; } -static int find_slot_from_contactid(struct mt_device *td) -{ - int i; - for (i = 0; i < td->maxcontacts; ++i) { - if (td->slots[i].contactid == td->curdata.contactid && - td->slots[i].touch_state) - return i; - } - for (i = 0; i < td->maxcontacts; ++i) { - if (!td->slots[i].seen_in_this_frame && - !td->slots[i].touch_state) - return i; - } - /* should not occurs. If this happens that means - * that the device sent more touches that it says - * in the report descriptor. It is ignored then. 
*/ - return -1; -} - static struct mt_class mt_classes[] = { { .name = MT_CLS_DEFAULT, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP }, @@ -190,7 +175,9 @@ static struct mt_class mt_classes[] = { MT_QUIRK_SLOT_IS_CONTACTID, .sn_move = 2048, .sn_width = 128, - .sn_height = 128 }, + .sn_height = 128, + .maxcontacts = 60, + }, { .name = MT_CLS_CYPRESS, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | MT_QUIRK_CYPRESS, @@ -215,7 +202,24 @@ static struct mt_class mt_classes[] = { { .name = MT_CLS_PANASONIC, .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP, .maxcontacts = 4 }, + { .name = MT_CLS_GENERALTOUCH_TWOFINGERS, + .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | + MT_QUIRK_VALID_IS_INRANGE | + MT_QUIRK_SLOT_IS_CONTACTNUMBER, + .maxcontacts = 2 + }, + { .name = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, + .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | + MT_QUIRK_SLOT_IS_CONTACTNUMBER, + .maxcontacts = 10 + }, + { .name = MT_CLS_FLATFROG, + .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP | + MT_QUIRK_NO_AREA, + .sn_move = 2048, + .maxcontacts = 40, + }, { } }; @@ -319,24 +323,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, * We need to ignore fields that belong to other collections * such as Mouse that might have the same GenericDesktop usages. */ if (field->application == HID_DG_TOUCHSCREEN) - set_bit(INPUT_PROP_DIRECT, hi->input->propbit); + td->mt_flags |= INPUT_MT_DIRECT; else if (field->application != HID_DG_TOUCHPAD) return 0; - /* In case of an indirect device (touchpad), we need to add - * specific BTN_TOOL_* to be handled by the synaptics xorg - * driver. - * We also consider that touchscreens providing buttons are touchpads. + /* + * Model touchscreens providing buttons as touchpads. */ if (field->application == HID_DG_TOUCHPAD || - (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON || - cls->is_indirect) { - set_bit(INPUT_PROP_POINTER, hi->input->propbit); - set_bit(BTN_TOOL_FINGER, hi->input->keybit); - set_bit(BTN_TOOL_DOUBLETAP, hi->input->keybit); - set_bit(BTN_TOOL_TRIPLETAP, hi->input->keybit); - set_bit(BTN_TOOL_QUADTAP, hi->input->keybit); - } + (usage->hid & HID_USAGE_PAGE) == HID_UP_BUTTON) + td->mt_flags |= INPUT_MT_POINTER; /* eGalax devices provide a Digitizer.Stylus input which overrides * the correct Digitizers.Finger X/Y ranges. 
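
Illustrative sketch, not part of the applied diff: the hid-multitouch hunks below drop the driver's private slots[] array and seen_in_this_frame bookkeeping (including find_slot_from_contactid(), removed above) and let the input-mt core track contacts instead: slots are resolved by ContactID with input_mt_get_slot_by_key(), and end-of-frame housekeeping moves into input_mt_sync_frame(). Assuming an input device prepared with input_mt_init_slots(input, max_contacts, INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED), the per-contact and per-frame flow reduces to roughly:

#include <linux/input/mt.h>

/* One decoded contact from a report. */
static void example_report_contact(struct input_dev *input, int contact_id,
				   bool touching, int x, int y)
{
	int slot = input_mt_get_slot_by_key(input, contact_id);

	if (slot < 0)		/* no slot available: contact is dropped */
		return;

	input_mt_slot(input, slot);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, touching);
	if (touching) {
		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);
	}
}

/* Called once the expected number of contacts has been received. */
static void example_end_of_frame(struct input_dev *input)
{
	/* With INPUT_MT_DROP_UNUSED, slots not updated in this frame are
	 * released here; pointer/BTN_TOUCH emulation is emitted as well. */
	input_mt_sync_frame(input);
	input_sync(input);
}

The INPUT_MT_DIRECT/INPUT_MT_POINTER flags likewise replace the manual INPUT_PROP_DIRECT and BTN_TOOL_* setup that the mapping code used to do by hand.
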
@@ -353,8 +349,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, EV_ABS, ABS_MT_POSITION_X); set_abs(hi->input, ABS_MT_POSITION_X, field, cls->sn_move); - /* touchscreen emulation */ - set_abs(hi->input, ABS_X, field, cls->sn_move); mt_store_field(usage, td, hi); td->last_field_index = field->index; return 1; @@ -363,8 +357,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, EV_ABS, ABS_MT_POSITION_Y); set_abs(hi->input, ABS_MT_POSITION_Y, field, cls->sn_move); - /* touchscreen emulation */ - set_abs(hi->input, ABS_Y, field, cls->sn_move); mt_store_field(usage, td, hi); td->last_field_index = field->index; return 1; @@ -388,9 +380,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, td->last_field_index = field->index; return 1; case HID_DG_CONTACTID: - if (!td->maxcontacts) - td->maxcontacts = MT_DEFAULT_MAXCONTACT; - input_mt_init_slots(hi->input, td->maxcontacts); mt_store_field(usage, td, hi); td->last_field_index = field->index; td->touches_by_report++; @@ -398,18 +387,21 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, case HID_DG_WIDTH: hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_TOUCH_MAJOR); - set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field, - cls->sn_width); + if (!(cls->quirks & MT_QUIRK_NO_AREA)) + set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field, + cls->sn_width); mt_store_field(usage, td, hi); td->last_field_index = field->index; return 1; case HID_DG_HEIGHT: hid_map_usage(hi, usage, bit, max, EV_ABS, ABS_MT_TOUCH_MINOR); - set_abs(hi->input, ABS_MT_TOUCH_MINOR, field, - cls->sn_height); - input_set_abs_params(hi->input, + if (!(cls->quirks & MT_QUIRK_NO_AREA)) { + set_abs(hi->input, ABS_MT_TOUCH_MINOR, field, + cls->sn_height); + input_set_abs_params(hi->input, ABS_MT_ORIENTATION, 0, 1, 0, 0); + } mt_store_field(usage, td, hi); td->last_field_index = field->index; return 1; @@ -418,9 +410,6 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, EV_ABS, ABS_MT_PRESSURE); set_abs(hi->input, ABS_MT_PRESSURE, field, cls->sn_pressure); - /* touchscreen emulation */ - set_abs(hi->input, ABS_PRESSURE, field, - cls->sn_pressure); mt_store_field(usage, td, hi); td->last_field_index = field->index; return 1; @@ -464,7 +453,7 @@ static int mt_input_mapped(struct hid_device *hdev, struct hid_input *hi, return -1; } -static int mt_compute_slot(struct mt_device *td) +static int mt_compute_slot(struct mt_device *td, struct input_dev *input) { __s32 quirks = td->mtclass.quirks; @@ -480,42 +469,23 @@ static int mt_compute_slot(struct mt_device *td) if (quirks & MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE) return td->curdata.contactid - 1; - return find_slot_from_contactid(td); + return input_mt_get_slot_by_key(input, td->curdata.contactid); } /* * this function is called when a whole contact has been processed, * so that it can assign it to a slot and store the data there */ -static void mt_complete_slot(struct mt_device *td) +static void mt_complete_slot(struct mt_device *td, struct input_dev *input) { - td->curdata.seen_in_this_frame = true; if (td->curvalid) { - int slotnum = mt_compute_slot(td); - - if (slotnum >= 0 && slotnum < td->maxcontacts) - td->slots[slotnum] = td->curdata; - } - td->num_received++; -} + int slotnum = mt_compute_slot(td, input); + struct mt_slot *s = &td->curdata; + if (slotnum < 0 || slotnum >= td->maxcontacts) + return; -/* - * this function is called when a whole packet has been received and processed, - * so that it can decide what 
to send to the input layer. - */ -static void mt_emit_event(struct mt_device *td, struct input_dev *input) -{ - int i; - - for (i = 0; i < td->maxcontacts; ++i) { - struct mt_slot *s = &(td->slots[i]); - if ((td->mtclass.quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) && - !s->seen_in_this_frame) { - s->touch_state = false; - } - - input_mt_slot(input, i); + input_mt_slot(input, slotnum); input_mt_report_slot_state(input, MT_TOOL_FINGER, s->touch_state); if (s->touch_state) { @@ -532,24 +502,29 @@ static void mt_emit_event(struct mt_device *td, struct input_dev *input) input_event(input, EV_ABS, ABS_MT_TOUCH_MAJOR, major); input_event(input, EV_ABS, ABS_MT_TOUCH_MINOR, minor); } - s->seen_in_this_frame = false; - } - input_mt_report_pointer_emulation(input, true); + td->num_received++; +} + +/* + * this function is called when a whole packet has been received and processed, + * so that it can decide what to send to the input layer. + */ +static void mt_sync_frame(struct mt_device *td, struct input_dev *input) +{ + input_mt_sync_frame(input); input_sync(input); td->num_received = 0; } - - static int mt_event(struct hid_device *hid, struct hid_field *field, struct hid_usage *usage, __s32 value) { struct mt_device *td = hid_get_drvdata(hid); __s32 quirks = td->mtclass.quirks; - if (hid->claimed & HID_CLAIMED_INPUT && td->slots) { + if (hid->claimed & HID_CLAIMED_INPUT) { switch (usage->hid) { case HID_DG_INRANGE: if (quirks & MT_QUIRK_ALWAYS_VALID) @@ -602,11 +577,11 @@ static int mt_event(struct hid_device *hid, struct hid_field *field, } if (usage->hid == td->last_slot_field) - mt_complete_slot(td); + mt_complete_slot(td, field->hidinput->input); if (field->index == td->last_field_index && td->num_received >= td->num_expected) - mt_emit_event(td, field->hidinput->input); + mt_sync_frame(td, field->hidinput->input); } @@ -685,18 +660,45 @@ static void mt_post_parse(struct mt_device *td) } } +static void mt_input_configured(struct hid_device *hdev, struct hid_input *hi) + +{ + struct mt_device *td = hid_get_drvdata(hdev); + struct mt_class *cls = &td->mtclass; + struct input_dev *input = hi->input; + + /* Only initialize slots for MT input devices */ + if (!test_bit(ABS_MT_POSITION_X, input->absbit)) + return; + + if (!td->maxcontacts) + td->maxcontacts = MT_DEFAULT_MAXCONTACT; + + mt_post_parse(td); + if (td->serial_maybe) + mt_post_parse_default_settings(td); + + if (cls->is_indirect) + td->mt_flags |= INPUT_MT_POINTER; + + if (cls->quirks & MT_QUIRK_NOT_SEEN_MEANS_UP) + td->mt_flags |= INPUT_MT_DROP_UNUSED; + + input_mt_init_slots(input, td->maxcontacts, td->mt_flags); + + td->mt_flags = 0; +} + static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret, i; struct mt_device *td; struct mt_class *mtclass = mt_classes; /* MT_CLS_DEFAULT */ - if (id) { - for (i = 0; mt_classes[i].name ; i++) { - if (id->driver_data == mt_classes[i].name) { - mtclass = &(mt_classes[i]); - break; - } + for (i = 0; mt_classes[i].name ; i++) { + if (id->driver_data == mt_classes[i].name) { + mtclass = &(mt_classes[i]); + break; } } @@ -722,6 +724,9 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) goto fail; } + if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID) + td->serial_maybe = true; + ret = hid_parse(hdev); if (ret != 0) goto fail; @@ -730,20 +735,6 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id) if (ret) goto fail; - mt_post_parse(td); - - if (id->vendor == HID_ANY_ID && id->product == HID_ANY_ID) - 
mt_post_parse_default_settings(td); - - td->slots = kzalloc(td->maxcontacts * sizeof(struct mt_slot), - GFP_KERNEL); - if (!td->slots) { - dev_err(&hdev->dev, "cannot allocate multitouch slots\n"); - hid_hw_stop(hdev); - ret = -ENOMEM; - goto fail; - } - ret = sysfs_create_group(&hdev->dev.kobj, &mt_attribute_group); mt_set_maxcontacts(hdev); @@ -767,6 +758,32 @@ static int mt_reset_resume(struct hid_device *hdev) mt_set_input_mode(hdev); return 0; } + +static int mt_resume(struct hid_device *hdev) +{ + struct usb_interface *intf; + struct usb_host_interface *interface; + struct usb_device *dev; + + if (hdev->bus != BUS_USB) + return 0; + + intf = to_usb_interface(hdev->dev.parent); + interface = intf->cur_altsetting; + dev = hid_to_usb_dev(hdev); + + /* Some Elan legacy devices require SET_IDLE to be set on resume. + * It should be safe to send it to other devices too. + * Tested on 3M, Stantum, Cypress, Zytronic, eGalax, and Elan panels. */ + + usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + HID_REQ_SET_IDLE, + USB_TYPE_CLASS | USB_RECIP_INTERFACE, + 0, interface->desc.bInterfaceNumber, + NULL, 0, USB_CTRL_SET_TIMEOUT); + + return 0; +} #endif static void mt_remove(struct hid_device *hdev) @@ -774,7 +791,6 @@ static void mt_remove(struct hid_device *hdev) struct mt_device *td = hid_get_drvdata(hdev); sysfs_remove_group(&hdev->dev.kobj, &mt_attribute_group); hid_hw_stop(hdev); - kfree(td->slots); kfree(td); hid_set_drvdata(hdev, NULL); } @@ -883,19 +899,39 @@ static const struct hid_device_id mt_devices[] = { { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7349) }, + { .driver_data = MT_CLS_EGALAX_SERIAL, + MT_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_73F7) }, { .driver_data = MT_CLS_EGALAX_SERIAL, MT_USB_DEVICE(USB_VENDOR_ID_DWAV, USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_A001) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_7224) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72D0) }, + { .driver_data = MT_CLS_EGALAX, + HID_USB_DEVICE(USB_VENDOR_ID_DWAV, + USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_72C4) }, /* Elo TouchSystems IntelliTouch Plus panel */ { .driver_data = MT_CLS_DUAL_NSMU_CONTACTID, MT_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2515) }, + /* Flatfrog Panels */ + { .driver_data = MT_CLS_FLATFROG, + MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG, + USB_DEVICE_ID_MULTITOUCH_3200) }, + /* GeneralTouch panel */ - { .driver_data = MT_CLS_DUAL_INRANGE_CONTACTNUMBER, + { .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS, MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS) }, + { .driver_data = MT_CLS_GENERALTOUCH_PWT_TENFINGERS, + MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, + USB_DEVICE_ID_GENERAL_TOUCH_WIN8_PWT_TENFINGERS) }, /* Gametel game controller */ { .driver_data = MT_CLS_DEFAULT, @@ -1087,11 +1123,13 @@ static struct hid_driver mt_driver = { .remove = mt_remove, .input_mapping = mt_input_mapping, .input_mapped = mt_input_mapped, + .input_configured = mt_input_configured, .feature_mapping = mt_feature_mapping, .usage_table = mt_grabbed_usages, .event = mt_event, #ifdef CONFIG_PM .reset_resume = mt_reset_resume, + .resume = mt_resume, #endif }; diff --git a/trunk/drivers/hid/hid-ntrig.c b/trunk/drivers/hid/hid-ntrig.c index 9fae2ebdd758..86a969f63292 100644 --- a/trunk/drivers/hid/hid-ntrig.c +++ b/trunk/drivers/hid/hid-ntrig.c @@ -882,10 
+882,10 @@ static int ntrig_probe(struct hid_device *hdev, const struct hid_device_id *id) nd->activate_slack = activate_slack; nd->act_state = activate_slack; nd->deactivate_slack = -deactivate_slack; - nd->sensor_logical_width = 0; - nd->sensor_logical_height = 0; - nd->sensor_physical_width = 0; - nd->sensor_physical_height = 0; + nd->sensor_logical_width = 1; + nd->sensor_logical_height = 1; + nd->sensor_physical_width = 1; + nd->sensor_physical_height = 1; hid_set_drvdata(hdev, nd); diff --git a/trunk/drivers/hid/hid-petalynx.c b/trunk/drivers/hid/hid-petalynx.c index f1ea3ff8a98d..4c521de4e7e6 100644 --- a/trunk/drivers/hid/hid-petalynx.c +++ b/trunk/drivers/hid/hid-petalynx.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-picolcd.c b/trunk/drivers/hid/hid-picolcd.c deleted file mode 100644 index 27c8ebdfad01..000000000000 --- a/trunk/drivers/hid/hid-picolcd.c +++ /dev/null @@ -1,2748 +0,0 @@ -/*************************************************************************** - * Copyright (C) 2010 by Bruno Prémont * - * * - * Based on Logitech G13 driver (v0.4) * - * Copyright (C) 2009 by Rick L. Vinyard, Jr. * - * * - * This program is free software: you can redistribute it and/or modify * - * it under the terms of the GNU General Public License as published by * - * the Free Software Foundation, version 2 of the License. * - * * - * This driver is distributed in the hope that it will be useful, but * - * WITHOUT ANY WARRANTY; without even the implied warranty of * - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * - * General Public License for more details. * - * * - * You should have received a copy of the GNU General Public License * - * along with this software. If not see . 
* - ***************************************************************************/ - -#include -#include -#include -#include "hid-ids.h" -#include "usbhid/usbhid.h" -#include - -#include -#include -#include -#include - -#include - -#include -#include - -#include -#include -#include - -#define PICOLCD_NAME "PicoLCD (graphic)" - -/* Report numbers */ -#define REPORT_ERROR_CODE 0x10 /* LCD: IN[16] */ -#define ERR_SUCCESS 0x00 -#define ERR_PARAMETER_MISSING 0x01 -#define ERR_DATA_MISSING 0x02 -#define ERR_BLOCK_READ_ONLY 0x03 -#define ERR_BLOCK_NOT_ERASABLE 0x04 -#define ERR_BLOCK_TOO_BIG 0x05 -#define ERR_SECTION_OVERFLOW 0x06 -#define ERR_INVALID_CMD_LEN 0x07 -#define ERR_INVALID_DATA_LEN 0x08 -#define REPORT_KEY_STATE 0x11 /* LCD: IN[2] */ -#define REPORT_IR_DATA 0x21 /* LCD: IN[63] */ -#define REPORT_EE_DATA 0x32 /* LCD: IN[63] */ -#define REPORT_MEMORY 0x41 /* LCD: IN[63] */ -#define REPORT_LED_STATE 0x81 /* LCD: OUT[1] */ -#define REPORT_BRIGHTNESS 0x91 /* LCD: OUT[1] */ -#define REPORT_CONTRAST 0x92 /* LCD: OUT[1] */ -#define REPORT_RESET 0x93 /* LCD: OUT[2] */ -#define REPORT_LCD_CMD 0x94 /* LCD: OUT[63] */ -#define REPORT_LCD_DATA 0x95 /* LCD: OUT[63] */ -#define REPORT_LCD_CMD_DATA 0x96 /* LCD: OUT[63] */ -#define REPORT_EE_READ 0xa3 /* LCD: OUT[63] */ -#define REPORT_EE_WRITE 0xa4 /* LCD: OUT[63] */ -#define REPORT_ERASE_MEMORY 0xb2 /* LCD: OUT[2] */ -#define REPORT_READ_MEMORY 0xb3 /* LCD: OUT[3] */ -#define REPORT_WRITE_MEMORY 0xb4 /* LCD: OUT[63] */ -#define REPORT_SPLASH_RESTART 0xc1 /* LCD: OUT[1] */ -#define REPORT_EXIT_KEYBOARD 0xef /* LCD: OUT[2] */ -#define REPORT_VERSION 0xf1 /* LCD: IN[2],OUT[1] Bootloader: IN[2],OUT[1] */ -#define REPORT_BL_ERASE_MEMORY 0xf2 /* Bootloader: IN[36],OUT[4] */ -#define REPORT_BL_READ_MEMORY 0xf3 /* Bootloader: IN[36],OUT[4] */ -#define REPORT_BL_WRITE_MEMORY 0xf4 /* Bootloader: IN[36],OUT[36] */ -#define REPORT_DEVID 0xf5 /* LCD: IN[5], OUT[1] Bootloader: IN[5],OUT[1] */ -#define REPORT_SPLASH_SIZE 0xf6 /* LCD: IN[4], OUT[1] */ -#define REPORT_HOOK_VERSION 0xf7 /* LCD: IN[2], OUT[1] */ -#define REPORT_EXIT_FLASHER 0xff /* Bootloader: OUT[2] */ - -#ifdef CONFIG_HID_PICOLCD_FB -/* Framebuffer - * - * The PicoLCD use a Topway LCD module of 256x64 pixel - * This display area is tiled over 4 controllers with 8 tiles - * each. Each tile has 8x64 pixel, each data byte representing - * a 1-bit wide vertical line of the tile. - * - * The display can be updated at a tile granularity. - * - * Chip 1 Chip 2 Chip 3 Chip 4 - * +----------------+----------------+----------------+----------------+ - * | Tile 1 | Tile 1 | Tile 1 | Tile 1 | - * +----------------+----------------+----------------+----------------+ - * | Tile 2 | Tile 2 | Tile 2 | Tile 2 | - * +----------------+----------------+----------------+----------------+ - * ... 
- * +----------------+----------------+----------------+----------------+ - * | Tile 8 | Tile 8 | Tile 8 | Tile 8 | - * +----------------+----------------+----------------+----------------+ - */ -#define PICOLCDFB_NAME "picolcdfb" -#define PICOLCDFB_WIDTH (256) -#define PICOLCDFB_HEIGHT (64) -#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8) - -#define PICOLCDFB_UPDATE_RATE_LIMIT 10 -#define PICOLCDFB_UPDATE_RATE_DEFAULT 2 - -/* Framebuffer visual structures */ -static const struct fb_fix_screeninfo picolcdfb_fix = { - .id = PICOLCDFB_NAME, - .type = FB_TYPE_PACKED_PIXELS, - .visual = FB_VISUAL_MONO01, - .xpanstep = 0, - .ypanstep = 0, - .ywrapstep = 0, - .line_length = PICOLCDFB_WIDTH / 8, - .accel = FB_ACCEL_NONE, -}; - -static const struct fb_var_screeninfo picolcdfb_var = { - .xres = PICOLCDFB_WIDTH, - .yres = PICOLCDFB_HEIGHT, - .xres_virtual = PICOLCDFB_WIDTH, - .yres_virtual = PICOLCDFB_HEIGHT, - .width = 103, - .height = 26, - .bits_per_pixel = 1, - .grayscale = 1, - .red = { - .offset = 0, - .length = 1, - .msb_right = 0, - }, - .green = { - .offset = 0, - .length = 1, - .msb_right = 0, - }, - .blue = { - .offset = 0, - .length = 1, - .msb_right = 0, - }, - .transp = { - .offset = 0, - .length = 0, - .msb_right = 0, - }, -}; -#endif /* CONFIG_HID_PICOLCD_FB */ - -/* Input device - * - * The PicoLCD has an IR receiver header, a built-in keypad with 5 keys - * and header for 4x4 key matrix. The built-in keys are part of the matrix. - */ -static const unsigned short def_keymap[] = { - KEY_RESERVED, /* none */ - KEY_BACK, /* col 4 + row 1 */ - KEY_HOMEPAGE, /* col 3 + row 1 */ - KEY_RESERVED, /* col 2 + row 1 */ - KEY_RESERVED, /* col 1 + row 1 */ - KEY_SCROLLUP, /* col 4 + row 2 */ - KEY_OK, /* col 3 + row 2 */ - KEY_SCROLLDOWN, /* col 2 + row 2 */ - KEY_RESERVED, /* col 1 + row 2 */ - KEY_RESERVED, /* col 4 + row 3 */ - KEY_RESERVED, /* col 3 + row 3 */ - KEY_RESERVED, /* col 2 + row 3 */ - KEY_RESERVED, /* col 1 + row 3 */ - KEY_RESERVED, /* col 4 + row 4 */ - KEY_RESERVED, /* col 3 + row 4 */ - KEY_RESERVED, /* col 2 + row 4 */ - KEY_RESERVED, /* col 1 + row 4 */ -}; -#define PICOLCD_KEYS ARRAY_SIZE(def_keymap) - -/* Description of in-progress IO operation, used for operations - * that trigger response from device */ -struct picolcd_pending { - struct hid_report *out_report; - struct hid_report *in_report; - struct completion ready; - int raw_size; - u8 raw_data[64]; -}; - -/* Per device data structure */ -struct picolcd_data { - struct hid_device *hdev; -#ifdef CONFIG_DEBUG_FS - struct dentry *debug_reset; - struct dentry *debug_eeprom; - struct dentry *debug_flash; - struct mutex mutex_flash; - int addr_sz; -#endif - u8 version[2]; - unsigned short opmode_delay; - /* input stuff */ - u8 pressed_keys[2]; - struct input_dev *input_keys; - struct input_dev *input_cir; - unsigned short keycode[PICOLCD_KEYS]; - -#ifdef CONFIG_HID_PICOLCD_FB - /* Framebuffer stuff */ - u8 fb_update_rate; - u8 fb_bpp; - u8 fb_force; - u8 *fb_vbitmap; /* local copy of what was sent to PicoLCD */ - u8 *fb_bitmap; /* framebuffer */ - struct fb_info *fb_info; - struct fb_deferred_io fb_defio; -#endif /* CONFIG_HID_PICOLCD_FB */ -#ifdef CONFIG_HID_PICOLCD_LCD - struct lcd_device *lcd; - u8 lcd_contrast; -#endif /* CONFIG_HID_PICOLCD_LCD */ -#ifdef CONFIG_HID_PICOLCD_BACKLIGHT - struct backlight_device *backlight; - u8 lcd_brightness; - u8 lcd_power; -#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */ -#ifdef CONFIG_HID_PICOLCD_LEDS - /* LED stuff */ - u8 led_state; - struct led_classdev *led[8]; 
-#endif /* CONFIG_HID_PICOLCD_LEDS */ - - /* Housekeeping stuff */ - spinlock_t lock; - struct mutex mutex; - struct picolcd_pending *pending; - int status; -#define PICOLCD_BOOTLOADER 1 -#define PICOLCD_FAILED 2 -#define PICOLCD_READY_FB 4 -}; - - -/* Find a given report */ -#define picolcd_in_report(id, dev) picolcd_report(id, dev, HID_INPUT_REPORT) -#define picolcd_out_report(id, dev) picolcd_report(id, dev, HID_OUTPUT_REPORT) - -static struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir) -{ - struct list_head *feature_report_list = &hdev->report_enum[dir].report_list; - struct hid_report *report = NULL; - - list_for_each_entry(report, feature_report_list, list) { - if (report->id == id) - return report; - } - hid_warn(hdev, "No report with id 0x%x found\n", id); - return NULL; -} - -#ifdef CONFIG_DEBUG_FS -static void picolcd_debug_out_report(struct picolcd_data *data, - struct hid_device *hdev, struct hid_report *report); -#define usbhid_submit_report(a, b, c) \ - do { \ - picolcd_debug_out_report(hid_get_drvdata(a), a, b); \ - usbhid_submit_report(a, b, c); \ - } while (0) -#endif - -/* Submit a report and wait for a reply from device - if device fades away - * or does not respond in time, return NULL */ -static struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev, - int report_id, const u8 *raw_data, int size) -{ - struct picolcd_data *data = hid_get_drvdata(hdev); - struct picolcd_pending *work; - struct hid_report *report = picolcd_out_report(report_id, hdev); - unsigned long flags; - int i, j, k; - - if (!report || !data) - return NULL; - if (data->status & PICOLCD_FAILED) - return NULL; - work = kzalloc(sizeof(*work), GFP_KERNEL); - if (!work) - return NULL; - - init_completion(&work->ready); - work->out_report = report; - work->in_report = NULL; - work->raw_size = 0; - - mutex_lock(&data->mutex); - spin_lock_irqsave(&data->lock, flags); - for (i = k = 0; i < report->maxfield; i++) - for (j = 0; j < report->field[i]->report_count; j++) { - hid_set_field(report->field[i], j, k < size ? 
raw_data[k] : 0); - k++; - } - data->pending = work; - usbhid_submit_report(data->hdev, report, USB_DIR_OUT); - spin_unlock_irqrestore(&data->lock, flags); - wait_for_completion_interruptible_timeout(&work->ready, HZ*2); - spin_lock_irqsave(&data->lock, flags); - data->pending = NULL; - spin_unlock_irqrestore(&data->lock, flags); - mutex_unlock(&data->mutex); - return work; -} - -#ifdef CONFIG_HID_PICOLCD_FB -/* Send a given tile to PicoLCD */ -static int picolcd_fb_send_tile(struct hid_device *hdev, int chip, int tile) -{ - struct picolcd_data *data = hid_get_drvdata(hdev); - struct hid_report *report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, hdev); - struct hid_report *report2 = picolcd_out_report(REPORT_LCD_DATA, hdev); - unsigned long flags; - u8 *tdata; - int i; - - if (!report1 || report1->maxfield != 1 || !report2 || report2->maxfield != 1) - return -ENODEV; - - spin_lock_irqsave(&data->lock, flags); - hid_set_field(report1->field[0], 0, chip << 2); - hid_set_field(report1->field[0], 1, 0x02); - hid_set_field(report1->field[0], 2, 0x00); - hid_set_field(report1->field[0], 3, 0x00); - hid_set_field(report1->field[0], 4, 0xb8 | tile); - hid_set_field(report1->field[0], 5, 0x00); - hid_set_field(report1->field[0], 6, 0x00); - hid_set_field(report1->field[0], 7, 0x40); - hid_set_field(report1->field[0], 8, 0x00); - hid_set_field(report1->field[0], 9, 0x00); - hid_set_field(report1->field[0], 10, 32); - - hid_set_field(report2->field[0], 0, (chip << 2) | 0x01); - hid_set_field(report2->field[0], 1, 0x00); - hid_set_field(report2->field[0], 2, 0x00); - hid_set_field(report2->field[0], 3, 32); - - tdata = data->fb_vbitmap + (tile * 4 + chip) * 64; - for (i = 0; i < 64; i++) - if (i < 32) - hid_set_field(report1->field[0], 11 + i, tdata[i]); - else - hid_set_field(report2->field[0], 4 + i - 32, tdata[i]); - - usbhid_submit_report(data->hdev, report1, USB_DIR_OUT); - usbhid_submit_report(data->hdev, report2, USB_DIR_OUT); - spin_unlock_irqrestore(&data->lock, flags); - return 0; -} - -/* Translate a single tile*/ -static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp, - int chip, int tile) -{ - int i, b, changed = 0; - u8 tdata[64]; - u8 *vdata = vbitmap + (tile * 4 + chip) * 64; - - if (bpp == 1) { - for (b = 7; b >= 0; b--) { - const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32; - for (i = 0; i < 64; i++) { - tdata[i] <<= 1; - tdata[i] |= (bdata[i/8] >> (i % 8)) & 0x01; - } - } - } else if (bpp == 8) { - for (b = 7; b >= 0; b--) { - const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8; - for (i = 0; i < 64; i++) { - tdata[i] <<= 1; - tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00; - } - } - } else { - /* Oops, we should never get here! 
*/ - WARN_ON(1); - return 0; - } - - for (i = 0; i < 64; i++) - if (tdata[i] != vdata[i]) { - changed = 1; - vdata[i] = tdata[i]; - } - return changed; -} - -/* Reconfigure LCD display */ -static int picolcd_fb_reset(struct picolcd_data *data, int clear) -{ - struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev); - int i, j; - unsigned long flags; - static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 }; - - if (!report || report->maxfield != 1) - return -ENODEV; - - spin_lock_irqsave(&data->lock, flags); - for (i = 0; i < 4; i++) { - for (j = 0; j < report->field[0]->maxusage; j++) - if (j == 0) - hid_set_field(report->field[0], j, i << 2); - else if (j < sizeof(mapcmd)) - hid_set_field(report->field[0], j, mapcmd[j]); - else - hid_set_field(report->field[0], j, 0); - usbhid_submit_report(data->hdev, report, USB_DIR_OUT); - } - - data->status |= PICOLCD_READY_FB; - spin_unlock_irqrestore(&data->lock, flags); - - if (data->fb_bitmap) { - if (clear) { - memset(data->fb_vbitmap, 0, PICOLCDFB_SIZE); - memset(data->fb_bitmap, 0, PICOLCDFB_SIZE*data->fb_bpp); - } - data->fb_force = 1; - } - - /* schedule first output of framebuffer */ - if (data->fb_info) - schedule_delayed_work(&data->fb_info->deferred_work, 0); - - return 0; -} - -/* Update fb_vbitmap from the screen_base and send changed tiles to device */ -static void picolcd_fb_update(struct picolcd_data *data) -{ - int chip, tile, n; - unsigned long flags; - - if (!data) - return; - - spin_lock_irqsave(&data->lock, flags); - if (!(data->status & PICOLCD_READY_FB)) { - spin_unlock_irqrestore(&data->lock, flags); - picolcd_fb_reset(data, 0); - } else { - spin_unlock_irqrestore(&data->lock, flags); - } - - /* - * Translate the framebuffer into the format needed by the PicoLCD. - * See display layout above. - * Do this one tile after the other and push those tiles that changed. - * - * Wait for our IO to complete as otherwise we might flood the queue! - */ - n = 0; - for (chip = 0; chip < 4; chip++) - for (tile = 0; tile < 8; tile++) - if (picolcd_fb_update_tile(data->fb_vbitmap, - data->fb_bitmap, data->fb_bpp, chip, tile) || - data->fb_force) { - n += 2; - if (!data->fb_info->par) - return; /* device lost! */ - if (n >= HID_OUTPUT_FIFO_SIZE / 2) { - usbhid_wait_io(data->hdev); - n = 0; - } - picolcd_fb_send_tile(data->hdev, chip, tile); - } - data->fb_force = false; - if (n) - usbhid_wait_io(data->hdev); -} - -/* Stub to call the system default and update the image on the picoLCD */ -static void picolcd_fb_fillrect(struct fb_info *info, - const struct fb_fillrect *rect) -{ - if (!info->par) - return; - sys_fillrect(info, rect); - - schedule_delayed_work(&info->deferred_work, 0); -} - -/* Stub to call the system default and update the image on the picoLCD */ -static void picolcd_fb_copyarea(struct fb_info *info, - const struct fb_copyarea *area) -{ - if (!info->par) - return; - sys_copyarea(info, area); - - schedule_delayed_work(&info->deferred_work, 0); -} - -/* Stub to call the system default and update the image on the picoLCD */ -static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image) -{ - if (!info->par) - return; - sys_imageblit(info, image); - - schedule_delayed_work(&info->deferred_work, 0); -} - -/* - * this is the slow path from userspace. they can seek and write to - * the fb. 
it's inefficient to do anything less than a full screen draw - */ -static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf, - size_t count, loff_t *ppos) -{ - ssize_t ret; - if (!info->par) - return -ENODEV; - ret = fb_sys_write(info, buf, count, ppos); - if (ret >= 0) - schedule_delayed_work(&info->deferred_work, 0); - return ret; -} - -static int picolcd_fb_blank(int blank, struct fb_info *info) -{ - if (!info->par) - return -ENODEV; - /* We let fb notification do this for us via lcd/backlight device */ - return 0; -} - -static void picolcd_fb_destroy(struct fb_info *info) -{ - struct picolcd_data *data = info->par; - u32 *ref_cnt = info->pseudo_palette; - int may_release; - - info->par = NULL; - if (data) - data->fb_info = NULL; - fb_deferred_io_cleanup(info); - - ref_cnt--; - mutex_lock(&info->lock); - (*ref_cnt)--; - may_release = !*ref_cnt; - mutex_unlock(&info->lock); - if (may_release) { - vfree((u8 *)info->fix.smem_start); - framebuffer_release(info); - } -} - -static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) -{ - __u32 bpp = var->bits_per_pixel; - __u32 activate = var->activate; - - /* only allow 1/8 bit depth (8-bit is grayscale) */ - *var = picolcdfb_var; - var->activate = activate; - if (bpp >= 8) { - var->bits_per_pixel = 8; - var->red.length = 8; - var->green.length = 8; - var->blue.length = 8; - } else { - var->bits_per_pixel = 1; - var->red.length = 1; - var->green.length = 1; - var->blue.length = 1; - } - return 0; -} - -static int picolcd_set_par(struct fb_info *info) -{ - struct picolcd_data *data = info->par; - u8 *tmp_fb, *o_fb; - if (!data) - return -ENODEV; - if (info->var.bits_per_pixel == data->fb_bpp) - return 0; - /* switch between 1/8 bit depths */ - if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8) - return -EINVAL; - - o_fb = data->fb_bitmap; - tmp_fb = kmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel, GFP_KERNEL); - if (!tmp_fb) - return -ENOMEM; - - /* translate FB content to new bits-per-pixel */ - if (info->var.bits_per_pixel == 1) { - int i, b; - for (i = 0; i < PICOLCDFB_SIZE; i++) { - u8 p = 0; - for (b = 0; b < 8; b++) { - p <<= 1; - p |= o_fb[i*8+b] ? 0x01 : 0x00; - } - tmp_fb[i] = p; - } - memcpy(o_fb, tmp_fb, PICOLCDFB_SIZE); - info->fix.visual = FB_VISUAL_MONO01; - info->fix.line_length = PICOLCDFB_WIDTH / 8; - } else { - int i; - memcpy(tmp_fb, o_fb, PICOLCDFB_SIZE); - for (i = 0; i < PICOLCDFB_SIZE * 8; i++) - o_fb[i] = tmp_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00; - info->fix.visual = FB_VISUAL_DIRECTCOLOR; - info->fix.line_length = PICOLCDFB_WIDTH; - } - - kfree(tmp_fb); - data->fb_bpp = info->var.bits_per_pixel; - return 0; -} - -/* Do refcounting on our FB and cleanup per worker if FB is - * closed after unplug of our device - * (fb_release holds info->lock and still touches info after - * we return so we can't release it immediately. - */ -struct picolcd_fb_cleanup_item { - struct fb_info *info; - struct picolcd_fb_cleanup_item *next; -}; -static struct picolcd_fb_cleanup_item *fb_pending; -static DEFINE_SPINLOCK(fb_pending_lock); - -static void picolcd_fb_do_cleanup(struct work_struct *data) -{ - struct picolcd_fb_cleanup_item *item; - unsigned long flags; - - do { - spin_lock_irqsave(&fb_pending_lock, flags); - item = fb_pending; - fb_pending = item ? 
item->next : NULL; - spin_unlock_irqrestore(&fb_pending_lock, flags); - - if (item) { - u8 *fb = (u8 *)item->info->fix.smem_start; - /* make sure we do not race against fb core when - * releasing */ - mutex_lock(&item->info->lock); - mutex_unlock(&item->info->lock); - framebuffer_release(item->info); - vfree(fb); - } - } while (item); -} - -static DECLARE_WORK(picolcd_fb_cleanup, picolcd_fb_do_cleanup); - -static int picolcd_fb_open(struct fb_info *info, int u) -{ - u32 *ref_cnt = info->pseudo_palette; - ref_cnt--; - - (*ref_cnt)++; - return 0; -} - -static int picolcd_fb_release(struct fb_info *info, int u) -{ - u32 *ref_cnt = info->pseudo_palette; - ref_cnt--; - - (*ref_cnt)++; - if (!*ref_cnt) { - unsigned long flags; - struct picolcd_fb_cleanup_item *item = (struct picolcd_fb_cleanup_item *)ref_cnt; - item--; - spin_lock_irqsave(&fb_pending_lock, flags); - item->next = fb_pending; - fb_pending = item; - spin_unlock_irqrestore(&fb_pending_lock, flags); - schedule_work(&picolcd_fb_cleanup); - } - return 0; -} - -/* Note this can't be const because of struct fb_info definition */ -static struct fb_ops picolcdfb_ops = { - .owner = THIS_MODULE, - .fb_destroy = picolcd_fb_destroy, - .fb_open = picolcd_fb_open, - .fb_release = picolcd_fb_release, - .fb_read = fb_sys_read, - .fb_write = picolcd_fb_write, - .fb_blank = picolcd_fb_blank, - .fb_fillrect = picolcd_fb_fillrect, - .fb_copyarea = picolcd_fb_copyarea, - .fb_imageblit = picolcd_fb_imageblit, - .fb_check_var = picolcd_fb_check_var, - .fb_set_par = picolcd_set_par, -}; - - -/* Callback from deferred IO workqueue */ -static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist) -{ - picolcd_fb_update(info->par); -} - -static const struct fb_deferred_io picolcd_fb_defio = { - .delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT, - .deferred_io = picolcd_fb_deferred_io, -}; - - -/* - * The "fb_update_rate" sysfs attribute - */ -static ssize_t picolcd_fb_update_rate_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct picolcd_data *data = dev_get_drvdata(dev); - unsigned i, fb_update_rate = data->fb_update_rate; - size_t ret = 0; - - for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++) - if (ret >= PAGE_SIZE) - break; - else if (i == fb_update_rate) - ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i); - else - ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i); - if (ret > 0) - buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n'; - return ret; -} - -static ssize_t picolcd_fb_update_rate_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct picolcd_data *data = dev_get_drvdata(dev); - int i; - unsigned u; - - if (count < 1 || count > 10) - return -EINVAL; - - i = sscanf(buf, "%u", &u); - if (i != 1) - return -EINVAL; - - if (u > PICOLCDFB_UPDATE_RATE_LIMIT) - return -ERANGE; - else if (u == 0) - u = PICOLCDFB_UPDATE_RATE_DEFAULT; - - data->fb_update_rate = u; - data->fb_defio.delay = HZ / data->fb_update_rate; - return count; -} - -static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show, - picolcd_fb_update_rate_store); - -/* initialize Framebuffer device */ -static int picolcd_init_framebuffer(struct picolcd_data *data) -{ - struct device *dev = &data->hdev->dev; - struct fb_info *info = NULL; - int i, error = -ENOMEM; - u8 *fb_vbitmap = NULL; - u8 *fb_bitmap = NULL; - u32 *palette; - - fb_bitmap = vmalloc(PICOLCDFB_SIZE*8); - if (fb_bitmap == NULL) { - dev_err(dev, "can't get a free page for framebuffer\n"); - goto err_nomem; - } - - fb_vbitmap = 
kmalloc(PICOLCDFB_SIZE, GFP_KERNEL); - if (fb_vbitmap == NULL) { - dev_err(dev, "can't alloc vbitmap image buffer\n"); - goto err_nomem; - } - - data->fb_update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT; - data->fb_defio = picolcd_fb_defio; - /* The extra memory is: - * - struct picolcd_fb_cleanup_item - * - u32 for ref_count - * - 256*u32 for pseudo_palette - */ - info = framebuffer_alloc(257 * sizeof(u32) + sizeof(struct picolcd_fb_cleanup_item), dev); - if (info == NULL) { - dev_err(dev, "failed to allocate a framebuffer\n"); - goto err_nomem; - } - - palette = info->par + sizeof(struct picolcd_fb_cleanup_item); - *palette = 1; - palette++; - for (i = 0; i < 256; i++) - palette[i] = i > 0 && i < 16 ? 0xff : 0; - info->pseudo_palette = palette; - info->fbdefio = &data->fb_defio; - info->screen_base = (char __force __iomem *)fb_bitmap; - info->fbops = &picolcdfb_ops; - info->var = picolcdfb_var; - info->fix = picolcdfb_fix; - info->fix.smem_len = PICOLCDFB_SIZE*8; - info->fix.smem_start = (unsigned long)fb_bitmap; - info->par = data; - info->flags = FBINFO_FLAG_DEFAULT; - - data->fb_vbitmap = fb_vbitmap; - data->fb_bitmap = fb_bitmap; - data->fb_bpp = picolcdfb_var.bits_per_pixel; - error = picolcd_fb_reset(data, 1); - if (error) { - dev_err(dev, "failed to configure display\n"); - goto err_cleanup; - } - error = device_create_file(dev, &dev_attr_fb_update_rate); - if (error) { - dev_err(dev, "failed to create sysfs attributes\n"); - goto err_cleanup; - } - fb_deferred_io_init(info); - data->fb_info = info; - error = register_framebuffer(info); - if (error) { - dev_err(dev, "failed to register framebuffer\n"); - goto err_sysfs; - } - /* schedule first output of framebuffer */ - data->fb_force = 1; - schedule_delayed_work(&info->deferred_work, 0); - return 0; - -err_sysfs: - fb_deferred_io_cleanup(info); - device_remove_file(dev, &dev_attr_fb_update_rate); -err_cleanup: - data->fb_vbitmap = NULL; - data->fb_bitmap = NULL; - data->fb_bpp = 0; - data->fb_info = NULL; - -err_nomem: - framebuffer_release(info); - vfree(fb_bitmap); - kfree(fb_vbitmap); - return error; -} - -static void picolcd_exit_framebuffer(struct picolcd_data *data) -{ - struct fb_info *info = data->fb_info; - u8 *fb_vbitmap = data->fb_vbitmap; - - if (!info) - return; - - info->par = NULL; - device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate); - unregister_framebuffer(info); - data->fb_vbitmap = NULL; - data->fb_bitmap = NULL; - data->fb_bpp = 0; - data->fb_info = NULL; - kfree(fb_vbitmap); -} - -#define picolcd_fbinfo(d) ((d)->fb_info) -#else -static inline int picolcd_fb_reset(struct picolcd_data *data, int clear) -{ - return 0; -} -static inline int picolcd_init_framebuffer(struct picolcd_data *data) -{ - return 0; -} -static inline void picolcd_exit_framebuffer(struct picolcd_data *data) -{ -} -#define picolcd_fbinfo(d) NULL -#endif /* CONFIG_HID_PICOLCD_FB */ - -#ifdef CONFIG_HID_PICOLCD_BACKLIGHT -/* - * backlight class device - */ -static int picolcd_get_brightness(struct backlight_device *bdev) -{ - struct picolcd_data *data = bl_get_data(bdev); - return data->lcd_brightness; -} - -static int picolcd_set_brightness(struct backlight_device *bdev) -{ - struct picolcd_data *data = bl_get_data(bdev); - struct hid_report *report = picolcd_out_report(REPORT_BRIGHTNESS, data->hdev); - unsigned long flags; - - if (!report || report->maxfield != 1 || report->field[0]->report_count != 1) - return -ENODEV; - - data->lcd_brightness = bdev->props.brightness & 0x0ff; - data->lcd_power = bdev->props.power; - 
spin_lock_irqsave(&data->lock, flags); - hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0); - usbhid_submit_report(data->hdev, report, USB_DIR_OUT); - spin_unlock_irqrestore(&data->lock, flags); - return 0; -} - -static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb) -{ - return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev)); -} - -static const struct backlight_ops picolcd_blops = { - .update_status = picolcd_set_brightness, - .get_brightness = picolcd_get_brightness, - .check_fb = picolcd_check_bl_fb, -}; - -static int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *report) -{ - struct device *dev = &data->hdev->dev; - struct backlight_device *bdev; - struct backlight_properties props; - if (!report) - return -ENODEV; - if (report->maxfield != 1 || report->field[0]->report_count != 1 || - report->field[0]->report_size != 8) { - dev_err(dev, "unsupported BRIGHTNESS report"); - return -EINVAL; - } - - memset(&props, 0, sizeof(props)); - props.type = BACKLIGHT_RAW; - props.max_brightness = 0xff; - bdev = backlight_device_register(dev_name(dev), dev, data, - &picolcd_blops, &props); - if (IS_ERR(bdev)) { - dev_err(dev, "failed to register backlight\n"); - return PTR_ERR(bdev); - } - bdev->props.brightness = 0xff; - data->lcd_brightness = 0xff; - data->backlight = bdev; - picolcd_set_brightness(bdev); - return 0; -} - -static void picolcd_exit_backlight(struct picolcd_data *data) -{ - struct backlight_device *bdev = data->backlight; - - data->backlight = NULL; - if (bdev) - backlight_device_unregister(bdev); -} - -static inline int picolcd_resume_backlight(struct picolcd_data *data) -{ - if (!data->backlight) - return 0; - return picolcd_set_brightness(data->backlight); -} - -#ifdef CONFIG_PM -static void picolcd_suspend_backlight(struct picolcd_data *data) -{ - int bl_power = data->lcd_power; - if (!data->backlight) - return; - - data->backlight->props.power = FB_BLANK_POWERDOWN; - picolcd_set_brightness(data->backlight); - data->lcd_power = data->backlight->props.power = bl_power; -} -#endif /* CONFIG_PM */ -#else -static inline int picolcd_init_backlight(struct picolcd_data *data, - struct hid_report *report) -{ - return 0; -} -static inline void picolcd_exit_backlight(struct picolcd_data *data) -{ -} -static inline int picolcd_resume_backlight(struct picolcd_data *data) -{ - return 0; -} -static inline void picolcd_suspend_backlight(struct picolcd_data *data) -{ -} -#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */ - -#ifdef CONFIG_HID_PICOLCD_LCD -/* - * lcd class device - */ -static int picolcd_get_contrast(struct lcd_device *ldev) -{ - struct picolcd_data *data = lcd_get_data(ldev); - return data->lcd_contrast; -} - -static int picolcd_set_contrast(struct lcd_device *ldev, int contrast) -{ - struct picolcd_data *data = lcd_get_data(ldev); - struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev); - unsigned long flags; - - if (!report || report->maxfield != 1 || report->field[0]->report_count != 1) - return -ENODEV; - - data->lcd_contrast = contrast & 0x0ff; - spin_lock_irqsave(&data->lock, flags); - hid_set_field(report->field[0], 0, data->lcd_contrast); - usbhid_submit_report(data->hdev, report, USB_DIR_OUT); - spin_unlock_irqrestore(&data->lock, flags); - return 0; -} - -static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb) -{ - return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev)); -} - -static struct lcd_ops 
picolcd_lcdops = { - .get_contrast = picolcd_get_contrast, - .set_contrast = picolcd_set_contrast, - .check_fb = picolcd_check_lcd_fb, -}; - -static int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report) -{ - struct device *dev = &data->hdev->dev; - struct lcd_device *ldev; - - if (!report) - return -ENODEV; - if (report->maxfield != 1 || report->field[0]->report_count != 1 || - report->field[0]->report_size != 8) { - dev_err(dev, "unsupported CONTRAST report"); - return -EINVAL; - } - - ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops); - if (IS_ERR(ldev)) { - dev_err(dev, "failed to register LCD\n"); - return PTR_ERR(ldev); - } - ldev->props.max_contrast = 0x0ff; - data->lcd_contrast = 0xe5; - data->lcd = ldev; - picolcd_set_contrast(ldev, 0xe5); - return 0; -} - -static void picolcd_exit_lcd(struct picolcd_data *data) -{ - struct lcd_device *ldev = data->lcd; - - data->lcd = NULL; - if (ldev) - lcd_device_unregister(ldev); -} - -static inline int picolcd_resume_lcd(struct picolcd_data *data) -{ - if (!data->lcd) - return 0; - return picolcd_set_contrast(data->lcd, data->lcd_contrast); -} -#else -static inline int picolcd_init_lcd(struct picolcd_data *data, - struct hid_report *report) -{ - return 0; -} -static inline void picolcd_exit_lcd(struct picolcd_data *data) -{ -} -static inline int picolcd_resume_lcd(struct picolcd_data *data) -{ - return 0; -} -#endif /* CONFIG_HID_PICOLCD_LCD */ - -#ifdef CONFIG_HID_PICOLCD_LEDS -/** - * LED class device - */ -static void picolcd_leds_set(struct picolcd_data *data) -{ - struct hid_report *report; - unsigned long flags; - - if (!data->led[0]) - return; - report = picolcd_out_report(REPORT_LED_STATE, data->hdev); - if (!report || report->maxfield != 1 || report->field[0]->report_count != 1) - return; - - spin_lock_irqsave(&data->lock, flags); - hid_set_field(report->field[0], 0, data->led_state); - usbhid_submit_report(data->hdev, report, USB_DIR_OUT); - spin_unlock_irqrestore(&data->lock, flags); -} - -static void picolcd_led_set_brightness(struct led_classdev *led_cdev, - enum led_brightness value) -{ - struct device *dev; - struct hid_device *hdev; - struct picolcd_data *data; - int i, state = 0; - - dev = led_cdev->dev->parent; - hdev = container_of(dev, struct hid_device, dev); - data = hid_get_drvdata(hdev); - for (i = 0; i < 8; i++) { - if (led_cdev != data->led[i]) - continue; - state = (data->led_state >> i) & 1; - if (value == LED_OFF && state) { - data->led_state &= ~(1 << i); - picolcd_leds_set(data); - } else if (value != LED_OFF && !state) { - data->led_state |= 1 << i; - picolcd_leds_set(data); - } - break; - } -} - -static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_cdev) -{ - struct device *dev; - struct hid_device *hdev; - struct picolcd_data *data; - int i, value = 0; - - dev = led_cdev->dev->parent; - hdev = container_of(dev, struct hid_device, dev); - data = hid_get_drvdata(hdev); - for (i = 0; i < 8; i++) - if (led_cdev == data->led[i]) { - value = (data->led_state >> i) & 1; - break; - } - return value ? 
LED_FULL : LED_OFF; -} - -static int picolcd_init_leds(struct picolcd_data *data, struct hid_report *report) -{ - struct device *dev = &data->hdev->dev; - struct led_classdev *led; - size_t name_sz = strlen(dev_name(dev)) + 8; - char *name; - int i, ret = 0; - - if (!report) - return -ENODEV; - if (report->maxfield != 1 || report->field[0]->report_count != 1 || - report->field[0]->report_size != 8) { - dev_err(dev, "unsupported LED_STATE report"); - return -EINVAL; - } - - for (i = 0; i < 8; i++) { - led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL); - if (!led) { - dev_err(dev, "can't allocate memory for LED %d\n", i); - ret = -ENOMEM; - goto err; - } - name = (void *)(&led[1]); - snprintf(name, name_sz, "%s::GPO%d", dev_name(dev), i); - led->name = name; - led->brightness = 0; - led->max_brightness = 1; - led->brightness_get = picolcd_led_get_brightness; - led->brightness_set = picolcd_led_set_brightness; - - data->led[i] = led; - ret = led_classdev_register(dev, data->led[i]); - if (ret) { - data->led[i] = NULL; - kfree(led); - dev_err(dev, "can't register LED %d\n", i); - goto err; - } - } - return 0; -err: - for (i = 0; i < 8; i++) - if (data->led[i]) { - led = data->led[i]; - data->led[i] = NULL; - led_classdev_unregister(led); - kfree(led); - } - return ret; -} - -static void picolcd_exit_leds(struct picolcd_data *data) -{ - struct led_classdev *led; - int i; - - for (i = 0; i < 8; i++) { - led = data->led[i]; - data->led[i] = NULL; - if (!led) - continue; - led_classdev_unregister(led); - kfree(led); - } -} - -#else -static inline int picolcd_init_leds(struct picolcd_data *data, - struct hid_report *report) -{ - return 0; -} -static inline void picolcd_exit_leds(struct picolcd_data *data) -{ -} -static inline int picolcd_leds_set(struct picolcd_data *data) -{ - return 0; -} -#endif /* CONFIG_HID_PICOLCD_LEDS */ - -/* - * input class device - */ -static int picolcd_raw_keypad(struct picolcd_data *data, - struct hid_report *report, u8 *raw_data, int size) -{ - /* - * Keypad event - * First and second data bytes list currently pressed keys, - * 0x00 means no key and at most 2 keys may be pressed at same time - */ - int i, j; - - /* determine newly pressed keys */ - for (i = 0; i < size; i++) { - unsigned int key_code; - if (raw_data[i] == 0) - continue; - for (j = 0; j < sizeof(data->pressed_keys); j++) - if (data->pressed_keys[j] == raw_data[i]) - goto key_already_down; - for (j = 0; j < sizeof(data->pressed_keys); j++) - if (data->pressed_keys[j] == 0) { - data->pressed_keys[j] = raw_data[i]; - break; - } - input_event(data->input_keys, EV_MSC, MSC_SCAN, raw_data[i]); - if (raw_data[i] < PICOLCD_KEYS) - key_code = data->keycode[raw_data[i]]; - else - key_code = KEY_UNKNOWN; - if (key_code != KEY_UNKNOWN) { - dbg_hid(PICOLCD_NAME " got key press for %u:%d", - raw_data[i], key_code); - input_report_key(data->input_keys, key_code, 1); - } - input_sync(data->input_keys); -key_already_down: - continue; - } - - /* determine newly released keys */ - for (j = 0; j < sizeof(data->pressed_keys); j++) { - unsigned int key_code; - if (data->pressed_keys[j] == 0) - continue; - for (i = 0; i < size; i++) - if (data->pressed_keys[j] == raw_data[i]) - goto key_still_down; - input_event(data->input_keys, EV_MSC, MSC_SCAN, data->pressed_keys[j]); - if (data->pressed_keys[j] < PICOLCD_KEYS) - key_code = data->keycode[data->pressed_keys[j]]; - else - key_code = KEY_UNKNOWN; - if (key_code != KEY_UNKNOWN) { - dbg_hid(PICOLCD_NAME " got key release for %u:%d", - data->pressed_keys[j], 
key_code); - input_report_key(data->input_keys, key_code, 0); - } - input_sync(data->input_keys); - data->pressed_keys[j] = 0; -key_still_down: - continue; - } - return 1; -} - -static int picolcd_raw_cir(struct picolcd_data *data, - struct hid_report *report, u8 *raw_data, int size) -{ - /* Need understanding of CIR data format to implement ... */ - return 1; -} - -static int picolcd_check_version(struct hid_device *hdev) -{ - struct picolcd_data *data = hid_get_drvdata(hdev); - struct picolcd_pending *verinfo; - int ret = 0; - - if (!data) - return -ENODEV; - - verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0); - if (!verinfo) { - hid_err(hdev, "no version response from PicoLCD\n"); - return -ENODEV; - } - - if (verinfo->raw_size == 2) { - data->version[0] = verinfo->raw_data[1]; - data->version[1] = verinfo->raw_data[0]; - if (data->status & PICOLCD_BOOTLOADER) { - hid_info(hdev, "PicoLCD, bootloader version %d.%d\n", - verinfo->raw_data[1], verinfo->raw_data[0]); - } else { - hid_info(hdev, "PicoLCD, firmware version %d.%d\n", - verinfo->raw_data[1], verinfo->raw_data[0]); - } - } else { - hid_err(hdev, "confused, got unexpected version response from PicoLCD\n"); - ret = -EINVAL; - } - kfree(verinfo); - return ret; -} - -/* - * Reset our device and wait for answer to VERSION request - */ -static int picolcd_reset(struct hid_device *hdev) -{ - struct picolcd_data *data = hid_get_drvdata(hdev); - struct hid_report *report = picolcd_out_report(REPORT_RESET, hdev); - unsigned long flags; - int error; - - if (!data || !report || report->maxfield != 1) - return -ENODEV; - - spin_lock_irqsave(&data->lock, flags); - if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER) - data->status |= PICOLCD_BOOTLOADER; - - /* perform the reset */ - hid_set_field(report->field[0], 0, 1); - usbhid_submit_report(hdev, report, USB_DIR_OUT); - spin_unlock_irqrestore(&data->lock, flags); - - error = picolcd_check_version(hdev); - if (error) - return error; - - picolcd_resume_lcd(data); - picolcd_resume_backlight(data); -#ifdef CONFIG_HID_PICOLCD_FB - if (data->fb_info) - schedule_delayed_work(&data->fb_info->deferred_work, 0); -#endif /* CONFIG_HID_PICOLCD_FB */ - - picolcd_leds_set(data); - return 0; -} - -/* - * The "operation_mode" sysfs attribute - */ -static ssize_t picolcd_operation_mode_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct picolcd_data *data = dev_get_drvdata(dev); - - if (data->status & PICOLCD_BOOTLOADER) - return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n"); - else - return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n"); -} - -static ssize_t picolcd_operation_mode_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct picolcd_data *data = dev_get_drvdata(dev); - struct hid_report *report = NULL; - size_t cnt = count; - int timeout = data->opmode_delay; - unsigned long flags; - - if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) { - if (data->status & PICOLCD_BOOTLOADER) - report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev); - buf += 3; - cnt -= 3; - } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) { - if (!(data->status & PICOLCD_BOOTLOADER)) - report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev); - buf += 10; - cnt -= 10; - } - if (!report) - return -EINVAL; - - while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r')) - cnt--; - if (cnt != 0) - return -EINVAL; - - spin_lock_irqsave(&data->lock, flags); - hid_set_field(report->field[0], 0, timeout & 0xff); - 
hid_set_field(report->field[0], 1, (timeout >> 8) & 0xff); - usbhid_submit_report(data->hdev, report, USB_DIR_OUT); - spin_unlock_irqrestore(&data->lock, flags); - return count; -} - -static DEVICE_ATTR(operation_mode, 0644, picolcd_operation_mode_show, - picolcd_operation_mode_store); - -/* - * The "operation_mode_delay" sysfs attribute - */ -static ssize_t picolcd_operation_mode_delay_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct picolcd_data *data = dev_get_drvdata(dev); - - return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay); -} - -static ssize_t picolcd_operation_mode_delay_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) -{ - struct picolcd_data *data = dev_get_drvdata(dev); - unsigned u; - if (sscanf(buf, "%u", &u) != 1) - return -EINVAL; - if (u > 30000) - return -EINVAL; - else - data->opmode_delay = u; - return count; -} - -static DEVICE_ATTR(operation_mode_delay, 0644, picolcd_operation_mode_delay_show, - picolcd_operation_mode_delay_store); - - -#ifdef CONFIG_DEBUG_FS -/* - * The "reset" file - */ -static int picolcd_debug_reset_show(struct seq_file *f, void *p) -{ - if (picolcd_fbinfo((struct picolcd_data *)f->private)) - seq_printf(f, "all fb\n"); - else - seq_printf(f, "all\n"); - return 0; -} - -static int picolcd_debug_reset_open(struct inode *inode, struct file *f) -{ - return single_open(f, picolcd_debug_reset_show, inode->i_private); -} - -static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct picolcd_data *data = ((struct seq_file *)f->private_data)->private; - char buf[32]; - size_t cnt = min(count, sizeof(buf)-1); - if (copy_from_user(buf, user_buf, cnt)) - return -EFAULT; - - while (cnt > 0 && (buf[cnt-1] == ' ' || buf[cnt-1] == '\n')) - cnt--; - buf[cnt] = '\0'; - if (strcmp(buf, "all") == 0) { - picolcd_reset(data->hdev); - picolcd_fb_reset(data, 1); - } else if (strcmp(buf, "fb") == 0) { - picolcd_fb_reset(data, 1); - } else { - return -EINVAL; - } - return count; -} - -static const struct file_operations picolcd_debug_reset_fops = { - .owner = THIS_MODULE, - .open = picolcd_debug_reset_open, - .read = seq_read, - .llseek = seq_lseek, - .write = picolcd_debug_reset_write, - .release = single_release, -}; - -/* - * The "eeprom" file - */ -static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u, - size_t s, loff_t *off) -{ - struct picolcd_data *data = f->private_data; - struct picolcd_pending *resp; - u8 raw_data[3]; - ssize_t ret = -EIO; - - if (s == 0) - return -EINVAL; - if (*off > 0x0ff) - return 0; - - /* prepare buffer with info about what we want to read (addr & len) */ - raw_data[0] = *off & 0xff; - raw_data[1] = (*off >> 8) & 0xff; - raw_data[2] = s < 20 ? 
s : 20; - if (*off + raw_data[2] > 0xff) - raw_data[2] = 0x100 - *off; - resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data, - sizeof(raw_data)); - if (!resp) - return -EIO; - - if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) { - /* successful read :) */ - ret = resp->raw_data[2]; - if (ret > s) - ret = s; - if (copy_to_user(u, resp->raw_data+3, ret)) - ret = -EFAULT; - else - *off += ret; - } /* anything else is some kind of IO error */ - - kfree(resp); - return ret; -} - -static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u, - size_t s, loff_t *off) -{ - struct picolcd_data *data = f->private_data; - struct picolcd_pending *resp; - ssize_t ret = -EIO; - u8 raw_data[23]; - - if (s == 0) - return -EINVAL; - if (*off > 0x0ff) - return -ENOSPC; - - memset(raw_data, 0, sizeof(raw_data)); - raw_data[0] = *off & 0xff; - raw_data[1] = (*off >> 8) & 0xff; - raw_data[2] = min((size_t)20, s); - if (*off + raw_data[2] > 0xff) - raw_data[2] = 0x100 - *off; - - if (copy_from_user(raw_data+3, u, min((u8)20, raw_data[2]))) - return -EFAULT; - resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data, - sizeof(raw_data)); - - if (!resp) - return -EIO; - - if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) { - /* check if written data matches */ - if (memcmp(raw_data, resp->raw_data, 3+raw_data[2]) == 0) { - *off += raw_data[2]; - ret = raw_data[2]; - } - } - kfree(resp); - return ret; -} - -/* - * Notes: - * - read/write happens in chunks of at most 20 bytes, it's up to userspace - * to loop in order to get more data. - * - on write errors on otherwise correct write request the bytes - * that should have been written are in undefined state. - */ -static const struct file_operations picolcd_debug_eeprom_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = picolcd_debug_eeprom_read, - .write = picolcd_debug_eeprom_write, - .llseek = generic_file_llseek, -}; - -/* - * The "flash" file - */ -/* record a flash address to buf (bounds check to be done by caller) */ -static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off) -{ - buf[0] = off & 0xff; - buf[1] = (off >> 8) & 0xff; - if (data->addr_sz == 3) - buf[2] = (off >> 16) & 0xff; - return data->addr_sz == 2 ? 2 : 3; -} - -/* read a given size of data (bounds check to be done by caller) */ -static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id, - char __user *u, size_t s, loff_t *off) -{ - struct picolcd_pending *resp; - u8 raw_data[4]; - ssize_t ret = 0; - int len_off, err = -EIO; - - while (s > 0) { - err = -EIO; - len_off = _picolcd_flash_setaddr(data, raw_data, *off); - raw_data[len_off] = s > 32 ? 32 : s; - resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1); - if (!resp || !resp->in_report) - goto skip; - if (resp->in_report->id == REPORT_MEMORY || - resp->in_report->id == REPORT_BL_READ_MEMORY) { - if (memcmp(raw_data, resp->raw_data, len_off+1) != 0) - goto skip; - if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) { - err = -EFAULT; - goto skip; - } - *off += raw_data[len_off]; - s -= raw_data[len_off]; - ret += raw_data[len_off]; - err = 0; - } -skip: - kfree(resp); - if (err) - return ret > 0 ? 
ret : err; - } - return ret; -} - -static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u, - size_t s, loff_t *off) -{ - struct picolcd_data *data = f->private_data; - - if (s == 0) - return -EINVAL; - if (*off > 0x05fff) - return 0; - if (*off + s > 0x05fff) - s = 0x06000 - *off; - - if (data->status & PICOLCD_BOOTLOADER) - return _picolcd_flash_read(data, REPORT_BL_READ_MEMORY, u, s, off); - else - return _picolcd_flash_read(data, REPORT_READ_MEMORY, u, s, off); -} - -/* erase block aligned to 64bytes boundary */ -static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id, - loff_t *off) -{ - struct picolcd_pending *resp; - u8 raw_data[3]; - int len_off; - ssize_t ret = -EIO; - - if (*off & 0x3f) - return -EINVAL; - - len_off = _picolcd_flash_setaddr(data, raw_data, *off); - resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off); - if (!resp || !resp->in_report) - goto skip; - if (resp->in_report->id == REPORT_MEMORY || - resp->in_report->id == REPORT_BL_ERASE_MEMORY) { - if (memcmp(raw_data, resp->raw_data, len_off) != 0) - goto skip; - ret = 0; - } -skip: - kfree(resp); - return ret; -} - -/* write a given size of data (bounds check to be done by caller) */ -static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id, - const char __user *u, size_t s, loff_t *off) -{ - struct picolcd_pending *resp; - u8 raw_data[36]; - ssize_t ret = 0; - int len_off, err = -EIO; - - while (s > 0) { - err = -EIO; - len_off = _picolcd_flash_setaddr(data, raw_data, *off); - raw_data[len_off] = s > 32 ? 32 : s; - if (copy_from_user(raw_data+len_off+1, u, raw_data[len_off])) { - err = -EFAULT; - break; - } - resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, - len_off+1+raw_data[len_off]); - if (!resp || !resp->in_report) - goto skip; - if (resp->in_report->id == REPORT_MEMORY || - resp->in_report->id == REPORT_BL_WRITE_MEMORY) { - if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0) - goto skip; - *off += raw_data[len_off]; - s -= raw_data[len_off]; - ret += raw_data[len_off]; - err = 0; - } -skip: - kfree(resp); - if (err) - break; - } - return ret > 0 ? ret : err; -} - -static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u, - size_t s, loff_t *off) -{ - struct picolcd_data *data = f->private_data; - ssize_t err, ret = 0; - int report_erase, report_write; - - if (s == 0) - return -EINVAL; - if (*off > 0x5fff) - return -ENOSPC; - if (s & 0x3f) - return -EINVAL; - if (*off & 0x3f) - return -EINVAL; - - if (data->status & PICOLCD_BOOTLOADER) { - report_erase = REPORT_BL_ERASE_MEMORY; - report_write = REPORT_BL_WRITE_MEMORY; - } else { - report_erase = REPORT_ERASE_MEMORY; - report_write = REPORT_WRITE_MEMORY; - } - mutex_lock(&data->mutex_flash); - while (s > 0) { - err = _picolcd_flash_erase64(data, report_erase, off); - if (err) - break; - err = _picolcd_flash_write(data, report_write, u, 64, off); - if (err < 0) - break; - ret += err; - *off += err; - s -= err; - if (err != 64) - break; - } - mutex_unlock(&data->mutex_flash); - return ret > 0 ? ret : err; -} - -/* - * Notes: - * - concurrent writing is prevented by mutex and all writes must be - * n*64 bytes and 64-byte aligned, each write being preceded by an - * ERASE which erases a 64byte block. 
- * If less than requested was written or an error is returned for an - * otherwise correct write request the next 64-byte block which should - * have been written is in undefined state (mostly: original, erased, - * (half-)written with write error) - * - reading can happen without special restriction - */ -static const struct file_operations picolcd_debug_flash_fops = { - .owner = THIS_MODULE, - .open = simple_open, - .read = picolcd_debug_flash_read, - .write = picolcd_debug_flash_write, - .llseek = generic_file_llseek, -}; - - -/* - * Helper code for HID report level dumping/debugging - */ -static const char *error_codes[] = { - "success", "parameter missing", "data_missing", "block readonly", - "block not erasable", "block too big", "section overflow", - "invalid command length", "invalid data length", -}; - -static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data, - const size_t data_len) -{ - int i, j; - for (i = j = 0; i < data_len && j + 3 < dst_sz; i++) { - dst[j++] = hex_asc[(data[i] >> 4) & 0x0f]; - dst[j++] = hex_asc[data[i] & 0x0f]; - dst[j++] = ' '; - } - if (j < dst_sz) { - dst[j--] = '\0'; - dst[j] = '\n'; - } else - dst[j] = '\0'; -} - -static void picolcd_debug_out_report(struct picolcd_data *data, - struct hid_device *hdev, struct hid_report *report) -{ - u8 raw_data[70]; - int raw_size = (report->size >> 3) + 1; - char *buff; -#define BUFF_SZ 256 - - /* Avoid unnecessary overhead if debugfs is disabled */ - if (list_empty(&hdev->debug_list)) - return; - - buff = kmalloc(BUFF_SZ, GFP_ATOMIC); - if (!buff) - return; - - snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ", - report->id, raw_size); - hid_debug_event(hdev, buff); - if (raw_size + 5 > sizeof(raw_data)) { - kfree(buff); - hid_debug_event(hdev, " TOO BIG\n"); - return; - } else { - raw_data[0] = report->id; - hid_output_report(report, raw_data); - dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size); - hid_debug_event(hdev, buff); - } - - switch (report->id) { - case REPORT_LED_STATE: - /* 1 data byte with GPO state */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_LED_STATE", report->id, raw_size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]); - hid_debug_event(hdev, buff); - break; - case REPORT_BRIGHTNESS: - /* 1 data byte with brightness */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_BRIGHTNESS", report->id, raw_size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]); - hid_debug_event(hdev, buff); - break; - case REPORT_CONTRAST: - /* 1 data byte with contrast */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_CONTRAST", report->id, raw_size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]); - hid_debug_event(hdev, buff); - break; - case REPORT_RESET: - /* 2 data bytes with reset duration in ms */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_RESET", report->id, raw_size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n", - raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]); - hid_debug_event(hdev, buff); - break; - case REPORT_LCD_CMD: - /* 63 data bytes with LCD commands */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_LCD_CMD", report->id, raw_size-1); - hid_debug_event(hdev, buff); - /* TODO: format decoding */ - break; - case REPORT_LCD_DATA: - /* 63 data bytes with LCD data */ 
- snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_LCD_CMD", report->id, raw_size-1); - /* TODO: format decoding */ - hid_debug_event(hdev, buff); - break; - case REPORT_LCD_CMD_DATA: - /* 63 data bytes with LCD commands and data */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_LCD_CMD", report->id, raw_size-1); - /* TODO: format decoding */ - hid_debug_event(hdev, buff); - break; - case REPORT_EE_READ: - /* 3 data bytes with read area description */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_EE_READ", report->id, raw_size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); - hid_debug_event(hdev, buff); - break; - case REPORT_EE_WRITE: - /* 3+1..20 data bytes with write area description */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_EE_WRITE", report->id, raw_size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); - hid_debug_event(hdev, buff); - if (raw_data[3] == 0) { - snprintf(buff, BUFF_SZ, "\tNo data\n"); - } else if (raw_data[3] + 4 <= raw_size) { - snprintf(buff, BUFF_SZ, "\tData: "); - hid_debug_event(hdev, buff); - dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); - } else { - snprintf(buff, BUFF_SZ, "\tData overflowed\n"); - } - hid_debug_event(hdev, buff); - break; - case REPORT_ERASE_MEMORY: - case REPORT_BL_ERASE_MEMORY: - /* 3 data bytes with pointer inside erase block */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_ERASE_MEMORY", report->id, raw_size-1); - hid_debug_event(hdev, buff); - switch (data->addr_sz) { - case 2: - snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n", - raw_data[2], raw_data[1]); - break; - case 3: - snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n", - raw_data[3], raw_data[2], raw_data[1]); - break; - default: - snprintf(buff, BUFF_SZ, "\tNot supported\n"); - } - hid_debug_event(hdev, buff); - break; - case REPORT_READ_MEMORY: - case REPORT_BL_READ_MEMORY: - /* 4 data bytes with read area description */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_READ_MEMORY", report->id, raw_size-1); - hid_debug_event(hdev, buff); - switch (data->addr_sz) { - case 2: - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); - break; - case 3: - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n", - raw_data[3], raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); - break; - default: - snprintf(buff, BUFF_SZ, "\tNot supported\n"); - } - hid_debug_event(hdev, buff); - break; - case REPORT_WRITE_MEMORY: - case REPORT_BL_WRITE_MEMORY: - /* 4+1..32 data bytes with write adrea description */ - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_WRITE_MEMORY", report->id, raw_size-1); - hid_debug_event(hdev, buff); - switch (data->addr_sz) { - case 2: - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); - 
hid_debug_event(hdev, buff); - if (raw_data[3] == 0) { - snprintf(buff, BUFF_SZ, "\tNo data\n"); - } else if (raw_data[3] + 4 <= raw_size) { - snprintf(buff, BUFF_SZ, "\tData: "); - hid_debug_event(hdev, buff); - dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); - } else { - snprintf(buff, BUFF_SZ, "\tData overflowed\n"); - } - break; - case 3: - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n", - raw_data[3], raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); - hid_debug_event(hdev, buff); - if (raw_data[4] == 0) { - snprintf(buff, BUFF_SZ, "\tNo data\n"); - } else if (raw_data[4] + 5 <= raw_size) { - snprintf(buff, BUFF_SZ, "\tData: "); - hid_debug_event(hdev, buff); - dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]); - } else { - snprintf(buff, BUFF_SZ, "\tData overflowed\n"); - } - break; - default: - snprintf(buff, BUFF_SZ, "\tNot supported\n"); - } - hid_debug_event(hdev, buff); - break; - case REPORT_SPLASH_RESTART: - /* TODO */ - break; - case REPORT_EXIT_KEYBOARD: - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_EXIT_KEYBOARD", report->id, raw_size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n", - raw_data[1] | (raw_data[2] << 8), - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - break; - case REPORT_VERSION: - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_VERSION", report->id, raw_size-1); - hid_debug_event(hdev, buff); - break; - case REPORT_DEVID: - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_DEVID", report->id, raw_size-1); - hid_debug_event(hdev, buff); - break; - case REPORT_SPLASH_SIZE: - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_SPLASH_SIZE", report->id, raw_size-1); - hid_debug_event(hdev, buff); - break; - case REPORT_HOOK_VERSION: - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_HOOK_VERSION", report->id, raw_size-1); - hid_debug_event(hdev, buff); - break; - case REPORT_EXIT_FLASHER: - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "REPORT_VERSION", report->id, raw_size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n", - raw_data[1] | (raw_data[2] << 8), - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - break; - default: - snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", - "", report->id, raw_size-1); - hid_debug_event(hdev, buff); - break; - } - wake_up_interruptible(&hdev->debug_wait); - kfree(buff); -} - -static void picolcd_debug_raw_event(struct picolcd_data *data, - struct hid_device *hdev, struct hid_report *report, - u8 *raw_data, int size) -{ - char *buff; - -#define BUFF_SZ 256 - /* Avoid unnecessary overhead if debugfs is disabled */ - if (!hdev->debug_events) - return; - - buff = kmalloc(BUFF_SZ, GFP_ATOMIC); - if (!buff) - return; - - switch (report->id) { - case REPORT_ERROR_CODE: - /* 2 data bytes with affected report and error code */ - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_ERROR_CODE", report->id, size-1); - hid_debug_event(hdev, buff); - if (raw_data[2] < ARRAY_SIZE(error_codes)) - snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n", - raw_data[2], error_codes[raw_data[2]], raw_data[1]); - else - snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n", - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - break; - case 
REPORT_KEY_STATE: - /* 2 data bytes with key state */ - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_KEY_STATE", report->id, size-1); - hid_debug_event(hdev, buff); - if (raw_data[1] == 0) - snprintf(buff, BUFF_SZ, "\tNo key pressed\n"); - else if (raw_data[2] == 0) - snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n", - raw_data[1], raw_data[1]); - else - snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n", - raw_data[1], raw_data[1], raw_data[2], raw_data[2]); - hid_debug_event(hdev, buff); - break; - case REPORT_IR_DATA: - /* Up to 20 byes of IR scancode data */ - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_IR_DATA", report->id, size-1); - hid_debug_event(hdev, buff); - if (raw_data[1] == 0) { - snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n"); - hid_debug_event(hdev, buff); - } else if (raw_data[1] + 1 <= size) { - snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ", - raw_data[1]-1); - hid_debug_event(hdev, buff); - dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]-1); - hid_debug_event(hdev, buff); - } else { - snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n", - raw_data[1]-1); - hid_debug_event(hdev, buff); - } - break; - case REPORT_EE_DATA: - /* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */ - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_EE_DATA", report->id, size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); - hid_debug_event(hdev, buff); - if (raw_data[3] == 0) { - snprintf(buff, BUFF_SZ, "\tNo data\n"); - hid_debug_event(hdev, buff); - } else if (raw_data[3] + 4 <= size) { - snprintf(buff, BUFF_SZ, "\tData: "); - hid_debug_event(hdev, buff); - dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); - hid_debug_event(hdev, buff); - } else { - snprintf(buff, BUFF_SZ, "\tData overflowed\n"); - hid_debug_event(hdev, buff); - } - break; - case REPORT_MEMORY: - /* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRTIE_MEMORY */ - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_MEMORY", report->id, size-1); - hid_debug_event(hdev, buff); - switch (data->addr_sz) { - case 2: - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); - hid_debug_event(hdev, buff); - if (raw_data[3] == 0) { - snprintf(buff, BUFF_SZ, "\tNo data\n"); - } else if (raw_data[3] + 4 <= size) { - snprintf(buff, BUFF_SZ, "\tData: "); - hid_debug_event(hdev, buff); - dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); - } else { - snprintf(buff, BUFF_SZ, "\tData overflowed\n"); - } - break; - case 3: - snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n", - raw_data[3], raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); - hid_debug_event(hdev, buff); - if (raw_data[4] == 0) { - snprintf(buff, BUFF_SZ, "\tNo data\n"); - } else if (raw_data[4] + 5 <= size) { - snprintf(buff, BUFF_SZ, "\tData: "); - hid_debug_event(hdev, buff); - dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]); - } else { - snprintf(buff, BUFF_SZ, "\tData overflowed\n"); - } - break; - default: - snprintf(buff, BUFF_SZ, "\tNot supported\n"); - } - hid_debug_event(hdev, buff); - break; - case REPORT_VERSION: - 
snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_VERSION", report->id, size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n", - raw_data[2], raw_data[1]); - hid_debug_event(hdev, buff); - break; - case REPORT_BL_ERASE_MEMORY: - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_BL_ERASE_MEMORY", report->id, size-1); - hid_debug_event(hdev, buff); - /* TODO */ - break; - case REPORT_BL_READ_MEMORY: - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_BL_READ_MEMORY", report->id, size-1); - hid_debug_event(hdev, buff); - /* TODO */ - break; - case REPORT_BL_WRITE_MEMORY: - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_BL_WRITE_MEMORY", report->id, size-1); - hid_debug_event(hdev, buff); - /* TODO */ - break; - case REPORT_DEVID: - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_DEVID", report->id, size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n", - raw_data[1], raw_data[2], raw_data[3], raw_data[4]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n", - raw_data[5]); - hid_debug_event(hdev, buff); - break; - case REPORT_SPLASH_SIZE: - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_SPLASH_SIZE", report->id, size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n", - (raw_data[2] << 8) | raw_data[1]); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n", - (raw_data[4] << 8) | raw_data[3]); - hid_debug_event(hdev, buff); - break; - case REPORT_HOOK_VERSION: - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "REPORT_HOOK_VERSION", report->id, size-1); - hid_debug_event(hdev, buff); - snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n", - raw_data[1], raw_data[2]); - hid_debug_event(hdev, buff); - break; - default: - snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", - "", report->id, size-1); - hid_debug_event(hdev, buff); - break; - } - wake_up_interruptible(&hdev->debug_wait); - kfree(buff); -} - -static void picolcd_init_devfs(struct picolcd_data *data, - struct hid_report *eeprom_r, struct hid_report *eeprom_w, - struct hid_report *flash_r, struct hid_report *flash_w, - struct hid_report *reset) -{ - struct hid_device *hdev = data->hdev; - - mutex_init(&data->mutex_flash); - - /* reset */ - if (reset) - data->debug_reset = debugfs_create_file("reset", 0600, - hdev->debug_dir, data, &picolcd_debug_reset_fops); - - /* eeprom */ - if (eeprom_r || eeprom_w) - data->debug_eeprom = debugfs_create_file("eeprom", - (eeprom_w ? S_IWUSR : 0) | (eeprom_r ? S_IRUSR : 0), - hdev->debug_dir, data, &picolcd_debug_eeprom_fops); - - /* flash */ - if (flash_r && flash_r->maxfield == 1 && flash_r->field[0]->report_size == 8) - data->addr_sz = flash_r->field[0]->report_count - 1; - else - data->addr_sz = -1; - if (data->addr_sz == 2 || data->addr_sz == 3) { - data->debug_flash = debugfs_create_file("flash", - (flash_w ? S_IWUSR : 0) | (flash_r ? 
S_IRUSR : 0), - hdev->debug_dir, data, &picolcd_debug_flash_fops); - } else if (flash_r || flash_w) - hid_warn(hdev, "Unexpected FLASH access reports, please submit rdesc for review\n"); -} - -static void picolcd_exit_devfs(struct picolcd_data *data) -{ - struct dentry *dent; - - dent = data->debug_reset; - data->debug_reset = NULL; - if (dent) - debugfs_remove(dent); - dent = data->debug_eeprom; - data->debug_eeprom = NULL; - if (dent) - debugfs_remove(dent); - dent = data->debug_flash; - data->debug_flash = NULL; - if (dent) - debugfs_remove(dent); - mutex_destroy(&data->mutex_flash); -} -#else -static inline void picolcd_debug_raw_event(struct picolcd_data *data, - struct hid_device *hdev, struct hid_report *report, - u8 *raw_data, int size) -{ -} -static inline void picolcd_init_devfs(struct picolcd_data *data, - struct hid_report *eeprom_r, struct hid_report *eeprom_w, - struct hid_report *flash_r, struct hid_report *flash_w, - struct hid_report *reset) -{ -} -static inline void picolcd_exit_devfs(struct picolcd_data *data) -{ -} -#endif /* CONFIG_DEBUG_FS */ - -/* - * Handle raw report as sent by device - */ -static int picolcd_raw_event(struct hid_device *hdev, - struct hid_report *report, u8 *raw_data, int size) -{ - struct picolcd_data *data = hid_get_drvdata(hdev); - unsigned long flags; - int ret = 0; - - if (!data) - return 1; - - if (report->id == REPORT_KEY_STATE) { - if (data->input_keys) - ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); - } else if (report->id == REPORT_IR_DATA) { - if (data->input_cir) - ret = picolcd_raw_cir(data, report, raw_data+1, size-1); - } else { - spin_lock_irqsave(&data->lock, flags); - /* - * We let the caller of picolcd_send_and_wait() check if the - * report we got is one of the expected ones or not. 
- */ - if (data->pending) { - memcpy(data->pending->raw_data, raw_data+1, size-1); - data->pending->raw_size = size-1; - data->pending->in_report = report; - complete(&data->pending->ready); - } - spin_unlock_irqrestore(&data->lock, flags); - } - - picolcd_debug_raw_event(data, hdev, report, raw_data, size); - return 1; -} - -#ifdef CONFIG_PM -static int picolcd_suspend(struct hid_device *hdev, pm_message_t message) -{ - if (PMSG_IS_AUTO(message)) - return 0; - - picolcd_suspend_backlight(hid_get_drvdata(hdev)); - dbg_hid(PICOLCD_NAME " device ready for suspend\n"); - return 0; -} - -static int picolcd_resume(struct hid_device *hdev) -{ - int ret; - ret = picolcd_resume_backlight(hid_get_drvdata(hdev)); - if (ret) - dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret); - return 0; -} - -static int picolcd_reset_resume(struct hid_device *hdev) -{ - int ret; - ret = picolcd_reset(hdev); - if (ret) - dbg_hid(PICOLCD_NAME " resetting our device failed: %d\n", ret); - ret = picolcd_fb_reset(hid_get_drvdata(hdev), 0); - if (ret) - dbg_hid(PICOLCD_NAME " restoring framebuffer content failed: %d\n", ret); - ret = picolcd_resume_lcd(hid_get_drvdata(hdev)); - if (ret) - dbg_hid(PICOLCD_NAME " restoring lcd failed: %d\n", ret); - ret = picolcd_resume_backlight(hid_get_drvdata(hdev)); - if (ret) - dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret); - picolcd_leds_set(hid_get_drvdata(hdev)); - return 0; -} -#endif - -/* initialize keypad input device */ -static int picolcd_init_keys(struct picolcd_data *data, - struct hid_report *report) -{ - struct hid_device *hdev = data->hdev; - struct input_dev *idev; - int error, i; - - if (!report) - return -ENODEV; - if (report->maxfield != 1 || report->field[0]->report_count != 2 || - report->field[0]->report_size != 8) { - hid_err(hdev, "unsupported KEY_STATE report\n"); - return -EINVAL; - } - - idev = input_allocate_device(); - if (idev == NULL) { - hid_err(hdev, "failed to allocate input device\n"); - return -ENOMEM; - } - input_set_drvdata(idev, hdev); - memcpy(data->keycode, def_keymap, sizeof(def_keymap)); - idev->name = hdev->name; - idev->phys = hdev->phys; - idev->uniq = hdev->uniq; - idev->id.bustype = hdev->bus; - idev->id.vendor = hdev->vendor; - idev->id.product = hdev->product; - idev->id.version = hdev->version; - idev->dev.parent = hdev->dev.parent; - idev->keycode = &data->keycode; - idev->keycodemax = PICOLCD_KEYS; - idev->keycodesize = sizeof(data->keycode[0]); - input_set_capability(idev, EV_MSC, MSC_SCAN); - set_bit(EV_REP, idev->evbit); - for (i = 0; i < PICOLCD_KEYS; i++) - input_set_capability(idev, EV_KEY, data->keycode[i]); - error = input_register_device(idev); - if (error) { - hid_err(hdev, "error registering the input device\n"); - input_free_device(idev); - return error; - } - data->input_keys = idev; - return 0; -} - -static void picolcd_exit_keys(struct picolcd_data *data) -{ - struct input_dev *idev = data->input_keys; - - data->input_keys = NULL; - if (idev) - input_unregister_device(idev); -} - -/* initialize CIR input device */ -static inline int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report) -{ - /* support not implemented yet */ - return 0; -} - -static inline void picolcd_exit_cir(struct picolcd_data *data) -{ -} - -static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data) -{ - int error; - - error = picolcd_check_version(hdev); - if (error) - return error; - - if (data->version[0] != 0 && data->version[1] != 3) - hid_info(hdev, "Device with untested 
firmware revision, please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n", - dev_name(&hdev->dev)); - - /* Setup keypad input device */ - error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev)); - if (error) - goto err; - - /* Setup CIR input device */ - error = picolcd_init_cir(data, picolcd_in_report(REPORT_IR_DATA, hdev)); - if (error) - goto err; - - /* Set up the framebuffer device */ - error = picolcd_init_framebuffer(data); - if (error) - goto err; - - /* Setup lcd class device */ - error = picolcd_init_lcd(data, picolcd_out_report(REPORT_CONTRAST, hdev)); - if (error) - goto err; - - /* Setup backlight class device */ - error = picolcd_init_backlight(data, picolcd_out_report(REPORT_BRIGHTNESS, hdev)); - if (error) - goto err; - - /* Setup the LED class devices */ - error = picolcd_init_leds(data, picolcd_out_report(REPORT_LED_STATE, hdev)); - if (error) - goto err; - - picolcd_init_devfs(data, picolcd_out_report(REPORT_EE_READ, hdev), - picolcd_out_report(REPORT_EE_WRITE, hdev), - picolcd_out_report(REPORT_READ_MEMORY, hdev), - picolcd_out_report(REPORT_WRITE_MEMORY, hdev), - picolcd_out_report(REPORT_RESET, hdev)); - return 0; -err: - picolcd_exit_leds(data); - picolcd_exit_backlight(data); - picolcd_exit_lcd(data); - picolcd_exit_framebuffer(data); - picolcd_exit_cir(data); - picolcd_exit_keys(data); - return error; -} - -static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data *data) -{ - int error; - - error = picolcd_check_version(hdev); - if (error) - return error; - - if (data->version[0] != 1 && data->version[1] != 0) - hid_info(hdev, "Device with untested bootloader revision, please submit /sys/kernel/debug/hid/%s/rdesc for this device.\n", - dev_name(&hdev->dev)); - - picolcd_init_devfs(data, NULL, NULL, - picolcd_out_report(REPORT_BL_READ_MEMORY, hdev), - picolcd_out_report(REPORT_BL_WRITE_MEMORY, hdev), NULL); - return 0; -} - -static int picolcd_probe(struct hid_device *hdev, - const struct hid_device_id *id) -{ - struct picolcd_data *data; - int error = -ENOMEM; - - dbg_hid(PICOLCD_NAME " hardware probe...\n"); - - /* - * Let's allocate the picolcd data structure, set some reasonable - * defaults, and associate it with the device - */ - data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL); - if (data == NULL) { - hid_err(hdev, "can't allocate space for Minibox PicoLCD device data\n"); - error = -ENOMEM; - goto err_no_cleanup; - } - - spin_lock_init(&data->lock); - mutex_init(&data->mutex); - data->hdev = hdev; - data->opmode_delay = 5000; - if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER) - data->status |= PICOLCD_BOOTLOADER; - hid_set_drvdata(hdev, data); - - /* Parse the device reports and start it up */ - error = hid_parse(hdev); - if (error) { - hid_err(hdev, "device report parse failed\n"); - goto err_cleanup_data; - } - - error = hid_hw_start(hdev, 0); - if (error) { - hid_err(hdev, "hardware start failed\n"); - goto err_cleanup_data; - } - - error = hid_hw_open(hdev); - if (error) { - hid_err(hdev, "failed to open input interrupt pipe for key and IR events\n"); - goto err_cleanup_hid_hw; - } - - error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay); - if (error) { - hid_err(hdev, "failed to create sysfs attributes\n"); - goto err_cleanup_hid_ll; - } - - error = device_create_file(&hdev->dev, &dev_attr_operation_mode); - if (error) { - hid_err(hdev, "failed to create sysfs attributes\n"); - goto err_cleanup_sysfs1; - } - - if (data->status & PICOLCD_BOOTLOADER) - error = 
picolcd_probe_bootloader(hdev, data); - else - error = picolcd_probe_lcd(hdev, data); - if (error) - goto err_cleanup_sysfs2; - - dbg_hid(PICOLCD_NAME " activated and initialized\n"); - return 0; - -err_cleanup_sysfs2: - device_remove_file(&hdev->dev, &dev_attr_operation_mode); -err_cleanup_sysfs1: - device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay); -err_cleanup_hid_ll: - hid_hw_close(hdev); -err_cleanup_hid_hw: - hid_hw_stop(hdev); -err_cleanup_data: - kfree(data); -err_no_cleanup: - hid_set_drvdata(hdev, NULL); - - return error; -} - -static void picolcd_remove(struct hid_device *hdev) -{ - struct picolcd_data *data = hid_get_drvdata(hdev); - unsigned long flags; - - dbg_hid(PICOLCD_NAME " hardware remove...\n"); - spin_lock_irqsave(&data->lock, flags); - data->status |= PICOLCD_FAILED; - spin_unlock_irqrestore(&data->lock, flags); -#ifdef CONFIG_HID_PICOLCD_FB - /* short-circuit FB as early as possible in order to - * avoid long delays if we host console. - */ - if (data->fb_info) - data->fb_info->par = NULL; -#endif - - picolcd_exit_devfs(data); - device_remove_file(&hdev->dev, &dev_attr_operation_mode); - device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay); - hid_hw_close(hdev); - hid_hw_stop(hdev); - hid_set_drvdata(hdev, NULL); - - /* Shortcut potential pending reply that will never arrive */ - spin_lock_irqsave(&data->lock, flags); - if (data->pending) - complete(&data->pending->ready); - spin_unlock_irqrestore(&data->lock, flags); - - /* Cleanup LED */ - picolcd_exit_leds(data); - /* Clean up the framebuffer */ - picolcd_exit_backlight(data); - picolcd_exit_lcd(data); - picolcd_exit_framebuffer(data); - /* Cleanup input */ - picolcd_exit_cir(data); - picolcd_exit_keys(data); - - mutex_destroy(&data->mutex); - /* Finally, clean up the picolcd data itself */ - kfree(data); -} - -static const struct hid_device_id picolcd_devices[] = { - { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, - { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, - { } -}; -MODULE_DEVICE_TABLE(hid, picolcd_devices); - -static struct hid_driver picolcd_driver = { - .name = "hid-picolcd", - .id_table = picolcd_devices, - .probe = picolcd_probe, - .remove = picolcd_remove, - .raw_event = picolcd_raw_event, -#ifdef CONFIG_PM - .suspend = picolcd_suspend, - .resume = picolcd_resume, - .reset_resume = picolcd_reset_resume, -#endif -}; - -static int __init picolcd_init(void) -{ - return hid_register_driver(&picolcd_driver); -} - -static void __exit picolcd_exit(void) -{ - hid_unregister_driver(&picolcd_driver); -#ifdef CONFIG_HID_PICOLCD_FB - flush_work_sync(&picolcd_fb_cleanup); - WARN_ON(fb_pending); -#endif -} - -module_init(picolcd_init); -module_exit(picolcd_exit); -MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver"); -MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/hid/hid-picolcd.h b/trunk/drivers/hid/hid-picolcd.h new file mode 100644 index 000000000000..020cef69f6a1 --- /dev/null +++ b/trunk/drivers/hid/hid-picolcd.h @@ -0,0 +1,309 @@ +/*************************************************************************** + * Copyright (C) 2010-2012 by Bruno Prémont * + * * + * Based on Logitech G13 driver (v0.4) * + * Copyright (C) 2009 by Rick L. Vinyard, Jr. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, version 2 of the License. 
* + * * + * This driver is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this software. If not see . * + ***************************************************************************/ + +#define PICOLCD_NAME "PicoLCD (graphic)" + +/* Report numbers */ +#define REPORT_ERROR_CODE 0x10 /* LCD: IN[16] */ +#define ERR_SUCCESS 0x00 +#define ERR_PARAMETER_MISSING 0x01 +#define ERR_DATA_MISSING 0x02 +#define ERR_BLOCK_READ_ONLY 0x03 +#define ERR_BLOCK_NOT_ERASABLE 0x04 +#define ERR_BLOCK_TOO_BIG 0x05 +#define ERR_SECTION_OVERFLOW 0x06 +#define ERR_INVALID_CMD_LEN 0x07 +#define ERR_INVALID_DATA_LEN 0x08 +#define REPORT_KEY_STATE 0x11 /* LCD: IN[2] */ +#define REPORT_IR_DATA 0x21 /* LCD: IN[63] */ +#define REPORT_EE_DATA 0x32 /* LCD: IN[63] */ +#define REPORT_MEMORY 0x41 /* LCD: IN[63] */ +#define REPORT_LED_STATE 0x81 /* LCD: OUT[1] */ +#define REPORT_BRIGHTNESS 0x91 /* LCD: OUT[1] */ +#define REPORT_CONTRAST 0x92 /* LCD: OUT[1] */ +#define REPORT_RESET 0x93 /* LCD: OUT[2] */ +#define REPORT_LCD_CMD 0x94 /* LCD: OUT[63] */ +#define REPORT_LCD_DATA 0x95 /* LCD: OUT[63] */ +#define REPORT_LCD_CMD_DATA 0x96 /* LCD: OUT[63] */ +#define REPORT_EE_READ 0xa3 /* LCD: OUT[63] */ +#define REPORT_EE_WRITE 0xa4 /* LCD: OUT[63] */ +#define REPORT_ERASE_MEMORY 0xb2 /* LCD: OUT[2] */ +#define REPORT_READ_MEMORY 0xb3 /* LCD: OUT[3] */ +#define REPORT_WRITE_MEMORY 0xb4 /* LCD: OUT[63] */ +#define REPORT_SPLASH_RESTART 0xc1 /* LCD: OUT[1] */ +#define REPORT_EXIT_KEYBOARD 0xef /* LCD: OUT[2] */ +#define REPORT_VERSION 0xf1 /* LCD: IN[2],OUT[1] Bootloader: IN[2],OUT[1] */ +#define REPORT_BL_ERASE_MEMORY 0xf2 /* Bootloader: IN[36],OUT[4] */ +#define REPORT_BL_READ_MEMORY 0xf3 /* Bootloader: IN[36],OUT[4] */ +#define REPORT_BL_WRITE_MEMORY 0xf4 /* Bootloader: IN[36],OUT[36] */ +#define REPORT_DEVID 0xf5 /* LCD: IN[5], OUT[1] Bootloader: IN[5],OUT[1] */ +#define REPORT_SPLASH_SIZE 0xf6 /* LCD: IN[4], OUT[1] */ +#define REPORT_HOOK_VERSION 0xf7 /* LCD: IN[2], OUT[1] */ +#define REPORT_EXIT_FLASHER 0xff /* Bootloader: OUT[2] */ + +/* Description of in-progress IO operation, used for operations + * that trigger response from device */ +struct picolcd_pending { + struct hid_report *out_report; + struct hid_report *in_report; + struct completion ready; + int raw_size; + u8 raw_data[64]; +}; + + +#define PICOLCD_KEYS 17 + +/* Per device data structure */ +struct picolcd_data { + struct hid_device *hdev; +#ifdef CONFIG_DEBUG_FS + struct dentry *debug_reset; + struct dentry *debug_eeprom; + struct dentry *debug_flash; + struct mutex mutex_flash; + int addr_sz; +#endif + u8 version[2]; + unsigned short opmode_delay; + /* input stuff */ + u8 pressed_keys[2]; + struct input_dev *input_keys; +#ifdef CONFIG_HID_PICOLCD_CIR + struct rc_dev *rc_dev; +#endif + unsigned short keycode[PICOLCD_KEYS]; + +#ifdef CONFIG_HID_PICOLCD_FB + /* Framebuffer stuff */ + struct fb_info *fb_info; +#endif /* CONFIG_HID_PICOLCD_FB */ +#ifdef CONFIG_HID_PICOLCD_LCD + struct lcd_device *lcd; + u8 lcd_contrast; +#endif /* CONFIG_HID_PICOLCD_LCD */ +#ifdef CONFIG_HID_PICOLCD_BACKLIGHT + struct backlight_device *backlight; + u8 lcd_brightness; + u8 lcd_power; +#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */ +#ifdef CONFIG_HID_PICOLCD_LEDS + /* LED stuff */ + u8 led_state; + struct 
led_classdev *led[8]; +#endif /* CONFIG_HID_PICOLCD_LEDS */ + + /* Housekeeping stuff */ + spinlock_t lock; + struct mutex mutex; + struct picolcd_pending *pending; + int status; +#define PICOLCD_BOOTLOADER 1 +#define PICOLCD_FAILED 2 +#define PICOLCD_CIR_SHUN 4 +}; + +#ifdef CONFIG_HID_PICOLCD_FB +struct picolcd_fb_data { + /* Framebuffer stuff */ + spinlock_t lock; + struct picolcd_data *picolcd; + u8 update_rate; + u8 bpp; + u8 force; + u8 ready; + u8 *vbitmap; /* local copy of what was sent to PicoLCD */ + u8 *bitmap; /* framebuffer */ +}; +#endif /* CONFIG_HID_PICOLCD_FB */ + +/* Find a given report */ +#define picolcd_in_report(id, dev) picolcd_report(id, dev, HID_INPUT_REPORT) +#define picolcd_out_report(id, dev) picolcd_report(id, dev, HID_OUTPUT_REPORT) + +struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir); + +#ifdef CONFIG_DEBUG_FS +void picolcd_debug_out_report(struct picolcd_data *data, + struct hid_device *hdev, struct hid_report *report); +#define usbhid_submit_report(a, b, c) \ + do { \ + picolcd_debug_out_report(hid_get_drvdata(a), a, b); \ + usbhid_submit_report(a, b, c); \ + } while (0) + +void picolcd_debug_raw_event(struct picolcd_data *data, + struct hid_device *hdev, struct hid_report *report, + u8 *raw_data, int size); + +void picolcd_init_devfs(struct picolcd_data *data, + struct hid_report *eeprom_r, struct hid_report *eeprom_w, + struct hid_report *flash_r, struct hid_report *flash_w, + struct hid_report *reset); + +void picolcd_exit_devfs(struct picolcd_data *data); +#else +static inline void picolcd_debug_out_report(struct picolcd_data *data, + struct hid_device *hdev, struct hid_report *report) +{ +} +static inline void picolcd_debug_raw_event(struct picolcd_data *data, + struct hid_device *hdev, struct hid_report *report, + u8 *raw_data, int size) +{ +} +static inline void picolcd_init_devfs(struct picolcd_data *data, + struct hid_report *eeprom_r, struct hid_report *eeprom_w, + struct hid_report *flash_r, struct hid_report *flash_w, + struct hid_report *reset) +{ +} +static inline void picolcd_exit_devfs(struct picolcd_data *data) +{ +} +#endif /* CONFIG_DEBUG_FS */ + + +#ifdef CONFIG_HID_PICOLCD_FB +int picolcd_fb_reset(struct picolcd_data *data, int clear); + +int picolcd_init_framebuffer(struct picolcd_data *data); + +void picolcd_exit_framebuffer(struct picolcd_data *data); + +void picolcd_fb_refresh(struct picolcd_data *data); +#define picolcd_fbinfo(d) ((d)->fb_info) +#else +static inline int picolcd_fb_reset(struct picolcd_data *data, int clear) +{ + return 0; +} +static inline int picolcd_init_framebuffer(struct picolcd_data *data) +{ + return 0; +} +static inline void picolcd_exit_framebuffer(struct picolcd_data *data) +{ +} +static inline void picolcd_fb_refresh(struct picolcd_data *data) +{ +} +#define picolcd_fbinfo(d) NULL +#endif /* CONFIG_HID_PICOLCD_FB */ + + +#ifdef CONFIG_HID_PICOLCD_BACKLIGHT +int picolcd_init_backlight(struct picolcd_data *data, + struct hid_report *report); + +void picolcd_exit_backlight(struct picolcd_data *data); + +int picolcd_resume_backlight(struct picolcd_data *data); + +void picolcd_suspend_backlight(struct picolcd_data *data); +#else +static inline int picolcd_init_backlight(struct picolcd_data *data, + struct hid_report *report) +{ + return 0; +} +static inline void picolcd_exit_backlight(struct picolcd_data *data) +{ +} +static inline int picolcd_resume_backlight(struct picolcd_data *data) +{ + return 0; +} +static inline void picolcd_suspend_backlight(struct picolcd_data *data) +{ +} + 
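Each optional sub-driver in this header follows the same pattern: the real API is declared behind its CONFIG_HID_PICOLCD_* option, and static inline no-op stubs are provided otherwise, so the core driver can call the helpers unconditionally instead of scattering #ifdefs over its probe/remove paths. A minimal sketch of the pattern, using a hypothetical CONFIG_HID_PICOLCD_FOO option and picolcd_*_foo() names that are not part of this patch:

	#ifdef CONFIG_HID_PICOLCD_FOO
	int picolcd_init_foo(struct picolcd_data *data, struct hid_report *report);
	void picolcd_exit_foo(struct picolcd_data *data);
	#else
	static inline int picolcd_init_foo(struct picolcd_data *data,
			struct hid_report *report)
	{
		return 0;	/* feature compiled out: succeed without doing anything */
	}
	static inline void picolcd_exit_foo(struct picolcd_data *data)
	{
	}
	#endif /* CONFIG_HID_PICOLCD_FOO */

The init stubs return 0 rather than an error, so a sub-feature that is compiled out never turns the probe sequence into an error path.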
+#endif /* CONFIG_HID_PICOLCD_BACKLIGHT */ + + +#ifdef CONFIG_HID_PICOLCD_LCD +int picolcd_init_lcd(struct picolcd_data *data, + struct hid_report *report); + +void picolcd_exit_lcd(struct picolcd_data *data); + +int picolcd_resume_lcd(struct picolcd_data *data); +#else +static inline int picolcd_init_lcd(struct picolcd_data *data, + struct hid_report *report) +{ + return 0; +} +static inline void picolcd_exit_lcd(struct picolcd_data *data) +{ +} +static inline int picolcd_resume_lcd(struct picolcd_data *data) +{ + return 0; +} +#endif /* CONFIG_HID_PICOLCD_LCD */ + + +#ifdef CONFIG_HID_PICOLCD_LEDS +int picolcd_init_leds(struct picolcd_data *data, + struct hid_report *report); + +void picolcd_exit_leds(struct picolcd_data *data); + +void picolcd_leds_set(struct picolcd_data *data); +#else +static inline int picolcd_init_leds(struct picolcd_data *data, + struct hid_report *report) +{ + return 0; +} +static inline void picolcd_exit_leds(struct picolcd_data *data) +{ +} +static inline void picolcd_leds_set(struct picolcd_data *data) +{ +} +#endif /* CONFIG_HID_PICOLCD_LEDS */ + + +#ifdef CONFIG_HID_PICOLCD_CIR +int picolcd_raw_cir(struct picolcd_data *data, + struct hid_report *report, u8 *raw_data, int size); + +int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report); + +void picolcd_exit_cir(struct picolcd_data *data); +#else +static inline int picolcd_raw_cir(struct picolcd_data *data, + struct hid_report *report, u8 *raw_data, int size) +{ + return 1; +} +static inline int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report) +{ + return 0; +} +static inline void picolcd_exit_cir(struct picolcd_data *data) +{ +} +#endif /* CONFIG_HID_PICOLCD_LIRC */ + +int picolcd_reset(struct hid_device *hdev); +struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev, + int report_id, const u8 *raw_data, int size); diff --git a/trunk/drivers/hid/hid-picolcd_backlight.c b/trunk/drivers/hid/hid-picolcd_backlight.c new file mode 100644 index 000000000000..b91f30945f9c --- /dev/null +++ b/trunk/drivers/hid/hid-picolcd_backlight.c @@ -0,0 +1,122 @@ +/*************************************************************************** + * Copyright (C) 2010-2012 by Bruno Prémont * + * * + * Based on Logitech G13 driver (v0.4) * + * Copyright (C) 2009 by Rick L. Vinyard, Jr. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, version 2 of the License. * + * * + * This driver is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this software. If not see . 
* + ***************************************************************************/ + +#include +#include "usbhid/usbhid.h" +#include + +#include +#include + +#include "hid-picolcd.h" + +static int picolcd_get_brightness(struct backlight_device *bdev) +{ + struct picolcd_data *data = bl_get_data(bdev); + return data->lcd_brightness; +} + +static int picolcd_set_brightness(struct backlight_device *bdev) +{ + struct picolcd_data *data = bl_get_data(bdev); + struct hid_report *report = picolcd_out_report(REPORT_BRIGHTNESS, data->hdev); + unsigned long flags; + + if (!report || report->maxfield != 1 || report->field[0]->report_count != 1) + return -ENODEV; + + data->lcd_brightness = bdev->props.brightness & 0x0ff; + data->lcd_power = bdev->props.power; + spin_lock_irqsave(&data->lock, flags); + hid_set_field(report->field[0], 0, data->lcd_power == FB_BLANK_UNBLANK ? data->lcd_brightness : 0); + if (!(data->status & PICOLCD_FAILED)) + usbhid_submit_report(data->hdev, report, USB_DIR_OUT); + spin_unlock_irqrestore(&data->lock, flags); + return 0; +} + +static int picolcd_check_bl_fb(struct backlight_device *bdev, struct fb_info *fb) +{ + return fb && fb == picolcd_fbinfo((struct picolcd_data *)bl_get_data(bdev)); +} + +static const struct backlight_ops picolcd_blops = { + .update_status = picolcd_set_brightness, + .get_brightness = picolcd_get_brightness, + .check_fb = picolcd_check_bl_fb, +}; + +int picolcd_init_backlight(struct picolcd_data *data, struct hid_report *report) +{ + struct device *dev = &data->hdev->dev; + struct backlight_device *bdev; + struct backlight_properties props; + if (!report) + return -ENODEV; + if (report->maxfield != 1 || report->field[0]->report_count != 1 || + report->field[0]->report_size != 8) { + dev_err(dev, "unsupported BRIGHTNESS report"); + return -EINVAL; + } + + memset(&props, 0, sizeof(props)); + props.type = BACKLIGHT_RAW; + props.max_brightness = 0xff; + bdev = backlight_device_register(dev_name(dev), dev, data, + &picolcd_blops, &props); + if (IS_ERR(bdev)) { + dev_err(dev, "failed to register backlight\n"); + return PTR_ERR(bdev); + } + bdev->props.brightness = 0xff; + data->lcd_brightness = 0xff; + data->backlight = bdev; + picolcd_set_brightness(bdev); + return 0; +} + +void picolcd_exit_backlight(struct picolcd_data *data) +{ + struct backlight_device *bdev = data->backlight; + + data->backlight = NULL; + if (bdev) + backlight_device_unregister(bdev); +} + +int picolcd_resume_backlight(struct picolcd_data *data) +{ + if (!data->backlight) + return 0; + return picolcd_set_brightness(data->backlight); +} + +#ifdef CONFIG_PM +void picolcd_suspend_backlight(struct picolcd_data *data) +{ + int bl_power = data->lcd_power; + if (!data->backlight) + return; + + data->backlight->props.power = FB_BLANK_POWERDOWN; + picolcd_set_brightness(data->backlight); + data->lcd_power = data->backlight->props.power = bl_power; +} +#endif /* CONFIG_PM */ + diff --git a/trunk/drivers/hid/hid-picolcd_cir.c b/trunk/drivers/hid/hid-picolcd_cir.c new file mode 100644 index 000000000000..13ca9191b630 --- /dev/null +++ b/trunk/drivers/hid/hid-picolcd_cir.c @@ -0,0 +1,152 @@ +/*************************************************************************** + * Copyright (C) 2010-2012 by Bruno Prémont * + * * + * Based on Logitech G13 driver (v0.4) * + * Copyright (C) 2009 by Rick L. Vinyard, Jr. 
* + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, version 2 of the License. * + * * + * This driver is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this software. If not see . * + ***************************************************************************/ + +#include +#include +#include +#include "hid-ids.h" +#include "usbhid/usbhid.h" +#include + +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include +#include + +#include "hid-picolcd.h" + + +int picolcd_raw_cir(struct picolcd_data *data, + struct hid_report *report, u8 *raw_data, int size) +{ + unsigned long flags; + int i, w, sz; + DEFINE_IR_RAW_EVENT(rawir); + + /* ignore if rc_dev is NULL or status is shunned */ + spin_lock_irqsave(&data->lock, flags); + if (!data->rc_dev || (data->status & PICOLCD_CIR_SHUN)) { + spin_unlock_irqrestore(&data->lock, flags); + return 1; + } + spin_unlock_irqrestore(&data->lock, flags); + + /* PicoLCD USB packets contain 16-bit intervals in network order, + * with value negated for pulse. Intervals are in microseconds. + * + * Note: some userspace LIRC code for PicoLCD says negated values + * for space - is it a matter of IR chip? (pulse for my TSOP2236) + * + * In addition, the first interval seems to be around 15000 + base + * interval for non-first report of IR data - thus the quirk below + * to get RC_CODE to understand Sony and JVC remotes I have at hand + */ + sz = size > 0 ? min((int)raw_data[0], size-1) : 0; + for (i = 0; i+1 < sz; i += 2) { + init_ir_raw_event(&rawir); + w = (raw_data[i] << 8) | (raw_data[i+1]); + rawir.pulse = !!(w & 0x8000); + rawir.duration = US_TO_NS(rawir.pulse ? (65536 - w) : w); + /* Quirk!! 
- see above */ + if (i == 0 && rawir.duration > 15000000) + rawir.duration -= 15000000; + ir_raw_event_store(data->rc_dev, &rawir); + } + ir_raw_event_handle(data->rc_dev); + + return 1; +} + +static int picolcd_cir_open(struct rc_dev *dev) +{ + struct picolcd_data *data = dev->priv; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + data->status &= ~PICOLCD_CIR_SHUN; + spin_unlock_irqrestore(&data->lock, flags); + return 0; +} + +static void picolcd_cir_close(struct rc_dev *dev) +{ + struct picolcd_data *data = dev->priv; + unsigned long flags; + + spin_lock_irqsave(&data->lock, flags); + data->status |= PICOLCD_CIR_SHUN; + spin_unlock_irqrestore(&data->lock, flags); +} + +/* initialize CIR input device */ +int picolcd_init_cir(struct picolcd_data *data, struct hid_report *report) +{ + struct rc_dev *rdev; + int ret = 0; + + rdev = rc_allocate_device(); + if (!rdev) + return -ENOMEM; + + rdev->priv = data; + rdev->driver_type = RC_DRIVER_IR_RAW; + rdev->allowed_protos = RC_TYPE_ALL; + rdev->open = picolcd_cir_open; + rdev->close = picolcd_cir_close; + rdev->input_name = data->hdev->name; + rdev->input_phys = data->hdev->phys; + rdev->input_id.bustype = data->hdev->bus; + rdev->input_id.vendor = data->hdev->vendor; + rdev->input_id.product = data->hdev->product; + rdev->input_id.version = data->hdev->version; + rdev->dev.parent = &data->hdev->dev; + rdev->driver_name = PICOLCD_NAME; + rdev->map_name = RC_MAP_RC6_MCE; + rdev->timeout = MS_TO_NS(100); + rdev->rx_resolution = US_TO_NS(1); + + ret = rc_register_device(rdev); + if (ret) + goto err; + data->rc_dev = rdev; + return 0; + +err: + rc_free_device(rdev); + return ret; +} + +void picolcd_exit_cir(struct picolcd_data *data) +{ + struct rc_dev *rdev = data->rc_dev; + + data->rc_dev = NULL; + rc_unregister_device(rdev); +} + diff --git a/trunk/drivers/hid/hid-picolcd_core.c b/trunk/drivers/hid/hid-picolcd_core.c new file mode 100644 index 000000000000..86df26e58aba --- /dev/null +++ b/trunk/drivers/hid/hid-picolcd_core.c @@ -0,0 +1,689 @@ +/*************************************************************************** + * Copyright (C) 2010-2012 by Bruno Prémont * + * * + * Based on Logitech G13 driver (v0.4) * + * Copyright (C) 2009 by Rick L. Vinyard, Jr. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, version 2 of the License. * + * * + * This driver is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this software. If not see . * + ***************************************************************************/ + +#include +#include +#include +#include "hid-ids.h" +#include "usbhid/usbhid.h" +#include + +#include +#include + +#include +#include +#include + +#include "hid-picolcd.h" + + +/* Input device + * + * The PicoLCD has an IR receiver header, a built-in keypad with 5 keys + * and header for 4x4 key matrix. The built-in keys are part of the matrix. 
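+ *
+ * The keypad reports scancodes 1..16, one per matrix position; def_keymap
+ * below translates them to input keycodes and leaves unused positions at
+ * KEY_RESERVED.  Since keycode/keycodemax/keycodesize are wired up on the
+ * input device, individual entries can be remapped from userspace.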
+ */ +static const unsigned short def_keymap[PICOLCD_KEYS] = { + KEY_RESERVED, /* none */ + KEY_BACK, /* col 4 + row 1 */ + KEY_HOMEPAGE, /* col 3 + row 1 */ + KEY_RESERVED, /* col 2 + row 1 */ + KEY_RESERVED, /* col 1 + row 1 */ + KEY_SCROLLUP, /* col 4 + row 2 */ + KEY_OK, /* col 3 + row 2 */ + KEY_SCROLLDOWN, /* col 2 + row 2 */ + KEY_RESERVED, /* col 1 + row 2 */ + KEY_RESERVED, /* col 4 + row 3 */ + KEY_RESERVED, /* col 3 + row 3 */ + KEY_RESERVED, /* col 2 + row 3 */ + KEY_RESERVED, /* col 1 + row 3 */ + KEY_RESERVED, /* col 4 + row 4 */ + KEY_RESERVED, /* col 3 + row 4 */ + KEY_RESERVED, /* col 2 + row 4 */ + KEY_RESERVED, /* col 1 + row 4 */ +}; + + +/* Find a given report */ +struct hid_report *picolcd_report(int id, struct hid_device *hdev, int dir) +{ + struct list_head *feature_report_list = &hdev->report_enum[dir].report_list; + struct hid_report *report = NULL; + + list_for_each_entry(report, feature_report_list, list) { + if (report->id == id) + return report; + } + hid_warn(hdev, "No report with id 0x%x found\n", id); + return NULL; +} + +/* Submit a report and wait for a reply from device - if device fades away + * or does not respond in time, return NULL */ +struct picolcd_pending *picolcd_send_and_wait(struct hid_device *hdev, + int report_id, const u8 *raw_data, int size) +{ + struct picolcd_data *data = hid_get_drvdata(hdev); + struct picolcd_pending *work; + struct hid_report *report = picolcd_out_report(report_id, hdev); + unsigned long flags; + int i, j, k; + + if (!report || !data) + return NULL; + if (data->status & PICOLCD_FAILED) + return NULL; + work = kzalloc(sizeof(*work), GFP_KERNEL); + if (!work) + return NULL; + + init_completion(&work->ready); + work->out_report = report; + work->in_report = NULL; + work->raw_size = 0; + + mutex_lock(&data->mutex); + spin_lock_irqsave(&data->lock, flags); + for (i = k = 0; i < report->maxfield; i++) + for (j = 0; j < report->field[i]->report_count; j++) { + hid_set_field(report->field[i], j, k < size ? 
raw_data[k] : 0); + k++; + } + if (data->status & PICOLCD_FAILED) { + kfree(work); + work = NULL; + } else { + data->pending = work; + usbhid_submit_report(data->hdev, report, USB_DIR_OUT); + spin_unlock_irqrestore(&data->lock, flags); + wait_for_completion_interruptible_timeout(&work->ready, HZ*2); + spin_lock_irqsave(&data->lock, flags); + data->pending = NULL; + } + spin_unlock_irqrestore(&data->lock, flags); + mutex_unlock(&data->mutex); + return work; +} + +/* + * input class device + */ +static int picolcd_raw_keypad(struct picolcd_data *data, + struct hid_report *report, u8 *raw_data, int size) +{ + /* + * Keypad event + * First and second data bytes list currently pressed keys, + * 0x00 means no key and at most 2 keys may be pressed at same time + */ + int i, j; + + /* determine newly pressed keys */ + for (i = 0; i < size; i++) { + unsigned int key_code; + if (raw_data[i] == 0) + continue; + for (j = 0; j < sizeof(data->pressed_keys); j++) + if (data->pressed_keys[j] == raw_data[i]) + goto key_already_down; + for (j = 0; j < sizeof(data->pressed_keys); j++) + if (data->pressed_keys[j] == 0) { + data->pressed_keys[j] = raw_data[i]; + break; + } + input_event(data->input_keys, EV_MSC, MSC_SCAN, raw_data[i]); + if (raw_data[i] < PICOLCD_KEYS) + key_code = data->keycode[raw_data[i]]; + else + key_code = KEY_UNKNOWN; + if (key_code != KEY_UNKNOWN) { + dbg_hid(PICOLCD_NAME " got key press for %u:%d", + raw_data[i], key_code); + input_report_key(data->input_keys, key_code, 1); + } + input_sync(data->input_keys); +key_already_down: + continue; + } + + /* determine newly released keys */ + for (j = 0; j < sizeof(data->pressed_keys); j++) { + unsigned int key_code; + if (data->pressed_keys[j] == 0) + continue; + for (i = 0; i < size; i++) + if (data->pressed_keys[j] == raw_data[i]) + goto key_still_down; + input_event(data->input_keys, EV_MSC, MSC_SCAN, data->pressed_keys[j]); + if (data->pressed_keys[j] < PICOLCD_KEYS) + key_code = data->keycode[data->pressed_keys[j]]; + else + key_code = KEY_UNKNOWN; + if (key_code != KEY_UNKNOWN) { + dbg_hid(PICOLCD_NAME " got key release for %u:%d", + data->pressed_keys[j], key_code); + input_report_key(data->input_keys, key_code, 0); + } + input_sync(data->input_keys); + data->pressed_keys[j] = 0; +key_still_down: + continue; + } + return 1; +} + +static int picolcd_check_version(struct hid_device *hdev) +{ + struct picolcd_data *data = hid_get_drvdata(hdev); + struct picolcd_pending *verinfo; + int ret = 0; + + if (!data) + return -ENODEV; + + verinfo = picolcd_send_and_wait(hdev, REPORT_VERSION, NULL, 0); + if (!verinfo) { + hid_err(hdev, "no version response from PicoLCD\n"); + return -ENODEV; + } + + if (verinfo->raw_size == 2) { + data->version[0] = verinfo->raw_data[1]; + data->version[1] = verinfo->raw_data[0]; + if (data->status & PICOLCD_BOOTLOADER) { + hid_info(hdev, "PicoLCD, bootloader version %d.%d\n", + verinfo->raw_data[1], verinfo->raw_data[0]); + } else { + hid_info(hdev, "PicoLCD, firmware version %d.%d\n", + verinfo->raw_data[1], verinfo->raw_data[0]); + } + } else { + hid_err(hdev, "confused, got unexpected version response from PicoLCD\n"); + ret = -EINVAL; + } + kfree(verinfo); + return ret; +} + +/* + * Reset our device and wait for answer to VERSION request + */ +int picolcd_reset(struct hid_device *hdev) +{ + struct picolcd_data *data = hid_get_drvdata(hdev); + struct hid_report *report = picolcd_out_report(REPORT_RESET, hdev); + unsigned long flags; + int error; + + if (!data || !report || report->maxfield != 1) + return 
-ENODEV; + + spin_lock_irqsave(&data->lock, flags); + if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER) + data->status |= PICOLCD_BOOTLOADER; + + /* perform the reset */ + hid_set_field(report->field[0], 0, 1); + if (data->status & PICOLCD_FAILED) { + spin_unlock_irqrestore(&data->lock, flags); + return -ENODEV; + } + usbhid_submit_report(hdev, report, USB_DIR_OUT); + spin_unlock_irqrestore(&data->lock, flags); + + error = picolcd_check_version(hdev); + if (error) + return error; + + picolcd_resume_lcd(data); + picolcd_resume_backlight(data); + picolcd_fb_refresh(data); + picolcd_leds_set(data); + return 0; +} + +/* + * The "operation_mode" sysfs attribute + */ +static ssize_t picolcd_operation_mode_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct picolcd_data *data = dev_get_drvdata(dev); + + if (data->status & PICOLCD_BOOTLOADER) + return snprintf(buf, PAGE_SIZE, "[bootloader] lcd\n"); + else + return snprintf(buf, PAGE_SIZE, "bootloader [lcd]\n"); +} + +static ssize_t picolcd_operation_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct picolcd_data *data = dev_get_drvdata(dev); + struct hid_report *report = NULL; + size_t cnt = count; + int timeout = data->opmode_delay; + unsigned long flags; + + if (cnt >= 3 && strncmp("lcd", buf, 3) == 0) { + if (data->status & PICOLCD_BOOTLOADER) + report = picolcd_out_report(REPORT_EXIT_FLASHER, data->hdev); + buf += 3; + cnt -= 3; + } else if (cnt >= 10 && strncmp("bootloader", buf, 10) == 0) { + if (!(data->status & PICOLCD_BOOTLOADER)) + report = picolcd_out_report(REPORT_EXIT_KEYBOARD, data->hdev); + buf += 10; + cnt -= 10; + } + if (!report) + return -EINVAL; + + while (cnt > 0 && (buf[cnt-1] == '\n' || buf[cnt-1] == '\r')) + cnt--; + if (cnt != 0) + return -EINVAL; + + spin_lock_irqsave(&data->lock, flags); + hid_set_field(report->field[0], 0, timeout & 0xff); + hid_set_field(report->field[0], 1, (timeout >> 8) & 0xff); + usbhid_submit_report(data->hdev, report, USB_DIR_OUT); + spin_unlock_irqrestore(&data->lock, flags); + return count; +} + +static DEVICE_ATTR(operation_mode, 0644, picolcd_operation_mode_show, + picolcd_operation_mode_store); + +/* + * The "operation_mode_delay" sysfs attribute + */ +static ssize_t picolcd_operation_mode_delay_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct picolcd_data *data = dev_get_drvdata(dev); + + return snprintf(buf, PAGE_SIZE, "%hu\n", data->opmode_delay); +} + +static ssize_t picolcd_operation_mode_delay_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct picolcd_data *data = dev_get_drvdata(dev); + unsigned u; + if (sscanf(buf, "%u", &u) != 1) + return -EINVAL; + if (u > 30000) + return -EINVAL; + else + data->opmode_delay = u; + return count; +} + +static DEVICE_ATTR(operation_mode_delay, 0644, picolcd_operation_mode_delay_show, + picolcd_operation_mode_delay_store); + +/* + * Handle raw report as sent by device + */ +static int picolcd_raw_event(struct hid_device *hdev, + struct hid_report *report, u8 *raw_data, int size) +{ + struct picolcd_data *data = hid_get_drvdata(hdev); + unsigned long flags; + int ret = 0; + + if (!data) + return 1; + + if (report->id == REPORT_KEY_STATE) { + if (data->input_keys) + ret = picolcd_raw_keypad(data, report, raw_data+1, size-1); + } else if (report->id == REPORT_IR_DATA) { + ret = picolcd_raw_cir(data, report, raw_data+1, size-1); + } else { + spin_lock_irqsave(&data->lock, flags); + /* 
+ * We let the caller of picolcd_send_and_wait() check if the + * report we got is one of the expected ones or not. + */ + if (data->pending) { + memcpy(data->pending->raw_data, raw_data+1, size-1); + data->pending->raw_size = size-1; + data->pending->in_report = report; + complete(&data->pending->ready); + } + spin_unlock_irqrestore(&data->lock, flags); + } + + picolcd_debug_raw_event(data, hdev, report, raw_data, size); + return 1; +} + +#ifdef CONFIG_PM +static int picolcd_suspend(struct hid_device *hdev, pm_message_t message) +{ + if (PMSG_IS_AUTO(message)) + return 0; + + picolcd_suspend_backlight(hid_get_drvdata(hdev)); + dbg_hid(PICOLCD_NAME " device ready for suspend\n"); + return 0; +} + +static int picolcd_resume(struct hid_device *hdev) +{ + int ret; + ret = picolcd_resume_backlight(hid_get_drvdata(hdev)); + if (ret) + dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret); + return 0; +} + +static int picolcd_reset_resume(struct hid_device *hdev) +{ + int ret; + ret = picolcd_reset(hdev); + if (ret) + dbg_hid(PICOLCD_NAME " resetting our device failed: %d\n", ret); + ret = picolcd_fb_reset(hid_get_drvdata(hdev), 0); + if (ret) + dbg_hid(PICOLCD_NAME " restoring framebuffer content failed: %d\n", ret); + ret = picolcd_resume_lcd(hid_get_drvdata(hdev)); + if (ret) + dbg_hid(PICOLCD_NAME " restoring lcd failed: %d\n", ret); + ret = picolcd_resume_backlight(hid_get_drvdata(hdev)); + if (ret) + dbg_hid(PICOLCD_NAME " restoring backlight failed: %d\n", ret); + picolcd_leds_set(hid_get_drvdata(hdev)); + return 0; +} +#endif + +/* initialize keypad input device */ +static int picolcd_init_keys(struct picolcd_data *data, + struct hid_report *report) +{ + struct hid_device *hdev = data->hdev; + struct input_dev *idev; + int error, i; + + if (!report) + return -ENODEV; + if (report->maxfield != 1 || report->field[0]->report_count != 2 || + report->field[0]->report_size != 8) { + hid_err(hdev, "unsupported KEY_STATE report\n"); + return -EINVAL; + } + + idev = input_allocate_device(); + if (idev == NULL) { + hid_err(hdev, "failed to allocate input device\n"); + return -ENOMEM; + } + input_set_drvdata(idev, hdev); + memcpy(data->keycode, def_keymap, sizeof(def_keymap)); + idev->name = hdev->name; + idev->phys = hdev->phys; + idev->uniq = hdev->uniq; + idev->id.bustype = hdev->bus; + idev->id.vendor = hdev->vendor; + idev->id.product = hdev->product; + idev->id.version = hdev->version; + idev->dev.parent = &hdev->dev; + idev->keycode = &data->keycode; + idev->keycodemax = PICOLCD_KEYS; + idev->keycodesize = sizeof(data->keycode[0]); + input_set_capability(idev, EV_MSC, MSC_SCAN); + set_bit(EV_REP, idev->evbit); + for (i = 0; i < PICOLCD_KEYS; i++) + input_set_capability(idev, EV_KEY, data->keycode[i]); + error = input_register_device(idev); + if (error) { + hid_err(hdev, "error registering the input device\n"); + input_free_device(idev); + return error; + } + data->input_keys = idev; + return 0; +} + +static void picolcd_exit_keys(struct picolcd_data *data) +{ + struct input_dev *idev = data->input_keys; + + data->input_keys = NULL; + if (idev) + input_unregister_device(idev); +} + +static int picolcd_probe_lcd(struct hid_device *hdev, struct picolcd_data *data) +{ + int error; + + /* Setup keypad input device */ + error = picolcd_init_keys(data, picolcd_in_report(REPORT_KEY_STATE, hdev)); + if (error) + goto err; + + /* Setup CIR input device */ + error = picolcd_init_cir(data, picolcd_in_report(REPORT_IR_DATA, hdev)); + if (error) + goto err; + + /* Set up the framebuffer device */ 
+ error = picolcd_init_framebuffer(data); + if (error) + goto err; + + /* Setup lcd class device */ + error = picolcd_init_lcd(data, picolcd_out_report(REPORT_CONTRAST, hdev)); + if (error) + goto err; + + /* Setup backlight class device */ + error = picolcd_init_backlight(data, picolcd_out_report(REPORT_BRIGHTNESS, hdev)); + if (error) + goto err; + + /* Setup the LED class devices */ + error = picolcd_init_leds(data, picolcd_out_report(REPORT_LED_STATE, hdev)); + if (error) + goto err; + + picolcd_init_devfs(data, picolcd_out_report(REPORT_EE_READ, hdev), + picolcd_out_report(REPORT_EE_WRITE, hdev), + picolcd_out_report(REPORT_READ_MEMORY, hdev), + picolcd_out_report(REPORT_WRITE_MEMORY, hdev), + picolcd_out_report(REPORT_RESET, hdev)); + return 0; +err: + picolcd_exit_leds(data); + picolcd_exit_backlight(data); + picolcd_exit_lcd(data); + picolcd_exit_framebuffer(data); + picolcd_exit_cir(data); + picolcd_exit_keys(data); + return error; +} + +static int picolcd_probe_bootloader(struct hid_device *hdev, struct picolcd_data *data) +{ + picolcd_init_devfs(data, NULL, NULL, + picolcd_out_report(REPORT_BL_READ_MEMORY, hdev), + picolcd_out_report(REPORT_BL_WRITE_MEMORY, hdev), NULL); + return 0; +} + +static int picolcd_probe(struct hid_device *hdev, + const struct hid_device_id *id) +{ + struct picolcd_data *data; + int error = -ENOMEM; + + dbg_hid(PICOLCD_NAME " hardware probe...\n"); + + /* + * Let's allocate the picolcd data structure, set some reasonable + * defaults, and associate it with the device + */ + data = kzalloc(sizeof(struct picolcd_data), GFP_KERNEL); + if (data == NULL) { + hid_err(hdev, "can't allocate space for Minibox PicoLCD device data\n"); + error = -ENOMEM; + goto err_no_cleanup; + } + + spin_lock_init(&data->lock); + mutex_init(&data->mutex); + data->hdev = hdev; + data->opmode_delay = 5000; + if (hdev->product == USB_DEVICE_ID_PICOLCD_BOOTLOADER) + data->status |= PICOLCD_BOOTLOADER; + hid_set_drvdata(hdev, data); + + /* Parse the device reports and start it up */ + error = hid_parse(hdev); + if (error) { + hid_err(hdev, "device report parse failed\n"); + goto err_cleanup_data; + } + + error = hid_hw_start(hdev, 0); + if (error) { + hid_err(hdev, "hardware start failed\n"); + goto err_cleanup_data; + } + + error = hid_hw_open(hdev); + if (error) { + hid_err(hdev, "failed to open input interrupt pipe for key and IR events\n"); + goto err_cleanup_hid_hw; + } + + error = device_create_file(&hdev->dev, &dev_attr_operation_mode_delay); + if (error) { + hid_err(hdev, "failed to create sysfs attributes\n"); + goto err_cleanup_hid_ll; + } + + error = device_create_file(&hdev->dev, &dev_attr_operation_mode); + if (error) { + hid_err(hdev, "failed to create sysfs attributes\n"); + goto err_cleanup_sysfs1; + } + + if (data->status & PICOLCD_BOOTLOADER) + error = picolcd_probe_bootloader(hdev, data); + else + error = picolcd_probe_lcd(hdev, data); + if (error) + goto err_cleanup_sysfs2; + + dbg_hid(PICOLCD_NAME " activated and initialized\n"); + return 0; + +err_cleanup_sysfs2: + device_remove_file(&hdev->dev, &dev_attr_operation_mode); +err_cleanup_sysfs1: + device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay); +err_cleanup_hid_ll: + hid_hw_close(hdev); +err_cleanup_hid_hw: + hid_hw_stop(hdev); +err_cleanup_data: + kfree(data); +err_no_cleanup: + hid_set_drvdata(hdev, NULL); + + return error; +} + +static void picolcd_remove(struct hid_device *hdev) +{ + struct picolcd_data *data = hid_get_drvdata(hdev); + unsigned long flags; + + dbg_hid(PICOLCD_NAME " hardware 
remove...\n"); + spin_lock_irqsave(&data->lock, flags); + data->status |= PICOLCD_FAILED; + spin_unlock_irqrestore(&data->lock, flags); + + picolcd_exit_devfs(data); + device_remove_file(&hdev->dev, &dev_attr_operation_mode); + device_remove_file(&hdev->dev, &dev_attr_operation_mode_delay); + hid_hw_close(hdev); + hid_hw_stop(hdev); + + /* Shortcut potential pending reply that will never arrive */ + spin_lock_irqsave(&data->lock, flags); + if (data->pending) + complete(&data->pending->ready); + spin_unlock_irqrestore(&data->lock, flags); + + /* Cleanup LED */ + picolcd_exit_leds(data); + /* Clean up the framebuffer */ + picolcd_exit_backlight(data); + picolcd_exit_lcd(data); + picolcd_exit_framebuffer(data); + /* Cleanup input */ + picolcd_exit_cir(data); + picolcd_exit_keys(data); + + hid_set_drvdata(hdev, NULL); + mutex_destroy(&data->mutex); + /* Finally, clean up the picolcd data itself */ + kfree(data); +} + +static const struct hid_device_id picolcd_devices[] = { + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, + { } +}; +MODULE_DEVICE_TABLE(hid, picolcd_devices); + +static struct hid_driver picolcd_driver = { + .name = "hid-picolcd", + .id_table = picolcd_devices, + .probe = picolcd_probe, + .remove = picolcd_remove, + .raw_event = picolcd_raw_event, +#ifdef CONFIG_PM + .suspend = picolcd_suspend, + .resume = picolcd_resume, + .reset_resume = picolcd_reset_resume, +#endif +}; + +static int __init picolcd_init(void) +{ + return hid_register_driver(&picolcd_driver); +} + +static void __exit picolcd_exit(void) +{ + hid_unregister_driver(&picolcd_driver); +} + +module_init(picolcd_init); +module_exit(picolcd_exit); +MODULE_DESCRIPTION("Minibox graphics PicoLCD Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/trunk/drivers/hid/hid-picolcd_debugfs.c b/trunk/drivers/hid/hid-picolcd_debugfs.c new file mode 100644 index 000000000000..4809aa1bdb9c --- /dev/null +++ b/trunk/drivers/hid/hid-picolcd_debugfs.c @@ -0,0 +1,899 @@ +/*************************************************************************** + * Copyright (C) 2010-2012 by Bruno Prémont * + * * + * Based on Logitech G13 driver (v0.4) * + * Copyright (C) 2009 by Rick L. Vinyard, Jr. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, version 2 of the License. * + * * + * This driver is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this software. If not see . 
* + ***************************************************************************/ + +#include +#include +#include "usbhid/usbhid.h" +#include + +#include +#include +#include + +#include +#include + +#include "hid-picolcd.h" + + +static int picolcd_debug_reset_show(struct seq_file *f, void *p) +{ + if (picolcd_fbinfo((struct picolcd_data *)f->private)) + seq_printf(f, "all fb\n"); + else + seq_printf(f, "all\n"); + return 0; +} + +static int picolcd_debug_reset_open(struct inode *inode, struct file *f) +{ + return single_open(f, picolcd_debug_reset_show, inode->i_private); +} + +static ssize_t picolcd_debug_reset_write(struct file *f, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct picolcd_data *data = ((struct seq_file *)f->private_data)->private; + char buf[32]; + size_t cnt = min(count, sizeof(buf)-1); + if (copy_from_user(buf, user_buf, cnt)) + return -EFAULT; + + while (cnt > 0 && (buf[cnt-1] == ' ' || buf[cnt-1] == '\n')) + cnt--; + buf[cnt] = '\0'; + if (strcmp(buf, "all") == 0) { + picolcd_reset(data->hdev); + picolcd_fb_reset(data, 1); + } else if (strcmp(buf, "fb") == 0) { + picolcd_fb_reset(data, 1); + } else { + return -EINVAL; + } + return count; +} + +static const struct file_operations picolcd_debug_reset_fops = { + .owner = THIS_MODULE, + .open = picolcd_debug_reset_open, + .read = seq_read, + .llseek = seq_lseek, + .write = picolcd_debug_reset_write, + .release = single_release, +}; + +/* + * The "eeprom" file + */ +static ssize_t picolcd_debug_eeprom_read(struct file *f, char __user *u, + size_t s, loff_t *off) +{ + struct picolcd_data *data = f->private_data; + struct picolcd_pending *resp; + u8 raw_data[3]; + ssize_t ret = -EIO; + + if (s == 0) + return -EINVAL; + if (*off > 0x0ff) + return 0; + + /* prepare buffer with info about what we want to read (addr & len) */ + raw_data[0] = *off & 0xff; + raw_data[1] = (*off >> 8) & 0xff; + raw_data[2] = s < 20 ? s : 20; + if (*off + raw_data[2] > 0xff) + raw_data[2] = 0x100 - *off; + resp = picolcd_send_and_wait(data->hdev, REPORT_EE_READ, raw_data, + sizeof(raw_data)); + if (!resp) + return -EIO; + + if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) { + /* successful read :) */ + ret = resp->raw_data[2]; + if (ret > s) + ret = s; + if (copy_to_user(u, resp->raw_data+3, ret)) + ret = -EFAULT; + else + *off += ret; + } /* anything else is some kind of IO error */ + + kfree(resp); + return ret; +} + +static ssize_t picolcd_debug_eeprom_write(struct file *f, const char __user *u, + size_t s, loff_t *off) +{ + struct picolcd_data *data = f->private_data; + struct picolcd_pending *resp; + ssize_t ret = -EIO; + u8 raw_data[23]; + + if (s == 0) + return -EINVAL; + if (*off > 0x0ff) + return -ENOSPC; + + memset(raw_data, 0, sizeof(raw_data)); + raw_data[0] = *off & 0xff; + raw_data[1] = (*off >> 8) & 0xff; + raw_data[2] = min_t(size_t, 20, s); + if (*off + raw_data[2] > 0xff) + raw_data[2] = 0x100 - *off; + + if (copy_from_user(raw_data+3, u, min((u8)20, raw_data[2]))) + return -EFAULT; + resp = picolcd_send_and_wait(data->hdev, REPORT_EE_WRITE, raw_data, + sizeof(raw_data)); + + if (!resp) + return -EIO; + + if (resp->in_report && resp->in_report->id == REPORT_EE_DATA) { + /* check if written data matches */ + if (memcmp(raw_data, resp->raw_data, 3+raw_data[2]) == 0) { + *off += raw_data[2]; + ret = raw_data[2]; + } + } + kfree(resp); + return ret; +} + +/* + * Notes: + * - read/write happens in chunks of at most 20 bytes, it's up to userspace + * to loop in order to get more data. 
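+ *   (the EEPROM window covers offsets 0x00-0xff, so a complete dump takes
+ *   13 read requests of at most 20 bytes each)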
+ * - on write errors on otherwise correct write request the bytes + * that should have been written are in undefined state. + */ +static const struct file_operations picolcd_debug_eeprom_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = picolcd_debug_eeprom_read, + .write = picolcd_debug_eeprom_write, + .llseek = generic_file_llseek, +}; + +/* + * The "flash" file + */ +/* record a flash address to buf (bounds check to be done by caller) */ +static int _picolcd_flash_setaddr(struct picolcd_data *data, u8 *buf, long off) +{ + buf[0] = off & 0xff; + buf[1] = (off >> 8) & 0xff; + if (data->addr_sz == 3) + buf[2] = (off >> 16) & 0xff; + return data->addr_sz == 2 ? 2 : 3; +} + +/* read a given size of data (bounds check to be done by caller) */ +static ssize_t _picolcd_flash_read(struct picolcd_data *data, int report_id, + char __user *u, size_t s, loff_t *off) +{ + struct picolcd_pending *resp; + u8 raw_data[4]; + ssize_t ret = 0; + int len_off, err = -EIO; + + while (s > 0) { + err = -EIO; + len_off = _picolcd_flash_setaddr(data, raw_data, *off); + raw_data[len_off] = s > 32 ? 32 : s; + resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off+1); + if (!resp || !resp->in_report) + goto skip; + if (resp->in_report->id == REPORT_MEMORY || + resp->in_report->id == REPORT_BL_READ_MEMORY) { + if (memcmp(raw_data, resp->raw_data, len_off+1) != 0) + goto skip; + if (copy_to_user(u+ret, resp->raw_data+len_off+1, raw_data[len_off])) { + err = -EFAULT; + goto skip; + } + *off += raw_data[len_off]; + s -= raw_data[len_off]; + ret += raw_data[len_off]; + err = 0; + } +skip: + kfree(resp); + if (err) + return ret > 0 ? ret : err; + } + return ret; +} + +static ssize_t picolcd_debug_flash_read(struct file *f, char __user *u, + size_t s, loff_t *off) +{ + struct picolcd_data *data = f->private_data; + + if (s == 0) + return -EINVAL; + if (*off > 0x05fff) + return 0; + if (*off + s > 0x05fff) + s = 0x06000 - *off; + + if (data->status & PICOLCD_BOOTLOADER) + return _picolcd_flash_read(data, REPORT_BL_READ_MEMORY, u, s, off); + else + return _picolcd_flash_read(data, REPORT_READ_MEMORY, u, s, off); +} + +/* erase block aligned to 64bytes boundary */ +static ssize_t _picolcd_flash_erase64(struct picolcd_data *data, int report_id, + loff_t *off) +{ + struct picolcd_pending *resp; + u8 raw_data[3]; + int len_off; + ssize_t ret = -EIO; + + if (*off & 0x3f) + return -EINVAL; + + len_off = _picolcd_flash_setaddr(data, raw_data, *off); + resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, len_off); + if (!resp || !resp->in_report) + goto skip; + if (resp->in_report->id == REPORT_MEMORY || + resp->in_report->id == REPORT_BL_ERASE_MEMORY) { + if (memcmp(raw_data, resp->raw_data, len_off) != 0) + goto skip; + ret = 0; + } +skip: + kfree(resp); + return ret; +} + +/* write a given size of data (bounds check to be done by caller) */ +static ssize_t _picolcd_flash_write(struct picolcd_data *data, int report_id, + const char __user *u, size_t s, loff_t *off) +{ + struct picolcd_pending *resp; + u8 raw_data[36]; + ssize_t ret = 0; + int len_off, err = -EIO; + + while (s > 0) { + err = -EIO; + len_off = _picolcd_flash_setaddr(data, raw_data, *off); + raw_data[len_off] = s > 32 ? 
32 : s; + if (copy_from_user(raw_data+len_off+1, u, raw_data[len_off])) { + err = -EFAULT; + break; + } + resp = picolcd_send_and_wait(data->hdev, report_id, raw_data, + len_off+1+raw_data[len_off]); + if (!resp || !resp->in_report) + goto skip; + if (resp->in_report->id == REPORT_MEMORY || + resp->in_report->id == REPORT_BL_WRITE_MEMORY) { + if (memcmp(raw_data, resp->raw_data, len_off+1+raw_data[len_off]) != 0) + goto skip; + *off += raw_data[len_off]; + s -= raw_data[len_off]; + ret += raw_data[len_off]; + err = 0; + } +skip: + kfree(resp); + if (err) + break; + } + return ret > 0 ? ret : err; +} + +static ssize_t picolcd_debug_flash_write(struct file *f, const char __user *u, + size_t s, loff_t *off) +{ + struct picolcd_data *data = f->private_data; + ssize_t err, ret = 0; + int report_erase, report_write; + + if (s == 0) + return -EINVAL; + if (*off > 0x5fff) + return -ENOSPC; + if (s & 0x3f) + return -EINVAL; + if (*off & 0x3f) + return -EINVAL; + + if (data->status & PICOLCD_BOOTLOADER) { + report_erase = REPORT_BL_ERASE_MEMORY; + report_write = REPORT_BL_WRITE_MEMORY; + } else { + report_erase = REPORT_ERASE_MEMORY; + report_write = REPORT_WRITE_MEMORY; + } + mutex_lock(&data->mutex_flash); + while (s > 0) { + err = _picolcd_flash_erase64(data, report_erase, off); + if (err) + break; + err = _picolcd_flash_write(data, report_write, u, 64, off); + if (err < 0) + break; + ret += err; + *off += err; + s -= err; + if (err != 64) + break; + } + mutex_unlock(&data->mutex_flash); + return ret > 0 ? ret : err; +} + +/* + * Notes: + * - concurrent writing is prevented by mutex and all writes must be + * n*64 bytes and 64-byte aligned, each write being preceded by an + * ERASE which erases a 64byte block. + * If less than requested was written or an error is returned for an + * otherwise correct write request the next 64-byte block which should + * have been written is in undefined state (mostly: original, erased, + * (half-)written with write error) + * - reading can happen without special restriction + */ +static const struct file_operations picolcd_debug_flash_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = picolcd_debug_flash_read, + .write = picolcd_debug_flash_write, + .llseek = generic_file_llseek, +}; + + +/* + * Helper code for HID report level dumping/debugging + */ +static const char * const error_codes[] = { + "success", "parameter missing", "data_missing", "block readonly", + "block not erasable", "block too big", "section overflow", + "invalid command length", "invalid data length", +}; + +static void dump_buff_as_hex(char *dst, size_t dst_sz, const u8 *data, + const size_t data_len) +{ + int i, j; + for (i = j = 0; i < data_len && j + 4 < dst_sz; i++) { + dst[j++] = hex_asc[(data[i] >> 4) & 0x0f]; + dst[j++] = hex_asc[data[i] & 0x0f]; + dst[j++] = ' '; + } + dst[j] = '\0'; + if (j > 0) + dst[j-1] = '\n'; + if (i < data_len && j > 2) + dst[j-2] = dst[j-3] = '.'; +} + +void picolcd_debug_out_report(struct picolcd_data *data, + struct hid_device *hdev, struct hid_report *report) +{ + u8 raw_data[70]; + int raw_size = (report->size >> 3) + 1; + char *buff; +#define BUFF_SZ 256 + + /* Avoid unnecessary overhead if debugfs is disabled */ + if (list_empty(&hdev->debug_list)) + return; + + buff = kmalloc(BUFF_SZ, GFP_ATOMIC); + if (!buff) + return; + + snprintf(buff, BUFF_SZ, "\nout report %d (size %d) = ", + report->id, raw_size); + hid_debug_event(hdev, buff); + if (raw_size + 5 > sizeof(raw_data)) { + kfree(buff); + hid_debug_event(hdev, " TOO BIG\n"); + return; + 
} else { + raw_data[0] = report->id; + hid_output_report(report, raw_data); + dump_buff_as_hex(buff, BUFF_SZ, raw_data, raw_size); + hid_debug_event(hdev, buff); + } + + switch (report->id) { + case REPORT_LED_STATE: + /* 1 data byte with GPO state */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_LED_STATE", report->id, raw_size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tGPO state: 0x%02x\n", raw_data[1]); + hid_debug_event(hdev, buff); + break; + case REPORT_BRIGHTNESS: + /* 1 data byte with brightness */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_BRIGHTNESS", report->id, raw_size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tBrightness: 0x%02x\n", raw_data[1]); + hid_debug_event(hdev, buff); + break; + case REPORT_CONTRAST: + /* 1 data byte with contrast */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_CONTRAST", report->id, raw_size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tContrast: 0x%02x\n", raw_data[1]); + hid_debug_event(hdev, buff); + break; + case REPORT_RESET: + /* 2 data bytes with reset duration in ms */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_RESET", report->id, raw_size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tDuration: 0x%02x%02x (%dms)\n", + raw_data[2], raw_data[1], raw_data[2] << 8 | raw_data[1]); + hid_debug_event(hdev, buff); + break; + case REPORT_LCD_CMD: + /* 63 data bytes with LCD commands */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_LCD_CMD", report->id, raw_size-1); + hid_debug_event(hdev, buff); + /* TODO: format decoding */ + break; + case REPORT_LCD_DATA: + /* 63 data bytes with LCD data */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_LCD_CMD", report->id, raw_size-1); + /* TODO: format decoding */ + hid_debug_event(hdev, buff); + break; + case REPORT_LCD_CMD_DATA: + /* 63 data bytes with LCD commands and data */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_LCD_CMD", report->id, raw_size-1); + /* TODO: format decoding */ + hid_debug_event(hdev, buff); + break; + case REPORT_EE_READ: + /* 3 data bytes with read area description */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_EE_READ", report->id, raw_size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); + hid_debug_event(hdev, buff); + break; + case REPORT_EE_WRITE: + /* 3+1..20 data bytes with write area description */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_EE_WRITE", report->id, raw_size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); + hid_debug_event(hdev, buff); + if (raw_data[3] == 0) { + snprintf(buff, BUFF_SZ, "\tNo data\n"); + } else if (raw_data[3] + 4 <= raw_size) { + snprintf(buff, BUFF_SZ, "\tData: "); + hid_debug_event(hdev, buff); + dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); + } else { + snprintf(buff, BUFF_SZ, "\tData overflowed\n"); + } + hid_debug_event(hdev, buff); + break; + case REPORT_ERASE_MEMORY: + case REPORT_BL_ERASE_MEMORY: + /* 3 data bytes with pointer inside erase block */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", 
+ "REPORT_ERASE_MEMORY", report->id, raw_size-1); + hid_debug_event(hdev, buff); + switch (data->addr_sz) { + case 2: + snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x\n", + raw_data[2], raw_data[1]); + break; + case 3: + snprintf(buff, BUFF_SZ, "\tAddress inside 64 byte block: 0x%02x%02x%02x\n", + raw_data[3], raw_data[2], raw_data[1]); + break; + default: + snprintf(buff, BUFF_SZ, "\tNot supported\n"); + } + hid_debug_event(hdev, buff); + break; + case REPORT_READ_MEMORY: + case REPORT_BL_READ_MEMORY: + /* 4 data bytes with read area description */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_READ_MEMORY", report->id, raw_size-1); + hid_debug_event(hdev, buff); + switch (data->addr_sz) { + case 2: + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); + break; + case 3: + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n", + raw_data[3], raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); + break; + default: + snprintf(buff, BUFF_SZ, "\tNot supported\n"); + } + hid_debug_event(hdev, buff); + break; + case REPORT_WRITE_MEMORY: + case REPORT_BL_WRITE_MEMORY: + /* 4+1..32 data bytes with write adrea description */ + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_WRITE_MEMORY", report->id, raw_size-1); + hid_debug_event(hdev, buff); + switch (data->addr_sz) { + case 2: + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); + hid_debug_event(hdev, buff); + if (raw_data[3] == 0) { + snprintf(buff, BUFF_SZ, "\tNo data\n"); + } else if (raw_data[3] + 4 <= raw_size) { + snprintf(buff, BUFF_SZ, "\tData: "); + hid_debug_event(hdev, buff); + dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); + } else { + snprintf(buff, BUFF_SZ, "\tData overflowed\n"); + } + break; + case 3: + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n", + raw_data[3], raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); + hid_debug_event(hdev, buff); + if (raw_data[4] == 0) { + snprintf(buff, BUFF_SZ, "\tNo data\n"); + } else if (raw_data[4] + 5 <= raw_size) { + snprintf(buff, BUFF_SZ, "\tData: "); + hid_debug_event(hdev, buff); + dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]); + } else { + snprintf(buff, BUFF_SZ, "\tData overflowed\n"); + } + break; + default: + snprintf(buff, BUFF_SZ, "\tNot supported\n"); + } + hid_debug_event(hdev, buff); + break; + case REPORT_SPLASH_RESTART: + /* TODO */ + break; + case REPORT_EXIT_KEYBOARD: + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_EXIT_KEYBOARD", report->id, raw_size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n", + raw_data[1] | (raw_data[2] << 8), + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + break; + case REPORT_VERSION: + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_VERSION", report->id, raw_size-1); + hid_debug_event(hdev, buff); + break; + case REPORT_DEVID: + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_DEVID", report->id, raw_size-1); + hid_debug_event(hdev, buff); + break; + case REPORT_SPLASH_SIZE: + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + 
"REPORT_SPLASH_SIZE", report->id, raw_size-1); + hid_debug_event(hdev, buff); + break; + case REPORT_HOOK_VERSION: + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_HOOK_VERSION", report->id, raw_size-1); + hid_debug_event(hdev, buff); + break; + case REPORT_EXIT_FLASHER: + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "REPORT_VERSION", report->id, raw_size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tRestart delay: %dms (0x%02x%02x)\n", + raw_data[1] | (raw_data[2] << 8), + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + break; + default: + snprintf(buff, BUFF_SZ, "out report %s (%d, size=%d)\n", + "", report->id, raw_size-1); + hid_debug_event(hdev, buff); + break; + } + wake_up_interruptible(&hdev->debug_wait); + kfree(buff); +} + +void picolcd_debug_raw_event(struct picolcd_data *data, + struct hid_device *hdev, struct hid_report *report, + u8 *raw_data, int size) +{ + char *buff; + +#define BUFF_SZ 256 + /* Avoid unnecessary overhead if debugfs is disabled */ + if (list_empty(&hdev->debug_list)) + return; + + buff = kmalloc(BUFF_SZ, GFP_ATOMIC); + if (!buff) + return; + + switch (report->id) { + case REPORT_ERROR_CODE: + /* 2 data bytes with affected report and error code */ + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_ERROR_CODE", report->id, size-1); + hid_debug_event(hdev, buff); + if (raw_data[2] < ARRAY_SIZE(error_codes)) + snprintf(buff, BUFF_SZ, "\tError code 0x%02x (%s) in reply to report 0x%02x\n", + raw_data[2], error_codes[raw_data[2]], raw_data[1]); + else + snprintf(buff, BUFF_SZ, "\tError code 0x%02x in reply to report 0x%02x\n", + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + break; + case REPORT_KEY_STATE: + /* 2 data bytes with key state */ + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_KEY_STATE", report->id, size-1); + hid_debug_event(hdev, buff); + if (raw_data[1] == 0) + snprintf(buff, BUFF_SZ, "\tNo key pressed\n"); + else if (raw_data[2] == 0) + snprintf(buff, BUFF_SZ, "\tOne key pressed: 0x%02x (%d)\n", + raw_data[1], raw_data[1]); + else + snprintf(buff, BUFF_SZ, "\tTwo keys pressed: 0x%02x (%d), 0x%02x (%d)\n", + raw_data[1], raw_data[1], raw_data[2], raw_data[2]); + hid_debug_event(hdev, buff); + break; + case REPORT_IR_DATA: + /* Up to 20 byes of IR scancode data */ + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_IR_DATA", report->id, size-1); + hid_debug_event(hdev, buff); + if (raw_data[1] == 0) { + snprintf(buff, BUFF_SZ, "\tUnexpectedly 0 data length\n"); + hid_debug_event(hdev, buff); + } else if (raw_data[1] + 1 <= size) { + snprintf(buff, BUFF_SZ, "\tData length: %d\n\tIR Data: ", + raw_data[1]); + hid_debug_event(hdev, buff); + dump_buff_as_hex(buff, BUFF_SZ, raw_data+2, raw_data[1]); + hid_debug_event(hdev, buff); + } else { + snprintf(buff, BUFF_SZ, "\tOverflowing data length: %d\n", + raw_data[1]-1); + hid_debug_event(hdev, buff); + } + break; + case REPORT_EE_DATA: + /* Data buffer in response to REPORT_EE_READ or REPORT_EE_WRITE */ + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_EE_DATA", report->id, size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); + hid_debug_event(hdev, buff); + if (raw_data[3] == 0) { + snprintf(buff, BUFF_SZ, "\tNo data\n"); + hid_debug_event(hdev, buff); + } else if (raw_data[3] + 4 <= size) { + 
snprintf(buff, BUFF_SZ, "\tData: "); + hid_debug_event(hdev, buff); + dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); + hid_debug_event(hdev, buff); + } else { + snprintf(buff, BUFF_SZ, "\tData overflowed\n"); + hid_debug_event(hdev, buff); + } + break; + case REPORT_MEMORY: + /* Data buffer in response to REPORT_READ_MEMORY or REPORT_WRTIE_MEMORY */ + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_MEMORY", report->id, size-1); + hid_debug_event(hdev, buff); + switch (data->addr_sz) { + case 2: + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x\n", + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[3]); + hid_debug_event(hdev, buff); + if (raw_data[3] == 0) { + snprintf(buff, BUFF_SZ, "\tNo data\n"); + } else if (raw_data[3] + 4 <= size) { + snprintf(buff, BUFF_SZ, "\tData: "); + hid_debug_event(hdev, buff); + dump_buff_as_hex(buff, BUFF_SZ, raw_data+4, raw_data[3]); + } else { + snprintf(buff, BUFF_SZ, "\tData overflowed\n"); + } + break; + case 3: + snprintf(buff, BUFF_SZ, "\tData address: 0x%02x%02x%02x\n", + raw_data[3], raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tData length: %d\n", raw_data[4]); + hid_debug_event(hdev, buff); + if (raw_data[4] == 0) { + snprintf(buff, BUFF_SZ, "\tNo data\n"); + } else if (raw_data[4] + 5 <= size) { + snprintf(buff, BUFF_SZ, "\tData: "); + hid_debug_event(hdev, buff); + dump_buff_as_hex(buff, BUFF_SZ, raw_data+5, raw_data[4]); + } else { + snprintf(buff, BUFF_SZ, "\tData overflowed\n"); + } + break; + default: + snprintf(buff, BUFF_SZ, "\tNot supported\n"); + } + hid_debug_event(hdev, buff); + break; + case REPORT_VERSION: + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_VERSION", report->id, size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tFirmware version: %d.%d\n", + raw_data[2], raw_data[1]); + hid_debug_event(hdev, buff); + break; + case REPORT_BL_ERASE_MEMORY: + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_BL_ERASE_MEMORY", report->id, size-1); + hid_debug_event(hdev, buff); + /* TODO */ + break; + case REPORT_BL_READ_MEMORY: + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_BL_READ_MEMORY", report->id, size-1); + hid_debug_event(hdev, buff); + /* TODO */ + break; + case REPORT_BL_WRITE_MEMORY: + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_BL_WRITE_MEMORY", report->id, size-1); + hid_debug_event(hdev, buff); + /* TODO */ + break; + case REPORT_DEVID: + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_DEVID", report->id, size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tSerial: 0x%02x%02x%02x%02x\n", + raw_data[1], raw_data[2], raw_data[3], raw_data[4]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tType: 0x%02x\n", + raw_data[5]); + hid_debug_event(hdev, buff); + break; + case REPORT_SPLASH_SIZE: + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_SPLASH_SIZE", report->id, size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tTotal splash space: %d\n", + (raw_data[2] << 8) | raw_data[1]); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tUsed splash space: %d\n", + (raw_data[4] << 8) | raw_data[3]); + hid_debug_event(hdev, buff); + break; + case REPORT_HOOK_VERSION: + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "REPORT_HOOK_VERSION", report->id, size-1); + hid_debug_event(hdev, buff); + snprintf(buff, BUFF_SZ, "\tFirmware 
version: %d.%d\n", + raw_data[1], raw_data[2]); + hid_debug_event(hdev, buff); + break; + default: + snprintf(buff, BUFF_SZ, "report %s (%d, size=%d)\n", + "", report->id, size-1); + hid_debug_event(hdev, buff); + break; + } + wake_up_interruptible(&hdev->debug_wait); + kfree(buff); +} + +void picolcd_init_devfs(struct picolcd_data *data, + struct hid_report *eeprom_r, struct hid_report *eeprom_w, + struct hid_report *flash_r, struct hid_report *flash_w, + struct hid_report *reset) +{ + struct hid_device *hdev = data->hdev; + + mutex_init(&data->mutex_flash); + + /* reset */ + if (reset) + data->debug_reset = debugfs_create_file("reset", 0600, + hdev->debug_dir, data, &picolcd_debug_reset_fops); + + /* eeprom */ + if (eeprom_r || eeprom_w) + data->debug_eeprom = debugfs_create_file("eeprom", + (eeprom_w ? S_IWUSR : 0) | (eeprom_r ? S_IRUSR : 0), + hdev->debug_dir, data, &picolcd_debug_eeprom_fops); + + /* flash */ + if (flash_r && flash_r->maxfield == 1 && flash_r->field[0]->report_size == 8) + data->addr_sz = flash_r->field[0]->report_count - 1; + else + data->addr_sz = -1; + if (data->addr_sz == 2 || data->addr_sz == 3) { + data->debug_flash = debugfs_create_file("flash", + (flash_w ? S_IWUSR : 0) | (flash_r ? S_IRUSR : 0), + hdev->debug_dir, data, &picolcd_debug_flash_fops); + } else if (flash_r || flash_w) + hid_warn(hdev, "Unexpected FLASH access reports, please submit rdesc for review\n"); +} + +void picolcd_exit_devfs(struct picolcd_data *data) +{ + struct dentry *dent; + + dent = data->debug_reset; + data->debug_reset = NULL; + if (dent) + debugfs_remove(dent); + dent = data->debug_eeprom; + data->debug_eeprom = NULL; + if (dent) + debugfs_remove(dent); + dent = data->debug_flash; + data->debug_flash = NULL; + if (dent) + debugfs_remove(dent); + mutex_destroy(&data->mutex_flash); +} + diff --git a/trunk/drivers/hid/hid-picolcd_fb.c b/trunk/drivers/hid/hid-picolcd_fb.c new file mode 100644 index 000000000000..0008a512211d --- /dev/null +++ b/trunk/drivers/hid/hid-picolcd_fb.c @@ -0,0 +1,615 @@ +/*************************************************************************** + * Copyright (C) 2010-2012 by Bruno Prémont * + * * + * Based on Logitech G13 driver (v0.4) * + * Copyright (C) 2009 by Rick L. Vinyard, Jr. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, version 2 of the License. * + * * + * This driver is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this software. If not see . * + ***************************************************************************/ + +#include +#include +#include "usbhid/usbhid.h" +#include + +#include +#include + +#include "hid-picolcd.h" + +/* Framebuffer + * + * The PicoLCD use a Topway LCD module of 256x64 pixel + * This display area is tiled over 4 controllers with 8 tiles + * each. Each tile has 8x64 pixel, each data byte representing + * a 1-bit wide vertical line of the tile. + * + * The display can be updated at a tile granularity. 
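+ * Each tile therefore carries 64 data bytes (one byte per 8-pixel-tall
+ * column), and a full screen refresh amounts to 4 chips x 8 tiles = 32
+ * tile updates.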
+ * + * Chip 1 Chip 2 Chip 3 Chip 4 + * +----------------+----------------+----------------+----------------+ + * | Tile 1 | Tile 1 | Tile 1 | Tile 1 | + * +----------------+----------------+----------------+----------------+ + * | Tile 2 | Tile 2 | Tile 2 | Tile 2 | + * +----------------+----------------+----------------+----------------+ + * ... + * +----------------+----------------+----------------+----------------+ + * | Tile 8 | Tile 8 | Tile 8 | Tile 8 | + * +----------------+----------------+----------------+----------------+ + */ +#define PICOLCDFB_NAME "picolcdfb" +#define PICOLCDFB_WIDTH (256) +#define PICOLCDFB_HEIGHT (64) +#define PICOLCDFB_SIZE (PICOLCDFB_WIDTH * PICOLCDFB_HEIGHT / 8) + +#define PICOLCDFB_UPDATE_RATE_LIMIT 10 +#define PICOLCDFB_UPDATE_RATE_DEFAULT 2 + +/* Framebuffer visual structures */ +static const struct fb_fix_screeninfo picolcdfb_fix = { + .id = PICOLCDFB_NAME, + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_MONO01, + .xpanstep = 0, + .ypanstep = 0, + .ywrapstep = 0, + .line_length = PICOLCDFB_WIDTH / 8, + .accel = FB_ACCEL_NONE, +}; + +static const struct fb_var_screeninfo picolcdfb_var = { + .xres = PICOLCDFB_WIDTH, + .yres = PICOLCDFB_HEIGHT, + .xres_virtual = PICOLCDFB_WIDTH, + .yres_virtual = PICOLCDFB_HEIGHT, + .width = 103, + .height = 26, + .bits_per_pixel = 1, + .grayscale = 1, + .red = { + .offset = 0, + .length = 1, + .msb_right = 0, + }, + .green = { + .offset = 0, + .length = 1, + .msb_right = 0, + }, + .blue = { + .offset = 0, + .length = 1, + .msb_right = 0, + }, + .transp = { + .offset = 0, + .length = 0, + .msb_right = 0, + }, +}; + +/* Send a given tile to PicoLCD */ +static int picolcd_fb_send_tile(struct picolcd_data *data, u8 *vbitmap, + int chip, int tile) +{ + struct hid_report *report1, *report2; + unsigned long flags; + u8 *tdata; + int i; + + report1 = picolcd_out_report(REPORT_LCD_CMD_DATA, data->hdev); + if (!report1 || report1->maxfield != 1) + return -ENODEV; + report2 = picolcd_out_report(REPORT_LCD_DATA, data->hdev); + if (!report2 || report2->maxfield != 1) + return -ENODEV; + + spin_lock_irqsave(&data->lock, flags); + if ((data->status & PICOLCD_FAILED)) { + spin_unlock_irqrestore(&data->lock, flags); + return -ENODEV; + } + hid_set_field(report1->field[0], 0, chip << 2); + hid_set_field(report1->field[0], 1, 0x02); + hid_set_field(report1->field[0], 2, 0x00); + hid_set_field(report1->field[0], 3, 0x00); + hid_set_field(report1->field[0], 4, 0xb8 | tile); + hid_set_field(report1->field[0], 5, 0x00); + hid_set_field(report1->field[0], 6, 0x00); + hid_set_field(report1->field[0], 7, 0x40); + hid_set_field(report1->field[0], 8, 0x00); + hid_set_field(report1->field[0], 9, 0x00); + hid_set_field(report1->field[0], 10, 32); + + hid_set_field(report2->field[0], 0, (chip << 2) | 0x01); + hid_set_field(report2->field[0], 1, 0x00); + hid_set_field(report2->field[0], 2, 0x00); + hid_set_field(report2->field[0], 3, 32); + + tdata = vbitmap + (tile * 4 + chip) * 64; + for (i = 0; i < 64; i++) + if (i < 32) + hid_set_field(report1->field[0], 11 + i, tdata[i]); + else + hid_set_field(report2->field[0], 4 + i - 32, tdata[i]); + + usbhid_submit_report(data->hdev, report1, USB_DIR_OUT); + usbhid_submit_report(data->hdev, report2, USB_DIR_OUT); + spin_unlock_irqrestore(&data->lock, flags); + return 0; +} + +/* Translate a single tile*/ +static int picolcd_fb_update_tile(u8 *vbitmap, const u8 *bitmap, int bpp, + int chip, int tile) +{ + int i, b, changed = 0; + u8 tdata[64]; + u8 *vdata = vbitmap + (tile * 4 + chip) * 64; + + if 
(bpp == 1) { + for (b = 7; b >= 0; b--) { + const u8 *bdata = bitmap + tile * 256 + chip * 8 + b * 32; + for (i = 0; i < 64; i++) { + tdata[i] <<= 1; + tdata[i] |= (bdata[i/8] >> (i % 8)) & 0x01; + } + } + } else if (bpp == 8) { + for (b = 7; b >= 0; b--) { + const u8 *bdata = bitmap + (tile * 256 + chip * 8 + b * 32) * 8; + for (i = 0; i < 64; i++) { + tdata[i] <<= 1; + tdata[i] |= (bdata[i] & 0x80) ? 0x01 : 0x00; + } + } + } else { + /* Oops, we should never get here! */ + WARN_ON(1); + return 0; + } + + for (i = 0; i < 64; i++) + if (tdata[i] != vdata[i]) { + changed = 1; + vdata[i] = tdata[i]; + } + return changed; +} + +void picolcd_fb_refresh(struct picolcd_data *data) +{ + if (data->fb_info) + schedule_delayed_work(&data->fb_info->deferred_work, 0); +} + +/* Reconfigure LCD display */ +int picolcd_fb_reset(struct picolcd_data *data, int clear) +{ + struct hid_report *report = picolcd_out_report(REPORT_LCD_CMD, data->hdev); + struct picolcd_fb_data *fbdata = data->fb_info->par; + int i, j; + unsigned long flags; + static const u8 mapcmd[8] = { 0x00, 0x02, 0x00, 0x64, 0x3f, 0x00, 0x64, 0xc0 }; + + if (!report || report->maxfield != 1) + return -ENODEV; + + spin_lock_irqsave(&data->lock, flags); + for (i = 0; i < 4; i++) { + for (j = 0; j < report->field[0]->maxusage; j++) + if (j == 0) + hid_set_field(report->field[0], j, i << 2); + else if (j < sizeof(mapcmd)) + hid_set_field(report->field[0], j, mapcmd[j]); + else + hid_set_field(report->field[0], j, 0); + usbhid_submit_report(data->hdev, report, USB_DIR_OUT); + } + spin_unlock_irqrestore(&data->lock, flags); + + if (clear) { + memset(fbdata->vbitmap, 0, PICOLCDFB_SIZE); + memset(fbdata->bitmap, 0, PICOLCDFB_SIZE*fbdata->bpp); + } + fbdata->force = 1; + + /* schedule first output of framebuffer */ + if (fbdata->ready) + schedule_delayed_work(&data->fb_info->deferred_work, 0); + else + fbdata->ready = 1; + + return 0; +} + +/* Update fb_vbitmap from the screen_base and send changed tiles to device */ +static void picolcd_fb_update(struct fb_info *info) +{ + int chip, tile, n; + unsigned long flags; + struct picolcd_fb_data *fbdata = info->par; + struct picolcd_data *data; + + mutex_lock(&info->lock); + + spin_lock_irqsave(&fbdata->lock, flags); + if (!fbdata->ready && fbdata->picolcd) + picolcd_fb_reset(fbdata->picolcd, 0); + spin_unlock_irqrestore(&fbdata->lock, flags); + + /* + * Translate the framebuffer into the format needed by the PicoLCD. + * See display layout above. + * Do this one tile after the other and push those tiles that changed. + * + * Wait for our IO to complete as otherwise we might flood the queue! 
+ */ + n = 0; + for (chip = 0; chip < 4; chip++) + for (tile = 0; tile < 8; tile++) { + if (!fbdata->force && !picolcd_fb_update_tile( + fbdata->vbitmap, fbdata->bitmap, + fbdata->bpp, chip, tile)) + continue; + n += 2; + if (n >= HID_OUTPUT_FIFO_SIZE / 2) { + spin_lock_irqsave(&fbdata->lock, flags); + data = fbdata->picolcd; + spin_unlock_irqrestore(&fbdata->lock, flags); + mutex_unlock(&info->lock); + if (!data) + return; + usbhid_wait_io(data->hdev); + mutex_lock(&info->lock); + n = 0; + } + spin_lock_irqsave(&fbdata->lock, flags); + data = fbdata->picolcd; + spin_unlock_irqrestore(&fbdata->lock, flags); + if (!data || picolcd_fb_send_tile(data, + fbdata->vbitmap, chip, tile)) + goto out; + } + fbdata->force = false; + if (n) { + spin_lock_irqsave(&fbdata->lock, flags); + data = fbdata->picolcd; + spin_unlock_irqrestore(&fbdata->lock, flags); + mutex_unlock(&info->lock); + if (data) + usbhid_wait_io(data->hdev); + return; + } +out: + mutex_unlock(&info->lock); +} + +/* Stub to call the system default and update the image on the picoLCD */ +static void picolcd_fb_fillrect(struct fb_info *info, + const struct fb_fillrect *rect) +{ + if (!info->par) + return; + sys_fillrect(info, rect); + + schedule_delayed_work(&info->deferred_work, 0); +} + +/* Stub to call the system default and update the image on the picoLCD */ +static void picolcd_fb_copyarea(struct fb_info *info, + const struct fb_copyarea *area) +{ + if (!info->par) + return; + sys_copyarea(info, area); + + schedule_delayed_work(&info->deferred_work, 0); +} + +/* Stub to call the system default and update the image on the picoLCD */ +static void picolcd_fb_imageblit(struct fb_info *info, const struct fb_image *image) +{ + if (!info->par) + return; + sys_imageblit(info, image); + + schedule_delayed_work(&info->deferred_work, 0); +} + +/* + * this is the slow path from userspace. they can seek and write to + * the fb. it's inefficient to do anything less than a full screen draw + */ +static ssize_t picolcd_fb_write(struct fb_info *info, const char __user *buf, + size_t count, loff_t *ppos) +{ + ssize_t ret; + if (!info->par) + return -ENODEV; + ret = fb_sys_write(info, buf, count, ppos); + if (ret >= 0) + schedule_delayed_work(&info->deferred_work, 0); + return ret; +} + +static int picolcd_fb_blank(int blank, struct fb_info *info) +{ + /* We let fb notification do this for us via lcd/backlight device */ + return 0; +} + +static void picolcd_fb_destroy(struct fb_info *info) +{ + struct picolcd_fb_data *fbdata = info->par; + + /* make sure no work is deferred */ + fb_deferred_io_cleanup(info); + + /* No thridparty should ever unregister our framebuffer! 
*/ + WARN_ON(fbdata->picolcd != NULL); + + vfree((u8 *)info->fix.smem_start); + framebuffer_release(info); +} + +static int picolcd_fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +{ + __u32 bpp = var->bits_per_pixel; + __u32 activate = var->activate; + + /* only allow 1/8 bit depth (8-bit is grayscale) */ + *var = picolcdfb_var; + var->activate = activate; + if (bpp >= 8) { + var->bits_per_pixel = 8; + var->red.length = 8; + var->green.length = 8; + var->blue.length = 8; + } else { + var->bits_per_pixel = 1; + var->red.length = 1; + var->green.length = 1; + var->blue.length = 1; + } + return 0; +} + +static int picolcd_set_par(struct fb_info *info) +{ + struct picolcd_fb_data *fbdata = info->par; + u8 *tmp_fb, *o_fb; + if (info->var.bits_per_pixel == fbdata->bpp) + return 0; + /* switch between 1/8 bit depths */ + if (info->var.bits_per_pixel != 1 && info->var.bits_per_pixel != 8) + return -EINVAL; + + o_fb = fbdata->bitmap; + tmp_fb = kmalloc(PICOLCDFB_SIZE*info->var.bits_per_pixel, GFP_KERNEL); + if (!tmp_fb) + return -ENOMEM; + + /* translate FB content to new bits-per-pixel */ + if (info->var.bits_per_pixel == 1) { + int i, b; + for (i = 0; i < PICOLCDFB_SIZE; i++) { + u8 p = 0; + for (b = 0; b < 8; b++) { + p <<= 1; + p |= o_fb[i*8+b] ? 0x01 : 0x00; + } + tmp_fb[i] = p; + } + memcpy(o_fb, tmp_fb, PICOLCDFB_SIZE); + info->fix.visual = FB_VISUAL_MONO01; + info->fix.line_length = PICOLCDFB_WIDTH / 8; + } else { + int i; + memcpy(tmp_fb, o_fb, PICOLCDFB_SIZE); + for (i = 0; i < PICOLCDFB_SIZE * 8; i++) + o_fb[i] = tmp_fb[i/8] & (0x01 << (7 - i % 8)) ? 0xff : 0x00; + info->fix.visual = FB_VISUAL_DIRECTCOLOR; + info->fix.line_length = PICOLCDFB_WIDTH; + } + + kfree(tmp_fb); + fbdata->bpp = info->var.bits_per_pixel; + return 0; +} + +/* Note this can't be const because of struct fb_info definition */ +static struct fb_ops picolcdfb_ops = { + .owner = THIS_MODULE, + .fb_destroy = picolcd_fb_destroy, + .fb_read = fb_sys_read, + .fb_write = picolcd_fb_write, + .fb_blank = picolcd_fb_blank, + .fb_fillrect = picolcd_fb_fillrect, + .fb_copyarea = picolcd_fb_copyarea, + .fb_imageblit = picolcd_fb_imageblit, + .fb_check_var = picolcd_fb_check_var, + .fb_set_par = picolcd_set_par, +}; + + +/* Callback from deferred IO workqueue */ +static void picolcd_fb_deferred_io(struct fb_info *info, struct list_head *pagelist) +{ + picolcd_fb_update(info); +} + +static const struct fb_deferred_io picolcd_fb_defio = { + .delay = HZ / PICOLCDFB_UPDATE_RATE_DEFAULT, + .deferred_io = picolcd_fb_deferred_io, +}; + + +/* + * The "fb_update_rate" sysfs attribute + */ +static ssize_t picolcd_fb_update_rate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct picolcd_data *data = dev_get_drvdata(dev); + struct picolcd_fb_data *fbdata = data->fb_info->par; + unsigned i, fb_update_rate = fbdata->update_rate; + size_t ret = 0; + + for (i = 1; i <= PICOLCDFB_UPDATE_RATE_LIMIT; i++) + if (ret >= PAGE_SIZE) + break; + else if (i == fb_update_rate) + ret += snprintf(buf+ret, PAGE_SIZE-ret, "[%u] ", i); + else + ret += snprintf(buf+ret, PAGE_SIZE-ret, "%u ", i); + if (ret > 0) + buf[min(ret, (size_t)PAGE_SIZE)-1] = '\n'; + return ret; +} + +static ssize_t picolcd_fb_update_rate_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct picolcd_data *data = dev_get_drvdata(dev); + struct picolcd_fb_data *fbdata = data->fb_info->par; + int i; + unsigned u; + + if (count < 1 || count > 10) + return -EINVAL; + + i = sscanf(buf, "%u", &u); + if 
(i != 1) + return -EINVAL; + + if (u > PICOLCDFB_UPDATE_RATE_LIMIT) + return -ERANGE; + else if (u == 0) + u = PICOLCDFB_UPDATE_RATE_DEFAULT; + + fbdata->update_rate = u; + data->fb_info->fbdefio->delay = HZ / fbdata->update_rate; + return count; +} + +static DEVICE_ATTR(fb_update_rate, 0666, picolcd_fb_update_rate_show, + picolcd_fb_update_rate_store); + +/* initialize Framebuffer device */ +int picolcd_init_framebuffer(struct picolcd_data *data) +{ + struct device *dev = &data->hdev->dev; + struct fb_info *info = NULL; + struct picolcd_fb_data *fbdata = NULL; + int i, error = -ENOMEM; + u32 *palette; + + /* The extra memory is: + * - 256*u32 for pseudo_palette + * - struct fb_deferred_io + */ + info = framebuffer_alloc(256 * sizeof(u32) + + sizeof(struct fb_deferred_io) + + sizeof(struct picolcd_fb_data) + + PICOLCDFB_SIZE, dev); + if (info == NULL) { + dev_err(dev, "failed to allocate a framebuffer\n"); + goto err_nomem; + } + + info->fbdefio = info->par; + *info->fbdefio = picolcd_fb_defio; + info->par += sizeof(struct fb_deferred_io); + palette = info->par; + info->par += 256 * sizeof(u32); + for (i = 0; i < 256; i++) + palette[i] = i > 0 && i < 16 ? 0xff : 0; + info->pseudo_palette = palette; + info->fbops = &picolcdfb_ops; + info->var = picolcdfb_var; + info->fix = picolcdfb_fix; + info->fix.smem_len = PICOLCDFB_SIZE*8; + info->flags = FBINFO_FLAG_DEFAULT; + + fbdata = info->par; + spin_lock_init(&fbdata->lock); + fbdata->picolcd = data; + fbdata->update_rate = PICOLCDFB_UPDATE_RATE_DEFAULT; + fbdata->bpp = picolcdfb_var.bits_per_pixel; + fbdata->force = 1; + fbdata->vbitmap = info->par + sizeof(struct picolcd_fb_data); + fbdata->bitmap = vmalloc(PICOLCDFB_SIZE*8); + if (fbdata->bitmap == NULL) { + dev_err(dev, "can't get a free page for framebuffer\n"); + goto err_nomem; + } + info->screen_base = (char __force __iomem *)fbdata->bitmap; + info->fix.smem_start = (unsigned long)fbdata->bitmap; + memset(fbdata->vbitmap, 0xff, PICOLCDFB_SIZE); + data->fb_info = info; + + error = picolcd_fb_reset(data, 1); + if (error) { + dev_err(dev, "failed to configure display\n"); + goto err_cleanup; + } + + error = device_create_file(dev, &dev_attr_fb_update_rate); + if (error) { + dev_err(dev, "failed to create sysfs attributes\n"); + goto err_cleanup; + } + + fb_deferred_io_init(info); + error = register_framebuffer(info); + if (error) { + dev_err(dev, "failed to register framebuffer\n"); + goto err_sysfs; + } + return 0; + +err_sysfs: + device_remove_file(dev, &dev_attr_fb_update_rate); + fb_deferred_io_cleanup(info); +err_cleanup: + data->fb_info = NULL; + +err_nomem: + if (fbdata) + vfree(fbdata->bitmap); + framebuffer_release(info); + return error; +} + +void picolcd_exit_framebuffer(struct picolcd_data *data) +{ + struct fb_info *info = data->fb_info; + struct picolcd_fb_data *fbdata = info->par; + unsigned long flags; + + device_remove_file(&data->hdev->dev, &dev_attr_fb_update_rate); + + /* disconnect framebuffer from HID dev */ + spin_lock_irqsave(&fbdata->lock, flags); + fbdata->picolcd = NULL; + spin_unlock_irqrestore(&fbdata->lock, flags); + + /* make sure there is no running update - thus that fbdata->picolcd + * once obtained under lock is guaranteed not to get free() under + * the feet of the deferred work */ + flush_delayed_work_sync(&info->deferred_work); + + data->fb_info = NULL; + unregister_framebuffer(info); +} diff --git a/trunk/drivers/hid/hid-picolcd_lcd.c b/trunk/drivers/hid/hid-picolcd_lcd.c new file mode 100644 index 000000000000..2d0ddc5ac65f --- /dev/null +++ 
b/trunk/drivers/hid/hid-picolcd_lcd.c @@ -0,0 +1,107 @@ +/*************************************************************************** + * Copyright (C) 2010-2012 by Bruno Prémont * + * * + * Based on Logitech G13 driver (v0.4) * + * Copyright (C) 2009 by Rick L. Vinyard, Jr. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, version 2 of the License. * + * * + * This driver is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this software. If not see . * + ***************************************************************************/ + +#include +#include "usbhid/usbhid.h" +#include + +#include +#include + +#include "hid-picolcd.h" + +/* + * lcd class device + */ +static int picolcd_get_contrast(struct lcd_device *ldev) +{ + struct picolcd_data *data = lcd_get_data(ldev); + return data->lcd_contrast; +} + +static int picolcd_set_contrast(struct lcd_device *ldev, int contrast) +{ + struct picolcd_data *data = lcd_get_data(ldev); + struct hid_report *report = picolcd_out_report(REPORT_CONTRAST, data->hdev); + unsigned long flags; + + if (!report || report->maxfield != 1 || report->field[0]->report_count != 1) + return -ENODEV; + + data->lcd_contrast = contrast & 0x0ff; + spin_lock_irqsave(&data->lock, flags); + hid_set_field(report->field[0], 0, data->lcd_contrast); + if (!(data->status & PICOLCD_FAILED)) + usbhid_submit_report(data->hdev, report, USB_DIR_OUT); + spin_unlock_irqrestore(&data->lock, flags); + return 0; +} + +static int picolcd_check_lcd_fb(struct lcd_device *ldev, struct fb_info *fb) +{ + return fb && fb == picolcd_fbinfo((struct picolcd_data *)lcd_get_data(ldev)); +} + +static struct lcd_ops picolcd_lcdops = { + .get_contrast = picolcd_get_contrast, + .set_contrast = picolcd_set_contrast, + .check_fb = picolcd_check_lcd_fb, +}; + +int picolcd_init_lcd(struct picolcd_data *data, struct hid_report *report) +{ + struct device *dev = &data->hdev->dev; + struct lcd_device *ldev; + + if (!report) + return -ENODEV; + if (report->maxfield != 1 || report->field[0]->report_count != 1 || + report->field[0]->report_size != 8) { + dev_err(dev, "unsupported CONTRAST report"); + return -EINVAL; + } + + ldev = lcd_device_register(dev_name(dev), dev, data, &picolcd_lcdops); + if (IS_ERR(ldev)) { + dev_err(dev, "failed to register LCD\n"); + return PTR_ERR(ldev); + } + ldev->props.max_contrast = 0x0ff; + data->lcd_contrast = 0xe5; + data->lcd = ldev; + picolcd_set_contrast(ldev, 0xe5); + return 0; +} + +void picolcd_exit_lcd(struct picolcd_data *data) +{ + struct lcd_device *ldev = data->lcd; + + data->lcd = NULL; + if (ldev) + lcd_device_unregister(ldev); +} + +int picolcd_resume_lcd(struct picolcd_data *data) +{ + if (!data->lcd) + return 0; + return picolcd_set_contrast(data->lcd, data->lcd_contrast); +} + diff --git a/trunk/drivers/hid/hid-picolcd_leds.c b/trunk/drivers/hid/hid-picolcd_leds.c new file mode 100644 index 000000000000..28cb6a4f9634 --- /dev/null +++ b/trunk/drivers/hid/hid-picolcd_leds.c @@ -0,0 +1,175 @@ +/*************************************************************************** + * Copyright (C) 2010-2012 by Bruno Prémont * + * * + * Based on Logitech G13 
driver (v0.4) * + * Copyright (C) 2009 by Rick L. Vinyard, Jr. * + * * + * This program is free software: you can redistribute it and/or modify * + * it under the terms of the GNU General Public License as published by * + * the Free Software Foundation, version 2 of the License. * + * * + * This driver is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without even the implied warranty of * + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * + * General Public License for more details. * + * * + * You should have received a copy of the GNU General Public License * + * along with this software. If not see . * + ***************************************************************************/ + +#include +#include +#include +#include "hid-ids.h" +#include "usbhid/usbhid.h" +#include + +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include +#include + +#include "hid-picolcd.h" + + +void picolcd_leds_set(struct picolcd_data *data) +{ + struct hid_report *report; + unsigned long flags; + + if (!data->led[0]) + return; + report = picolcd_out_report(REPORT_LED_STATE, data->hdev); + if (!report || report->maxfield != 1 || report->field[0]->report_count != 1) + return; + + spin_lock_irqsave(&data->lock, flags); + hid_set_field(report->field[0], 0, data->led_state); + if (!(data->status & PICOLCD_FAILED)) + usbhid_submit_report(data->hdev, report, USB_DIR_OUT); + spin_unlock_irqrestore(&data->lock, flags); +} + +static void picolcd_led_set_brightness(struct led_classdev *led_cdev, + enum led_brightness value) +{ + struct device *dev; + struct hid_device *hdev; + struct picolcd_data *data; + int i, state = 0; + + dev = led_cdev->dev->parent; + hdev = container_of(dev, struct hid_device, dev); + data = hid_get_drvdata(hdev); + if (!data) + return; + for (i = 0; i < 8; i++) { + if (led_cdev != data->led[i]) + continue; + state = (data->led_state >> i) & 1; + if (value == LED_OFF && state) { + data->led_state &= ~(1 << i); + picolcd_leds_set(data); + } else if (value != LED_OFF && !state) { + data->led_state |= 1 << i; + picolcd_leds_set(data); + } + break; + } +} + +static enum led_brightness picolcd_led_get_brightness(struct led_classdev *led_cdev) +{ + struct device *dev; + struct hid_device *hdev; + struct picolcd_data *data; + int i, value = 0; + + dev = led_cdev->dev->parent; + hdev = container_of(dev, struct hid_device, dev); + data = hid_get_drvdata(hdev); + for (i = 0; i < 8; i++) + if (led_cdev == data->led[i]) { + value = (data->led_state >> i) & 1; + break; + } + return value ? 
LED_FULL : LED_OFF; +} + +int picolcd_init_leds(struct picolcd_data *data, struct hid_report *report) +{ + struct device *dev = &data->hdev->dev; + struct led_classdev *led; + size_t name_sz = strlen(dev_name(dev)) + 8; + char *name; + int i, ret = 0; + + if (!report) + return -ENODEV; + if (report->maxfield != 1 || report->field[0]->report_count != 1 || + report->field[0]->report_size != 8) { + dev_err(dev, "unsupported LED_STATE report"); + return -EINVAL; + } + + for (i = 0; i < 8; i++) { + led = kzalloc(sizeof(struct led_classdev)+name_sz, GFP_KERNEL); + if (!led) { + dev_err(dev, "can't allocate memory for LED %d\n", i); + ret = -ENOMEM; + goto err; + } + name = (void *)(&led[1]); + snprintf(name, name_sz, "%s::GPO%d", dev_name(dev), i); + led->name = name; + led->brightness = 0; + led->max_brightness = 1; + led->brightness_get = picolcd_led_get_brightness; + led->brightness_set = picolcd_led_set_brightness; + + data->led[i] = led; + ret = led_classdev_register(dev, data->led[i]); + if (ret) { + data->led[i] = NULL; + kfree(led); + dev_err(dev, "can't register LED %d\n", i); + goto err; + } + } + return 0; +err: + for (i = 0; i < 8; i++) + if (data->led[i]) { + led = data->led[i]; + data->led[i] = NULL; + led_classdev_unregister(led); + kfree(led); + } + return ret; +} + +void picolcd_exit_leds(struct picolcd_data *data) +{ + struct led_classdev *led; + int i; + + for (i = 0; i < 8; i++) { + led = data->led[i]; + data->led[i] = NULL; + if (!led) + continue; + led_classdev_unregister(led); + kfree(led); + } +} + + diff --git a/trunk/drivers/hid/hid-primax.c b/trunk/drivers/hid/hid-primax.c index 4d3c60d88318..c15adb0c98a1 100644 --- a/trunk/drivers/hid/hid-primax.c +++ b/trunk/drivers/hid/hid-primax.c @@ -64,29 +64,6 @@ static int px_raw_event(struct hid_device *hid, struct hid_report *report, return 0; } -static int px_probe(struct hid_device *hid, const struct hid_device_id *id) -{ - int ret; - - ret = hid_parse(hid); - if (ret) { - hid_err(hid, "parse failed\n"); - goto fail; - } - - ret = hid_hw_start(hid, HID_CONNECT_DEFAULT); - if (ret) - hid_err(hid, "hw start failed\n"); - -fail: - return ret; -} - -static void px_remove(struct hid_device *hid) -{ - hid_hw_stop(hid); -} - static const struct hid_device_id px_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, { } @@ -97,8 +74,6 @@ static struct hid_driver px_driver = { .name = "primax", .id_table = px_devices, .raw_event = px_raw_event, - .probe = px_probe, - .remove = px_remove, }; static int __init px_init(void) diff --git a/trunk/drivers/hid/hid-prodikeys.c b/trunk/drivers/hid/hid-prodikeys.c index b71b77ab0dc7..ec8ca3336315 100644 --- a/trunk/drivers/hid/hid-prodikeys.c +++ b/trunk/drivers/hid/hid-prodikeys.c @@ -105,7 +105,7 @@ static ssize_t show_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel); @@ -118,7 +118,7 @@ static ssize_t store_channel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); unsigned channel = 0; @@ -142,7 +142,7 @@ static ssize_t show_sustain(struct device *dev, struct device_attribute 
*attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain); @@ -155,7 +155,7 @@ static ssize_t store_sustain(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); unsigned sustain = 0; @@ -181,7 +181,7 @@ static ssize_t show_octave(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave); @@ -194,7 +194,7 @@ static ssize_t store_octave(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); int octave = 0; @@ -759,7 +759,7 @@ static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); struct pcmidi_snd *pm; pm = pk->pm; @@ -777,7 +777,7 @@ static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi, static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); int ret = 0; if (1 == pk->pm->ifnum) { @@ -858,7 +858,7 @@ static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) static void pk_remove(struct hid_device *hdev) { - struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); + struct pk_device *pk = hid_get_drvdata(hdev); struct pcmidi_snd *pm; pm = pk->pm; diff --git a/trunk/drivers/hid/hid-ps3remote.c b/trunk/drivers/hid/hid-ps3remote.c new file mode 100644 index 000000000000..03811e539d71 --- /dev/null +++ b/trunk/drivers/hid/hid-ps3remote.c @@ -0,0 +1,215 @@ +/* + * HID driver for Sony PS3 BD Remote Control + * + * Copyright (c) 2012 David Dillow + * Based on a blend of the bluez fakehid user-space code by Marcel Holtmann + * and other kernel HID drivers. + */ + +/* + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +/* NOTE: in order for the Sony PS3 BD Remote Control to be found by + * a Bluetooth host, the key combination Start+Enter has to be kept pressed + * for about 7 seconds with the Bluetooth Host Controller in discovering mode. + * + * There will be no PIN request from the device. 
+ */ + +#include +#include +#include + +#include "hid-ids.h" + +static __u8 ps3remote_rdesc[] = { + 0x05, 0x01, /* GUsagePage Generic Desktop */ + 0x09, 0x05, /* LUsage 0x05 [Game Pad] */ + 0xA1, 0x01, /* MCollection Application (mouse, keyboard) */ + + /* Use collection 1 for joypad buttons */ + 0xA1, 0x02, /* MCollection Logical (interrelated data) */ + + /* Ignore the 1st byte, maybe it is used for a controller + * number but it's not needed for correct operation */ + 0x75, 0x08, /* GReportSize 0x08 [8] */ + 0x95, 0x01, /* GReportCount 0x01 [1] */ + 0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */ + + /* Bytes from 2nd to 4th are a bitmap for joypad buttons, for these + * buttons multiple keypresses are allowed */ + 0x05, 0x09, /* GUsagePage Button */ + 0x19, 0x01, /* LUsageMinimum 0x01 [Button 1 (primary/trigger)] */ + 0x29, 0x18, /* LUsageMaximum 0x18 [Button 24] */ + 0x14, /* GLogicalMinimum [0] */ + 0x25, 0x01, /* GLogicalMaximum 0x01 [1] */ + 0x75, 0x01, /* GReportSize 0x01 [1] */ + 0x95, 0x18, /* GReportCount 0x18 [24] */ + 0x81, 0x02, /* MInput 0x02 (Data[0] Var[1] Abs[2]) */ + + 0xC0, /* MEndCollection */ + + /* Use collection 2 for remote control buttons */ + 0xA1, 0x02, /* MCollection Logical (interrelated data) */ + + /* 5th byte is used for remote control buttons */ + 0x05, 0x09, /* GUsagePage Button */ + 0x18, /* LUsageMinimum [No button pressed] */ + 0x29, 0xFE, /* LUsageMaximum 0xFE [Button 254] */ + 0x14, /* GLogicalMinimum [0] */ + 0x26, 0xFE, 0x00, /* GLogicalMaximum 0x00FE [254] */ + 0x75, 0x08, /* GReportSize 0x08 [8] */ + 0x95, 0x01, /* GReportCount 0x01 [1] */ + 0x80, /* MInput */ + + /* Ignore bytes from 6th to 11th, 6th to 10th are always constant at + * 0xff and 11th is for press indication */ + 0x75, 0x08, /* GReportSize 0x08 [8] */ + 0x95, 0x06, /* GReportCount 0x06 [6] */ + 0x81, 0x01, /* MInput 0x01 (Const[0] Arr[1] Abs[2]) */ + + /* 12th byte is for battery strength */ + 0x05, 0x06, /* GUsagePage Generic Device Controls */ + 0x09, 0x20, /* LUsage 0x20 [Battery Strength] */ + 0x14, /* GLogicalMinimum [0] */ + 0x25, 0x05, /* GLogicalMaximum 0x05 [5] */ + 0x75, 0x08, /* GReportSize 0x08 [8] */ + 0x95, 0x01, /* GReportCount 0x01 [1] */ + 0x81, 0x02, /* MInput 0x02 (Data[0] Var[1] Abs[2]) */ + + 0xC0, /* MEndCollection */ + + 0xC0 /* MEndCollection [Game Pad] */ +}; + +static const unsigned int ps3remote_keymap_joypad_buttons[] = { + [0x01] = KEY_SELECT, + [0x02] = BTN_THUMBL, /* L3 */ + [0x03] = BTN_THUMBR, /* R3 */ + [0x04] = BTN_START, + [0x05] = KEY_UP, + [0x06] = KEY_RIGHT, + [0x07] = KEY_DOWN, + [0x08] = KEY_LEFT, + [0x09] = BTN_TL2, /* L2 */ + [0x0a] = BTN_TR2, /* R2 */ + [0x0b] = BTN_TL, /* L1 */ + [0x0c] = BTN_TR, /* R1 */ + [0x0d] = KEY_OPTION, /* options/triangle */ + [0x0e] = KEY_BACK, /* back/circle */ + [0x0f] = BTN_0, /* cross */ + [0x10] = KEY_SCREEN, /* view/square */ + [0x11] = KEY_HOMEPAGE, /* PS button */ + [0x14] = KEY_ENTER, +}; +static const unsigned int ps3remote_keymap_remote_buttons[] = { + [0x00] = KEY_1, + [0x01] = KEY_2, + [0x02] = KEY_3, + [0x03] = KEY_4, + [0x04] = KEY_5, + [0x05] = KEY_6, + [0x06] = KEY_7, + [0x07] = KEY_8, + [0x08] = KEY_9, + [0x09] = KEY_0, + [0x0e] = KEY_ESC, /* return */ + [0x0f] = KEY_CLEAR, + [0x16] = KEY_EJECTCD, + [0x1a] = KEY_MENU, /* top menu */ + [0x28] = KEY_TIME, + [0x30] = KEY_PREVIOUS, + [0x31] = KEY_NEXT, + [0x32] = KEY_PLAY, + [0x33] = KEY_REWIND, /* scan back */ + [0x34] = KEY_FORWARD, /* scan forward */ + [0x38] = KEY_STOP, + [0x39] = KEY_PAUSE, + [0x40] = KEY_CONTEXT_MENU, /* pop up/menu 
*/ + [0x60] = KEY_FRAMEBACK, /* slow/step back */ + [0x61] = KEY_FRAMEFORWARD, /* slow/step forward */ + [0x63] = KEY_SUBTITLE, + [0x64] = KEY_AUDIO, + [0x65] = KEY_ANGLE, + [0x70] = KEY_INFO, /* display */ + [0x80] = KEY_BLUE, + [0x81] = KEY_RED, + [0x82] = KEY_GREEN, + [0x83] = KEY_YELLOW, +}; + +static __u8 *ps3remote_fixup(struct hid_device *hdev, __u8 *rdesc, + unsigned int *rsize) +{ + *rsize = sizeof(ps3remote_rdesc); + return ps3remote_rdesc; +} + +static int ps3remote_mapping(struct hid_device *hdev, struct hid_input *hi, + struct hid_field *field, struct hid_usage *usage, + unsigned long **bit, int *max) +{ + unsigned int key = usage->hid & HID_USAGE; + + if ((usage->hid & HID_USAGE_PAGE) != HID_UP_BUTTON) + return -1; + + switch (usage->collection_index) { + case 1: + if (key >= ARRAY_SIZE(ps3remote_keymap_joypad_buttons)) + return -1; + + key = ps3remote_keymap_joypad_buttons[key]; + if (!key) + return -1; + break; + case 2: + if (key >= ARRAY_SIZE(ps3remote_keymap_remote_buttons)) + return -1; + + key = ps3remote_keymap_remote_buttons[key]; + if (!key) + return -1; + break; + default: + return -1; + } + + hid_map_usage_clear(hi, usage, bit, max, EV_KEY, key); + return 1; +} + +static const struct hid_device_id ps3remote_devices[] = { + /* PS3 BD Remote Control */ + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) }, + /* Logitech Harmony Adapter for PS3 */ + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, + { } +}; +MODULE_DEVICE_TABLE(hid, ps3remote_devices); + +static struct hid_driver ps3remote_driver = { + .name = "ps3_remote", + .id_table = ps3remote_devices, + .report_fixup = ps3remote_fixup, + .input_mapping = ps3remote_mapping, +}; + +static int __init ps3remote_init(void) +{ + return hid_register_driver(&ps3remote_driver); +} + +static void __exit ps3remote_exit(void) +{ + hid_unregister_driver(&ps3remote_driver); +} + +module_init(ps3remote_init); +module_exit(ps3remote_exit); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("David Dillow , Antonio Ospite "); diff --git a/trunk/drivers/hid/hid-samsung.c b/trunk/drivers/hid/hid-samsung.c index 3c1fd8af5e0c..a5821d317229 100644 --- a/trunk/drivers/hid/hid-samsung.c +++ b/trunk/drivers/hid/hid-samsung.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2010 Don Prince * diff --git a/trunk/drivers/hid/hid-sony.c b/trunk/drivers/hid/hid-sony.c index 5cd25bd907f8..7f33ebf299c2 100644 --- a/trunk/drivers/hid/hid-sony.c +++ b/trunk/drivers/hid/hid-sony.c @@ -4,7 +4,6 @@ * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2006-2008 Jiri Kosina */ diff --git a/trunk/drivers/hid/hid-sunplus.c b/trunk/drivers/hid/hid-sunplus.c index d484a0043dd4..45b4b066a262 100644 --- a/trunk/drivers/hid/hid-sunplus.c +++ b/trunk/drivers/hid/hid-sunplus.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby */ diff --git a/trunk/drivers/hid/hid-uclogic.c b/trunk/drivers/hid/hid-uclogic.c index 3aba02be1f26..2e56a1fd2375 100644 --- a/trunk/drivers/hid/hid-uclogic.c +++ 
b/trunk/drivers/hid/hid-uclogic.c @@ -466,6 +466,86 @@ static __u8 twhl850_rdesc_fixed2[] = { 0xC0 /* End Collection */ }; +/* + * See TWHA60 description, device and HID report descriptors at + * http://sf.net/apps/mediawiki/digimend/?title=UC-Logic_Tablet_TWHA60 + */ + +/* Size of the original descriptors of TWHA60 tablet */ +#define TWHA60_RDESC_ORIG_SIZE0 254 +#define TWHA60_RDESC_ORIG_SIZE1 139 + +/* Fixed TWHA60 report descriptor, interface 0 (stylus) */ +static __u8 twha60_rdesc_fixed0[] = { + 0x05, 0x0D, /* Usage Page (Digitizer), */ + 0x09, 0x02, /* Usage (Pen), */ + 0xA1, 0x01, /* Collection (Application), */ + 0x85, 0x09, /* Report ID (9), */ + 0x09, 0x20, /* Usage (Stylus), */ + 0xA0, /* Collection (Physical), */ + 0x75, 0x01, /* Report Size (1), */ + 0x09, 0x42, /* Usage (Tip Switch), */ + 0x09, 0x44, /* Usage (Barrel Switch), */ + 0x09, 0x46, /* Usage (Tablet Pick), */ + 0x14, /* Logical Minimum (0), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x95, 0x03, /* Report Count (3), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x04, /* Report Count (4), */ + 0x81, 0x01, /* Input (Constant), */ + 0x09, 0x32, /* Usage (In Range), */ + 0x95, 0x01, /* Report Count (1), */ + 0x81, 0x02, /* Input (Variable), */ + 0x75, 0x10, /* Report Size (16), */ + 0x95, 0x01, /* Report Count (1), */ + 0x14, /* Logical Minimum (0), */ + 0xA4, /* Push, */ + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x55, 0xFD, /* Unit Exponent (-3), */ + 0x65, 0x13, /* Unit (Inch), */ + 0x34, /* Physical Minimum (0), */ + 0x09, 0x30, /* Usage (X), */ + 0x46, 0x10, 0x27, /* Physical Maximum (10000), */ + 0x27, 0x3F, 0x9C, + 0x00, 0x00, /* Logical Maximum (39999), */ + 0x81, 0x02, /* Input (Variable), */ + 0x09, 0x31, /* Usage (Y), */ + 0x46, 0x6A, 0x18, /* Physical Maximum (6250), */ + 0x26, 0xA7, 0x61, /* Logical Maximum (24999), */ + 0x81, 0x02, /* Input (Variable), */ + 0xB4, /* Pop, */ + 0x09, 0x30, /* Usage (Tip Pressure), */ + 0x26, 0xFF, 0x03, /* Logical Maximum (1023), */ + 0x81, 0x02, /* Input (Variable), */ + 0xC0, /* End Collection, */ + 0xC0 /* End Collection */ +}; + +/* Fixed TWHA60 report descriptor, interface 1 (frame buttons) */ +static __u8 twha60_rdesc_fixed1[] = { + 0x05, 0x01, /* Usage Page (Desktop), */ + 0x09, 0x06, /* Usage (Keyboard), */ + 0xA1, 0x01, /* Collection (Application), */ + 0x85, 0x05, /* Report ID (5), */ + 0x05, 0x07, /* Usage Page (Keyboard), */ + 0x14, /* Logical Minimum (0), */ + 0x25, 0x01, /* Logical Maximum (1), */ + 0x75, 0x01, /* Report Size (1), */ + 0x95, 0x08, /* Report Count (8), */ + 0x81, 0x01, /* Input (Constant), */ + 0x95, 0x0C, /* Report Count (12), */ + 0x19, 0x3A, /* Usage Minimum (KB F1), */ + 0x29, 0x45, /* Usage Maximum (KB F12), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x0C, /* Report Count (12), */ + 0x19, 0x68, /* Usage Minimum (KB F13), */ + 0x29, 0x73, /* Usage Maximum (KB F24), */ + 0x81, 0x02, /* Input (Variable), */ + 0x95, 0x08, /* Report Count (8), */ + 0x81, 0x01, /* Input (Constant), */ + 0xC0 /* End Collection */ +}; + static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { @@ -525,6 +605,22 @@ static __u8 *uclogic_report_fixup(struct hid_device *hdev, __u8 *rdesc, break; } break; + case USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60: + switch (iface_num) { + case 0: + if (*rsize == TWHA60_RDESC_ORIG_SIZE0) { + rdesc = twha60_rdesc_fixed0; + *rsize = sizeof(twha60_rdesc_fixed0); + } + break; + case 1: + if (*rsize == TWHA60_RDESC_ORIG_SIZE1) { + rdesc = twha60_rdesc_fixed1; + *rsize = 
sizeof(twha60_rdesc_fixed1); + } + break; + } + break; } return rdesc; @@ -543,6 +639,8 @@ static const struct hid_device_id uclogic_devices[] = { USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, + { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, + USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, { } }; MODULE_DEVICE_TABLE(hid, uclogic_devices); diff --git a/trunk/drivers/hid/hid-wacom.c b/trunk/drivers/hid/hid-wacom.c index fe23a1eb586b..2f60da9ed066 100644 --- a/trunk/drivers/hid/hid-wacom.c +++ b/trunk/drivers/hid/hid-wacom.c @@ -5,7 +5,6 @@ * Copyright (c) 2000-2005 Vojtech Pavlik * Copyright (c) 2005 Michael Haboustak for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina - * Copyright (c) 2007 Paul Walmsley * Copyright (c) 2008 Jiri Slaby * Copyright (c) 2006 Andrew Zabolotny * Copyright (c) 2009 Bastien Nocera @@ -33,6 +32,8 @@ #define PAD_DEVICE_ID 0x0F #define WAC_CMD_LED_CONTROL 0x20 +#define WAC_CMD_ICON_START_STOP 0x21 +#define WAC_CMD_ICON_TRANSFER 0x26 struct wacom_data { __u16 tool; @@ -69,6 +70,91 @@ static enum power_supply_property wacom_ac_props[] = { POWER_SUPPLY_PROP_SCOPE, }; +static void wacom_scramble(__u8 *image) +{ + __u16 mask; + __u16 s1; + __u16 s2; + __u16 r1 ; + __u16 r2 ; + __u16 r; + __u8 buf[256]; + int i, w, x, y, z; + + for (x = 0; x < 32; x++) { + for (y = 0; y < 8; y++) + buf[(8 * x) + (7 - y)] = image[(8 * x) + y]; + } + + /* Change 76543210 into GECA6420 as required by Intuos4 WL + * HGFEDCBA HFDB7531 + */ + for (x = 0; x < 4; x++) { + for (y = 0; y < 4; y++) { + for (z = 0; z < 8; z++) { + mask = 0x0001; + r1 = 0; + r2 = 0; + i = (x << 6) + (y << 4) + z; + s1 = buf[i]; + s2 = buf[i+8]; + for (w = 0; w < 8; w++) { + r1 |= (s1 & mask); + r2 |= (s2 & mask); + s1 <<= 1; + s2 <<= 1; + mask <<= 2; + } + r = r1 | (r2 << 1); + i = (x << 6) + (y << 4) + (z << 1); + image[i] = 0xFF & r; + image[i+1] = (0xFF00 & r) >> 8; + } + } + } +} + +static void wacom_set_image(struct hid_device *hdev, const char *image, + __u8 icon_no) +{ + __u8 rep_data[68]; + __u8 p[256]; + int ret, i, j; + + for (i = 0; i < 256; i++) + p[i] = image[i]; + + rep_data[0] = WAC_CMD_ICON_START_STOP; + rep_data[1] = 0; + ret = hdev->hid_output_raw_report(hdev, rep_data, 2, + HID_FEATURE_REPORT); + if (ret < 0) + goto err; + + rep_data[0] = WAC_CMD_ICON_TRANSFER; + rep_data[1] = icon_no & 0x07; + + wacom_scramble(p); + + for (i = 0; i < 4; i++) { + for (j = 0; j < 64; j++) + rep_data[j + 3] = p[(i << 6) + j]; + + rep_data[2] = i; + ret = hdev->hid_output_raw_report(hdev, rep_data, 67, + HID_FEATURE_REPORT); + } + + rep_data[0] = WAC_CMD_ICON_START_STOP; + rep_data[1] = 0; + + ret = hdev->hid_output_raw_report(hdev, rep_data, 2, + HID_FEATURE_REPORT); + +err: + return; +} + static void wacom_leds_set_brightness(struct led_classdev *led_dev, enum led_brightness value) { @@ -91,7 +177,10 @@ static void wacom_leds_set_brightness(struct led_classdev *led_dev, if (buf) { buf[0] = WAC_CMD_LED_CONTROL; buf[1] = led; - buf[2] = value; + buf[2] = value >> 2; + buf[3] = value; + /* use fixed brightness for OLEDs */ + buf[4] = 0x08; hdev->hid_output_raw_report(hdev, buf, 9, HID_FEATURE_REPORT); kfree(buf); } @@ -317,6 +406,34 @@ static ssize_t wacom_store_speed(struct device *dev, static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR | S_IWGRP, wacom_show_speed, wacom_store_speed); +#define WACOM_STORE(OLED_ID) \ +static ssize_t wacom_oled##OLED_ID##_store(struct device *dev, \ + struct device_attribute *attr, \ + const char *buf, size_t count) \ +{ 
\ + struct hid_device *hdev = container_of(dev, struct hid_device, \ + dev); \ + \ + if (count != 256) \ + return -EINVAL; \ + \ + wacom_set_image(hdev, buf, OLED_ID); \ + \ + return count; \ +} \ + \ +static DEVICE_ATTR(oled##OLED_ID##_img, S_IWUSR | S_IWGRP, NULL, \ + wacom_oled##OLED_ID##_store) + +WACOM_STORE(0); +WACOM_STORE(1); +WACOM_STORE(2); +WACOM_STORE(3); +WACOM_STORE(4); +WACOM_STORE(5); +WACOM_STORE(6); +WACOM_STORE(7); + static int wacom_gr_parse_report(struct hid_device *hdev, struct wacom_data *wdata, struct input_dev *input, unsigned char *data) @@ -717,17 +834,33 @@ static int wacom_probe(struct hid_device *hdev, hid_warn(hdev, "can't create sysfs speed attribute err: %d\n", ret); +#define OLED_INIT(OLED_ID) \ + do { \ + ret = device_create_file(&hdev->dev, \ + &dev_attr_oled##OLED_ID##_img); \ + if (ret) \ + hid_warn(hdev, \ + "can't create sysfs oled attribute, err: %d\n", ret);\ + } while (0) + +OLED_INIT(0); +OLED_INIT(1); +OLED_INIT(2); +OLED_INIT(3); +OLED_INIT(4); +OLED_INIT(5); +OLED_INIT(6); +OLED_INIT(7); + wdata->features = 0; wacom_set_features(hdev, 1); if (hdev->product == USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH) { sprintf(hdev->name, "%s", "Wacom Intuos4 WL"); ret = wacom_initialize_leds(hdev); - if (ret) { + if (ret) hid_warn(hdev, "can't create led attribute, err: %d\n", ret); - goto destroy_leds; - } } wdata->battery.properties = wacom_battery_props; @@ -740,8 +873,8 @@ static int wacom_probe(struct hid_device *hdev, ret = power_supply_register(&hdev->dev, &wdata->battery); if (ret) { - hid_warn(hdev, "can't create sysfs battery attribute, err: %d\n", - ret); + hid_err(hdev, "can't create sysfs battery attribute, err: %d\n", + ret); goto err_battery; } @@ -756,8 +889,8 @@ static int wacom_probe(struct hid_device *hdev, ret = power_supply_register(&hdev->dev, &wdata->ac); if (ret) { - hid_warn(hdev, - "can't create ac battery attribute, err: %d\n", ret); + hid_err(hdev, + "can't create ac battery attribute, err: %d\n", ret); goto err_ac; } @@ -767,10 +900,17 @@ static int wacom_probe(struct hid_device *hdev, err_ac: power_supply_unregister(&wdata->battery); err_battery: + wacom_destroy_leds(hdev); + device_remove_file(&hdev->dev, &dev_attr_oled0_img); + device_remove_file(&hdev->dev, &dev_attr_oled1_img); + device_remove_file(&hdev->dev, &dev_attr_oled2_img); + device_remove_file(&hdev->dev, &dev_attr_oled3_img); + device_remove_file(&hdev->dev, &dev_attr_oled4_img); + device_remove_file(&hdev->dev, &dev_attr_oled5_img); + device_remove_file(&hdev->dev, &dev_attr_oled6_img); + device_remove_file(&hdev->dev, &dev_attr_oled7_img); device_remove_file(&hdev->dev, &dev_attr_speed); hid_hw_stop(hdev); -destroy_leds: - wacom_destroy_leds(hdev); err_free: kfree(wdata); return ret; @@ -781,6 +921,14 @@ static void wacom_remove(struct hid_device *hdev) struct wacom_data *wdata = hid_get_drvdata(hdev); wacom_destroy_leds(hdev); + device_remove_file(&hdev->dev, &dev_attr_oled0_img); + device_remove_file(&hdev->dev, &dev_attr_oled1_img); + device_remove_file(&hdev->dev, &dev_attr_oled2_img); + device_remove_file(&hdev->dev, &dev_attr_oled3_img); + device_remove_file(&hdev->dev, &dev_attr_oled4_img); + device_remove_file(&hdev->dev, &dev_attr_oled5_img); + device_remove_file(&hdev->dev, &dev_attr_oled6_img); + device_remove_file(&hdev->dev, &dev_attr_oled7_img); device_remove_file(&hdev->dev, &dev_attr_speed); hid_hw_stop(hdev); diff --git a/trunk/drivers/hid/hid-waltop.c b/trunk/drivers/hid/hid-waltop.c index 745e4e9a8cf2..bb536ab5941e 100644 --- 
a/trunk/drivers/hid/hid-waltop.c +++ b/trunk/drivers/hid/hid-waltop.c @@ -638,28 +638,6 @@ static __u8 sirius_battery_free_tablet_rdesc_fixed[] = { 0xC0 /* End Collection */ }; -static int waltop_probe(struct hid_device *hdev, - const struct hid_device_id *id) -{ - int ret; - - ret = hid_parse(hdev); - if (ret) { - hid_err(hdev, "parse failed\n"); - goto err; - } - - ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); - if (ret) { - hid_err(hdev, "hw start failed\n"); - goto err; - } - - return 0; -err: - return ret; -} - static __u8 *waltop_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { @@ -776,11 +754,6 @@ static int waltop_raw_event(struct hid_device *hdev, struct hid_report *report, return 0; } -static void waltop_remove(struct hid_device *hdev) -{ - hid_hw_stop(hdev); -} - static const struct hid_device_id waltop_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, @@ -803,10 +776,8 @@ MODULE_DEVICE_TABLE(hid, waltop_devices); static struct hid_driver waltop_driver = { .name = "waltop", .id_table = waltop_devices, - .probe = waltop_probe, .report_fixup = waltop_report_fixup, .raw_event = waltop_raw_event, - .remove = waltop_remove, }; static int __init waltop_init(void) diff --git a/trunk/drivers/hid/hid-wiimote-ext.c b/trunk/drivers/hid/hid-wiimote-ext.c index 0a1805c9b0e5..bc85bf29062e 100644 --- a/trunk/drivers/hid/hid-wiimote-ext.c +++ b/trunk/drivers/hid/hid-wiimote-ext.c @@ -28,12 +28,14 @@ struct wiimote_ext { bool mp_plugged; bool motionp; __u8 ext_type; + __u16 calib[4][3]; }; enum wiiext_type { WIIEXT_NONE, /* placeholder */ WIIEXT_CLASSIC, /* Nintendo classic controller */ WIIEXT_NUNCHUCK, /* Nintendo nunchuck controller */ + WIIEXT_BALANCE_BOARD, /* Nintendo balance board controller */ }; enum wiiext_keys { @@ -126,6 +128,7 @@ static bool motionp_read(struct wiimote_ext *ext) static __u8 ext_read(struct wiimote_ext *ext) { ssize_t ret; + __u8 buf[24], i, j, offs = 0; __u8 rmem[2], wmem; __u8 type = WIIEXT_NONE; @@ -151,6 +154,28 @@ static __u8 ext_read(struct wiimote_ext *ext) type = WIIEXT_NUNCHUCK; else if (rmem[0] == 0x01 && rmem[1] == 0x01) type = WIIEXT_CLASSIC; + else if (rmem[0] == 0x04 && rmem[1] == 0x02) + type = WIIEXT_BALANCE_BOARD; + } + + /* get balance board calibration data */ + if (type == WIIEXT_BALANCE_BOARD) { + ret = wiimote_cmd_read(ext->wdata, 0xa40024, buf, 12); + ret += wiimote_cmd_read(ext->wdata, 0xa40024 + 12, + buf + 12, 12); + + if (ret != 24) { + type = WIIEXT_NONE; + } else { + for (i = 0; i < 3; i++) { + for (j = 0; j < 4; j++) { + ext->calib[j][i] = buf[offs]; + ext->calib[j][i] <<= 8; + ext->calib[j][i] |= buf[offs + 1]; + offs += 2; + } + } + } } wiimote_cmd_release(ext->wdata); @@ -509,6 +534,71 @@ static void handler_classic(struct wiimote_ext *ext, const __u8 *payload) input_sync(ext->input); } +static void handler_balance_board(struct wiimote_ext *ext, const __u8 *payload) +{ + __s32 val[4], tmp; + unsigned int i; + + /* Byte | 8 7 6 5 4 3 2 1 | + * -----+--------------------------+ + * 1 | Top Right <15:8> | + * 2 | Top Right <7:0> | + * -----+--------------------------+ + * 3 | Bottom Right <15:8> | + * 4 | Bottom Right <7:0> | + * -----+--------------------------+ + * 5 | Top Left <15:8> | + * 6 | Top Left <7:0> | + * -----+--------------------------+ + * 7 | Bottom Left <15:8> | + * 8 | Bottom Left <7:0> | + * -----+--------------------------+ + * + * These values represent the weight-measurements of the Wii-balance + * board with 16bit precision. 
+ * + * The balance-board is never reported interleaved with motionp. + */ + + val[0] = payload[0]; + val[0] <<= 8; + val[0] |= payload[1]; + + val[1] = payload[2]; + val[1] <<= 8; + val[1] |= payload[3]; + + val[2] = payload[4]; + val[2] <<= 8; + val[2] |= payload[5]; + + val[3] = payload[6]; + val[3] <<= 8; + val[3] |= payload[7]; + + /* apply calibration data */ + for (i = 0; i < 4; i++) { + if (val[i] < ext->calib[i][1]) { + tmp = val[i] - ext->calib[i][0]; + tmp *= 1700; + tmp /= ext->calib[i][1] - ext->calib[i][0]; + } else { + tmp = val[i] - ext->calib[i][1]; + tmp *= 1700; + tmp /= ext->calib[i][2] - ext->calib[i][1]; + tmp += 1700; + } + val[i] = tmp; + } + + input_report_abs(ext->input, ABS_HAT0X, val[0]); + input_report_abs(ext->input, ABS_HAT0Y, val[1]); + input_report_abs(ext->input, ABS_HAT1X, val[2]); + input_report_abs(ext->input, ABS_HAT1Y, val[3]); + + input_sync(ext->input); +} + /* call this with state.lock spinlock held */ void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload) { @@ -523,6 +613,8 @@ void wiiext_handle(struct wiimote_data *wdata, const __u8 *payload) handler_nunchuck(ext, payload); } else if (ext->ext_type == WIIEXT_CLASSIC) { handler_classic(ext, payload); + } else if (ext->ext_type == WIIEXT_BALANCE_BOARD) { + handler_balance_board(ext, payload); } } @@ -551,6 +643,11 @@ static ssize_t wiiext_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "motionp+classic\n"); else return sprintf(buf, "classic\n"); + } else if (type == WIIEXT_BALANCE_BOARD) { + if (motionp) + return sprintf(buf, "motionp+balanceboard\n"); + else + return sprintf(buf, "balanceboard\n"); } else { if (motionp) return sprintf(buf, "motionp\n"); diff --git a/trunk/drivers/hid/hidraw.c b/trunk/drivers/hid/hidraw.c index 3b6f7bf5a77e..17d15bb610d1 100644 --- a/trunk/drivers/hid/hidraw.c +++ b/trunk/drivers/hid/hidraw.c @@ -42,6 +42,7 @@ static struct cdev hidraw_cdev; static struct class *hidraw_class; static struct hidraw *hidraw_table[HIDRAW_MAX_DEVICES]; static DEFINE_MUTEX(minors_lock); +static void drop_ref(struct hidraw *hid, int exists_bit); static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos) { @@ -113,7 +114,7 @@ static ssize_t hidraw_send_report(struct file *file, const char __user *buffer, __u8 *buf; int ret = 0; - if (!hidraw_table[minor]) { + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { ret = -ENODEV; goto out; } @@ -261,7 +262,7 @@ static int hidraw_open(struct inode *inode, struct file *file) } mutex_lock(&minors_lock); - if (!hidraw_table[minor]) { + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) { err = -ENODEV; goto out_unlock; } @@ -298,36 +299,12 @@ static int hidraw_open(struct inode *inode, struct file *file) static int hidraw_release(struct inode * inode, struct file * file) { unsigned int minor = iminor(inode); - struct hidraw *dev; struct hidraw_list *list = file->private_data; - int ret; - int i; - - mutex_lock(&minors_lock); - if (!hidraw_table[minor]) { - ret = -ENODEV; - goto unlock; - } + drop_ref(hidraw_table[minor], 0); list_del(&list->node); - dev = hidraw_table[minor]; - if (!--dev->open) { - if (list->hidraw->exist) { - hid_hw_power(dev->hid, PM_HINT_NORMAL); - hid_hw_close(dev->hid); - } else { - kfree(list->hidraw); - } - } - - for (i = 0; i < HIDRAW_BUFFER_SIZE; ++i) - kfree(list->buffer[i].value); kfree(list); - ret = 0; -unlock: - mutex_unlock(&minors_lock); - - return ret; + return 0; } static long hidraw_ioctl(struct file *file, unsigned int 
cmd, @@ -529,21 +506,7 @@ EXPORT_SYMBOL_GPL(hidraw_connect); void hidraw_disconnect(struct hid_device *hid) { struct hidraw *hidraw = hid->hidraw; - - mutex_lock(&minors_lock); - hidraw->exist = 0; - - device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); - - hidraw_table[hidraw->minor] = NULL; - - if (hidraw->open) { - hid_hw_close(hid); - wake_up_interruptible(&hidraw->wait); - } else { - kfree(hidraw); - } - mutex_unlock(&minors_lock); + drop_ref(hidraw, 1); } EXPORT_SYMBOL_GPL(hidraw_disconnect); @@ -559,21 +522,28 @@ int __init hidraw_init(void) if (result < 0) { pr_warn("can't get major number\n"); - result = 0; goto out; } hidraw_class = class_create(THIS_MODULE, "hidraw"); if (IS_ERR(hidraw_class)) { result = PTR_ERR(hidraw_class); - unregister_chrdev(hidraw_major, "hidraw"); - goto out; + goto error_cdev; } cdev_init(&hidraw_cdev, &hidraw_ops); - cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES); + result = cdev_add(&hidraw_cdev, dev_id, HIDRAW_MAX_DEVICES); + if (result < 0) + goto error_class; + out: return result; + +error_class: + class_destroy(hidraw_class); +error_cdev: + unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); + goto out; } void hidraw_exit(void) @@ -585,3 +555,23 @@ void hidraw_exit(void) unregister_chrdev_region(dev_id, HIDRAW_MAX_DEVICES); } + +static void drop_ref(struct hidraw *hidraw, int exists_bit) +{ + mutex_lock(&minors_lock); + if (exists_bit) { + hid_hw_close(hidraw->hid); + hidraw->exist = 0; + if (hidraw->open) + wake_up_interruptible(&hidraw->wait); + } else { + --hidraw->open; + } + + if (!hidraw->open && !hidraw->exist) { + device_destroy(hidraw_class, MKDEV(hidraw_major, hidraw->minor)); + hidraw_table[hidraw->minor] = NULL; + kfree(hidraw); + } + mutex_unlock(&minors_lock); +} diff --git a/trunk/drivers/hid/usbhid/hid-core.c b/trunk/drivers/hid/usbhid/hid-core.c index dedd8e4e5c6d..8e0c4bf94ebc 100644 --- a/trunk/drivers/hid/usbhid/hid-core.c +++ b/trunk/drivers/hid/usbhid/hid-core.c @@ -1415,20 +1415,20 @@ static int hid_post_reset(struct usb_interface *intf) * configuration descriptors passed, we already know that * the size of the HID report descriptor has not changed. 
*/ - rdesc = kmalloc(hid->rsize, GFP_KERNEL); + rdesc = kmalloc(hid->dev_rsize, GFP_KERNEL); if (!rdesc) { dbg_hid("couldn't allocate rdesc memory (post_reset)\n"); return 1; } status = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber, - HID_DT_REPORT, rdesc, hid->rsize); + HID_DT_REPORT, rdesc, hid->dev_rsize); if (status < 0) { dbg_hid("reading report descriptor failed (post_reset)\n"); kfree(rdesc); return 1; } - status = memcmp(rdesc, hid->rdesc, hid->rsize); + status = memcmp(rdesc, hid->dev_rdesc, hid->dev_rsize); kfree(rdesc); if (status != 0) { dbg_hid("report descriptor changed\n"); diff --git a/trunk/drivers/hid/usbhid/hid-quirks.c b/trunk/drivers/hid/usbhid/hid-quirks.c index 903eef3d3e10..11c7932dc7e6 100644 --- a/trunk/drivers/hid/usbhid/hid-quirks.c +++ b/trunk/drivers/hid/usbhid/hid-quirks.c @@ -70,11 +70,13 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET }, - { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET }, { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET }, { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, diff --git a/trunk/drivers/hwmon/ad7314.c b/trunk/drivers/hwmon/ad7314.c index cfec802cf9ca..f915eb1c29f7 100644 --- a/trunk/drivers/hwmon/ad7314.c +++ b/trunk/drivers/hwmon/ad7314.c @@ -87,10 +87,18 @@ static ssize_t ad7314_show_temperature(struct device *dev, } } +static ssize_t ad7314_show_name(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", to_spi_device(dev)->modalias); +} + +static DEVICE_ATTR(name, S_IRUGO, ad7314_show_name, NULL); static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, ad7314_show_temperature, NULL, 0); static struct attribute *ad7314_attributes[] = { + &dev_attr_name.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, NULL, }; diff --git a/trunk/drivers/hwmon/ads7871.c b/trunk/drivers/hwmon/ads7871.c index e65c6e45d36b..7bf4ce3d405e 100644 --- a/trunk/drivers/hwmon/ads7871.c +++ b/trunk/drivers/hwmon/ads7871.c @@ -139,6 +139,12 @@ static ssize_t show_voltage(struct device *dev, } } +static ssize_t ads7871_show_name(struct device *dev, + struct device_attribute *devattr, char *buf) +{ + return sprintf(buf, "%s\n", to_spi_device(dev)->modalias); +} + static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, show_voltage, NULL, 0); static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_voltage, NULL, 1); static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_voltage, NULL, 2); @@ -148,6 +154,8 @@ static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, show_voltage, NULL, 5); static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, show_voltage, NULL, 6); static 
SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, show_voltage, NULL, 7); +static DEVICE_ATTR(name, S_IRUGO, ads7871_show_name, NULL); + static struct attribute *ads7871_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, &sensor_dev_attr_in1_input.dev_attr.attr, @@ -157,6 +165,7 @@ static struct attribute *ads7871_attributes[] = { &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, + &dev_attr_name.attr, NULL }; diff --git a/trunk/drivers/hwmon/applesmc.c b/trunk/drivers/hwmon/applesmc.c index 282708860517..8f3f6f2c45fd 100644 --- a/trunk/drivers/hwmon/applesmc.c +++ b/trunk/drivers/hwmon/applesmc.c @@ -53,10 +53,10 @@ #define APPLESMC_MAX_DATA_LENGTH 32 -/* wait up to 32 ms for a status change. */ +/* wait up to 128 ms for a status change. */ #define APPLESMC_MIN_WAIT 0x0010 #define APPLESMC_RETRY_WAIT 0x0100 -#define APPLESMC_MAX_WAIT 0x8000 +#define APPLESMC_MAX_WAIT 0x20000 #define APPLESMC_READ_CMD 0x10 #define APPLESMC_WRITE_CMD 0x11 diff --git a/trunk/drivers/hwmon/asus_atk0110.c b/trunk/drivers/hwmon/asus_atk0110.c index 351d1f4593e7..4ee578948723 100644 --- a/trunk/drivers/hwmon/asus_atk0110.c +++ b/trunk/drivers/hwmon/asus_atk0110.c @@ -34,6 +34,12 @@ static const struct dmi_system_id __initconst atk_force_new_if[] = { .matches = { DMI_MATCH(DMI_BOARD_NAME, "SABERTOOTH X58") } + }, { + /* Old interface reads the same sensor for fan0 and fan1 */ + .ident = "Asus M5A78L", + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "M5A78L") + } }, { } }; diff --git a/trunk/drivers/hwmon/coretemp.c b/trunk/drivers/hwmon/coretemp.c index faa16f80db9c..984a3f13923b 100644 --- a/trunk/drivers/hwmon/coretemp.c +++ b/trunk/drivers/hwmon/coretemp.c @@ -196,7 +196,7 @@ struct tjmax { int tjmax; }; -static struct tjmax __cpuinitconst tjmax_table[] = { +static const struct tjmax __cpuinitconst tjmax_table[] = { { "CPU D410", 100000 }, { "CPU D425", 100000 }, { "CPU D510", 100000 }, @@ -815,17 +815,20 @@ static int __init coretemp_init(void) if (err) goto exit; + get_online_cpus(); for_each_online_cpu(i) get_core_online(i); #ifndef CONFIG_HOTPLUG_CPU if (list_empty(&pdev_list)) { + put_online_cpus(); err = -ENODEV; goto exit_driver_unreg; } #endif register_hotcpu_notifier(&coretemp_cpu_notifier); + put_online_cpus(); return 0; #ifndef CONFIG_HOTPLUG_CPU @@ -840,6 +843,7 @@ static void __exit coretemp_exit(void) { struct pdev_entry *p, *n; + get_online_cpus(); unregister_hotcpu_notifier(&coretemp_cpu_notifier); mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { @@ -848,6 +852,7 @@ static void __exit coretemp_exit(void) kfree(p); } mutex_unlock(&pdev_list_mutex); + put_online_cpus(); platform_driver_unregister(&coretemp_driver); } diff --git a/trunk/drivers/hwmon/fam15h_power.c b/trunk/drivers/hwmon/fam15h_power.c index 2764b78a784b..af69073b3fe8 100644 --- a/trunk/drivers/hwmon/fam15h_power.c +++ b/trunk/drivers/hwmon/fam15h_power.c @@ -129,12 +129,12 @@ static bool __devinit fam15h_power_is_internal_node0(struct pci_dev *f4) * counter saturations resulting in bogus power readings. * We correct this value ourselves to cope with older BIOSes. 
*/ -static DEFINE_PCI_DEVICE_TABLE(affected_device) = { +static const struct pci_device_id affected_device[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, { 0 } }; -static void __devinit tweak_runavg_range(struct pci_dev *pdev) +static void tweak_runavg_range(struct pci_dev *pdev) { u32 val; @@ -158,6 +158,16 @@ static void __devinit tweak_runavg_range(struct pci_dev *pdev) REG_TDP_RUNNING_AVERAGE, val); } +#ifdef CONFIG_PM +static int fam15h_power_resume(struct pci_dev *pdev) +{ + tweak_runavg_range(pdev); + return 0; +} +#else +#define fam15h_power_resume NULL +#endif + static void __devinit fam15h_power_init_data(struct pci_dev *f4, struct fam15h_power_data *data) { @@ -256,6 +266,7 @@ static struct pci_driver fam15h_power_driver = { .id_table = fam15h_power_id_table, .probe = fam15h_power_probe, .remove = __devexit_p(fam15h_power_remove), + .resume = fam15h_power_resume, }; module_pci_driver(fam15h_power_driver); diff --git a/trunk/drivers/hwmon/ina2xx.c b/trunk/drivers/hwmon/ina2xx.c index 7f3f4a385729..602148299f68 100644 --- a/trunk/drivers/hwmon/ina2xx.c +++ b/trunk/drivers/hwmon/ina2xx.c @@ -69,22 +69,6 @@ struct ina2xx_data { u16 regs[INA2XX_MAX_REGISTERS]; }; -int ina2xx_read_word(struct i2c_client *client, int reg) -{ - int val = i2c_smbus_read_word_data(client, reg); - if (unlikely(val < 0)) { - dev_dbg(&client->dev, - "Failed to read register: %d\n", reg); - return val; - } - return be16_to_cpu(val); -} - -void ina2xx_write_word(struct i2c_client *client, int reg, int data) -{ - i2c_smbus_write_word_data(client, reg, cpu_to_be16(data)); -} - static struct ina2xx_data *ina2xx_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); @@ -102,7 +86,7 @@ static struct ina2xx_data *ina2xx_update_device(struct device *dev) /* Read all registers */ for (i = 0; i < data->registers; i++) { - int rv = ina2xx_read_word(client, i); + int rv = i2c_smbus_read_word_swapped(client, i); if (rv < 0) { ret = ERR_PTR(rv); goto abort; @@ -279,22 +263,26 @@ static int ina2xx_probe(struct i2c_client *client, switch (data->kind) { case ina219: /* device configuration */ - ina2xx_write_word(client, INA2XX_CONFIG, INA219_CONFIG_DEFAULT); + i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, + INA219_CONFIG_DEFAULT); /* set current LSB to 1mA, shunt is in uOhms */ /* (equation 13 in datasheet) */ - ina2xx_write_word(client, INA2XX_CALIBRATION, 40960000 / shunt); + i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION, + 40960000 / shunt); dev_info(&client->dev, "power monitor INA219 (Rshunt = %li uOhm)\n", shunt); data->registers = INA219_REGISTERS; break; case ina226: /* device configuration */ - ina2xx_write_word(client, INA2XX_CONFIG, INA226_CONFIG_DEFAULT); + i2c_smbus_write_word_swapped(client, INA2XX_CONFIG, + INA226_CONFIG_DEFAULT); /* set current LSB to 1mA, shunt is in uOhms */ /* (equation 1 in datasheet)*/ - ina2xx_write_word(client, INA2XX_CALIBRATION, 5120000 / shunt); + i2c_smbus_write_word_swapped(client, INA2XX_CALIBRATION, + 5120000 / shunt); dev_info(&client->dev, "power monitor INA226 (Rshunt = %li uOhm)\n", shunt); data->registers = INA226_REGISTERS; diff --git a/trunk/drivers/hwmon/twl4030-madc-hwmon.c b/trunk/drivers/hwmon/twl4030-madc-hwmon.c index 0018c7dd0097..1a174f0a3cde 100644 --- a/trunk/drivers/hwmon/twl4030-madc-hwmon.c +++ b/trunk/drivers/hwmon/twl4030-madc-hwmon.c @@ -44,12 +44,13 @@ static ssize_t madc_read(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = 
to_sensor_dev_attr(devattr); - struct twl4030_madc_request req; + struct twl4030_madc_request req = { + .channels = 1 << attr->index, + .method = TWL4030_MADC_SW2, + .type = TWL4030_MADC_WAIT, + }; long val; - req.channels = (1 << attr->index); - req.method = TWL4030_MADC_SW2; - req.func_cb = NULL; val = twl4030_madc_conversion(&req); if (val < 0) return val; diff --git a/trunk/drivers/hwmon/via-cputemp.c b/trunk/drivers/hwmon/via-cputemp.c index ee4ebc198a94..2e56c6ce9fb6 100644 --- a/trunk/drivers/hwmon/via-cputemp.c +++ b/trunk/drivers/hwmon/via-cputemp.c @@ -328,6 +328,7 @@ static int __init via_cputemp_init(void) if (err) goto exit; + get_online_cpus(); for_each_online_cpu(i) { struct cpuinfo_x86 *c = &cpu_data(i); @@ -347,12 +348,14 @@ static int __init via_cputemp_init(void) #ifndef CONFIG_HOTPLUG_CPU if (list_empty(&pdev_list)) { + put_online_cpus(); err = -ENODEV; goto exit_driver_unreg; } #endif register_hotcpu_notifier(&via_cputemp_cpu_notifier); + put_online_cpus(); return 0; #ifndef CONFIG_HOTPLUG_CPU @@ -367,6 +370,7 @@ static void __exit via_cputemp_exit(void) { struct pdev_entry *p, *n; + get_online_cpus(); unregister_hotcpu_notifier(&via_cputemp_cpu_notifier); mutex_lock(&pdev_list_mutex); list_for_each_entry_safe(p, n, &pdev_list, list) { @@ -375,6 +379,7 @@ static void __exit via_cputemp_exit(void) kfree(p); } mutex_unlock(&pdev_list_mutex); + put_online_cpus(); platform_driver_unregister(&via_cputemp_driver); } diff --git a/trunk/drivers/hwmon/w83627hf.c b/trunk/drivers/hwmon/w83627hf.c index ab4825205a9d..5b1a6a666441 100644 --- a/trunk/drivers/hwmon/w83627hf.c +++ b/trunk/drivers/hwmon/w83627hf.c @@ -1206,7 +1206,7 @@ static int __init w83627hf_find(int sioaddr, unsigned short *addr, int err = -ENODEV; u16 val; - static const __initdata char *names[] = { + static __initconst char *const names[] = { "W83627HF", "W83627THF", "W83697HF", diff --git a/trunk/drivers/hwspinlock/hwspinlock_core.c b/trunk/drivers/hwspinlock/hwspinlock_core.c index 1201a15784c3..db713c0dfba4 100644 --- a/trunk/drivers/hwspinlock/hwspinlock_core.c +++ b/trunk/drivers/hwspinlock/hwspinlock_core.c @@ -552,7 +552,7 @@ EXPORT_SYMBOL_GPL(hwspin_lock_request_specific); */ int hwspin_lock_free(struct hwspinlock *hwlock) { - struct device *dev = hwlock->bank->dev; + struct device *dev; struct hwspinlock *tmp; int ret; @@ -561,6 +561,7 @@ int hwspin_lock_free(struct hwspinlock *hwlock) return -EINVAL; } + dev = hwlock->bank->dev; mutex_lock(&hwspinlock_tree_lock); /* make sure the hwspinlock is used */ diff --git a/trunk/drivers/i2c/algos/i2c-algo-pca.c b/trunk/drivers/i2c/algos/i2c-algo-pca.c index 73133b1063f0..6f5f98d69af7 100644 --- a/trunk/drivers/i2c/algos/i2c-algo-pca.c +++ b/trunk/drivers/i2c/algos/i2c-algo-pca.c @@ -476,17 +476,17 @@ static int pca_init(struct i2c_adapter *adap) /* To avoid integer overflow, use clock/100 for calculations */ clock = pca_clock(pca_data) / 100; - if (pca_data->i2c_clock > 10000) { + if (pca_data->i2c_clock > 1000000) { mode = I2C_PCA_MODE_TURBO; min_tlow = 14; min_thi = 5; raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */ - } else if (pca_data->i2c_clock > 4000) { + } else if (pca_data->i2c_clock > 400000) { mode = I2C_PCA_MODE_FASTP; min_tlow = 17; min_thi = 9; raise_fall_time = 22; /* Raise 11e-8s, Fall 11e-8s */ - } else if (pca_data->i2c_clock > 1000) { + } else if (pca_data->i2c_clock > 100000) { mode = I2C_PCA_MODE_FAST; min_tlow = 44; min_thi = 20; diff --git a/trunk/drivers/i2c/busses/Kconfig b/trunk/drivers/i2c/busses/Kconfig index 
b4aaa1bd6728..970a1612e795 100644 --- a/trunk/drivers/i2c/busses/Kconfig +++ b/trunk/drivers/i2c/busses/Kconfig @@ -104,6 +104,7 @@ config I2C_I801 DH89xxCC (PCH) Panther Point (PCH) Lynx Point (PCH) + Lynx Point-LP (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -354,9 +355,13 @@ config I2C_DAVINCI devices such as DaVinci NIC. For details please see http://www.ti.com/davinci +config I2C_DESIGNWARE_CORE + tristate + config I2C_DESIGNWARE_PLATFORM tristate "Synopsys DesignWare Platform" depends on HAVE_CLK + select I2C_DESIGNWARE_CORE help If you say yes to this option, support will be included for the Synopsys DesignWare I2C adapter. Only master mode is supported. @@ -367,6 +372,7 @@ config I2C_DESIGNWARE_PLATFORM config I2C_DESIGNWARE_PCI tristate "Synopsys DesignWare PCI" depends on PCI + select I2C_DESIGNWARE_CORE help If you say yes to this option, support will be included for the Synopsys DesignWare I2C adapter. Only master mode is supported. diff --git a/trunk/drivers/i2c/busses/Makefile b/trunk/drivers/i2c/busses/Makefile index ce3c2be7fb40..37c4182cc98b 100644 --- a/trunk/drivers/i2c/busses/Makefile +++ b/trunk/drivers/i2c/busses/Makefile @@ -33,10 +33,11 @@ obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o +obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o -i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-core.o +i2c-designware-platform-objs := i2c-designware-platdrv.o obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o -i2c-designware-pci-objs := i2c-designware-pcidrv.o i2c-designware-core.o +i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_EG20T) += i2c-eg20t.o obj-$(CONFIG_I2C_GPIO) += i2c-gpio.o obj-$(CONFIG_I2C_HIGHLANDER) += i2c-highlander.o diff --git a/trunk/drivers/i2c/busses/i2c-designware-core.c b/trunk/drivers/i2c/busses/i2c-designware-core.c index 1e48bec80edf..7b8ebbefb581 100644 --- a/trunk/drivers/i2c/busses/i2c-designware-core.c +++ b/trunk/drivers/i2c/busses/i2c-designware-core.c @@ -25,6 +25,7 @@ * ---------------------------------------------------------------------------- * */ +#include #include #include #include @@ -316,6 +317,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev) dw_writel(dev, dev->master_cfg , DW_IC_CON); return 0; } +EXPORT_SYMBOL_GPL(i2c_dw_init); /* * Waiting for bus not busy @@ -568,12 +570,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) return ret; } +EXPORT_SYMBOL_GPL(i2c_dw_xfer); u32 i2c_dw_func(struct i2c_adapter *adap) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); return dev->functionality; } +EXPORT_SYMBOL_GPL(i2c_dw_func); static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) { @@ -678,17 +682,20 @@ irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(i2c_dw_isr); void i2c_dw_enable(struct dw_i2c_dev *dev) { /* Enable the adapter */ dw_writel(dev, 1, DW_IC_ENABLE); } +EXPORT_SYMBOL_GPL(i2c_dw_enable); u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev) { return dw_readl(dev, DW_IC_ENABLE); } +EXPORT_SYMBOL_GPL(i2c_dw_is_enabled); void i2c_dw_disable(struct dw_i2c_dev *dev) { @@ -699,18 +706,22 @@ void i2c_dw_disable(struct dw_i2c_dev *dev) dw_writel(dev, 0, DW_IC_INTR_MASK); dw_readl(dev, DW_IC_CLR_INTR); } +EXPORT_SYMBOL_GPL(i2c_dw_disable); void i2c_dw_clear_int(struct dw_i2c_dev 
*dev) { dw_readl(dev, DW_IC_CLR_INTR); } +EXPORT_SYMBOL_GPL(i2c_dw_clear_int); void i2c_dw_disable_int(struct dw_i2c_dev *dev) { dw_writel(dev, 0, DW_IC_INTR_MASK); } +EXPORT_SYMBOL_GPL(i2c_dw_disable_int); u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev) { return dw_readl(dev, DW_IC_COMP_PARAM_1); } +EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); diff --git a/trunk/drivers/i2c/busses/i2c-diolan-u2c.c b/trunk/drivers/i2c/busses/i2c-diolan-u2c.c index aedb94f34bf7..dae3ddfe7619 100644 --- a/trunk/drivers/i2c/busses/i2c-diolan-u2c.c +++ b/trunk/drivers/i2c/busses/i2c-diolan-u2c.c @@ -405,6 +405,7 @@ static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, } } } + ret = num; abort: sret = diolan_i2c_stop(dev); if (sret < 0 && ret >= 0) diff --git a/trunk/drivers/i2c/busses/i2c-i801.c b/trunk/drivers/i2c/busses/i2c-i801.c index 898dcf9c7ade..33e9b0c09af2 100644 --- a/trunk/drivers/i2c/busses/i2c-i801.c +++ b/trunk/drivers/i2c/busses/i2c-i801.c @@ -52,6 +52,7 @@ DH89xxCC (PCH) 0x2330 32 hard yes yes yes Panther Point (PCH) 0x1e22 32 hard yes yes yes Lynx Point (PCH) 0x8c22 32 hard yes yes yes + Lynx Point-LP (PCH) 0x9c22 32 hard yes yes yes Features supported by this driver: Software PEC no @@ -155,6 +156,7 @@ #define PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS 0x2330 #define PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS 0x3b30 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS 0x8c22 +#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 struct i801_priv { struct i2c_adapter adapter; @@ -771,6 +773,7 @@ static DEFINE_PCI_DEVICE_TABLE(i801_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DH89XXCC_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PANTHERPOINT_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS) }, { 0, } }; diff --git a/trunk/drivers/i2c/busses/i2c-mxs.c b/trunk/drivers/i2c/busses/i2c-mxs.c index 088c5c1ed17d..51f05b8520ed 100644 --- a/trunk/drivers/i2c/busses/i2c-mxs.c +++ b/trunk/drivers/i2c/busses/i2c-mxs.c @@ -365,10 +365,6 @@ static int mxs_i2c_get_ofdata(struct mxs_i2c_dev *i2c) struct device_node *node = dev->of_node; int ret; - if (!node) - return -EINVAL; - - i2c->speed = &mxs_i2c_95kHz_config; ret = of_property_read_u32(node, "clock-frequency", &speed); if (ret) dev_warn(dev, "No I2C speed selected, using 100kHz\n"); @@ -419,10 +415,13 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev) return err; i2c->dev = dev; + i2c->speed = &mxs_i2c_95kHz_config; - err = mxs_i2c_get_ofdata(i2c); - if (err) - return err; + if (dev->of_node) { + err = mxs_i2c_get_ofdata(i2c); + if (err) + return err; + } platform_set_drvdata(pdev, i2c); diff --git a/trunk/drivers/i2c/busses/i2c-nomadik.c b/trunk/drivers/i2c/busses/i2c-nomadik.c index 5e6f1eed4f83..61b00edacb08 100644 --- a/trunk/drivers/i2c/busses/i2c-nomadik.c +++ b/trunk/drivers/i2c/busses/i2c-nomadik.c @@ -350,10 +350,6 @@ static void setup_i2c_controller(struct nmk_i2c_dev *dev) i2c_clk = clk_get_rate(dev->clk); - /* fallback to std. mode if machine has not provided it */ - if (dev->cfg.clk_freq == 0) - dev->cfg.clk_freq = 100000; - /* * The spec says, in case of std. 
mode the divider is * 2 whereas it is 3 for fast and fastplus mode of @@ -911,20 +907,32 @@ static const struct i2c_algorithm nmk_i2c_algo = { .functionality = nmk_i2c_functionality }; +static struct nmk_i2c_controller u8500_i2c = { + /* + * Slave data setup time; 250ns, 100ns, and 10ns, which + * is 14, 6 and 2 respectively for a 48Mhz i2c clock. + */ + .slsu = 0xe, + .tft = 1, /* Tx FIFO threshold */ + .rft = 8, /* Rx FIFO threshold */ + .clk_freq = 400000, /* fast mode operation */ + .timeout = 200, /* Slave response timeout(ms) */ + .sm = I2C_FREQ_MODE_FAST, +}; + static atomic_t adapter_id = ATOMIC_INIT(0); static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) { int ret = 0; - struct nmk_i2c_controller *pdata = - adev->dev.platform_data; + struct nmk_i2c_controller *pdata = adev->dev.platform_data; struct nmk_i2c_dev *dev; struct i2c_adapter *adap; - if (!pdata) { - dev_warn(&adev->dev, "no platform data\n"); - return -ENODEV; - } + if (!pdata) + /* No i2c configuration found, using the default. */ + pdata = &u8500_i2c; + dev = kzalloc(sizeof(struct nmk_i2c_dev), GFP_KERNEL); if (!dev) { dev_err(&adev->dev, "cannot allocate memory\n"); diff --git a/trunk/drivers/i2c/busses/i2c-omap.c b/trunk/drivers/i2c/busses/i2c-omap.c index 6849635b268a..5d19a49803c1 100644 --- a/trunk/drivers/i2c/busses/i2c-omap.c +++ b/trunk/drivers/i2c/busses/i2c-omap.c @@ -584,7 +584,7 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) r = pm_runtime_get_sync(dev->dev); if (IS_ERR_VALUE(r)) - return r; + goto out; r = omap_i2c_wait_for_bb(dev); if (r < 0) diff --git a/trunk/drivers/i2c/busses/i2c-pnx.c b/trunk/drivers/i2c/busses/i2c-pnx.c index 5d54416770b0..8488bddfe465 100644 --- a/trunk/drivers/i2c/busses/i2c-pnx.c +++ b/trunk/drivers/i2c/busses/i2c-pnx.c @@ -48,8 +48,9 @@ enum { mcntrl_afie = 0x00000002, mcntrl_naie = 0x00000004, mcntrl_drmie = 0x00000008, - mcntrl_daie = 0x00000020, - mcntrl_rffie = 0x00000040, + mcntrl_drsie = 0x00000010, + mcntrl_rffie = 0x00000020, + mcntrl_daie = 0x00000040, mcntrl_tffie = 0x00000080, mcntrl_reset = 0x00000100, mcntrl_cdbmode = 0x00000400, @@ -290,31 +291,37 @@ static int i2c_pnx_master_rcv(struct i2c_pnx_algo_data *alg_data) * or we didn't 'ask' for it yet. */ if (ioread32(I2C_REG_STS(alg_data)) & mstatus_rfe) { - dev_dbg(&alg_data->adapter.dev, - "%s(): Write dummy data to fill Rx-fifo...\n", - __func__); + /* 'Asking' is done asynchronously, e.g. dummy TX of several + * bytes is done before the first actual RX arrives in FIFO. + * Therefore, ordered bytes (via TX) are counted separately. + */ + if (alg_data->mif.order) { + dev_dbg(&alg_data->adapter.dev, + "%s(): Write dummy data to fill Rx-fifo...\n", + __func__); - if (alg_data->mif.len == 1) { - /* Last byte, do not acknowledge next rcv. */ - val |= stop_bit; + if (alg_data->mif.order == 1) { + /* Last byte, do not acknowledge next rcv. */ + val |= stop_bit; + + /* + * Enable interrupt RFDAIE (data in Rx fifo), + * and disable DRMIE (need data for Tx) + */ + ctl = ioread32(I2C_REG_CTL(alg_data)); + ctl |= mcntrl_rffie | mcntrl_daie; + ctl &= ~mcntrl_drmie; + iowrite32(ctl, I2C_REG_CTL(alg_data)); + } /* - * Enable interrupt RFDAIE (data in Rx fifo), - * and disable DRMIE (need data for Tx) + * Now we'll 'ask' for data: + * For each byte we want to receive, we must + * write a (dummy) byte to the Tx-FIFO. 
*/ - ctl = ioread32(I2C_REG_CTL(alg_data)); - ctl |= mcntrl_rffie | mcntrl_daie; - ctl &= ~mcntrl_drmie; - iowrite32(ctl, I2C_REG_CTL(alg_data)); + iowrite32(val, I2C_REG_TX(alg_data)); + alg_data->mif.order--; } - - /* - * Now we'll 'ask' for data: - * For each byte we want to receive, we must - * write a (dummy) byte to the Tx-FIFO. - */ - iowrite32(val, I2C_REG_TX(alg_data)); - return 0; } @@ -514,6 +521,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) alg_data->mif.buf = pmsg->buf; alg_data->mif.len = pmsg->len; + alg_data->mif.order = pmsg->len; alg_data->mif.mode = (pmsg->flags & I2C_M_RD) ? I2C_SMBUS_READ : I2C_SMBUS_WRITE; alg_data->mif.ret = 0; @@ -566,6 +574,7 @@ i2c_pnx_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) /* Cleanup to be sure... */ alg_data->mif.buf = NULL; alg_data->mif.len = 0; + alg_data->mif.order = 0; dev_dbg(&alg_data->adapter.dev, "%s(): exiting, stat = %x\n", __func__, ioread32(I2C_REG_STS(alg_data))); diff --git a/trunk/drivers/i2c/busses/i2c-tegra.c b/trunk/drivers/i2c/busses/i2c-tegra.c index 66eb53fac202..9a08c57bc936 100644 --- a/trunk/drivers/i2c/busses/i2c-tegra.c +++ b/trunk/drivers/i2c/busses/i2c-tegra.c @@ -712,7 +712,7 @@ static int __devexit tegra_i2c_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int tegra_i2c_suspend(struct device *dev) { struct tegra_i2c_dev *i2c_dev = dev_get_drvdata(dev); diff --git a/trunk/drivers/i2c/i2c-core.c b/trunk/drivers/i2c/i2c-core.c index 2efa56c5ff2c..2091ae8f539a 100644 --- a/trunk/drivers/i2c/i2c-core.c +++ b/trunk/drivers/i2c/i2c-core.c @@ -636,6 +636,22 @@ static void i2c_adapter_dev_release(struct device *dev) complete(&adap->dev_released); } +/* + * This function is only needed for mutex_lock_nested, so it is never + * called unless locking correctness checking is enabled. Thus we + * make it inline to avoid a compiler warning. That's what gcc ends up + * doing anyway. + */ +static inline unsigned int i2c_adapter_depth(struct i2c_adapter *adapter) +{ + unsigned int depth = 0; + + while ((adapter = i2c_parent_is_i2c_adapter(adapter))) + depth++; + + return depth; +} + /* * Let users instantiate I2C devices through sysfs. 
This can be used when * platform initialization code doesn't contain the proper data for @@ -726,7 +742,8 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr, /* Make sure the device was added through sysfs */ res = -ENOENT; - mutex_lock(&adap->userspace_clients_lock); + mutex_lock_nested(&adap->userspace_clients_lock, + i2c_adapter_depth(adap)); list_for_each_entry_safe(client, next, &adap->userspace_clients, detected) { if (client->addr == addr) { @@ -1073,7 +1090,8 @@ int i2c_del_adapter(struct i2c_adapter *adap) return res; /* Remove devices instantiated from sysfs */ - mutex_lock(&adap->userspace_clients_lock); + mutex_lock_nested(&adap->userspace_clients_lock, + i2c_adapter_depth(adap)); list_for_each_entry_safe(client, next, &adap->userspace_clients, detected) { dev_dbg(&adap->dev, "Removing %s at 0x%x\n", client->name, diff --git a/trunk/drivers/ide/ide-pm.c b/trunk/drivers/ide/ide-pm.c index 92406097efeb..8d1e32d7cd97 100644 --- a/trunk/drivers/ide/ide-pm.c +++ b/trunk/drivers/ide/ide-pm.c @@ -4,7 +4,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) { - ide_drive_t *drive = dev_get_drvdata(dev); + ide_drive_t *drive = to_ide_device(dev); ide_drive_t *pair = ide_get_pair_dev(drive); ide_hwif_t *hwif = drive->hwif; struct request *rq; @@ -40,7 +40,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) int generic_ide_resume(struct device *dev) { - ide_drive_t *drive = dev_get_drvdata(dev); + ide_drive_t *drive = to_ide_device(dev); ide_drive_t *pair = ide_get_pair_dev(drive); ide_hwif_t *hwif = drive->hwif; struct request *rq; diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c index f559088869f6..e8726177d103 100644 --- a/trunk/drivers/idle/intel_idle.c +++ b/trunk/drivers/idle/intel_idle.c @@ -606,8 +606,9 @@ static int __init intel_idle_init(void) intel_idle_cpuidle_driver_init(); retval = cpuidle_register_driver(&intel_idle_driver); if (retval) { + struct cpuidle_driver *drv = cpuidle_get_driver(); printk(KERN_DEBUG PREFIX "intel_idle yielding to %s", - cpuidle_get_driver()->name); + drv ? 
drv->name : "none"); return retval; } diff --git a/trunk/drivers/iio/adc/at91_adc.c b/trunk/drivers/iio/adc/at91_adc.c index f61780a02374..3bd5540238a7 100644 --- a/trunk/drivers/iio/adc/at91_adc.c +++ b/trunk/drivers/iio/adc/at91_adc.c @@ -617,7 +617,7 @@ static int __devinit at91_adc_probe(struct platform_device *pdev) st->adc_clk = clk_get(&pdev->dev, "adc_op_clk"); if (IS_ERR(st->adc_clk)) { dev_err(&pdev->dev, "Failed to get the ADC clock.\n"); - ret = PTR_ERR(st->clk); + ret = PTR_ERR(st->adc_clk); goto error_disable_clk; } diff --git a/trunk/drivers/iio/frequency/adf4350.c b/trunk/drivers/iio/frequency/adf4350.c index 59fbb3ae40e7..e35bb8f6fe75 100644 --- a/trunk/drivers/iio/frequency/adf4350.c +++ b/trunk/drivers/iio/frequency/adf4350.c @@ -129,7 +129,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) { struct adf4350_platform_data *pdata = st->pdata; u64 tmp; - u32 div_gcd, prescaler; + u32 div_gcd, prescaler, chspc; u16 mdiv, r_cnt = 0; u8 band_sel_div; @@ -158,14 +158,20 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) if (pdata->ref_div_factor) r_cnt = pdata->ref_div_factor - 1; - do { - r_cnt = adf4350_tune_r_cnt(st, r_cnt); + chspc = st->chspc; - st->r1_mod = st->fpfd / st->chspc; - while (st->r1_mod > ADF4350_MAX_MODULUS) { - r_cnt = adf4350_tune_r_cnt(st, r_cnt); - st->r1_mod = st->fpfd / st->chspc; - } + do { + do { + do { + r_cnt = adf4350_tune_r_cnt(st, r_cnt); + st->r1_mod = st->fpfd / chspc; + if (r_cnt > ADF4350_MAX_R_CNT) { + /* try higher spacing values */ + chspc++; + r_cnt = 0; + } + } while ((st->r1_mod > ADF4350_MAX_MODULUS) && r_cnt); + } while (r_cnt == 0); tmp = freq * (u64)st->r1_mod + (st->fpfd > 1); do_div(tmp, st->fpfd); /* Div round closest (n + d/2)/d */ @@ -194,7 +200,7 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq) st->regs[ADF4350_REG0] = ADF4350_REG0_INT(st->r0_int) | ADF4350_REG0_FRACT(st->r0_fract); - st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(0) | + st->regs[ADF4350_REG1] = ADF4350_REG1_PHASE(1) | ADF4350_REG1_MOD(st->r1_mod) | prescaler; diff --git a/trunk/drivers/iio/light/adjd_s311.c b/trunk/drivers/iio/light/adjd_s311.c index 1cbb449b319a..9a99f43094f0 100644 --- a/trunk/drivers/iio/light/adjd_s311.c +++ b/trunk/drivers/iio/light/adjd_s311.c @@ -271,9 +271,10 @@ static int adjd_s311_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adjd_s311_data *data = iio_priv(indio_dev); - data->buffer = krealloc(data->buffer, indio_dev->scan_bytes, - GFP_KERNEL); - if (!data->buffer) + + kfree(data->buffer); + data->buffer = kmalloc(indio_dev->scan_bytes, GFP_KERNEL); + if (data->buffer == NULL) return -ENOMEM; return 0; diff --git a/trunk/drivers/iio/light/lm3533-als.c b/trunk/drivers/iio/light/lm3533-als.c index c3e7bac13123..e45712a921ce 100644 --- a/trunk/drivers/iio/light/lm3533-als.c +++ b/trunk/drivers/iio/light/lm3533-als.c @@ -404,7 +404,7 @@ static int lm3533_als_get_hysteresis(struct iio_dev *indio_dev, unsigned nr, return ret; } -static int show_thresh_either_en(struct device *dev, +static ssize_t show_thresh_either_en(struct device *dev, struct device_attribute *attr, char *buf) { @@ -424,7 +424,7 @@ static int show_thresh_either_en(struct device *dev, return scnprintf(buf, PAGE_SIZE, "%u\n", enable); } -static int store_thresh_either_en(struct device *dev, +static ssize_t store_thresh_either_en(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { diff --git 
a/trunk/drivers/infiniband/core/ucma.c b/trunk/drivers/infiniband/core/ucma.c index 6bf850422895..055ed59838dc 100644 --- a/trunk/drivers/infiniband/core/ucma.c +++ b/trunk/drivers/infiniband/core/ucma.c @@ -267,6 +267,7 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, if (!uevent) return event->event == RDMA_CM_EVENT_CONNECT_REQUEST; + mutex_lock(&ctx->file->mut); uevent->cm_id = cm_id; ucma_set_event_context(ctx, event, uevent); uevent->resp.event = event->event; @@ -277,7 +278,6 @@ static int ucma_event_handler(struct rdma_cm_id *cm_id, ucma_copy_conn_event(&uevent->resp.param.conn, &event->param.conn); - mutex_lock(&ctx->file->mut); if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) { if (!ctx->backlog) { ret = -ENOMEM; diff --git a/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c b/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c index 8c81992fa6db..e4a73158fc7f 100644 --- a/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c +++ b/trunk/drivers/infiniband/hw/amso1100/c2_rnic.c @@ -439,7 +439,7 @@ static int c2_rnic_close(struct c2_dev *c2dev) /* * Called by c2_probe to initialize the RNIC. This principally - * involves initalizing the various limits and resouce pools that + * involves initializing the various limits and resource pools that * comprise the RNIC instance. */ int __devinit c2_rnic_init(struct c2_dev *c2dev) diff --git a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c index 77b6b182778a..aaf88ef9409c 100644 --- a/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/trunk/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1680,7 +1680,7 @@ static int close_con_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) * T3A does 3 things when a TERM is received: * 1) send up a CPL_RDMA_TERMINATE message with the TERM packet * 2) generate an async event on the QP with the TERMINATE opcode - * 3) post a TERMINATE opcde cqe into the associated CQ. + * 3) post a TERMINATE opcode cqe into the associated CQ. * * For (1), we save the message in the qp for later consumer consumption. * For (2), we move the QP into TERMINATE, post a QP event and disconnect. 
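The cxgb4/cm.c hunk that follows is a use-before-check fix: abort_rpl() used to issue a debug print that dereferenced ep->hwtid before testing whether lookup_tid() had returned NULL, and the change simply moves that print below the NULL check. A minimal, self-contained sketch of the same ordering -- all names in it (struct endpoint, lookup_endpoint(), abort_rpl_sketch()) are hypothetical stand-ins, not the driver's own API:

    #include <stdio.h>
    #include <stddef.h>

    struct endpoint { unsigned int hwtid; };

    /* Stand-in for the driver's tid lookup; may legitimately return NULL. */
    static struct endpoint *lookup_endpoint(struct endpoint *tbl, size_t n,
                                            unsigned int tid)
    {
            return tid < n ? &tbl[tid] : NULL;
    }

    static int abort_rpl_sketch(struct endpoint *tbl, size_t n, unsigned int tid)
    {
            struct endpoint *ep = lookup_endpoint(tbl, n, tid);

            if (!ep) {
                    printf("abort rpl to freed endpoint\n");
                    return 0;       /* nothing is dereferenced on this path */
            }
            /* Safe only after the check above -- this is the line the hunk moves. */
            printf("ep %p tid %u\n", (void *)ep, ep->hwtid);
            return 1;
    }

    int main(void)
    {
            struct endpoint tbl[2] = { { 7 }, { 9 } };

            abort_rpl_sketch(tbl, 2, 1);    /* valid tid */
            abort_rpl_sketch(tbl, 2, 5);    /* stale tid: takes the early return */
            return 0;
    }

The same "validate the lookup, then log" ordering is what keeps the warning path from dereferencing a NULL endpoint when an abort reply races with endpoint teardown.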
diff --git a/trunk/drivers/infiniband/hw/cxgb4/cm.c b/trunk/drivers/infiniband/hw/cxgb4/cm.c index 51f42061dae9..6cfd4d8fd0bd 100644 --- a/trunk/drivers/infiniband/hw/cxgb4/cm.c +++ b/trunk/drivers/infiniband/hw/cxgb4/cm.c @@ -1361,11 +1361,11 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) struct tid_info *t = dev->rdev.lldi.tids; ep = lookup_tid(t, tid); - PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); if (!ep) { printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); return 0; } + PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); mutex_lock(&ep->com.mutex); switch (ep->com.state) { case ABORTING: diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_irq.c b/trunk/drivers/infiniband/hw/ehca/ehca_irq.c index 53589000fd07..8615d7cf7e01 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_irq.c +++ b/trunk/drivers/infiniband/hw/ehca/ehca_irq.c @@ -42,6 +42,7 @@ */ #include +#include #include "ehca_classes.h" #include "ehca_irq.h" @@ -652,7 +653,7 @@ void ehca_tasklet_eq(unsigned long data) ehca_process_eq((struct ehca_shca*)data, 1); } -static inline int find_next_online_cpu(struct ehca_comp_pool *pool) +static int find_next_online_cpu(struct ehca_comp_pool *pool) { int cpu; unsigned long flags; @@ -662,17 +663,20 @@ static inline int find_next_online_cpu(struct ehca_comp_pool *pool) ehca_dmp(cpu_online_mask, cpumask_size(), ""); spin_lock_irqsave(&pool->last_cpu_lock, flags); - cpu = cpumask_next(pool->last_cpu, cpu_online_mask); - if (cpu >= nr_cpu_ids) - cpu = cpumask_first(cpu_online_mask); - pool->last_cpu = cpu; + do { + cpu = cpumask_next(pool->last_cpu, cpu_online_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_first(cpu_online_mask); + pool->last_cpu = cpu; + } while (!per_cpu_ptr(pool->cpu_comp_tasks, cpu)->active); spin_unlock_irqrestore(&pool->last_cpu_lock, flags); return cpu; } static void __queue_comp_task(struct ehca_cq *__cq, - struct ehca_cpu_comp_task *cct) + struct ehca_cpu_comp_task *cct, + struct task_struct *thread) { unsigned long flags; @@ -683,7 +687,7 @@ static void __queue_comp_task(struct ehca_cq *__cq, __cq->nr_callbacks++; list_add_tail(&__cq->entry, &cct->cq_list); cct->cq_jobs++; - wake_up(&cct->wait_queue); + wake_up_process(thread); } else __cq->nr_callbacks++; @@ -695,6 +699,7 @@ static void queue_comp_task(struct ehca_cq *__cq) { int cpu_id; struct ehca_cpu_comp_task *cct; + struct task_struct *thread; int cq_jobs; unsigned long flags; @@ -702,7 +707,8 @@ static void queue_comp_task(struct ehca_cq *__cq) BUG_ON(!cpu_online(cpu_id)); cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); - BUG_ON(!cct); + thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id); + BUG_ON(!cct || !thread); spin_lock_irqsave(&cct->task_lock, flags); cq_jobs = cct->cq_jobs; @@ -710,28 +716,25 @@ static void queue_comp_task(struct ehca_cq *__cq) if (cq_jobs > 0) { cpu_id = find_next_online_cpu(pool); cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu_id); - BUG_ON(!cct); + thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu_id); + BUG_ON(!cct || !thread); } - - __queue_comp_task(__cq, cct); + __queue_comp_task(__cq, cct, thread); } static void run_comp_task(struct ehca_cpu_comp_task *cct) { struct ehca_cq *cq; - unsigned long flags; - - spin_lock_irqsave(&cct->task_lock, flags); while (!list_empty(&cct->cq_list)) { cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); - spin_unlock_irqrestore(&cct->task_lock, flags); + spin_unlock_irq(&cct->task_lock); comp_event_callback(cq); if (atomic_dec_and_test(&cq->nr_events)) wake_up(&cq->wait_completion); - 
spin_lock_irqsave(&cct->task_lock, flags); + spin_lock_irq(&cct->task_lock); spin_lock(&cq->task_lock); cq->nr_callbacks--; if (!cq->nr_callbacks) { @@ -740,159 +743,76 @@ static void run_comp_task(struct ehca_cpu_comp_task *cct) } spin_unlock(&cq->task_lock); } - - spin_unlock_irqrestore(&cct->task_lock, flags); } -static int comp_task(void *__cct) +static void comp_task_park(unsigned int cpu) { - struct ehca_cpu_comp_task *cct = __cct; - int cql_empty; - DECLARE_WAITQUEUE(wait, current); - - set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop()) { - add_wait_queue(&cct->wait_queue, &wait); - - spin_lock_irq(&cct->task_lock); - cql_empty = list_empty(&cct->cq_list); - spin_unlock_irq(&cct->task_lock); - if (cql_empty) - schedule(); - else - __set_current_state(TASK_RUNNING); - - remove_wait_queue(&cct->wait_queue, &wait); + struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); + struct ehca_cpu_comp_task *target; + struct task_struct *thread; + struct ehca_cq *cq, *tmp; + LIST_HEAD(list); - spin_lock_irq(&cct->task_lock); - cql_empty = list_empty(&cct->cq_list); - spin_unlock_irq(&cct->task_lock); - if (!cql_empty) - run_comp_task(__cct); + spin_lock_irq(&cct->task_lock); + cct->cq_jobs = 0; + cct->active = 0; + list_splice_init(&cct->cq_list, &list); + spin_unlock_irq(&cct->task_lock); - set_current_state(TASK_INTERRUPTIBLE); + cpu = find_next_online_cpu(pool); + target = per_cpu_ptr(pool->cpu_comp_tasks, cpu); + thread = *per_cpu_ptr(pool->cpu_comp_threads, cpu); + spin_lock_irq(&target->task_lock); + list_for_each_entry_safe(cq, tmp, &list, entry) { + list_del(&cq->entry); + __queue_comp_task(cq, target, thread); } - __set_current_state(TASK_RUNNING); - - return 0; -} - -static struct task_struct *create_comp_task(struct ehca_comp_pool *pool, - int cpu) -{ - struct ehca_cpu_comp_task *cct; - - cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); - spin_lock_init(&cct->task_lock); - INIT_LIST_HEAD(&cct->cq_list); - init_waitqueue_head(&cct->wait_queue); - cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu), - "ehca_comp/%d", cpu); - - return cct->task; + spin_unlock_irq(&target->task_lock); } -static void destroy_comp_task(struct ehca_comp_pool *pool, - int cpu) +static void comp_task_stop(unsigned int cpu, bool online) { - struct ehca_cpu_comp_task *cct; - struct task_struct *task; - unsigned long flags_cct; - - cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); - - spin_lock_irqsave(&cct->task_lock, flags_cct); + struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); - task = cct->task; - cct->task = NULL; + spin_lock_irq(&cct->task_lock); cct->cq_jobs = 0; - - spin_unlock_irqrestore(&cct->task_lock, flags_cct); - - if (task) - kthread_stop(task); + cct->active = 0; + WARN_ON(!list_empty(&cct->cq_list)); + spin_unlock_irq(&cct->task_lock); } -static void __cpuinit take_over_work(struct ehca_comp_pool *pool, int cpu) +static int comp_task_should_run(unsigned int cpu) { struct ehca_cpu_comp_task *cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); - LIST_HEAD(list); - struct ehca_cq *cq; - unsigned long flags_cct; - - spin_lock_irqsave(&cct->task_lock, flags_cct); - - list_splice_init(&cct->cq_list, &list); - - while (!list_empty(&list)) { - cq = list_entry(cct->cq_list.next, struct ehca_cq, entry); - - list_del(&cq->entry); - __queue_comp_task(cq, this_cpu_ptr(pool->cpu_comp_tasks)); - } - - spin_unlock_irqrestore(&cct->task_lock, flags_cct); + return cct->cq_jobs; } -static int __cpuinit comp_pool_callback(struct notifier_block *nfb, 
- unsigned long action, - void *hcpu) +static void comp_task(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; - struct ehca_cpu_comp_task *cct; + struct ehca_cpu_comp_task *cct = this_cpu_ptr(pool->cpu_comp_tasks); + int cql_empty; - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - ehca_gen_dbg("CPU: %x (CPU_PREPARE)", cpu); - if (!create_comp_task(pool, cpu)) { - ehca_gen_err("Can't create comp_task for cpu: %x", cpu); - return notifier_from_errno(-ENOMEM); - } - break; - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - ehca_gen_dbg("CPU: %x (CPU_CANCELED)", cpu); - cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); - kthread_bind(cct->task, cpumask_any(cpu_online_mask)); - destroy_comp_task(pool, cpu); - break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - ehca_gen_dbg("CPU: %x (CPU_ONLINE)", cpu); - cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); - kthread_bind(cct->task, cpu); - wake_up_process(cct->task); - break; - case CPU_DOWN_PREPARE: - case CPU_DOWN_PREPARE_FROZEN: - ehca_gen_dbg("CPU: %x (CPU_DOWN_PREPARE)", cpu); - break; - case CPU_DOWN_FAILED: - case CPU_DOWN_FAILED_FROZEN: - ehca_gen_dbg("CPU: %x (CPU_DOWN_FAILED)", cpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - ehca_gen_dbg("CPU: %x (CPU_DEAD)", cpu); - destroy_comp_task(pool, cpu); - take_over_work(pool, cpu); - break; + spin_lock_irq(&cct->task_lock); + cql_empty = list_empty(&cct->cq_list); + if (!cql_empty) { + __set_current_state(TASK_RUNNING); + run_comp_task(cct); } - - return NOTIFY_OK; + spin_unlock_irq(&cct->task_lock); } -static struct notifier_block comp_pool_callback_nb __cpuinitdata = { - .notifier_call = comp_pool_callback, - .priority = 0, +static struct smp_hotplug_thread comp_pool_threads = { + .thread_should_run = comp_task_should_run, + .thread_fn = comp_task, + .thread_comm = "ehca_comp/%u", + .cleanup = comp_task_stop, + .park = comp_task_park, }; int ehca_create_comp_pool(void) { - int cpu; - struct task_struct *task; + int cpu, ret = -ENOMEM; if (!ehca_scaling_code) return 0; @@ -905,38 +825,46 @@ int ehca_create_comp_pool(void) pool->last_cpu = cpumask_any(cpu_online_mask); pool->cpu_comp_tasks = alloc_percpu(struct ehca_cpu_comp_task); - if (pool->cpu_comp_tasks == NULL) { - kfree(pool); - return -EINVAL; - } + if (!pool->cpu_comp_tasks) + goto out_pool; - for_each_online_cpu(cpu) { - task = create_comp_task(pool, cpu); - if (task) { - kthread_bind(task, cpu); - wake_up_process(task); - } + pool->cpu_comp_threads = alloc_percpu(struct task_struct *); + if (!pool->cpu_comp_threads) + goto out_tasks; + + for_each_present_cpu(cpu) { + struct ehca_cpu_comp_task *cct; + + cct = per_cpu_ptr(pool->cpu_comp_tasks, cpu); + spin_lock_init(&cct->task_lock); + INIT_LIST_HEAD(&cct->cq_list); } - register_hotcpu_notifier(&comp_pool_callback_nb); + comp_pool_threads.store = pool->cpu_comp_threads; + ret = smpboot_register_percpu_thread(&comp_pool_threads); + if (ret) + goto out_threads; - printk(KERN_INFO "eHCA scaling code enabled\n"); + pr_info("eHCA scaling code enabled\n"); + return ret; - return 0; +out_threads: + free_percpu(pool->cpu_comp_threads); +out_tasks: + free_percpu(pool->cpu_comp_tasks); +out_pool: + kfree(pool); + return ret; } void ehca_destroy_comp_pool(void) { - int i; - if (!ehca_scaling_code) return; - unregister_hotcpu_notifier(&comp_pool_callback_nb); - - for_each_online_cpu(i) - destroy_comp_task(pool, i); + smpboot_unregister_percpu_thread(&comp_pool_threads); + free_percpu(pool->cpu_comp_threads); free_percpu(pool->cpu_comp_tasks); 
kfree(pool); } diff --git a/trunk/drivers/infiniband/hw/ehca/ehca_irq.h b/trunk/drivers/infiniband/hw/ehca/ehca_irq.h index 3346cb06cea6..5370199f08c7 100644 --- a/trunk/drivers/infiniband/hw/ehca/ehca_irq.h +++ b/trunk/drivers/infiniband/hw/ehca/ehca_irq.h @@ -58,15 +58,15 @@ void ehca_tasklet_eq(unsigned long data); void ehca_process_eq(struct ehca_shca *shca, int is_irq); struct ehca_cpu_comp_task { - wait_queue_head_t wait_queue; struct list_head cq_list; - struct task_struct *task; spinlock_t task_lock; int cq_jobs; + int active; }; struct ehca_comp_pool { - struct ehca_cpu_comp_task *cpu_comp_tasks; + struct ehca_cpu_comp_task __percpu *cpu_comp_tasks; + struct task_struct * __percpu *cpu_comp_threads; int last_cpu; spinlock_t last_cpu_lock; }; diff --git a/trunk/drivers/infiniband/hw/mlx4/mad.c b/trunk/drivers/infiniband/hw/mlx4/mad.c index c27141fef1ab..9c2ae7efd00f 100644 --- a/trunk/drivers/infiniband/hw/mlx4/mad.c +++ b/trunk/drivers/infiniband/hw/mlx4/mad.c @@ -125,6 +125,7 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) { struct ib_ah *new_ah; struct ib_ah_attr ah_attr; + unsigned long flags; if (!dev->send_agent[port_num - 1][0]) return; @@ -139,11 +140,11 @@ static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl) if (IS_ERR(new_ah)) return; - spin_lock(&dev->sm_lock); + spin_lock_irqsave(&dev->sm_lock, flags); if (dev->sm_ah[port_num - 1]) ib_destroy_ah(dev->sm_ah[port_num - 1]); dev->sm_ah[port_num - 1] = new_ah; - spin_unlock(&dev->sm_lock); + spin_unlock_irqrestore(&dev->sm_lock, flags); } /* @@ -197,13 +198,15 @@ static void smp_snoop(struct ib_device *ibdev, u8 port_num, struct ib_mad *mad, static void node_desc_override(struct ib_device *dev, struct ib_mad *mad) { + unsigned long flags; + if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP && mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) { - spin_lock(&to_mdev(dev)->sm_lock); + spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags); memcpy(((struct ib_smp *) mad)->data, dev->node_desc, 64); - spin_unlock(&to_mdev(dev)->sm_lock); + spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags); } } @@ -213,6 +216,7 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; int ret; + unsigned long flags; if (agent) { send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR, @@ -225,13 +229,13 @@ static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, struct ib_mad *ma * wrong following the IB spec strictly, but we know * it's OK for our devices). 
*/ - spin_lock(&dev->sm_lock); + spin_lock_irqsave(&dev->sm_lock, flags); memcpy(send_buf->mad, mad, sizeof *mad); if ((send_buf->ah = dev->sm_ah[port_num - 1])) ret = ib_post_send_mad(send_buf, NULL); else ret = -EINVAL; - spin_unlock(&dev->sm_lock); + spin_unlock_irqrestore(&dev->sm_lock, flags); if (ret) ib_free_send_mad(send_buf); diff --git a/trunk/drivers/infiniband/hw/mlx4/main.c b/trunk/drivers/infiniband/hw/mlx4/main.c index fe2088cfa6ee..cc05579ebce7 100644 --- a/trunk/drivers/infiniband/hw/mlx4/main.c +++ b/trunk/drivers/infiniband/hw/mlx4/main.c @@ -423,6 +423,7 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) { struct mlx4_cmd_mailbox *mailbox; + unsigned long flags; if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) return -EOPNOTSUPP; @@ -430,9 +431,9 @@ static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask, if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) return 0; - spin_lock(&to_mdev(ibdev)->sm_lock); + spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags); memcpy(ibdev->node_desc, props->node_desc, 64); - spin_unlock(&to_mdev(ibdev)->sm_lock); + spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags); /* * If possible, pass node desc to FW, so it can generate diff --git a/trunk/drivers/infiniband/hw/mlx4/qp.c b/trunk/drivers/infiniband/hw/mlx4/qp.c index a6d8ea060ea8..f585eddef4b7 100644 --- a/trunk/drivers/infiniband/hw/mlx4/qp.c +++ b/trunk/drivers/infiniband/hw/mlx4/qp.c @@ -1407,6 +1407,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, struct mlx4_wqe_mlx_seg *mlx = wqe; struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx; struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah); + struct net_device *ndev; union ib_gid sgid; u16 pkey; int send_size; @@ -1483,7 +1484,10 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr, memcpy(sqp->ud_header.eth.dmac_h, ah->av.eth.mac, 6); /* FIXME: cache smac value? 
*/ - smac = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]->dev_addr; + ndev = to_mdev(sqp->qp.ibqp.device)->iboe.netdevs[sqp->qp.port - 1]; + if (!ndev) + return -ENODEV; + smac = ndev->dev_addr; memcpy(sqp->ud_header.eth.smac_h, smac, 6); if (!memcmp(sqp->ud_header.eth.smac_h, sqp->ud_header.eth.dmac_h, 6)) mlx->flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK); diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 5a044526e4f4..c4e0131f1b57 100644 --- a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -161,7 +161,7 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev) ocrdma_get_guid(dev, &sgid->raw[8]); } -#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) +#if IS_ENABLED(CONFIG_VLAN_8021Q) static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev) { struct net_device *netdev, *tmp; @@ -202,14 +202,13 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) return 0; } -#if IS_ENABLED(CONFIG_IPV6) || IS_ENABLED(CONFIG_VLAN_8021Q) +#if IS_ENABLED(CONFIG_IPV6) static int ocrdma_inet6addr_event(struct notifier_block *notifier, unsigned long event, void *ptr) { struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr; - struct net_device *event_netdev = ifa->idev->dev; - struct net_device *netdev = NULL; + struct net_device *netdev = ifa->idev->dev; struct ib_event gid_event; struct ocrdma_dev *dev; bool found = false; @@ -217,11 +216,12 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier, bool is_vlan = false; u16 vid = 0; - netdev = vlan_dev_real_dev(event_netdev); - if (netdev != event_netdev) { - is_vlan = true; - vid = vlan_dev_vlan_id(event_netdev); + is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN; + if (is_vlan) { + vid = vlan_dev_vlan_id(netdev); + netdev = vlan_dev_real_dev(netdev); } + rcu_read_lock(); list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) { if (dev->nic_info.netdev == netdev) { diff --git a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index cb5b7f7d4d38..b29a4246ef41 100644 --- a/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/trunk/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -2219,7 +2219,6 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, u32 wqe_idx; if (!qp->wqe_wr_id_tbl[tail].signaled) { - expand = true; /* CQE cannot be consumed yet */ *polled = false; /* WC cannot be consumed yet */ } else { ibwc->status = IB_WC_SUCCESS; @@ -2227,10 +2226,11 @@ static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp, ibwc->qp = &qp->ibqp; ocrdma_update_wc(qp, ibwc, tail); *polled = true; - wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK; - if (tail != wqe_idx) - expand = true; /* Coalesced CQE can't be consumed yet */ } + wqe_idx = le32_to_cpu(cqe->wq.wqeidx) & OCRDMA_CQE_WQEIDX_MASK; + if (tail != wqe_idx) + expand = true; /* Coalesced CQE can't be consumed yet */ + ocrdma_hwq_inc_tail(&qp->sq); return expand; } diff --git a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c index 0d7280af99bc..3f6b21e9dc11 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/trunk/drivers/infiniband/hw/qib/qib_iba7322.c @@ -6346,8 +6346,10 @@ static int qib_init_7322_variables(struct qib_devdata *dd) dd->piobcnt4k * dd->align4k; dd->piovl15base = ioremap_nocache(vl15off, NUM_VL15_BUFS * dd->align4k); - if (!dd->piovl15base) + if (!dd->piovl15base) { + ret = 
-ENOMEM; goto bail; + } } qib_7322_set_baseaddrs(dd); /* set chip access pointers now */ diff --git a/trunk/drivers/infiniband/hw/qib/qib_mad.c b/trunk/drivers/infiniband/hw/qib/qib_mad.c index 19f1e6c45fb6..ccb119143d20 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_mad.c +++ b/trunk/drivers/infiniband/hw/qib/qib_mad.c @@ -471,9 +471,10 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, if (port_num != port) { ibp = to_iport(ibdev, port_num); ret = check_mkey(ibp, smp, 0); - if (ret) + if (ret) { ret = IB_MAD_RESULT_FAILURE; goto bail; + } } } diff --git a/trunk/drivers/infiniband/hw/qib/qib_sd7220.c b/trunk/drivers/infiniband/hw/qib/qib_sd7220.c index a322d5171a2c..50a8a0d4fe67 100644 --- a/trunk/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/trunk/drivers/infiniband/hw/qib/qib_sd7220.c @@ -372,7 +372,7 @@ static void qib_sd_trimdone_monitor(struct qib_devdata *dd, /* Read CTRL reg for each channel to check TRIMDONE */ if (baduns & (1 << chn)) { qib_dev_err(dd, - "Reseting TRIMDONE on chn %d (%s)\n", + "Resetting TRIMDONE on chn %d (%s)\n", chn, where); ret = qib_sd7220_reg_mod(dd, IB_7220_SERDES, IB_CTRL2(chn), 0x10, 0x10); diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib.h b/trunk/drivers/infiniband/ulp/ipoib/ipoib.h index ca43901ed861..0af216d21f87 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib.h @@ -262,7 +262,10 @@ struct ipoib_ethtool_st { u16 max_coalesced_frames; }; +struct ipoib_neigh_table; + struct ipoib_neigh_hash { + struct ipoib_neigh_table *ntbl; struct ipoib_neigh __rcu **buckets; struct rcu_head rcu; u32 mask; @@ -271,9 +274,9 @@ struct ipoib_neigh_hash { struct ipoib_neigh_table { struct ipoib_neigh_hash __rcu *htbl; - rwlock_t rwlock; atomic_t entries; struct completion flushed; + struct completion deleted; }; /* diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 95ecf4eadf5f..24683fda8e21 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1271,12 +1271,15 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) { struct ipoib_dev_priv *priv = netdev_priv(tx->dev); + unsigned long flags; if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { + spin_lock_irqsave(&priv->lock, flags); list_move(&tx->list, &priv->cm.reap_list); queue_work(ipoib_workqueue, &priv->cm.reap_task); ipoib_dbg(priv, "Reap connection for gid %pI6\n", tx->neigh->daddr + 4); tx->neigh = NULL; + spin_unlock_irqrestore(&priv->lock, flags); } } diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c index 97920b77a5d0..1e19b5ae7c47 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -546,15 +546,15 @@ static void neigh_add_path(struct sk_buff *skb, u8 *daddr, struct ipoib_neigh *neigh; unsigned long flags; + spin_lock_irqsave(&priv->lock, flags); neigh = ipoib_neigh_alloc(daddr, dev); if (!neigh) { + spin_unlock_irqrestore(&priv->lock, flags); ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); return; } - spin_lock_irqsave(&priv->lock, flags); - path = __path_find(dev, daddr + 4); if (!path) { path = path_rec_create(dev, daddr + 4); @@ -863,10 +863,10 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) if (test_bit(IPOIB_STOP_NEIGH_GC, &priv->flags)) return; - 
write_lock_bh(&ntbl->rwlock); + spin_lock_irqsave(&priv->lock, flags); htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) goto out_unlock; @@ -883,16 +883,14 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) struct ipoib_neigh __rcu **np = &htbl->buckets[i]; while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { /* was the neigh idle for two GC periods */ if (time_after(neigh_obsolete, neigh->alive)) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from path/mc list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } else { np = &neigh->hnext; @@ -902,7 +900,7 @@ static void __ipoib_reap_neigh(struct ipoib_dev_priv *priv) } out_unlock: - write_unlock_bh(&ntbl->rwlock); + spin_unlock_irqrestore(&priv->lock, flags); } static void ipoib_reap_neigh(struct work_struct *work) @@ -947,10 +945,8 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr, struct ipoib_neigh *neigh; u32 hash_val; - write_lock_bh(&ntbl->rwlock); - htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) { neigh = NULL; goto out_unlock; @@ -961,10 +957,10 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr, */ hash_val = ipoib_addr_hash(htbl, daddr); for (neigh = rcu_dereference_protected(htbl->buckets[hash_val], - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); neigh != NULL; neigh = rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))) { + lockdep_is_held(&priv->lock))) { if (memcmp(daddr, neigh->daddr, INFINIBAND_ALEN) == 0) { /* found, take one ref on behalf of the caller */ if (!atomic_inc_not_zero(&neigh->refcnt)) { @@ -987,12 +983,11 @@ struct ipoib_neigh *ipoib_neigh_alloc(u8 *daddr, /* put in hash */ rcu_assign_pointer(neigh->hnext, rcu_dereference_protected(htbl->buckets[hash_val], - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); rcu_assign_pointer(htbl->buckets[hash_val], neigh); atomic_inc(&ntbl->entries); out_unlock: - write_unlock_bh(&ntbl->rwlock); return neigh; } @@ -1040,35 +1035,29 @@ void ipoib_neigh_free(struct ipoib_neigh *neigh) struct ipoib_neigh *n; u32 hash_val; - write_lock_bh(&ntbl->rwlock); - htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) - goto out_unlock; + return; hash_val = ipoib_addr_hash(htbl, neigh->daddr); np = &htbl->buckets[hash_val]; for (n = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); n != NULL; - n = rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))) { + n = rcu_dereference_protected(*np, + lockdep_is_held(&priv->lock))) { if (n == neigh) { /* found */ rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); - goto out_unlock; + return; } else { np = &n->hnext; } } - -out_unlock: - write_unlock_bh(&ntbl->rwlock); - } static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) @@ -1080,7 +1069,6 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) clear_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); ntbl->htbl = 
NULL; - rwlock_init(&ntbl->rwlock); htbl = kzalloc(sizeof(*htbl), GFP_KERNEL); if (!htbl) return -ENOMEM; @@ -1095,6 +1083,7 @@ static int ipoib_neigh_hash_init(struct ipoib_dev_priv *priv) htbl->mask = (size - 1); htbl->buckets = buckets; ntbl->htbl = htbl; + htbl->ntbl = ntbl; atomic_set(&ntbl->entries, 0); /* start garbage collection */ @@ -1111,9 +1100,11 @@ static void neigh_hash_free_rcu(struct rcu_head *head) struct ipoib_neigh_hash, rcu); struct ipoib_neigh __rcu **buckets = htbl->buckets; + struct ipoib_neigh_table *ntbl = htbl->ntbl; kfree(buckets); kfree(htbl); + complete(&ntbl->deleted); } void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) @@ -1125,10 +1116,10 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) int i; /* remove all neigh connected to a given path or mcast */ - write_lock_bh(&ntbl->rwlock); + spin_lock_irqsave(&priv->lock, flags); htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) goto out_unlock; @@ -1138,16 +1129,14 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) struct ipoib_neigh __rcu **np = &htbl->buckets[i]; while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { /* delete neighs belong to this parent */ if (!memcmp(gid, neigh->daddr + 4, sizeof (union ib_gid))) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from parent list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } else { np = &neigh->hnext; @@ -1156,7 +1145,7 @@ void ipoib_del_neighs_by_gid(struct net_device *dev, u8 *gid) } } out_unlock: - write_unlock_bh(&ntbl->rwlock); + spin_unlock_irqrestore(&priv->lock, flags); } static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) @@ -1164,37 +1153,44 @@ static void ipoib_flush_neighs(struct ipoib_dev_priv *priv) struct ipoib_neigh_table *ntbl = &priv->ntbl; struct ipoib_neigh_hash *htbl; unsigned long flags; - int i; + int i, wait_flushed = 0; - write_lock_bh(&ntbl->rwlock); + init_completion(&priv->ntbl.flushed); + + spin_lock_irqsave(&priv->lock, flags); htbl = rcu_dereference_protected(ntbl->htbl, - lockdep_is_held(&ntbl->rwlock)); + lockdep_is_held(&priv->lock)); if (!htbl) goto out_unlock; + wait_flushed = atomic_read(&priv->ntbl.entries); + if (!wait_flushed) + goto free_htbl; + for (i = 0; i < htbl->size; i++) { struct ipoib_neigh *neigh; struct ipoib_neigh __rcu **np = &htbl->buckets[i]; while ((neigh = rcu_dereference_protected(*np, - lockdep_is_held(&ntbl->rwlock))) != NULL) { + lockdep_is_held(&priv->lock))) != NULL) { rcu_assign_pointer(*np, rcu_dereference_protected(neigh->hnext, - lockdep_is_held(&ntbl->rwlock))); + lockdep_is_held(&priv->lock))); /* remove from path/mc list */ - spin_lock_irqsave(&priv->lock, flags); list_del(&neigh->list); - spin_unlock_irqrestore(&priv->lock, flags); call_rcu(&neigh->rcu, ipoib_neigh_reclaim); } } +free_htbl: rcu_assign_pointer(ntbl->htbl, NULL); call_rcu(&htbl->rcu, neigh_hash_free_rcu); out_unlock: - write_unlock_bh(&ntbl->rwlock); + spin_unlock_irqrestore(&priv->lock, flags); + if (wait_flushed) + wait_for_completion(&priv->ntbl.flushed); } static void ipoib_neigh_hash_uninit(struct net_device *dev) @@ -1203,7 +1199,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev) int stopped; 
ipoib_dbg(priv, "ipoib_neigh_hash_uninit\n"); - init_completion(&priv->ntbl.flushed); + init_completion(&priv->ntbl.deleted); set_bit(IPOIB_NEIGH_TBL_FLUSH, &priv->flags); /* Stop GC if called at init fail need to cancel work */ @@ -1211,10 +1207,9 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev) if (!stopped) cancel_delayed_work(&priv->neigh_reap_task); - if (atomic_read(&priv->ntbl.entries)) { - ipoib_flush_neighs(priv); - wait_for_completion(&priv->ntbl.flushed); - } + ipoib_flush_neighs(priv); + + wait_for_completion(&priv->ntbl.deleted); } diff --git a/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 13f4aa7593c8..75367249f447 100644 --- a/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/trunk/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -707,9 +707,7 @@ void ipoib_mcast_send(struct net_device *dev, u8 *daddr, struct sk_buff *skb) neigh = ipoib_neigh_get(dev, daddr); spin_lock_irqsave(&priv->lock, flags); if (!neigh) { - spin_unlock_irqrestore(&priv->lock, flags); neigh = ipoib_neigh_alloc(daddr, dev); - spin_lock_irqsave(&priv->lock, flags); if (neigh) { kref_get(&mcast->ah->ref); neigh->ah = mcast->ah; diff --git a/trunk/drivers/infiniband/ulp/srp/ib_srp.c b/trunk/drivers/infiniband/ulp/srp/ib_srp.c index bcbf22ee0aa7..1b5b0c730054 100644 --- a/trunk/drivers/infiniband/ulp/srp/ib_srp.c +++ b/trunk/drivers/infiniband/ulp/srp/ib_srp.c @@ -586,24 +586,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd, scmnd->sc_data_direction); } -static void srp_remove_req(struct srp_target_port *target, - struct srp_request *req, s32 req_lim_delta) +/** + * srp_claim_req - Take ownership of the scmnd associated with a request. + * @target: SRP target port. + * @req: SRP request. + * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take + * ownership of @req->scmnd if it equals @scmnd. + * + * Return value: + * Either NULL or a pointer to the SCSI command the caller became owner of. + */ +static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target, + struct srp_request *req, + struct scsi_cmnd *scmnd) +{ + unsigned long flags; + + spin_lock_irqsave(&target->lock, flags); + if (!scmnd) { + scmnd = req->scmnd; + req->scmnd = NULL; + } else if (req->scmnd == scmnd) { + req->scmnd = NULL; + } else { + scmnd = NULL; + } + spin_unlock_irqrestore(&target->lock, flags); + + return scmnd; +} + +/** + * srp_free_req() - Unmap data and add request to the free request list. 
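
srp_claim_req() above is the entire ownership story for a request: normal response processing, reset and abort all race to swap req->scmnd to NULL under target->lock, and only the winner may complete and free the command. The same idiom in isolation, with an invented my_req in place of struct srp_request:

#include <linux/spinlock.h>

struct my_req {
        void *owner;            /* the SCSI command, NULL once claimed */
};

/*
 * Returns the previous owner on success, NULL if another path already
 * claimed the request. A non-NULL @expected only claims if it still
 * matches, which is the abort case.
 */
static void *my_claim(spinlock_t *lock, struct my_req *req, void *expected)
{
        unsigned long flags;
        void *owner = NULL;

        spin_lock_irqsave(lock, flags);
        if (req->owner && (!expected || req->owner == expected)) {
                owner = req->owner;
                req->owner = NULL;
        }
        spin_unlock_irqrestore(lock, flags);

        return owner;
}

As the srp_abort() rework further below shows, a NULL claim simply means the regular completion path already won the race, so the error handler returns FAILED without touching the request.
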
+ */ +static void srp_free_req(struct srp_target_port *target, + struct srp_request *req, struct scsi_cmnd *scmnd, + s32 req_lim_delta) { unsigned long flags; - srp_unmap_data(req->scmnd, target, req); + srp_unmap_data(scmnd, target, req); + spin_lock_irqsave(&target->lock, flags); target->req_lim += req_lim_delta; - req->scmnd = NULL; list_add_tail(&req->list, &target->free_reqs); spin_unlock_irqrestore(&target->lock, flags); } static void srp_reset_req(struct srp_target_port *target, struct srp_request *req) { - req->scmnd->result = DID_RESET << 16; - req->scmnd->scsi_done(req->scmnd); - srp_remove_req(target, req, 0); + struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL); + + if (scmnd) { + scmnd->result = DID_RESET << 16; + scmnd->scsi_done(scmnd); + srp_free_req(target, req, scmnd, 0); + } } static int srp_reconnect_target(struct srp_target_port *target) @@ -1073,11 +1111,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) complete(&target->tsk_mgmt_done); } else { req = &target->req_ring[rsp->tag]; - scmnd = req->scmnd; - if (!scmnd) + scmnd = srp_claim_req(target, req, NULL); + if (!scmnd) { shost_printk(KERN_ERR, target->scsi_host, "Null scmnd for RSP w/tag %016llx\n", (unsigned long long) rsp->tag); + + spin_lock_irqsave(&target->lock, flags); + target->req_lim += be32_to_cpu(rsp->req_lim_delta); + spin_unlock_irqrestore(&target->lock, flags); + + return; + } scmnd->result = rsp->status; if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { @@ -1092,7 +1137,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp) else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER)) scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); - srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta)); + srp_free_req(target, req, scmnd, + be32_to_cpu(rsp->req_lim_delta)); + scmnd->host_scribble = NULL; scmnd->scsi_done(scmnd); } @@ -1631,25 +1678,17 @@ static int srp_abort(struct scsi_cmnd *scmnd) { struct srp_target_port *target = host_to_target(scmnd->device->host); struct srp_request *req = (struct srp_request *) scmnd->host_scribble; - int ret = SUCCESS; shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); - if (!req || target->qp_in_error) + if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd)) return FAILED; - if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, - SRP_TSK_ABORT_TASK)) - return FAILED; - - if (req->scmnd) { - if (!target->tsk_mgmt_status) { - srp_remove_req(target, req, 0); - scmnd->result = DID_ABORT << 16; - } else - ret = FAILED; - } + srp_send_tsk_mgmt(target, req->index, scmnd->device->lun, + SRP_TSK_ABORT_TASK); + srp_free_req(target, req, scmnd, 0); + scmnd->result = DID_ABORT << 16; - return ret; + return SUCCESS; } static int srp_reset_device(struct scsi_cmnd *scmnd) diff --git a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c index 7a0ce8d42887..9e1449f8c6a2 100644 --- a/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/trunk/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1469,7 +1469,7 @@ static void srpt_handle_send_comp(struct srpt_rdma_ch *ch, * * XXX: what is now target_execute_cmd used to be asynchronous, and unmapping * the data that has been transferred via IB RDMA had to be postponed until the - * check_stop_free() callback. None of this is nessecary anymore and needs to + * check_stop_free() callback. None of this is necessary anymore and needs to * be cleaned up. 
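
One easy-to-miss detail in the reworked srp_process_rsp(): when the claim fails because the command was already aborted and freed, the req_lim_delta carried by that response still has to be credited back under target->lock, otherwise the flow-control window loses a credit for every late response. Stripped down, with my_target standing in for struct srp_target_port:

#include <linux/spinlock.h>
#include <linux/types.h>

struct my_target {
        spinlock_t lock;
        s32 req_lim;            /* outstanding-request credits */
};

/* @delta is the already byte-swapped rsp->req_lim_delta. */
static void my_credit_back(struct my_target *target, s32 delta)
{
        unsigned long flags;

        spin_lock_irqsave(&target->lock, flags);
        target->req_lim += delta;
        spin_unlock_irqrestore(&target->lock, flags);
}
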
*/ static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch, diff --git a/trunk/drivers/input/evdev.c b/trunk/drivers/input/evdev.c index 6c58bfff01a3..118d0300f1fb 100644 --- a/trunk/drivers/input/evdev.c +++ b/trunk/drivers/input/evdev.c @@ -54,16 +54,9 @@ struct evdev_client { static struct evdev *evdev_table[EVDEV_MINORS]; static DEFINE_MUTEX(evdev_table_mutex); -static void evdev_pass_event(struct evdev_client *client, - struct input_event *event, - ktime_t mono, ktime_t real) +static void __pass_event(struct evdev_client *client, + const struct input_event *event) { - event->time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ? - mono : real); - - /* Interrupts are disabled, just acquire the lock. */ - spin_lock(&client->buffer_lock); - client->buffer[client->head++] = *event; client->head &= client->bufsize - 1; @@ -86,42 +79,74 @@ static void evdev_pass_event(struct evdev_client *client, client->packet_head = client->head; kill_fasync(&client->fasync, SIGIO, POLL_IN); } +} + +static void evdev_pass_values(struct evdev_client *client, + const struct input_value *vals, unsigned int count, + ktime_t mono, ktime_t real) +{ + struct evdev *evdev = client->evdev; + const struct input_value *v; + struct input_event event; + bool wakeup = false; + + event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ? + mono : real); + + /* Interrupts are disabled, just acquire the lock. */ + spin_lock(&client->buffer_lock); + + for (v = vals; v != vals + count; v++) { + event.type = v->type; + event.code = v->code; + event.value = v->value; + __pass_event(client, &event); + if (v->type == EV_SYN && v->code == SYN_REPORT) + wakeup = true; + } spin_unlock(&client->buffer_lock); + + if (wakeup) + wake_up_interruptible(&evdev->wait); } /* - * Pass incoming event to all connected clients. + * Pass incoming events to all connected clients. */ -static void evdev_event(struct input_handle *handle, - unsigned int type, unsigned int code, int value) +static void evdev_events(struct input_handle *handle, + const struct input_value *vals, unsigned int count) { struct evdev *evdev = handle->private; struct evdev_client *client; - struct input_event event; ktime_t time_mono, time_real; time_mono = ktime_get(); time_real = ktime_sub(time_mono, ktime_get_monotonic_offset()); - event.type = type; - event.code = code; - event.value = value; - rcu_read_lock(); client = rcu_dereference(evdev->grab); if (client) - evdev_pass_event(client, &event, time_mono, time_real); + evdev_pass_values(client, vals, count, time_mono, time_real); else list_for_each_entry_rcu(client, &evdev->client_list, node) - evdev_pass_event(client, &event, time_mono, time_real); + evdev_pass_values(client, vals, count, + time_mono, time_real); rcu_read_unlock(); +} - if (type == EV_SYN && code == SYN_REPORT) - wake_up_interruptible(&evdev->wait); +/* + * Pass incoming event to all connected clients. 
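
The evdev conversion above rides on the new struct input_value (a bare type/code/value triple) and the optional ->events() handler callback: the core hands over a whole frame at once, and readers are only woken after the terminating EV_SYN/SYN_REPORT has been queued. A hedged sketch of such a consumer; my_client and its queue callback are invented, they are not evdev_client's real ring buffer:

#include <linux/input.h>
#include <linux/wait.h>

/* Hypothetical per-client state standing in for struct evdev_client. */
struct my_client {
        void (*queue)(struct my_client *c, const struct input_event *ev);
        wait_queue_head_t wait;
};

static void my_events(struct input_handle *handle,
                      const struct input_value *vals, unsigned int count)
{
        struct my_client *client = handle->private;
        const struct input_value *v;
        struct input_event ev = { };
        bool wakeup = false;

        for (v = vals; v != vals + count; v++) {
                ev.type = v->type;
                ev.code = v->code;
                ev.value = v->value;
                client->queue(client, &ev);

                if (v->type == EV_SYN && v->code == SYN_REPORT)
                        wakeup = true;  /* frame complete */
        }

        if (wakeup)
                wake_up_interruptible(&client->wait);
}
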
+ */ +static void evdev_event(struct input_handle *handle, + unsigned int type, unsigned int code, int value) +{ + struct input_value vals[] = { { type, code, value } }; + + evdev_events(handle, vals, 1); } static int evdev_fasync(int fd, struct file *file, int on) @@ -653,20 +678,22 @@ static int evdev_handle_mt_request(struct input_dev *dev, unsigned int size, int __user *ip) { - const struct input_mt_slot *mt = dev->mt; + const struct input_mt *mt = dev->mt; unsigned int code; int max_slots; int i; if (get_user(code, &ip[0])) return -EFAULT; - if (!input_is_mt_value(code)) + if (!mt || !input_is_mt_value(code)) return -EINVAL; max_slots = (size - sizeof(__u32)) / sizeof(__s32); - for (i = 0; i < dev->mtsize && i < max_slots; i++) - if (put_user(input_mt_get_value(&mt[i], code), &ip[1 + i])) + for (i = 0; i < mt->num_slots && i < max_slots; i++) { + int value = input_mt_get_value(&mt->slots[i], code); + if (put_user(value, &ip[1 + i])) return -EFAULT; + } return 0; } @@ -1048,6 +1075,7 @@ MODULE_DEVICE_TABLE(input, evdev_ids); static struct input_handler evdev_handler = { .event = evdev_event, + .events = evdev_events, .connect = evdev_connect, .disconnect = evdev_disconnect, .fops = &evdev_fops, diff --git a/trunk/drivers/input/input-mt.c b/trunk/drivers/input/input-mt.c index 70a16c7da8cc..c0ec7d42c3be 100644 --- a/trunk/drivers/input/input-mt.c +++ b/trunk/drivers/input/input-mt.c @@ -14,6 +14,14 @@ #define TRKID_SGN ((TRKID_MAX + 1) >> 1) +static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src) +{ + if (dev->absinfo && test_bit(src, dev->absbit)) { + dev->absinfo[dst] = dev->absinfo[src]; + dev->absbit[BIT_WORD(dst)] |= BIT_MASK(dst); + } +} + /** * input_mt_init_slots() - initialize MT input slots * @dev: input device supporting MT events and finger tracking @@ -25,29 +33,63 @@ * May be called repeatedly. Returns -EINVAL if attempting to * reinitialize with a different number of slots. */ -int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots) +int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots, + unsigned int flags) { + struct input_mt *mt = dev->mt; int i; if (!num_slots) return 0; - if (dev->mt) - return dev->mtsize != num_slots ? -EINVAL : 0; + if (mt) + return mt->num_slots != num_slots ? 
-EINVAL : 0; - dev->mt = kcalloc(num_slots, sizeof(struct input_mt_slot), GFP_KERNEL); - if (!dev->mt) - return -ENOMEM; + mt = kzalloc(sizeof(*mt) + num_slots * sizeof(*mt->slots), GFP_KERNEL); + if (!mt) + goto err_mem; - dev->mtsize = num_slots; + mt->num_slots = num_slots; + mt->flags = flags; input_set_abs_params(dev, ABS_MT_SLOT, 0, num_slots - 1, 0, 0); input_set_abs_params(dev, ABS_MT_TRACKING_ID, 0, TRKID_MAX, 0, 0); - input_set_events_per_packet(dev, 6 * num_slots); + + if (flags & (INPUT_MT_POINTER | INPUT_MT_DIRECT)) { + __set_bit(EV_KEY, dev->evbit); + __set_bit(BTN_TOUCH, dev->keybit); + + copy_abs(dev, ABS_X, ABS_MT_POSITION_X); + copy_abs(dev, ABS_Y, ABS_MT_POSITION_Y); + copy_abs(dev, ABS_PRESSURE, ABS_MT_PRESSURE); + } + if (flags & INPUT_MT_POINTER) { + __set_bit(BTN_TOOL_FINGER, dev->keybit); + __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit); + if (num_slots >= 3) + __set_bit(BTN_TOOL_TRIPLETAP, dev->keybit); + if (num_slots >= 4) + __set_bit(BTN_TOOL_QUADTAP, dev->keybit); + if (num_slots >= 5) + __set_bit(BTN_TOOL_QUINTTAP, dev->keybit); + __set_bit(INPUT_PROP_POINTER, dev->propbit); + } + if (flags & INPUT_MT_DIRECT) + __set_bit(INPUT_PROP_DIRECT, dev->propbit); + if (flags & INPUT_MT_TRACK) { + unsigned int n2 = num_slots * num_slots; + mt->red = kcalloc(n2, sizeof(*mt->red), GFP_KERNEL); + if (!mt->red) + goto err_mem; + } /* Mark slots as 'unused' */ for (i = 0; i < num_slots; i++) - input_mt_set_value(&dev->mt[i], ABS_MT_TRACKING_ID, -1); + input_mt_set_value(&mt->slots[i], ABS_MT_TRACKING_ID, -1); + dev->mt = mt; return 0; +err_mem: + kfree(mt); + return -ENOMEM; } EXPORT_SYMBOL(input_mt_init_slots); @@ -60,11 +102,11 @@ EXPORT_SYMBOL(input_mt_init_slots); */ void input_mt_destroy_slots(struct input_dev *dev) { - kfree(dev->mt); + if (dev->mt) { + kfree(dev->mt->red); + kfree(dev->mt); + } dev->mt = NULL; - dev->mtsize = 0; - dev->slot = 0; - dev->trkid = 0; } EXPORT_SYMBOL(input_mt_destroy_slots); @@ -83,18 +125,24 @@ EXPORT_SYMBOL(input_mt_destroy_slots); void input_mt_report_slot_state(struct input_dev *dev, unsigned int tool_type, bool active) { - struct input_mt_slot *mt; + struct input_mt *mt = dev->mt; + struct input_mt_slot *slot; int id; - if (!dev->mt || !active) { + if (!mt) + return; + + slot = &mt->slots[mt->slot]; + slot->frame = mt->frame; + + if (!active) { input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); return; } - mt = &dev->mt[dev->slot]; - id = input_mt_get_value(mt, ABS_MT_TRACKING_ID); - if (id < 0 || input_mt_get_value(mt, ABS_MT_TOOL_TYPE) != tool_type) - id = input_mt_new_trkid(dev); + id = input_mt_get_value(slot, ABS_MT_TRACKING_ID); + if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type) + id = input_mt_new_trkid(mt); input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id); input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type); @@ -135,13 +183,19 @@ EXPORT_SYMBOL(input_mt_report_finger_count); */ void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count) { - struct input_mt_slot *oldest = NULL; - int oldid = dev->trkid; - int count = 0; - int i; + struct input_mt *mt = dev->mt; + struct input_mt_slot *oldest; + int oldid, count, i; + + if (!mt) + return; + + oldest = 0; + oldid = mt->trkid; + count = 0; - for (i = 0; i < dev->mtsize; ++i) { - struct input_mt_slot *ps = &dev->mt[i]; + for (i = 0; i < mt->num_slots; ++i) { + struct input_mt_slot *ps = &mt->slots[i]; int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID); if (id < 0) @@ -160,13 +214,208 @@ void input_mt_report_pointer_emulation(struct input_dev 
*dev, bool use_count) if (oldest) { int x = input_mt_get_value(oldest, ABS_MT_POSITION_X); int y = input_mt_get_value(oldest, ABS_MT_POSITION_Y); - int p = input_mt_get_value(oldest, ABS_MT_PRESSURE); input_event(dev, EV_ABS, ABS_X, x); input_event(dev, EV_ABS, ABS_Y, y); - input_event(dev, EV_ABS, ABS_PRESSURE, p); + + if (test_bit(ABS_MT_PRESSURE, dev->absbit)) { + int p = input_mt_get_value(oldest, ABS_MT_PRESSURE); + input_event(dev, EV_ABS, ABS_PRESSURE, p); + } } else { - input_event(dev, EV_ABS, ABS_PRESSURE, 0); + if (test_bit(ABS_MT_PRESSURE, dev->absbit)) + input_event(dev, EV_ABS, ABS_PRESSURE, 0); } } EXPORT_SYMBOL(input_mt_report_pointer_emulation); + +/** + * input_mt_sync_frame() - synchronize mt frame + * @dev: input device with allocated MT slots + * + * Close the frame and prepare the internal state for a new one. + * Depending on the flags, marks unused slots as inactive and performs + * pointer emulation. + */ +void input_mt_sync_frame(struct input_dev *dev) +{ + struct input_mt *mt = dev->mt; + struct input_mt_slot *s; + + if (!mt) + return; + + if (mt->flags & INPUT_MT_DROP_UNUSED) { + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { + if (s->frame == mt->frame) + continue; + input_mt_slot(dev, s - mt->slots); + input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1); + } + } + + input_mt_report_pointer_emulation(dev, (mt->flags & INPUT_MT_POINTER)); + + mt->frame++; +} +EXPORT_SYMBOL(input_mt_sync_frame); + +static int adjust_dual(int *begin, int step, int *end, int eq) +{ + int f, *p, s, c; + + if (begin == end) + return 0; + + f = *begin; + p = begin + step; + s = p == end ? f + 1 : *p; + + for (; p != end; p += step) + if (*p < f) + s = f, f = *p; + else if (*p < s) + s = *p; + + c = (f + s + 1) / 2; + if (c == 0 || (c > 0 && !eq)) + return 0; + if (s < 0) + c *= 2; + + for (p = begin; p != end; p += step) + *p -= c; + + return (c < s && s <= 0) || (f >= 0 && f < c); +} + +static void find_reduced_matrix(int *w, int nr, int nc, int nrc) +{ + int i, k, sum; + + for (k = 0; k < nrc; k++) { + for (i = 0; i < nr; i++) + adjust_dual(w + i, nr, w + i + nrc, nr <= nc); + sum = 0; + for (i = 0; i < nrc; i += nr) + sum += adjust_dual(w + i, 1, w + i + nr, nc <= nr); + if (!sum) + break; + } +} + +static int input_mt_set_matrix(struct input_mt *mt, + const struct input_mt_pos *pos, int num_pos) +{ + const struct input_mt_pos *p; + struct input_mt_slot *s; + int *w = mt->red; + int x, y; + + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { + if (!input_mt_is_active(s)) + continue; + x = input_mt_get_value(s, ABS_MT_POSITION_X); + y = input_mt_get_value(s, ABS_MT_POSITION_Y); + for (p = pos; p != pos + num_pos; p++) { + int dx = x - p->x, dy = y - p->y; + *w++ = dx * dx + dy * dy; + } + } + + return w - mt->red; +} + +static void input_mt_set_slots(struct input_mt *mt, + int *slots, int num_pos) +{ + struct input_mt_slot *s; + int *w = mt->red, *p; + + for (p = slots; p != slots + num_pos; p++) + *p = -1; + + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { + if (!input_mt_is_active(s)) + continue; + for (p = slots; p != slots + num_pos; p++) + if (*w++ < 0) + *p = s - mt->slots; + } + + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) { + if (input_mt_is_active(s)) + continue; + for (p = slots; p != slots + num_pos; p++) + if (*p < 0) { + *p = s - mt->slots; + break; + } + } +} + +/** + * input_mt_assign_slots() - perform a best-match assignment + * @dev: input device with allocated MT slots + * @slots: the slot assignment to be filled + * 
@pos: the position array to match + * @num_pos: number of positions + * + * Performs a best match against the current contacts and returns + * the slot assignment list. New contacts are assigned to unused + * slots. + * + * Returns zero on success, or negative error in case of failure. + */ +int input_mt_assign_slots(struct input_dev *dev, int *slots, + const struct input_mt_pos *pos, int num_pos) +{ + struct input_mt *mt = dev->mt; + int nrc; + + if (!mt || !mt->red) + return -ENXIO; + if (num_pos > mt->num_slots) + return -EINVAL; + if (num_pos < 1) + return 0; + + nrc = input_mt_set_matrix(mt, pos, num_pos); + find_reduced_matrix(mt->red, num_pos, nrc / num_pos, nrc); + input_mt_set_slots(mt, slots, num_pos); + + return 0; +} +EXPORT_SYMBOL(input_mt_assign_slots); + +/** + * input_mt_get_slot_by_key() - return slot matching key + * @dev: input device with allocated MT slots + * @key: the key of the sought slot + * + * Returns the slot of the given key, if it exists, otherwise + * set the key on the first unused slot and return. + * + * If no available slot can be found, -1 is returned. + */ +int input_mt_get_slot_by_key(struct input_dev *dev, int key) +{ + struct input_mt *mt = dev->mt; + struct input_mt_slot *s; + + if (!mt) + return -1; + + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) + if (input_mt_is_active(s) && s->key == key) + return s - mt->slots; + + for (s = mt->slots; s != mt->slots + mt->num_slots; s++) + if (!input_mt_is_active(s)) { + s->key = key; + return s - mt->slots; + } + + return -1; +} +EXPORT_SYMBOL(input_mt_get_slot_by_key); diff --git a/trunk/drivers/input/input.c b/trunk/drivers/input/input.c index 8921c6180c51..5244f3d05b12 100644 --- a/trunk/drivers/input/input.c +++ b/trunk/drivers/input/input.c @@ -47,6 +47,8 @@ static DEFINE_MUTEX(input_mutex); static struct input_handler *input_table[8]; +static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; + static inline int is_event_supported(unsigned int code, unsigned long *bm, unsigned int max) { @@ -69,42 +71,102 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz) return value; } +static void input_start_autorepeat(struct input_dev *dev, int code) +{ + if (test_bit(EV_REP, dev->evbit) && + dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && + dev->timer.data) { + dev->repeat_key = code; + mod_timer(&dev->timer, + jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); + } +} + +static void input_stop_autorepeat(struct input_dev *dev) +{ + del_timer(&dev->timer); +} + /* * Pass event first through all filters and then, if event has not been * filtered out, through all open handles. This function is called with * dev->event_lock held and interrupts disabled. 
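
input_mt_get_slot_by_key() above targets hardware that already hands out a stable per-contact id: the driver keys every report by that id and lets the core find, or allocate, the matching slot. A hedged sketch of a report path built on it; my_hw_contact and its fields are invented for illustration, they are not taken from any driver in this series:

#include <linux/input.h>
#include <linux/input/mt.h>

/* Invented representation of one hardware-reported contact. */
struct my_hw_contact {
        int id;                 /* firmware-assigned contact id */
        bool down;
        int x, y;
};

static void my_report_contact(struct input_dev *dev,
                              const struct my_hw_contact *c)
{
        int slot = input_mt_get_slot_by_key(dev, c->id);

        if (slot < 0)           /* no free slot left, drop the contact */
                return;

        input_mt_slot(dev, slot);
        input_mt_report_slot_state(dev, MT_TOOL_FINGER, c->down);
        if (c->down) {
                input_report_abs(dev, ABS_MT_POSITION_X, c->x);
                input_report_abs(dev, ABS_MT_POSITION_Y, c->y);
        }
}

static void my_report_frame(struct input_dev *dev,
                            const struct my_hw_contact *contacts, int n)
{
        int i;

        for (i = 0; i < n; i++)
                my_report_contact(dev, &contacts[i]);

        /* with INPUT_MT_DROP_UNUSED this also releases slots not seen here */
        input_mt_sync_frame(dev);
        input_sync(dev);
}
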
*/ -static void input_pass_event(struct input_dev *dev, - unsigned int type, unsigned int code, int value) +static unsigned int input_to_handler(struct input_handle *handle, + struct input_value *vals, unsigned int count) +{ + struct input_handler *handler = handle->handler; + struct input_value *end = vals; + struct input_value *v; + + for (v = vals; v != vals + count; v++) { + if (handler->filter && + handler->filter(handle, v->type, v->code, v->value)) + continue; + if (end != v) + *end = *v; + end++; + } + + count = end - vals; + if (!count) + return 0; + + if (handler->events) + handler->events(handle, vals, count); + else if (handler->event) + for (v = vals; v != end; v++) + handler->event(handle, v->type, v->code, v->value); + + return count; +} + +/* + * Pass values first through all filters and then, if event has not been + * filtered out, through all open handles. This function is called with + * dev->event_lock held and interrupts disabled. + */ +static void input_pass_values(struct input_dev *dev, + struct input_value *vals, unsigned int count) { - struct input_handler *handler; struct input_handle *handle; + struct input_value *v; + + if (!count) + return; rcu_read_lock(); handle = rcu_dereference(dev->grab); - if (handle) - handle->handler->event(handle, type, code, value); - else { - bool filtered = false; - - list_for_each_entry_rcu(handle, &dev->h_list, d_node) { - if (!handle->open) - continue; + if (handle) { + count = input_to_handler(handle, vals, count); + } else { + list_for_each_entry_rcu(handle, &dev->h_list, d_node) + if (handle->open) + count = input_to_handler(handle, vals, count); + } - handler = handle->handler; - if (!handler->filter) { - if (filtered) - break; + rcu_read_unlock(); - handler->event(handle, type, code, value); + add_input_randomness(vals->type, vals->code, vals->value); - } else if (handler->filter(handle, type, code, value)) - filtered = true; + /* trigger auto repeat for key events */ + for (v = vals; v != vals + count; v++) { + if (v->type == EV_KEY && v->value != 2) { + if (v->value) + input_start_autorepeat(dev, v->code); + else + input_stop_autorepeat(dev); } } +} - rcu_read_unlock(); +static void input_pass_event(struct input_dev *dev, + unsigned int type, unsigned int code, int value) +{ + struct input_value vals[] = { { type, code, value } }; + + input_pass_values(dev, vals, ARRAY_SIZE(vals)); } /* @@ -121,18 +183,12 @@ static void input_repeat_key(unsigned long data) if (test_bit(dev->repeat_key, dev->key) && is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { + struct input_value vals[] = { + { EV_KEY, dev->repeat_key, 2 }, + input_value_sync + }; - input_pass_event(dev, EV_KEY, dev->repeat_key, 2); - - if (dev->sync) { - /* - * Only send SYN_REPORT if we are not in a middle - * of driver parsing a new hardware packet. - * Otherwise assume that the driver will send - * SYN_REPORT once it's done. 
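
input_to_handler() above compacts the value array in place: filtered entries are squeezed out before delivery, so an ->events() handler sees a dense batch while a legacy ->event() handler is fed the survivors one at a time. The compaction step on its own, over a hypothetical keep() predicate:

#include <linux/input.h>

/* Keep only values accepted by @keep, preserving order; returns new count. */
static unsigned int my_compact(struct input_value *vals, unsigned int count,
                               bool (*keep)(const struct input_value *v))
{
        struct input_value *end = vals;
        struct input_value *v;

        for (v = vals; v != vals + count; v++) {
                if (!keep(v))
                        continue;
                if (end != v)
                        *end = *v;
                end++;
        }

        return end - vals;
}

/* Example predicate: drop EV_MSC values, keep everything else. */
static bool my_keep_non_msc(const struct input_value *v)
{
        return v->type != EV_MSC;
}
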
- */ - input_pass_event(dev, EV_SYN, SYN_REPORT, 1); - } + input_pass_values(dev, vals, ARRAY_SIZE(vals)); if (dev->rep[REP_PERIOD]) mod_timer(&dev->timer, jiffies + @@ -142,30 +198,17 @@ static void input_repeat_key(unsigned long data) spin_unlock_irqrestore(&dev->event_lock, flags); } -static void input_start_autorepeat(struct input_dev *dev, int code) -{ - if (test_bit(EV_REP, dev->evbit) && - dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && - dev->timer.data) { - dev->repeat_key = code; - mod_timer(&dev->timer, - jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); - } -} - -static void input_stop_autorepeat(struct input_dev *dev) -{ - del_timer(&dev->timer); -} - #define INPUT_IGNORE_EVENT 0 #define INPUT_PASS_TO_HANDLERS 1 #define INPUT_PASS_TO_DEVICE 2 +#define INPUT_SLOT 4 +#define INPUT_FLUSH 8 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) static int input_handle_abs_event(struct input_dev *dev, unsigned int code, int *pval) { + struct input_mt *mt = dev->mt; bool is_mt_event; int *pold; @@ -174,8 +217,8 @@ static int input_handle_abs_event(struct input_dev *dev, * "Stage" the event; we'll flush it later, when we * get actual touch data. */ - if (*pval >= 0 && *pval < dev->mtsize) - dev->slot = *pval; + if (mt && *pval >= 0 && *pval < mt->num_slots) + mt->slot = *pval; return INPUT_IGNORE_EVENT; } @@ -184,9 +227,8 @@ static int input_handle_abs_event(struct input_dev *dev, if (!is_mt_event) { pold = &dev->absinfo[code].value; - } else if (dev->mt) { - struct input_mt_slot *mtslot = &dev->mt[dev->slot]; - pold = &mtslot->abs[code - ABS_MT_FIRST]; + } else if (mt) { + pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST]; } else { /* * Bypass filtering for multi-touch events when @@ -205,16 +247,16 @@ static int input_handle_abs_event(struct input_dev *dev, } /* Flush pending "slot" event */ - if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { - input_abs_set_val(dev, ABS_MT_SLOT, dev->slot); - input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot); + if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { + input_abs_set_val(dev, ABS_MT_SLOT, mt->slot); + return INPUT_PASS_TO_HANDLERS | INPUT_SLOT; } return INPUT_PASS_TO_HANDLERS; } -static void input_handle_event(struct input_dev *dev, - unsigned int type, unsigned int code, int value) +static int input_get_disposition(struct input_dev *dev, + unsigned int type, unsigned int code, int value) { int disposition = INPUT_IGNORE_EVENT; @@ -227,37 +269,34 @@ static void input_handle_event(struct input_dev *dev, break; case SYN_REPORT: - if (!dev->sync) { - dev->sync = true; - disposition = INPUT_PASS_TO_HANDLERS; - } + disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH; break; case SYN_MT_REPORT: - dev->sync = false; disposition = INPUT_PASS_TO_HANDLERS; break; } break; case EV_KEY: - if (is_event_supported(code, dev->keybit, KEY_MAX) && - !!test_bit(code, dev->key) != value) { + if (is_event_supported(code, dev->keybit, KEY_MAX)) { - if (value != 2) { - __change_bit(code, dev->key); - if (value) - input_start_autorepeat(dev, code); - else - input_stop_autorepeat(dev); + /* auto-repeat bypasses state updates */ + if (value == 2) { + disposition = INPUT_PASS_TO_HANDLERS; + break; } - disposition = INPUT_PASS_TO_HANDLERS; + if (!!test_bit(code, dev->key) != !!value) { + + __change_bit(code, dev->key); + disposition = INPUT_PASS_TO_HANDLERS; + } } break; case EV_SW: if (is_event_supported(code, dev->swbit, SW_MAX) && - !!test_bit(code, dev->sw) != value) { + !!test_bit(code, 
dev->sw) != !!value) { __change_bit(code, dev->sw); disposition = INPUT_PASS_TO_HANDLERS; @@ -284,7 +323,7 @@ static void input_handle_event(struct input_dev *dev, case EV_LED: if (is_event_supported(code, dev->ledbit, LED_MAX) && - !!test_bit(code, dev->led) != value) { + !!test_bit(code, dev->led) != !!value) { __change_bit(code, dev->led); disposition = INPUT_PASS_TO_ALL; @@ -317,14 +356,48 @@ static void input_handle_event(struct input_dev *dev, break; } - if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN) - dev->sync = false; + return disposition; +} + +static void input_handle_event(struct input_dev *dev, + unsigned int type, unsigned int code, int value) +{ + int disposition; + + disposition = input_get_disposition(dev, type, code, value); if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) dev->event(dev, type, code, value); - if (disposition & INPUT_PASS_TO_HANDLERS) - input_pass_event(dev, type, code, value); + if (!dev->vals) + return; + + if (disposition & INPUT_PASS_TO_HANDLERS) { + struct input_value *v; + + if (disposition & INPUT_SLOT) { + v = &dev->vals[dev->num_vals++]; + v->type = EV_ABS; + v->code = ABS_MT_SLOT; + v->value = dev->mt->slot; + } + + v = &dev->vals[dev->num_vals++]; + v->type = type; + v->code = code; + v->value = value; + } + + if (disposition & INPUT_FLUSH) { + if (dev->num_vals >= 2) + input_pass_values(dev, dev->vals, dev->num_vals); + dev->num_vals = 0; + } else if (dev->num_vals >= dev->max_vals - 2) { + dev->vals[dev->num_vals++] = input_value_sync; + input_pass_values(dev, dev->vals, dev->num_vals); + dev->num_vals = 0; + } + } /** @@ -352,7 +425,6 @@ void input_event(struct input_dev *dev, if (is_event_supported(type, dev->evbit, EV_MAX)) { spin_lock_irqsave(&dev->event_lock, flags); - add_input_randomness(type, code, value); input_handle_event(dev, type, code, value); spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -831,10 +903,12 @@ int input_set_keycode(struct input_dev *dev, if (test_bit(EV_KEY, dev->evbit) && !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && __test_and_clear_bit(old_keycode, dev->key)) { + struct input_value vals[] = { + { EV_KEY, old_keycode, 0 }, + input_value_sync + }; - input_pass_event(dev, EV_KEY, old_keycode, 0); - if (dev->sync) - input_pass_event(dev, EV_SYN, SYN_REPORT, 1); + input_pass_values(dev, vals, ARRAY_SIZE(vals)); } out: @@ -1416,6 +1490,7 @@ static void input_dev_release(struct device *device) input_ff_destroy(dev); input_mt_destroy_slots(dev); kfree(dev->absinfo); + kfree(dev->vals); kfree(dev); module_put(THIS_MODULE); @@ -1751,8 +1826,8 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev) int i; unsigned int events; - if (dev->mtsize) { - mt_slots = dev->mtsize; + if (dev->mt) { + mt_slots = dev->mt->num_slots; } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, @@ -1778,6 +1853,9 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev) if (test_bit(i, dev->relbit)) events++; + /* Make room for KEY and MSC events */ + events += 7; + return events; } @@ -1816,6 +1894,7 @@ int input_register_device(struct input_dev *dev) { static atomic_t input_no = ATOMIC_INIT(0); struct input_handler *handler; + unsigned int packet_size; const char *path; int error; @@ -1828,9 +1907,14 @@ int input_register_device(struct input_dev *dev) /* Make sure that bitmasks not mentioned in dev->evbit are clean. 
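
The buffering introduced above makes the size of dev->vals matter: input_register_device() now takes the larger of the driver's hint and its own per-device estimate, then adds two entries of headroom so a forced flush can always append the synthetic SYN_REPORT value. A driver that knows its frame size can still pass a hint, roughly like this (the device and the numbers are invented):

#include <linux/input.h>

/*
 * Sketch: a touch device reporting up to ten contacts of x/y/pressure
 * plus BTN_TOUCH and the sync event per frame.
 */
static int my_setup(struct input_dev *dev)
{
        input_set_capability(dev, EV_KEY, BTN_TOUCH);
        input_set_abs_params(dev, ABS_X, 0, 4095, 0, 0);
        input_set_abs_params(dev, ABS_Y, 0, 4095, 0, 0);
        input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0);

        /* ~3 ABS values per contact + BTN_TOUCH + SYN_REPORT */
        input_set_events_per_packet(dev, 10 * 3 + 2);

        return input_register_device(dev);
}
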
*/ input_cleanse_bitmasks(dev); - if (!dev->hint_events_per_packet) - dev->hint_events_per_packet = - input_estimate_events_per_packet(dev); + packet_size = input_estimate_events_per_packet(dev); + if (dev->hint_events_per_packet < packet_size) + dev->hint_events_per_packet = packet_size; + + dev->max_vals = max(dev->hint_events_per_packet, packet_size) + 2; + dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL); + if (!dev->vals) + return -ENOMEM; /* * If delay and period are pre-set by the driver, then autorepeating diff --git a/trunk/drivers/input/keyboard/imx_keypad.c b/trunk/drivers/input/keyboard/imx_keypad.c index ff4c0a87a25f..cdc252612c0b 100644 --- a/trunk/drivers/input/keyboard/imx_keypad.c +++ b/trunk/drivers/input/keyboard/imx_keypad.c @@ -358,6 +358,7 @@ static void imx_keypad_inhibit(struct imx_keypad *keypad) /* Inhibit KDI and KRI interrupts. */ reg_val = readw(keypad->mmio_base + KPSR); reg_val &= ~(KBD_STAT_KRIE | KBD_STAT_KDIE); + reg_val |= KBD_STAT_KPKR | KBD_STAT_KPKD; writew(reg_val, keypad->mmio_base + KPSR); /* Colums as open drain and disable all rows */ @@ -515,7 +516,9 @@ static int __devinit imx_keypad_probe(struct platform_device *pdev) input_set_drvdata(input_dev, keypad); /* Ensure that the keypad will stay dormant until opened */ + clk_prepare_enable(keypad->clk); imx_keypad_inhibit(keypad); + clk_disable_unprepare(keypad->clk); error = request_irq(irq, imx_keypad_irq_handler, 0, pdev->name, keypad); diff --git a/trunk/drivers/input/misc/ab8500-ponkey.c b/trunk/drivers/input/misc/ab8500-ponkey.c index f06231b7cab1..84ec691c05aa 100644 --- a/trunk/drivers/input/misc/ab8500-ponkey.c +++ b/trunk/drivers/input/misc/ab8500-ponkey.c @@ -74,8 +74,8 @@ static int __devinit ab8500_ponkey_probe(struct platform_device *pdev) ponkey->idev = input; ponkey->ab8500 = ab8500; - ponkey->irq_dbf = ab8500_irq_get_virq(ab8500, irq_dbf); - ponkey->irq_dbr = ab8500_irq_get_virq(ab8500, irq_dbr); + ponkey->irq_dbf = irq_dbf; + ponkey->irq_dbr = irq_dbr; input->name = "AB8500 POn(PowerOn) Key"; input->dev.parent = &pdev->dev; diff --git a/trunk/drivers/input/misc/uinput.c b/trunk/drivers/input/misc/uinput.c index 736056897e50..6b1797503e34 100644 --- a/trunk/drivers/input/misc/uinput.c +++ b/trunk/drivers/input/misc/uinput.c @@ -405,7 +405,7 @@ static int uinput_setup_device(struct uinput_device *udev, const char __user *bu goto exit; if (test_bit(ABS_MT_SLOT, dev->absbit)) { int nslot = input_abs_get_max(dev, ABS_MT_SLOT) + 1; - input_mt_init_slots(dev, nslot); + input_mt_init_slots(dev, nslot, 0); } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { input_set_events_per_packet(dev, 60); } diff --git a/trunk/drivers/input/mouse/alps.c b/trunk/drivers/input/mouse/alps.c index 4a1347e91bdc..cf5af1f495ec 100644 --- a/trunk/drivers/input/mouse/alps.c +++ b/trunk/drivers/input/mouse/alps.c @@ -1620,7 +1620,7 @@ int alps_init(struct psmouse *psmouse) case ALPS_PROTO_V3: case ALPS_PROTO_V4: set_bit(INPUT_PROP_SEMI_MT, dev1->propbit); - input_mt_init_slots(dev1, 2); + input_mt_init_slots(dev1, 2, 0); input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, ALPS_V3_X_MAX, 0, 0); input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, ALPS_V3_Y_MAX, 0, 0); diff --git a/trunk/drivers/input/mouse/bcm5974.c b/trunk/drivers/input/mouse/bcm5974.c index d528c23e194f..3a78f235fa3e 100644 --- a/trunk/drivers/input/mouse/bcm5974.c +++ b/trunk/drivers/input/mouse/bcm5974.c @@ -40,6 +40,7 @@ #include #include #include +#include #define USB_VENDOR_ID_APPLE 0x05ac @@ -183,26 +184,26 @@ struct 
tp_finger { __le16 abs_y; /* absolute y coodinate */ __le16 rel_x; /* relative x coodinate */ __le16 rel_y; /* relative y coodinate */ - __le16 size_major; /* finger size, major axis? */ - __le16 size_minor; /* finger size, minor axis? */ + __le16 tool_major; /* tool area, major axis */ + __le16 tool_minor; /* tool area, minor axis */ __le16 orientation; /* 16384 when point, else 15 bit angle */ - __le16 force_major; /* trackpad force, major axis? */ - __le16 force_minor; /* trackpad force, minor axis? */ + __le16 touch_major; /* touch area, major axis */ + __le16 touch_minor; /* touch area, minor axis */ __le16 unused[3]; /* zeros */ __le16 multi; /* one finger: varies, more fingers: constant */ } __attribute__((packed,aligned(2))); /* trackpad finger data size, empirically at least ten fingers */ +#define MAX_FINGERS 16 #define SIZEOF_FINGER sizeof(struct tp_finger) -#define SIZEOF_ALL_FINGERS (16 * SIZEOF_FINGER) +#define SIZEOF_ALL_FINGERS (MAX_FINGERS * SIZEOF_FINGER) #define MAX_FINGER_ORIENTATION 16384 /* device-specific parameters */ struct bcm5974_param { - int dim; /* logical dimension */ - int fuzz; /* logical noise value */ - int devmin; /* device minimum reading */ - int devmax; /* device maximum reading */ + int snratio; /* signal-to-noise ratio */ + int min; /* device minimum reading */ + int max; /* device maximum reading */ }; /* device-specific configuration */ @@ -219,6 +220,7 @@ struct bcm5974_config { struct bcm5974_param w; /* finger width limits */ struct bcm5974_param x; /* horizontal limits */ struct bcm5974_param y; /* vertical limits */ + struct bcm5974_param o; /* orientation limits */ }; /* logical device structure */ @@ -234,23 +236,16 @@ struct bcm5974 { struct bt_data *bt_data; /* button transferred data */ struct urb *tp_urb; /* trackpad usb request block */ u8 *tp_data; /* trackpad transferred data */ - int fingers; /* number of fingers on trackpad */ + const struct tp_finger *index[MAX_FINGERS]; /* finger index data */ + struct input_mt_pos pos[MAX_FINGERS]; /* position array */ + int slots[MAX_FINGERS]; /* slot assignments */ }; -/* logical dimensions */ -#define DIM_PRESSURE 256 /* maximum finger pressure */ -#define DIM_WIDTH 16 /* maximum finger width */ -#define DIM_X 1280 /* maximum trackpad x value */ -#define DIM_Y 800 /* maximum trackpad y value */ - /* logical signal quality */ #define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ -#define SN_WIDTH 100 /* width signal-to-noise ratio */ +#define SN_WIDTH 25 /* width signal-to-noise ratio */ #define SN_COORD 250 /* coordinate signal-to-noise ratio */ - -/* pressure thresholds */ -#define PRESSURE_LOW (2 * DIM_PRESSURE / SN_PRESSURE) -#define PRESSURE_HIGH (3 * PRESSURE_LOW) +#define SN_ORIENT 10 /* orientation signal-to-noise ratio */ /* device constants */ static const struct bcm5974_config bcm5974_config_table[] = { @@ -261,10 +256,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { 0, 0x84, sizeof(struct bt_data), 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4824, 5342 }, - { DIM_Y, DIM_Y / SN_COORD, -172, 5820 } + { SN_PRESSURE, 0, 256 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4824, 5342 }, + { SN_COORD, -172, 5820 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING2_ANSI, @@ -273,10 +269,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { 0, 0x84, 
sizeof(struct bt_data), 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 256 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4824, 4824 }, - { DIM_Y, DIM_Y / SN_COORD, -172, 4290 } + { SN_PRESSURE, 0, 256 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4824, 4824 }, + { SN_COORD, -172, 4290 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING3_ANSI, @@ -285,10 +282,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4460, 5166 }, - { DIM_Y, DIM_Y / SN_COORD, -75, 6700 } + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4460, 5166 }, + { SN_COORD, -75, 6700 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING4_ANSI, @@ -297,10 +295,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, - { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4620, 5140 }, + { SN_COORD, -150, 6600 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING4A_ANSI, @@ -309,10 +308,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4616, 5112 }, - { DIM_Y, DIM_Y / SN_COORD, -142, 5234 } + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4616, 5112 }, + { SN_COORD, -142, 5234 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING5_ANSI, @@ -321,10 +321,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4415, 5050 }, - { DIM_Y, DIM_Y / SN_COORD, -55, 6680 } + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4415, 5050 }, + { SN_COORD, -55, 6680 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING6_ANSI, @@ -333,10 +334,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, - { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4620, 5140 }, + { SN_COORD, -150, 6600 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING5A_ANSI, @@ -345,10 +347,11 @@ static 
const struct bcm5974_config bcm5974_config_table[] = { HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, - { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4750, 5280 }, + { SN_COORD, -150, 6730 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING6A_ANSI, @@ -357,10 +360,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4620, 5140 }, - { DIM_Y, DIM_Y / SN_COORD, -150, 6600 } + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4620, 5140 }, + { SN_COORD, -150, 6600 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, { USB_DEVICE_ID_APPLE_WELLSPRING7_ANSI, @@ -369,10 +373,11 @@ static const struct bcm5974_config bcm5974_config_table[] = { HAS_INTEGRATED_BUTTON, 0x84, sizeof(struct bt_data), 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, - { DIM_PRESSURE, DIM_PRESSURE / SN_PRESSURE, 0, 300 }, - { DIM_WIDTH, DIM_WIDTH / SN_WIDTH, 0, 2048 }, - { DIM_X, DIM_X / SN_COORD, -4750, 5280 }, - { DIM_Y, DIM_Y / SN_COORD, -150, 6730 } + { SN_PRESSURE, 0, 300 }, + { SN_WIDTH, 0, 2048 }, + { SN_COORD, -4750, 5280 }, + { SN_COORD, -150, 6730 }, + { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } }, {} }; @@ -396,18 +401,11 @@ static inline int raw2int(__le16 x) return (signed short)le16_to_cpu(x); } -/* scale device data to logical dimensions (asserts devmin < devmax) */ -static inline int int2scale(const struct bcm5974_param *p, int x) -{ - return x * p->dim / (p->devmax - p->devmin); -} - -/* all logical value ranges are [0,dim). */ -static inline int int2bound(const struct bcm5974_param *p, int x) +static void set_abs(struct input_dev *input, unsigned int code, + const struct bcm5974_param *p) { - int s = int2scale(p, x); - - return clamp_val(s, 0, p->dim - 1); + int fuzz = p->snratio ? 
(p->max - p->min) / p->snratio : 0; + input_set_abs_params(input, code, p->min, p->max, fuzz, 0); } /* setup which logical events to report */ @@ -416,48 +414,30 @@ static void setup_events_to_report(struct input_dev *input_dev, { __set_bit(EV_ABS, input_dev->evbit); - input_set_abs_params(input_dev, ABS_PRESSURE, - 0, cfg->p.dim, cfg->p.fuzz, 0); - input_set_abs_params(input_dev, ABS_TOOL_WIDTH, - 0, cfg->w.dim, cfg->w.fuzz, 0); - input_set_abs_params(input_dev, ABS_X, - 0, cfg->x.dim, cfg->x.fuzz, 0); - input_set_abs_params(input_dev, ABS_Y, - 0, cfg->y.dim, cfg->y.fuzz, 0); + /* for synaptics only */ + input_set_abs_params(input_dev, ABS_PRESSURE, 0, 256, 5, 0); + input_set_abs_params(input_dev, ABS_TOOL_WIDTH, 0, 16, 0, 0); /* finger touch area */ - input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, - cfg->w.devmin, cfg->w.devmax, 0, 0); - input_set_abs_params(input_dev, ABS_MT_TOUCH_MINOR, - cfg->w.devmin, cfg->w.devmax, 0, 0); + set_abs(input_dev, ABS_MT_TOUCH_MAJOR, &cfg->w); + set_abs(input_dev, ABS_MT_TOUCH_MINOR, &cfg->w); /* finger approach area */ - input_set_abs_params(input_dev, ABS_MT_WIDTH_MAJOR, - cfg->w.devmin, cfg->w.devmax, 0, 0); - input_set_abs_params(input_dev, ABS_MT_WIDTH_MINOR, - cfg->w.devmin, cfg->w.devmax, 0, 0); + set_abs(input_dev, ABS_MT_WIDTH_MAJOR, &cfg->w); + set_abs(input_dev, ABS_MT_WIDTH_MINOR, &cfg->w); /* finger orientation */ - input_set_abs_params(input_dev, ABS_MT_ORIENTATION, - -MAX_FINGER_ORIENTATION, - MAX_FINGER_ORIENTATION, 0, 0); + set_abs(input_dev, ABS_MT_ORIENTATION, &cfg->o); /* finger position */ - input_set_abs_params(input_dev, ABS_MT_POSITION_X, - cfg->x.devmin, cfg->x.devmax, 0, 0); - input_set_abs_params(input_dev, ABS_MT_POSITION_Y, - cfg->y.devmin, cfg->y.devmax, 0, 0); + set_abs(input_dev, ABS_MT_POSITION_X, &cfg->x); + set_abs(input_dev, ABS_MT_POSITION_Y, &cfg->y); __set_bit(EV_KEY, input_dev->evbit); - __set_bit(BTN_TOUCH, input_dev->keybit); - __set_bit(BTN_TOOL_FINGER, input_dev->keybit); - __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); - __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); - __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); __set_bit(BTN_LEFT, input_dev->keybit); - __set_bit(INPUT_PROP_POINTER, input_dev->propbit); if (cfg->caps & HAS_INTEGRATED_BUTTON) __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); - input_set_events_per_packet(input_dev, 60); + input_mt_init_slots(input_dev, MAX_FINGERS, + INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK); } /* report button data as logical button state */ @@ -477,24 +457,44 @@ static int report_bt_state(struct bcm5974 *dev, int size) return 0; } -static void report_finger_data(struct input_dev *input, - const struct bcm5974_config *cfg, +static void report_finger_data(struct input_dev *input, int slot, + const struct input_mt_pos *pos, const struct tp_finger *f) { + input_mt_slot(input, slot); + input_mt_report_slot_state(input, MT_TOOL_FINGER, true); + input_report_abs(input, ABS_MT_TOUCH_MAJOR, - raw2int(f->force_major) << 1); + raw2int(f->touch_major) << 1); input_report_abs(input, ABS_MT_TOUCH_MINOR, - raw2int(f->force_minor) << 1); + raw2int(f->touch_minor) << 1); input_report_abs(input, ABS_MT_WIDTH_MAJOR, - raw2int(f->size_major) << 1); + raw2int(f->tool_major) << 1); input_report_abs(input, ABS_MT_WIDTH_MINOR, - raw2int(f->size_minor) << 1); + raw2int(f->tool_minor) << 1); input_report_abs(input, ABS_MT_ORIENTATION, MAX_FINGER_ORIENTATION - raw2int(f->orientation)); - input_report_abs(input, ABS_MT_POSITION_X, raw2int(f->abs_x)); - 
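
Two points worth spelling out from the bcm5974 conversion above. First, the fuzz now falls straight out of the signal-to-noise ratio: for the Wellspring X axis that is (5342 - (-4824)) / 250, about 40 raw units, the same relative noise band the old DIM_X / SN_COORD fuzz expressed after scaling to the 1280-unit logical range. Second, with INPUT_MT_TRACK the driver no longer tracks fingers itself; it only collects positions and lets the core match them to slots, as the report_tp_state() rewrite below does. A condensed sketch of that flow with an invented contact array:

#include <linux/input.h>
#include <linux/input/mt.h>

#define MY_MAX_CONTACTS 16

/* Invented raw-contact layout; bcm5974 derives this from struct tp_finger. */
struct my_contact {
        int x, y;
        bool valid;
};

/*
 * Assumes input_mt_init_slots(input, MY_MAX_CONTACTS, INPUT_MT_POINTER |
 * INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK) succeeded at setup time.
 */
static void my_report(struct input_dev *input,
                      const struct my_contact *raw, int raw_n)
{
        struct input_mt_pos pos[MY_MAX_CONTACTS];
        int slots[MY_MAX_CONTACTS];
        int i, n = 0;

        for (i = 0; i < raw_n && n < MY_MAX_CONTACTS; i++) {
                if (!raw[i].valid)
                        continue;
                pos[n].x = raw[i].x;
                pos[n].y = raw[i].y;
                n++;
        }

        input_mt_assign_slots(input, slots, pos, n);

        for (i = 0; i < n; i++) {
                input_mt_slot(input, slots[i]);
                input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
                input_report_abs(input, ABS_MT_POSITION_X, pos[i].x);
                input_report_abs(input, ABS_MT_POSITION_Y, pos[i].y);
        }

        /* INPUT_MT_DROP_UNUSED: slots not touched this frame are released */
        input_mt_sync_frame(input);
        input_sync(input);
}
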
input_report_abs(input, ABS_MT_POSITION_Y, - cfg->y.devmin + cfg->y.devmax - raw2int(f->abs_y)); - input_mt_sync(input); + input_report_abs(input, ABS_MT_POSITION_X, pos->x); + input_report_abs(input, ABS_MT_POSITION_Y, pos->y); +} + +static void report_synaptics_data(struct input_dev *input, + const struct bcm5974_config *cfg, + const struct tp_finger *f, int raw_n) +{ + int abs_p = 0, abs_w = 0; + + if (raw_n) { + int p = raw2int(f->touch_major); + int w = raw2int(f->tool_major); + if (p > 0 && raw2int(f->origin)) { + abs_p = clamp_val(256 * p / cfg->p.max, 0, 255); + abs_w = clamp_val(16 * w / cfg->w.max, 0, 15); + } + } + + input_report_abs(input, ABS_PRESSURE, abs_p); + input_report_abs(input, ABS_TOOL_WIDTH, abs_w); } /* report trackpad data as logical trackpad state */ @@ -503,9 +503,7 @@ static int report_tp_state(struct bcm5974 *dev, int size) const struct bcm5974_config *c = &dev->cfg; const struct tp_finger *f; struct input_dev *input = dev->input; - int raw_p, raw_w, raw_x, raw_y, raw_n, i; - int ptest, origin, ibt = 0, nmin = 0, nmax = 0; - int abs_p = 0, abs_w = 0, abs_x = 0, abs_y = 0; + int raw_n, i, n = 0; if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) return -EIO; @@ -514,76 +512,29 @@ static int report_tp_state(struct bcm5974 *dev, int size) f = (const struct tp_finger *)(dev->tp_data + c->tp_offset); raw_n = (size - c->tp_offset) / SIZEOF_FINGER; - /* always track the first finger; when detached, start over */ - if (raw_n) { - - /* report raw trackpad data */ - for (i = 0; i < raw_n; i++) - report_finger_data(input, c, &f[i]); - - raw_p = raw2int(f->force_major); - raw_w = raw2int(f->size_major); - raw_x = raw2int(f->abs_x); - raw_y = raw2int(f->abs_y); - - dprintk(9, - "bcm5974: " - "raw: p: %+05d w: %+05d x: %+05d y: %+05d n: %d\n", - raw_p, raw_w, raw_x, raw_y, raw_n); - - ptest = int2bound(&c->p, raw_p); - origin = raw2int(f->origin); - - /* while tracking finger still valid, count all fingers */ - if (ptest > PRESSURE_LOW && origin) { - abs_p = ptest; - abs_w = int2bound(&c->w, raw_w); - abs_x = int2bound(&c->x, raw_x - c->x.devmin); - abs_y = int2bound(&c->y, c->y.devmax - raw_y); - while (raw_n--) { - ptest = int2bound(&c->p, - raw2int(f->force_major)); - if (ptest > PRESSURE_LOW) - nmax++; - if (ptest > PRESSURE_HIGH) - nmin++; - f++; - } - } + for (i = 0; i < raw_n; i++) { + if (raw2int(f[i].touch_major) == 0) + continue; + dev->pos[n].x = raw2int(f[i].abs_x); + dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y); + dev->index[n++] = &f[i]; } - /* set the integrated button if applicable */ - if (c->tp_type == TYPE2) - ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); - - if (dev->fingers < nmin) - dev->fingers = nmin; - if (dev->fingers > nmax) - dev->fingers = nmax; - - input_report_key(input, BTN_TOUCH, dev->fingers > 0); - input_report_key(input, BTN_TOOL_FINGER, dev->fingers == 1); - input_report_key(input, BTN_TOOL_DOUBLETAP, dev->fingers == 2); - input_report_key(input, BTN_TOOL_TRIPLETAP, dev->fingers == 3); - input_report_key(input, BTN_TOOL_QUADTAP, dev->fingers > 3); - - input_report_abs(input, ABS_PRESSURE, abs_p); - input_report_abs(input, ABS_TOOL_WIDTH, abs_w); + input_mt_assign_slots(input, dev->slots, dev->pos, n); - if (abs_p) { - input_report_abs(input, ABS_X, abs_x); - input_report_abs(input, ABS_Y, abs_y); + for (i = 0; i < n; i++) + report_finger_data(input, dev->slots[i], + &dev->pos[i], dev->index[i]); - dprintk(8, - "bcm5974: abs: p: %+05d w: %+05d x: %+05d y: %+05d " - "nmin: %d nmax: %d n: %d ibt: %d\n", abs_p, 
abs_w, - abs_x, abs_y, nmin, nmax, dev->fingers, ibt); + input_mt_sync_frame(input); - } + report_synaptics_data(input, c, f, raw_n); /* type 2 reports button events via ibt only */ - if (c->tp_type == TYPE2) + if (c->tp_type == TYPE2) { + int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); input_report_key(input, BTN_LEFT, ibt); + } input_sync(input); @@ -742,9 +693,11 @@ static int bcm5974_start_traffic(struct bcm5974 *dev) goto err_out; } - error = usb_submit_urb(dev->bt_urb, GFP_KERNEL); - if (error) - goto err_reset_mode; + if (dev->bt_urb) { + error = usb_submit_urb(dev->bt_urb, GFP_KERNEL); + if (error) + goto err_reset_mode; + } error = usb_submit_urb(dev->tp_urb, GFP_KERNEL); if (error) @@ -868,19 +821,23 @@ static int bcm5974_probe(struct usb_interface *iface, mutex_init(&dev->pm_mutex); /* setup urbs */ - dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL); - if (!dev->bt_urb) - goto err_free_devs; + if (cfg->tp_type == TYPE1) { + dev->bt_urb = usb_alloc_urb(0, GFP_KERNEL); + if (!dev->bt_urb) + goto err_free_devs; + } dev->tp_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->tp_urb) goto err_free_bt_urb; - dev->bt_data = usb_alloc_coherent(dev->udev, + if (dev->bt_urb) { + dev->bt_data = usb_alloc_coherent(dev->udev, dev->cfg.bt_datalen, GFP_KERNEL, &dev->bt_urb->transfer_dma); - if (!dev->bt_data) - goto err_free_urb; + if (!dev->bt_data) + goto err_free_urb; + } dev->tp_data = usb_alloc_coherent(dev->udev, dev->cfg.tp_datalen, GFP_KERNEL, @@ -888,10 +845,11 @@ static int bcm5974_probe(struct usb_interface *iface, if (!dev->tp_data) goto err_free_bt_buffer; - usb_fill_int_urb(dev->bt_urb, udev, - usb_rcvintpipe(udev, cfg->bt_ep), - dev->bt_data, dev->cfg.bt_datalen, - bcm5974_irq_button, dev, 1); + if (dev->bt_urb) + usb_fill_int_urb(dev->bt_urb, udev, + usb_rcvintpipe(udev, cfg->bt_ep), + dev->bt_data, dev->cfg.bt_datalen, + bcm5974_irq_button, dev, 1); usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), @@ -929,8 +887,9 @@ static int bcm5974_probe(struct usb_interface *iface, usb_free_coherent(dev->udev, dev->cfg.tp_datalen, dev->tp_data, dev->tp_urb->transfer_dma); err_free_bt_buffer: - usb_free_coherent(dev->udev, dev->cfg.bt_datalen, - dev->bt_data, dev->bt_urb->transfer_dma); + if (dev->bt_urb) + usb_free_coherent(dev->udev, dev->cfg.bt_datalen, + dev->bt_data, dev->bt_urb->transfer_dma); err_free_urb: usb_free_urb(dev->tp_urb); err_free_bt_urb: @@ -951,8 +910,9 @@ static void bcm5974_disconnect(struct usb_interface *iface) input_unregister_device(dev->input); usb_free_coherent(dev->udev, dev->cfg.tp_datalen, dev->tp_data, dev->tp_urb->transfer_dma); - usb_free_coherent(dev->udev, dev->cfg.bt_datalen, - dev->bt_data, dev->bt_urb->transfer_dma); + if (dev->bt_urb) + usb_free_coherent(dev->udev, dev->cfg.bt_datalen, + dev->bt_data, dev->bt_urb->transfer_dma); usb_free_urb(dev->tp_urb); usb_free_urb(dev->bt_urb); kfree(dev); diff --git a/trunk/drivers/input/mouse/elantech.c b/trunk/drivers/input/mouse/elantech.c index 479011004a11..1e8e42fb03a4 100644 --- a/trunk/drivers/input/mouse/elantech.c +++ b/trunk/drivers/input/mouse/elantech.c @@ -1004,7 +1004,7 @@ static int elantech_set_input_params(struct psmouse *psmouse) input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2, ETP_WMAX_V2, 0, 0); } - input_mt_init_slots(dev, 2); + input_mt_init_slots(dev, 2, 0); input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); break; @@ -1035,7 +1035,7 @@ static int elantech_set_input_params(struct psmouse 
*psmouse) input_set_abs_params(dev, ABS_TOOL_WIDTH, ETP_WMIN_V2, ETP_WMAX_V2, 0, 0); /* Multitouch capable pad, up to 5 fingers. */ - input_mt_init_slots(dev, ETP_MAX_FINGERS); + input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); diff --git a/trunk/drivers/input/mouse/sentelic.c b/trunk/drivers/input/mouse/sentelic.c index 3f5649f19082..e582922bacf7 100644 --- a/trunk/drivers/input/mouse/sentelic.c +++ b/trunk/drivers/input/mouse/sentelic.c @@ -721,6 +721,17 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse) switch (psmouse->packet[0] >> FSP_PKT_TYPE_SHIFT) { case FSP_PKT_TYPE_ABS: + + if ((packet[0] == 0x48 || packet[0] == 0x49) && + packet[1] == 0 && packet[2] == 0) { + /* + * Ignore coordinate noise when finger leaving the + * surface, otherwise cursor may jump to upper-left + * corner. + */ + packet[3] &= 0xf0; + } + abs_x = GET_ABS_X(packet); abs_y = GET_ABS_Y(packet); @@ -960,7 +971,7 @@ static int fsp_set_input_params(struct psmouse *psmouse) input_set_abs_params(dev, ABS_X, 0, abs_x, 0, 0); input_set_abs_params(dev, ABS_Y, 0, abs_y, 0, 0); - input_mt_init_slots(dev, 2); + input_mt_init_slots(dev, 2, 0); input_set_abs_params(dev, ABS_MT_POSITION_X, 0, abs_x, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, abs_y, 0, 0); } diff --git a/trunk/drivers/input/mouse/synaptics.c b/trunk/drivers/input/mouse/synaptics.c index 14eaecea2b70..37033ade79d3 100644 --- a/trunk/drivers/input/mouse/synaptics.c +++ b/trunk/drivers/input/mouse/synaptics.c @@ -1232,7 +1232,7 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv) input_set_abs_params(dev, ABS_PRESSURE, 0, 255, 0, 0); if (SYN_CAP_IMAGE_SENSOR(priv->ext_cap_0c)) { - input_mt_init_slots(dev, 2); + input_mt_init_slots(dev, 2, 0); set_abs_position_params(dev, priv, ABS_MT_POSITION_X, ABS_MT_POSITION_Y); /* Image sensors can report per-contact pressure */ @@ -1244,7 +1244,7 @@ static void set_input_params(struct input_dev *dev, struct synaptics_data *priv) } else if (SYN_CAP_ADV_GESTURE(priv->ext_cap_0c)) { /* Non-image sensors with AGM use semi-mt */ __set_bit(INPUT_PROP_SEMI_MT, dev->propbit); - input_mt_init_slots(dev, 2); + input_mt_init_slots(dev, 2, 0); set_abs_position_params(dev, priv, ABS_MT_POSITION_X, ABS_MT_POSITION_Y); } diff --git a/trunk/drivers/input/serio/ambakmi.c b/trunk/drivers/input/serio/ambakmi.c index 2ffd110bd5bc..2e77246c2e5a 100644 --- a/trunk/drivers/input/serio/ambakmi.c +++ b/trunk/drivers/input/serio/ambakmi.c @@ -72,7 +72,7 @@ static int amba_kmi_open(struct serio *io) unsigned int divisor; int ret; - ret = clk_enable(kmi->clk); + ret = clk_prepare_enable(kmi->clk); if (ret) goto out; @@ -92,7 +92,7 @@ static int amba_kmi_open(struct serio *io) return 0; clk_disable: - clk_disable(kmi->clk); + clk_disable_unprepare(kmi->clk); out: return ret; } @@ -104,7 +104,7 @@ static void amba_kmi_close(struct serio *io) writeb(0, KMICR); free_irq(kmi->irq, kmi); - clk_disable(kmi->clk); + clk_disable_unprepare(kmi->clk); } static int __devinit amba_kmi_probe(struct amba_device *dev, diff --git a/trunk/drivers/input/serio/i8042-x86ia64io.h b/trunk/drivers/input/serio/i8042-x86ia64io.h index 5ec774d6c82b..d6cc77a53c7e 100644 --- a/trunk/drivers/input/serio/i8042-x86ia64io.h +++ b/trunk/drivers/input/serio/i8042-x86ia64io.h @@ -176,6 +176,20 @@ static const struct dmi_system_id __initconst 
i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Spring Peak"), }, }, + { + /* Gigabyte T1005 - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005"), + }, + }, + { + /* Gigabyte T1005M/P - defines wrong chassis type ("Other") */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "T1005M/P"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), @@ -319,6 +333,12 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"), + }, + }, { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"), diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c index 002041975de9..2a81ce375f75 100644 --- a/trunk/drivers/input/tablet/wacom_wac.c +++ b/trunk/drivers/input/tablet/wacom_wac.c @@ -1530,7 +1530,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); - input_mt_init_slots(input_dev, features->touch_max); + input_mt_init_slots(input_dev, features->touch_max, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); @@ -1575,7 +1575,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, case TABLETPC2FG: if (features->device_type == BTN_TOOL_FINGER) { - input_mt_init_slots(input_dev, features->touch_max); + input_mt_init_slots(input_dev, features->touch_max, 0); input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE, 0, MT_TOOL_MAX, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_X, @@ -1631,7 +1631,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev, __set_bit(BTN_TOOL_FINGER, input_dev->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); - input_mt_init_slots(input_dev, features->touch_max); + input_mt_init_slots(input_dev, features->touch_max, 0); if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { __set_bit(BTN_TOOL_TRIPLETAP, @@ -1848,7 +1848,10 @@ static const struct wacom_features wacom_features_0x2A = { "Wacom Intuos5 M", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS5, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xF4 = - { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, + { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, + 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; +static const struct wacom_features wacom_features_0xF8 = + { "Wacom Cintiq 24HD touch", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x3F = { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, @@ -2091,6 +2094,7 @@ const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0xEF) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, + { USB_DEVICE_WACOM(0xF8) }, { USB_DEVICE_WACOM(0xFA) }, { USB_DEVICE_LENOVO(0x6004) }, { } diff --git a/trunk/drivers/input/touchscreen/atmel_mxt_ts.c b/trunk/drivers/input/touchscreen/atmel_mxt_ts.c index 4623cc69fc60..e92615d0b1b0 100644 --- a/trunk/drivers/input/touchscreen/atmel_mxt_ts.c +++ b/trunk/drivers/input/touchscreen/atmel_mxt_ts.c @@ -1152,7 +1152,7 @@ static int __devinit mxt_probe(struct i2c_client *client, /* For multi touch */ num_mt_slots = data->T9_reportid_max - 
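/*
 * Sketch of how the i8042 DMI quirk entries added above are structured: a
 * sentinel-terminated dmi_system_id table where every .matches[] field must
 * match for the quirk to apply; i8042 evaluates such tables with
 * dmi_check_system() at init time.  The vendor and product strings below
 * are invented for illustration.
 */
#include <linux/dmi.h>

static const struct dmi_system_id example_noloop_table[] __initconst = {
	{
		/* hypothetical machine that misreports its chassis type */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
		},
	},
	{ }	/* terminating empty entry */
};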
data->T9_reportid_min + 1; - error = input_mt_init_slots(input_dev, num_mt_slots); + error = input_mt_init_slots(input_dev, num_mt_slots, 0); if (error) goto err_free_object; input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, diff --git a/trunk/drivers/input/touchscreen/cyttsp_core.c b/trunk/drivers/input/touchscreen/cyttsp_core.c index f030d9ec795d..8e60437ac85b 100644 --- a/trunk/drivers/input/touchscreen/cyttsp_core.c +++ b/trunk/drivers/input/touchscreen/cyttsp_core.c @@ -571,7 +571,7 @@ struct cyttsp *cyttsp_probe(const struct cyttsp_bus_ops *bus_ops, input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, CY_MAXZ, 0, 0); - input_mt_init_slots(input_dev, CY_MAX_ID); + input_mt_init_slots(input_dev, CY_MAX_ID, 0); error = request_threaded_irq(ts->irq, NULL, cyttsp_irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, diff --git a/trunk/drivers/input/touchscreen/edt-ft5x06.c b/trunk/drivers/input/touchscreen/edt-ft5x06.c index 9afc777a40a7..099d144ab7c9 100644 --- a/trunk/drivers/input/touchscreen/edt-ft5x06.c +++ b/trunk/drivers/input/touchscreen/edt-ft5x06.c @@ -566,9 +566,12 @@ static ssize_t edt_ft5x06_debugfs_raw_data_read(struct file *file, } read = min_t(size_t, count, tsdata->raw_bufsize - *off); - error = copy_to_user(buf, tsdata->raw_buffer + *off, read); - if (!error) - *off += read; + if (copy_to_user(buf, tsdata->raw_buffer + *off, read)) { + error = -EFAULT; + goto out; + } + + *off += read; out: mutex_unlock(&tsdata->mutex); return error ?: read; @@ -602,6 +605,7 @@ edt_ft5x06_ts_teardown_debugfs(struct edt_ft5x06_ts_data *tsdata) { if (tsdata->debug_dir) debugfs_remove_recursive(tsdata->debug_dir); + kfree(tsdata->raw_buffer); } #else @@ -778,7 +782,7 @@ static int __devinit edt_ft5x06_ts_probe(struct i2c_client *client, 0, tsdata->num_x * 64 - 1, 0, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, 0, tsdata->num_y * 64 - 1, 0, 0); - error = input_mt_init_slots(input, MAX_SUPPORT_POINTS); + error = input_mt_init_slots(input, MAX_SUPPORT_POINTS, 0); if (error) { dev_err(&client->dev, "Unable to init MT slots.\n"); goto err_free_mem; @@ -843,7 +847,6 @@ static int __devexit edt_ft5x06_ts_remove(struct i2c_client *client) if (gpio_is_valid(pdata->reset_pin)) gpio_free(pdata->reset_pin); - kfree(tsdata->raw_buffer); kfree(tsdata); return 0; diff --git a/trunk/drivers/input/touchscreen/eeti_ts.c b/trunk/drivers/input/touchscreen/eeti_ts.c index 503c7096ed36..908407efc672 100644 --- a/trunk/drivers/input/touchscreen/eeti_ts.c +++ b/trunk/drivers/input/touchscreen/eeti_ts.c @@ -48,7 +48,7 @@ struct eeti_ts_priv { struct input_dev *input; struct work_struct work; struct mutex mutex; - int irq, irq_active_high; + int irq_gpio, irq, irq_active_high; }; #define EETI_TS_BITDEPTH (11) @@ -62,7 +62,7 @@ struct eeti_ts_priv { static inline int eeti_ts_irq_active(struct eeti_ts_priv *priv) { - return gpio_get_value(irq_to_gpio(priv->irq)) == priv->irq_active_high; + return gpio_get_value(priv->irq_gpio) == priv->irq_active_high; } static void eeti_ts_read(struct work_struct *work) @@ -157,7 +157,7 @@ static void eeti_ts_close(struct input_dev *dev) static int __devinit eeti_ts_probe(struct i2c_client *client, const struct i2c_device_id *idp) { - struct eeti_ts_platform_data *pdata; + struct eeti_ts_platform_data *pdata = client->dev.platform_data; struct eeti_ts_priv *priv; struct input_dev *input; unsigned int irq_flags; @@ -199,9 +199,12 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, priv->client = client; priv->input = input; - priv->irq = client->irq; + priv->irq_gpio = 
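/*
 * Sketch of the copy_to_user() fix pattern from the edt_ft5x06 hunk above:
 * copy_to_user() returns the number of bytes it could NOT copy, so a
 * non-zero result must be converted to -EFAULT instead of being returned
 * or folded into the file offset as-is.  The payload and read routine are
 * illustrative, not the driver's.
 */
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_read(struct file *file, char __user *buf,
			    size_t count, loff_t *off)
{
	static const char data[] = "example payload\n";
	size_t avail = sizeof(data) - 1;
	size_t len;

	if (*off >= avail)
		return 0;

	len = min_t(size_t, count, avail - *off);
	if (copy_to_user(buf, data + *off, len))
		return -EFAULT;	/* do not leak the "bytes left" count */

	*off += len;
	return len;
}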
pdata->irq_gpio; + priv->irq = gpio_to_irq(pdata->irq_gpio); - pdata = client->dev.platform_data; + err = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name); + if (err < 0) + goto err1; if (pdata) priv->irq_active_high = pdata->irq_active_high; @@ -215,13 +218,13 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, err = input_register_device(input); if (err) - goto err1; + goto err2; err = request_irq(priv->irq, eeti_ts_isr, irq_flags, client->name, priv); if (err) { dev_err(&client->dev, "Unable to request touchscreen IRQ.\n"); - goto err2; + goto err3; } /* @@ -233,9 +236,11 @@ static int __devinit eeti_ts_probe(struct i2c_client *client, device_init_wakeup(&client->dev, 0); return 0; -err2: +err3: input_unregister_device(input); input = NULL; /* so we dont try to free it below */ +err2: + gpio_free(pdata->irq_gpio); err1: input_free_device(input); kfree(priv); diff --git a/trunk/drivers/input/touchscreen/egalax_ts.c b/trunk/drivers/input/touchscreen/egalax_ts.c index 70524dd34f42..c1e3460f1195 100644 --- a/trunk/drivers/input/touchscreen/egalax_ts.c +++ b/trunk/drivers/input/touchscreen/egalax_ts.c @@ -204,7 +204,7 @@ static int __devinit egalax_ts_probe(struct i2c_client *client, ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0); - input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS); + input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0); input_set_drvdata(input_dev, ts); diff --git a/trunk/drivers/input/touchscreen/ili210x.c b/trunk/drivers/input/touchscreen/ili210x.c index c0044175a921..4ac69760ec08 100644 --- a/trunk/drivers/input/touchscreen/ili210x.c +++ b/trunk/drivers/input/touchscreen/ili210x.c @@ -252,7 +252,7 @@ static int __devinit ili210x_i2c_probe(struct i2c_client *client, input_set_abs_params(input, ABS_Y, 0, ymax, 0, 0); /* Multi touch */ - input_mt_init_slots(input, MAX_TOUCHES); + input_mt_init_slots(input, MAX_TOUCHES, 0); input_set_abs_params(input, ABS_MT_POSITION_X, 0, xmax, 0, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, 0, ymax, 0, 0); diff --git a/trunk/drivers/input/touchscreen/mms114.c b/trunk/drivers/input/touchscreen/mms114.c index 49c44bbf548d..560cf09d1c5a 100644 --- a/trunk/drivers/input/touchscreen/mms114.c +++ b/trunk/drivers/input/touchscreen/mms114.c @@ -404,7 +404,7 @@ static int __devinit mms114_probe(struct i2c_client *client, input_set_abs_params(input_dev, ABS_Y, 0, data->pdata->y_size, 0, 0); /* For multi touch */ - input_mt_init_slots(input_dev, MMS114_MAX_TOUCH); + input_mt_init_slots(input_dev, MMS114_MAX_TOUCH, 0); input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, MMS114_MAX_AREA, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_X, diff --git a/trunk/drivers/input/touchscreen/penmount.c b/trunk/drivers/input/touchscreen/penmount.c index 4ccde45b9da2..b49f0b836925 100644 --- a/trunk/drivers/input/touchscreen/penmount.c +++ b/trunk/drivers/input/touchscreen/penmount.c @@ -264,7 +264,7 @@ static int pm_connect(struct serio *serio, struct serio_driver *drv) input_set_abs_params(pm->dev, ABS_Y, 0, max_y, 0, 0); if (pm->maxcontacts > 1) { - input_mt_init_slots(pm->dev, pm->maxcontacts); + input_mt_init_slots(pm->dev, pm->maxcontacts, 0); input_set_abs_params(pm->dev, ABS_MT_POSITION_X, 0, max_x, 0, 0); input_set_abs_params(pm->dev, diff --git a/trunk/drivers/input/touchscreen/usbtouchscreen.c b/trunk/drivers/input/touchscreen/usbtouchscreen.c index e32709e0dd65..721fdb3597ca 100644 --- a/trunk/drivers/input/touchscreen/usbtouchscreen.c +++ 
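/*
 * Sketch of the eeti_ts conversion above: rather than deriving the GPIO
 * from the interrupt with the deprecated irq_to_gpio(), the platform data
 * carries the GPIO number, the driver claims it as an input and derives
 * the IRQ from it with gpio_to_irq().  The private structure, handler and
 * trigger flags below are stand-ins.
 */
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

struct example_priv {
	int irq_gpio;
	int irq;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_setup_irq(struct example_priv *priv, int gpio,
			     const char *name)
{
	int err;

	err = gpio_request_one(gpio, GPIOF_IN, name);
	if (err < 0)
		return err;

	priv->irq_gpio = gpio;
	priv->irq = gpio_to_irq(gpio);

	err = request_irq(priv->irq, example_isr, IRQF_TRIGGER_FALLING,
			  name, priv);
	if (err) {
		gpio_free(gpio);
		return err;
	}

	/* the line level can now be polled via gpio_get_value(priv->irq_gpio) */
	return 0;
}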
b/trunk/drivers/input/touchscreen/usbtouchscreen.c @@ -304,6 +304,45 @@ static int e2i_read_data(struct usbtouch_usb *dev, unsigned char *pkt) #define EGALAX_PKT_TYPE_REPT 0x80 #define EGALAX_PKT_TYPE_DIAG 0x0A +static int egalax_init(struct usbtouch_usb *usbtouch) +{ + int ret, i; + unsigned char *buf; + struct usb_device *udev = interface_to_usbdev(usbtouch->interface); + + /* + * An eGalax diagnostic packet kicks the device into using the right + * protocol. We send a "check active" packet. The response will be + * read later and ignored. + */ + + buf = kmalloc(3, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[0] = EGALAX_PKT_TYPE_DIAG; + buf[1] = 1; /* length */ + buf[2] = 'A'; /* command - check active */ + + for (i = 0; i < 3; i++) { + ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), + 0, + USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, + 0, 0, buf, 3, + USB_CTRL_SET_TIMEOUT); + if (ret >= 0) { + ret = 0; + break; + } + if (ret != -EPIPE) + break; + } + + kfree(buf); + + return ret; +} + static int egalax_read_data(struct usbtouch_usb *dev, unsigned char *pkt) { if ((pkt[0] & EGALAX_PKT_TYPE_MASK) != EGALAX_PKT_TYPE_REPT) @@ -1056,6 +1095,7 @@ static struct usbtouch_device_info usbtouch_dev_info[] = { .process_pkt = usbtouch_process_multi, .get_pkt_len = egalax_get_pkt_len, .read_data = egalax_read_data, + .init = egalax_init, }, #endif diff --git a/trunk/drivers/input/touchscreen/wacom_w8001.c b/trunk/drivers/input/touchscreen/wacom_w8001.c index 8f9ad2f893b8..9a83be6b6584 100644 --- a/trunk/drivers/input/touchscreen/wacom_w8001.c +++ b/trunk/drivers/input/touchscreen/wacom_w8001.c @@ -471,7 +471,7 @@ static int w8001_setup(struct w8001 *w8001) case 5: w8001->pktlen = W8001_PKTLEN_TOUCH2FG; - input_mt_init_slots(dev, 2); + input_mt_init_slots(dev, 2, 0); input_set_abs_params(dev, ABS_MT_POSITION_X, 0, touch.x, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, diff --git a/trunk/drivers/iommu/amd_iommu.c b/trunk/drivers/iommu/amd_iommu.c index 6d1cbdfc9b2a..e89daf1b21b4 100644 --- a/trunk/drivers/iommu/amd_iommu.c +++ b/trunk/drivers/iommu/amd_iommu.c @@ -266,7 +266,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to) static int iommu_init_device(struct device *dev) { - struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev); + struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev); struct iommu_dev_data *dev_data; struct iommu_group *group; u16 alias; @@ -293,11 +293,18 @@ static int iommu_init_device(struct device *dev) dev_data->alias_data = alias_data; dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff); - } else + } + + if (dma_pdev == NULL) dma_pdev = pci_dev_get(pdev); + /* Account for quirked devices */ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + /* + * If it's a multifunction device that does not support our + * required ACS flags, add to the same group as function 0. + */ if (dma_pdev->multifunction && !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) swap_pci_ref(&dma_pdev, @@ -305,14 +312,28 @@ static int iommu_init_device(struct device *dev) PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 0))); + /* + * Devices on the root bus go through the iommu. If that's not us, + * find the next upstream device and test ACS up to the root bus. + * Finding the next device may require skipping virtual buses. 
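/*
 * Sketch of the retry idiom used by egalax_init() above: vendor control
 * transfers are sent from a kmalloc'ed (DMA-safe) buffer, and a stalled
 * control endpoint (-EPIPE) is retried a few times before giving up.  The
 * request number, payload and retry count are illustrative assumptions,
 * not taken from the device's protocol.
 */
#include <linux/usb.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_send_vendor_cmd(struct usb_device *udev,
				   const u8 *cmd, int len)
{
	u8 *buf;
	int ret, i;

	buf = kmemdup(cmd, len, GFP_KERNEL);	/* never point at stack memory */
	if (!buf)
		return -ENOMEM;

	for (i = 0; i < 3; i++) {
		ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				      0,	/* bRequest */
				      USB_DIR_OUT | USB_TYPE_VENDOR |
				      USB_RECIP_DEVICE,
				      0, 0, buf, len,
				      USB_CTRL_SET_TIMEOUT);
		if (ret >= 0) {		/* transferred: done */
			ret = 0;
			break;
		}
		if (ret != -EPIPE)	/* only retry endpoint stalls */
			break;
	}

	kfree(buf);
	return ret;
}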
+ */ while (!pci_is_root_bus(dma_pdev->bus)) { - if (pci_acs_path_enabled(dma_pdev->bus->self, - NULL, REQ_ACS_FLAGS)) + struct pci_bus *bus = dma_pdev->bus; + + while (!bus->self) { + if (!pci_is_root_bus(bus)) + bus = bus->parent; + else + goto root_bus; + } + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; - swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); + swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); } +root_bus: group = iommu_group_get(&dma_pdev->dev); pci_dev_put(dma_pdev); if (!group) { diff --git a/trunk/drivers/iommu/amd_iommu_init.c b/trunk/drivers/iommu/amd_iommu_init.c index 500e7f15f5c2..18a89b760aaa 100644 --- a/trunk/drivers/iommu/amd_iommu_init.c +++ b/trunk/drivers/iommu/amd_iommu_init.c @@ -1111,7 +1111,7 @@ static void print_iommu_info(void) if (iommu->cap & (1 << IOMMU_CAP_EFR)) { pr_info("AMD-Vi: Extended features: "); - for (i = 0; ARRAY_SIZE(feat_str); ++i) { + for (i = 0; i < ARRAY_SIZE(feat_str); ++i) { if (iommu_feature(iommu, (1ULL << i))) pr_cont(" %s", feat_str[i]); } @@ -1131,9 +1131,6 @@ static int __init amd_iommu_init_pci(void) break; } - /* Make sure ACS will be enabled */ - pci_request_acs(); - ret = amd_iommu_init_devices(); print_iommu_info(); @@ -1652,6 +1649,9 @@ static bool detect_ivrs(void) early_acpi_os_unmap_memory((char __iomem *)ivrs_base, ivrs_size); + /* Make sure ACS will be enabled during PCI probe */ + pci_request_acs(); + return true; } diff --git a/trunk/drivers/iommu/exynos-iommu.c b/trunk/drivers/iommu/exynos-iommu.c index 45350ff5e93c..80bad32aa463 100644 --- a/trunk/drivers/iommu/exynos-iommu.c +++ b/trunk/drivers/iommu/exynos-iommu.c @@ -732,9 +732,9 @@ static int exynos_iommu_domain_init(struct iommu_domain *domain) spin_lock_init(&priv->pgtablelock); INIT_LIST_HEAD(&priv->clients); - dom->geometry.aperture_start = 0; - dom->geometry.aperture_end = ~0UL; - dom->geometry.force_aperture = true; + domain->geometry.aperture_start = 0; + domain->geometry.aperture_end = ~0UL; + domain->geometry.force_aperture = true; domain->priv = priv; return 0; diff --git a/trunk/drivers/iommu/intel-iommu.c b/trunk/drivers/iommu/intel-iommu.c index 7469b5346643..2297ec193eb4 100644 --- a/trunk/drivers/iommu/intel-iommu.c +++ b/trunk/drivers/iommu/intel-iommu.c @@ -2008,6 +2008,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) if (!drhd) { printk(KERN_ERR "IOMMU: can't find DMAR for device %s\n", pci_name(pdev)); + free_domain_mem(domain); return NULL; } iommu = drhd->iommu; @@ -4124,8 +4125,13 @@ static int intel_iommu_add_device(struct device *dev) } else dma_pdev = pci_dev_get(pdev); + /* Account for quirked devices */ swap_pci_ref(&dma_pdev, pci_get_dma_source(dma_pdev)); + /* + * If it's a multifunction device that does not support our + * required ACS flags, add to the same group as function 0. + */ if (dma_pdev->multifunction && !pci_acs_enabled(dma_pdev, REQ_ACS_FLAGS)) swap_pci_ref(&dma_pdev, @@ -4133,14 +4139,28 @@ static int intel_iommu_add_device(struct device *dev) PCI_DEVFN(PCI_SLOT(dma_pdev->devfn), 0))); + /* + * Devices on the root bus go through the iommu. If that's not us, + * find the next upstream device and test ACS up to the root bus. + * Finding the next device may require skipping virtual buses. 
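/*
 * Sketch of the bus walk introduced above for amd_iommu (and repeated for
 * intel-iommu below): SR-IOV can create virtual buses that have no bridge
 * (bus->self == NULL), so before testing ACS on the upstream device the
 * code climbs parent buses until it finds a real bridge or reaches the
 * root bus.  The helper name is invented; the logic mirrors the hunks.
 */
#include <linux/pci.h>

static struct pci_dev *example_upstream_bridge(struct pci_dev *pdev)
{
	struct pci_bus *bus = pdev->bus;

	if (pci_is_root_bus(bus))
		return NULL;		/* already at the top */

	while (!bus->self) {		/* skip bridgeless (virtual) buses */
		if (pci_is_root_bus(bus))
			return NULL;
		bus = bus->parent;
	}

	return pci_dev_get(bus->self);	/* caller must pci_dev_put() */
}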
+ */ while (!pci_is_root_bus(dma_pdev->bus)) { - if (pci_acs_path_enabled(dma_pdev->bus->self, - NULL, REQ_ACS_FLAGS)) + struct pci_bus *bus = dma_pdev->bus; + + while (!bus->self) { + if (!pci_is_root_bus(bus)) + bus = bus->parent; + else + goto root_bus; + } + + if (pci_acs_path_enabled(bus->self, NULL, REQ_ACS_FLAGS)) break; - swap_pci_ref(&dma_pdev, pci_dev_get(dma_pdev->bus->self)); + swap_pci_ref(&dma_pdev, pci_dev_get(bus->self)); } +root_bus: group = iommu_group_get(&dma_pdev->dev); pci_dev_put(dma_pdev); if (!group) { diff --git a/trunk/drivers/iommu/intel_irq_remapping.c b/trunk/drivers/iommu/intel_irq_remapping.c index e0b18f3ae9a8..af8904de1d44 100644 --- a/trunk/drivers/iommu/intel_irq_remapping.c +++ b/trunk/drivers/iommu/intel_irq_remapping.c @@ -736,6 +736,7 @@ int __init parse_ioapics_under_ir(void) { struct dmar_drhd_unit *drhd; int ir_supported = 0; + int ioapic_idx; for_each_drhd_unit(drhd) { struct intel_iommu *iommu = drhd->iommu; @@ -748,13 +749,20 @@ int __init parse_ioapics_under_ir(void) } } - if (ir_supported && ir_ioapic_num != nr_ioapics) { - printk(KERN_WARNING - "Not all IO-APIC's listed under remapping hardware\n"); - return -1; + if (!ir_supported) + return 0; + + for (ioapic_idx = 0; ioapic_idx < nr_ioapics; ioapic_idx++) { + int ioapic_id = mpc_ioapic_id(ioapic_idx); + if (!map_ioapic_to_ir(ioapic_id)) { + pr_err(FW_BUG "ioapic %d has no mapping iommu, " + "interrupt remapping will be disabled\n", + ioapic_id); + return -1; + } } - return ir_supported; + return 1; } int __init ir_dev_scope_init(void) diff --git a/trunk/drivers/iommu/tegra-smmu.c b/trunk/drivers/iommu/tegra-smmu.c index 4ba325ab6262..2a4bb36bc688 100644 --- a/trunk/drivers/iommu/tegra-smmu.c +++ b/trunk/drivers/iommu/tegra-smmu.c @@ -799,14 +799,14 @@ static void smmu_iommu_detach_dev(struct iommu_domain *domain, goto out; } } - dev_err(smmu->dev, "Couldn't find %s\n", dev_name(c->dev)); + dev_err(smmu->dev, "Couldn't find %s\n", dev_name(dev)); out: spin_unlock(&as->client_lock); } static int smmu_iommu_domain_init(struct iommu_domain *domain) { - int i, err = -ENODEV; + int i, err = -EAGAIN; unsigned long flags; struct smmu_as *as; struct smmu_device *smmu = smmu_handle; @@ -814,11 +814,14 @@ static int smmu_iommu_domain_init(struct iommu_domain *domain) /* Look for a free AS with lock held */ for (i = 0; i < smmu->num_as; i++) { as = &smmu->as[i]; - if (!as->pdir_page) { - err = alloc_pdir(as); - if (!err) - goto found; - } + + if (as->pdir_page) + continue; + + err = alloc_pdir(as); + if (!err) + goto found; + if (err != -EAGAIN) break; } diff --git a/trunk/drivers/isdn/hardware/mISDN/avmfritz.c b/trunk/drivers/isdn/hardware/mISDN/avmfritz.c index fa6ca4733725..dceaec821b0e 100644 --- a/trunk/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/trunk/drivers/isdn/hardware/mISDN/avmfritz.c @@ -857,8 +857,9 @@ avm_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); + cancel_work_sync(&bch->workq); spin_lock_irqsave(&fc->lock, flags); - mISDN_freebchannel(bch); + mISDN_clear_bchannel(bch); modehdlc(bch, ISDN_P_NONE); spin_unlock_irqrestore(&fc->lock, flags); ch->protocol = ISDN_P_NONE; diff --git a/trunk/drivers/isdn/hardware/mISDN/hfcmulti.c b/trunk/drivers/isdn/hardware/mISDN/hfcmulti.c index 5e402cf2e795..f02794203bb1 100644 --- a/trunk/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/trunk/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -5059,6 +5059,7 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev, printk(KERN_INFO 
"HFC-E1 #%d has overlapping B-channels on fragment #%d\n", E1_cnt + 1, pt); + kfree(hc); return -EINVAL; } maskcheck |= hc->bmask[pt]; @@ -5086,6 +5087,7 @@ hfcmulti_init(struct hm_map *m, struct pci_dev *pdev, if ((poll >> 1) > sizeof(hc->silence_data)) { printk(KERN_ERR "HFCMULTI error: silence_data too small, " "please fix\n"); + kfree(hc); return -EINVAL; } for (i = 0; i < (poll >> 1); i++) diff --git a/trunk/drivers/isdn/hardware/mISDN/mISDNipac.c b/trunk/drivers/isdn/hardware/mISDN/mISDNipac.c index 752e0825591f..ccd7d851be26 100644 --- a/trunk/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/trunk/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -1406,8 +1406,9 @@ hscx_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); + cancel_work_sync(&bch->workq); spin_lock_irqsave(hx->ip->hwlock, flags); - mISDN_freebchannel(bch); + mISDN_clear_bchannel(bch); hscx_mode(hx, ISDN_P_NONE); spin_unlock_irqrestore(hx->ip->hwlock, flags); ch->protocol = ISDN_P_NONE; diff --git a/trunk/drivers/isdn/hardware/mISDN/mISDNisar.c b/trunk/drivers/isdn/hardware/mISDN/mISDNisar.c index be5973ded6d6..182ecf0626c2 100644 --- a/trunk/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/trunk/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -1588,8 +1588,9 @@ isar_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); + cancel_work_sync(&bch->workq); spin_lock_irqsave(ich->is->hwlock, flags); - mISDN_freebchannel(bch); + mISDN_clear_bchannel(bch); modeisar(ich, ISDN_P_NONE); spin_unlock_irqrestore(ich->is->hwlock, flags); ch->protocol = ISDN_P_NONE; diff --git a/trunk/drivers/isdn/hardware/mISDN/netjet.c b/trunk/drivers/isdn/hardware/mISDN/netjet.c index c3e3e7686273..9bcade59eb73 100644 --- a/trunk/drivers/isdn/hardware/mISDN/netjet.c +++ b/trunk/drivers/isdn/hardware/mISDN/netjet.c @@ -812,8 +812,9 @@ nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); + cancel_work_sync(&bch->workq); spin_lock_irqsave(&card->lock, flags); - mISDN_freebchannel(bch); + mISDN_clear_bchannel(bch); mode_tiger(bc, ISDN_P_NONE); spin_unlock_irqrestore(&card->lock, flags); ch->protocol = ISDN_P_NONE; diff --git a/trunk/drivers/isdn/hardware/mISDN/w6692.c b/trunk/drivers/isdn/hardware/mISDN/w6692.c index 26a86b846099..335fe6455002 100644 --- a/trunk/drivers/isdn/hardware/mISDN/w6692.c +++ b/trunk/drivers/isdn/hardware/mISDN/w6692.c @@ -1054,8 +1054,9 @@ w6692_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg) switch (cmd) { case CLOSE_CHANNEL: test_and_clear_bit(FLG_OPEN, &bch->Flags); + cancel_work_sync(&bch->workq); spin_lock_irqsave(&card->lock, flags); - mISDN_freebchannel(bch); + mISDN_clear_bchannel(bch); w6692_mode(bc, ISDN_P_NONE); spin_unlock_irqrestore(&card->lock, flags); ch->protocol = ISDN_P_NONE; diff --git a/trunk/drivers/isdn/isdnloop/isdnloop.c b/trunk/drivers/isdn/isdnloop/isdnloop.c index 5405ec644db3..baf2686aa8eb 100644 --- a/trunk/drivers/isdn/isdnloop/isdnloop.c +++ b/trunk/drivers/isdn/isdnloop/isdnloop.c @@ -16,7 +16,6 @@ #include #include "isdnloop.h" -static char *revision = "$Revision: 1.11.6.7 $"; static char *isdnloop_id = "loop0"; MODULE_DESCRIPTION("ISDN4Linux: Pseudo Driver that simulates an ISDN card"); @@ -1494,17 +1493,6 @@ isdnloop_addcard(char *id1) static int __init isdnloop_init(void) { - char *p; - char rev[10]; - - if ((p = strchr(revision, ':'))) { - strcpy(rev, p + 1); - p = strchr(rev, 
'$'); - *p = 0; - } else - strcpy(rev, " ??? "); - printk(KERN_NOTICE "isdnloop-ISDN-driver Rev%s\n", rev); - if (isdnloop_id) return (isdnloop_addcard(isdnloop_id)); diff --git a/trunk/drivers/isdn/mISDN/hwchannel.c b/trunk/drivers/isdn/mISDN/hwchannel.c index ef34fd40867c..2602be23f341 100644 --- a/trunk/drivers/isdn/mISDN/hwchannel.c +++ b/trunk/drivers/isdn/mISDN/hwchannel.c @@ -148,17 +148,16 @@ mISDN_clear_bchannel(struct bchannel *ch) ch->next_minlen = ch->init_minlen; ch->maxlen = ch->init_maxlen; ch->next_maxlen = ch->init_maxlen; + skb_queue_purge(&ch->rqueue); + ch->rcount = 0; } EXPORT_SYMBOL(mISDN_clear_bchannel); -int +void mISDN_freebchannel(struct bchannel *ch) { + cancel_work_sync(&ch->workq); mISDN_clear_bchannel(ch); - skb_queue_purge(&ch->rqueue); - ch->rcount = 0; - flush_work_sync(&ch->workq); - return 0; } EXPORT_SYMBOL(mISDN_freebchannel); diff --git a/trunk/drivers/isdn/mISDN/layer2.c b/trunk/drivers/isdn/mISDN/layer2.c index 0dc8abca1407..949cabb88f1c 100644 --- a/trunk/drivers/isdn/mISDN/layer2.c +++ b/trunk/drivers/isdn/mISDN/layer2.c @@ -2222,7 +2222,7 @@ create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei, InitWin(l2); l2->l2m.fsm = &l2fsm; if (test_bit(FLG_LAPB, &l2->flag) || - test_bit(FLG_PTP, &l2->flag) || + test_bit(FLG_FIXED_TEI, &l2->flag) || test_bit(FLG_LAPD_NET, &l2->flag)) l2->l2m.state = ST_L2_4; else diff --git a/trunk/drivers/leds/led-triggers.c b/trunk/drivers/leds/led-triggers.c index 6157cbbf4113..363975b3c925 100644 --- a/trunk/drivers/leds/led-triggers.c +++ b/trunk/drivers/leds/led-triggers.c @@ -224,7 +224,7 @@ void led_trigger_event(struct led_trigger *trig, struct led_classdev *led_cdev; led_cdev = list_entry(entry, struct led_classdev, trig_list); - led_set_brightness(led_cdev, brightness); + __led_set_brightness(led_cdev, brightness); } read_unlock(&trig->leddev_list_lock); } diff --git a/trunk/drivers/leds/leds-lp8788.c b/trunk/drivers/leds/leds-lp8788.c index 53bd136f1ef0..0ade6ebfc914 100644 --- a/trunk/drivers/leds/leds-lp8788.c +++ b/trunk/drivers/leds/leds-lp8788.c @@ -63,7 +63,7 @@ static int lp8788_led_init_device(struct lp8788_led *led, /* scale configuration */ addr = LP8788_ISINK_CTRL; mask = 1 << (cfg->num + LP8788_ISINK_SCALE_OFFSET); - val = cfg->scale << cfg->num; + val = cfg->scale << (cfg->num + LP8788_ISINK_SCALE_OFFSET); ret = lp8788_update_bits(led->lp, addr, mask, val); if (ret) return ret; diff --git a/trunk/drivers/leds/leds-renesas-tpu.c b/trunk/drivers/leds/leds-renesas-tpu.c index 9ee12c28059a..771ea067e680 100644 --- a/trunk/drivers/leds/leds-renesas-tpu.c +++ b/trunk/drivers/leds/leds-renesas-tpu.c @@ -247,7 +247,7 @@ static int __devinit r_tpu_probe(struct platform_device *pdev) if (!cfg) { dev_err(&pdev->dev, "missing platform data\n"); - goto err0; + return -ENODEV; } p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); diff --git a/trunk/drivers/lguest/x86/core.c b/trunk/drivers/lguest/x86/core.c index 39809035320a..4af12e1844d5 100644 --- a/trunk/drivers/lguest/x86/core.c +++ b/trunk/drivers/lguest/x86/core.c @@ -203,8 +203,8 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) * we set it now, so we can trap and pass that trap to the Guest if it * uses the FPU. */ - if (cpu->ts) - unlazy_fpu(current); + if (cpu->ts && user_has_fpu()) + stts(); /* * SYSENTER is an optimized way of doing system calls. 
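/*
 * Sketch of the bit-field rule behind the leds-lp8788 fix above: in a
 * read-modify-write ("update bits") sequence the value must be shifted
 * into the same position as the mask, otherwise the written bits are
 * silently masked off.  The register layout and offset are invented.
 */
#include <linux/types.h>

#define EXAMPLE_SCALE_OFFSET	4	/* field starts at bit 4 (hypothetical) */

static u8 example_update_scale(u8 reg, unsigned int chan, u8 scale)
{
	u8 mask = 1 << (chan + EXAMPLE_SCALE_OFFSET);
	u8 val  = scale << (chan + EXAMPLE_SCALE_OFFSET);	/* same shift as mask */

	return (reg & ~mask) | (val & mask);
}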
We can't allow @@ -234,6 +234,10 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) if (boot_cpu_has(X86_FEATURE_SEP)) wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0); + /* Clear the host TS bit if it was set above. */ + if (cpu->ts && user_has_fpu()) + clts(); + /* * If the Guest page faulted, then the cr2 register will tell us the * bad virtual address. We have to grab this now, because once we @@ -249,7 +253,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) * a different CPU. So all the critical stuff should be done * before this. */ - else if (cpu->regs->trapnum == 7) + else if (cpu->regs->trapnum == 7 && !user_has_fpu()) math_state_restore(); } diff --git a/trunk/drivers/md/dm-mpath.c b/trunk/drivers/md/dm-mpath.c index d8abb90a6c2f..034233eefc82 100644 --- a/trunk/drivers/md/dm-mpath.c +++ b/trunk/drivers/md/dm-mpath.c @@ -1555,6 +1555,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, unsigned long arg) { struct multipath *m = ti->private; + struct pgpath *pgpath; struct block_device *bdev; fmode_t mode; unsigned long flags; @@ -1570,12 +1571,14 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, if (!m->current_pgpath) __choose_pgpath(m, 0); - if (m->current_pgpath) { - bdev = m->current_pgpath->path.dev->bdev; - mode = m->current_pgpath->path.dev->mode; + pgpath = m->current_pgpath; + + if (pgpath) { + bdev = pgpath->path.dev->bdev; + mode = pgpath->path.dev->mode; } - if (m->queue_io) + if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path)) r = -EAGAIN; else if (!bdev) r = -EIO; diff --git a/trunk/drivers/md/dm-table.c b/trunk/drivers/md/dm-table.c index f90069029aae..100368eb7991 100644 --- a/trunk/drivers/md/dm-table.c +++ b/trunk/drivers/md/dm-table.c @@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector) return &t->targets[(KEYS_PER_NODE * n) + k]; } +static int count_device(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + unsigned *num_devices = data; + + (*num_devices)++; + + return 0; +} + +/* + * Check whether a table has no data devices attached using each + * target's iterate_devices method. + * Returns false if the result is unknown because a target doesn't + * support iterate_devices. + */ +bool dm_table_has_no_data_devices(struct dm_table *table) +{ + struct dm_target *uninitialized_var(ti); + unsigned i = 0, num_devices = 0; + + while (i < dm_table_get_num_targets(table)) { + ti = dm_table_get_target(table, i++); + + if (!ti->type->iterate_devices) + return false; + + ti->type->iterate_devices(ti, count_device, &num_devices); + if (num_devices) + return false; + } + + return true; +} + /* * Establish the new table's queue_limits and validate them. */ @@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev, return q && blk_queue_nonrot(q); } -static bool dm_table_is_nonrot(struct dm_table *t) +static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev, + sector_t start, sector_t len, void *data) +{ + struct request_queue *q = bdev_get_queue(dev->bdev); + + return q && !blk_queue_add_random(q); +} + +static bool dm_table_all_devices_attribute(struct dm_table *t, + iterate_devices_callout_fn func) { struct dm_target *ti; unsigned i = 0; - /* Ensure that all underlying device are non-rotational. 
*/ while (i < dm_table_get_num_targets(t)) { ti = dm_table_get_target(t, i++); if (!ti->type->iterate_devices || - !ti->type->iterate_devices(ti, device_is_nonrot, NULL)) + !ti->type->iterate_devices(ti, func, NULL)) return 0; } @@ -1396,13 +1439,23 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, if (!dm_table_discard_zeroes_data(t)) q->limits.discard_zeroes_data = 0; - if (dm_table_is_nonrot(t)) + /* Ensure that all underlying devices are non-rotational. */ + if (dm_table_all_devices_attribute(t, device_is_nonrot)) queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q); else queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q); dm_table_set_integrity(t); + /* + * Determine whether or not this queue's I/O timings contribute + * to the entropy pool, Only request-based targets use this. + * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not + * have it set. + */ + if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random)) + queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); + /* * QUEUE_FLAG_STACKABLE must be set after all queue settings are * visible to other CPUs because, once the flag is set, incoming bios diff --git a/trunk/drivers/md/dm-thin.c b/trunk/drivers/md/dm-thin.c index af1fc3b2c2ad..c29410af1e22 100644 --- a/trunk/drivers/md/dm-thin.c +++ b/trunk/drivers/md/dm-thin.c @@ -509,9 +509,9 @@ enum pool_mode { struct pool_features { enum pool_mode mode; - unsigned zero_new_blocks:1; - unsigned discard_enabled:1; - unsigned discard_passdown:1; + bool zero_new_blocks:1; + bool discard_enabled:1; + bool discard_passdown:1; }; struct thin_c; @@ -580,7 +580,8 @@ struct pool_c { struct dm_target_callbacks callbacks; dm_block_t low_water_blocks; - struct pool_features pf; + struct pool_features requested_pf; /* Features requested during table load */ + struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */ }; /* @@ -1839,6 +1840,47 @@ static void __requeue_bios(struct pool *pool) /*---------------------------------------------------------------- * Binding of control targets to a pool object *--------------------------------------------------------------*/ +static bool data_dev_supports_discard(struct pool_c *pt) +{ + struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); + + return q && blk_queue_discard(q); +} + +/* + * If discard_passdown was enabled verify that the data device + * supports discards. Disable discard_passdown if not. 
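/*
 * Sketch of the iterate_devices pattern both new dm-table helpers above
 * rely on: a small callback is applied to every underlying device of a
 * target, either to accumulate a count into *data or to answer a yes/no
 * question about a queue attribute.  The callback and wrapper names are
 * illustrative only.
 */
#include <linux/device-mapper.h>

static int example_device_counter(struct dm_target *ti, struct dm_dev *dev,
				  sector_t start, sector_t len, void *data)
{
	unsigned *count = data;

	(*count)++;
	return 0;
}

static unsigned example_count_devices(struct dm_target *ti)
{
	unsigned count = 0;

	if (ti->type->iterate_devices)
		ti->type->iterate_devices(ti, example_device_counter, &count);

	return count;
}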
+ */ +static void disable_passdown_if_not_supported(struct pool_c *pt) +{ + struct pool *pool = pt->pool; + struct block_device *data_bdev = pt->data_dev->bdev; + struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits; + sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT; + const char *reason = NULL; + char buf[BDEVNAME_SIZE]; + + if (!pt->adjusted_pf.discard_passdown) + return; + + if (!data_dev_supports_discard(pt)) + reason = "discard unsupported"; + + else if (data_limits->max_discard_sectors < pool->sectors_per_block) + reason = "max discard sectors smaller than a block"; + + else if (data_limits->discard_granularity > block_size) + reason = "discard granularity larger than a block"; + + else if (block_size & (data_limits->discard_granularity - 1)) + reason = "discard granularity not a factor of block size"; + + if (reason) { + DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason); + pt->adjusted_pf.discard_passdown = false; + } +} + static int bind_control_target(struct pool *pool, struct dm_target *ti) { struct pool_c *pt = ti->private; @@ -1847,31 +1889,16 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) * We want to make sure that degraded pools are never upgraded. */ enum pool_mode old_mode = pool->pf.mode; - enum pool_mode new_mode = pt->pf.mode; + enum pool_mode new_mode = pt->adjusted_pf.mode; if (old_mode > new_mode) new_mode = old_mode; pool->ti = ti; pool->low_water_blocks = pt->low_water_blocks; - pool->pf = pt->pf; - set_pool_mode(pool, new_mode); + pool->pf = pt->adjusted_pf; - /* - * If discard_passdown was enabled verify that the data device - * supports discards. Disable discard_passdown if not; otherwise - * -EOPNOTSUPP will be returned. - */ - /* FIXME: pull this out into a sep fn. */ - if (pt->pf.discard_passdown) { - struct request_queue *q = bdev_get_queue(pt->data_dev->bdev); - if (!q || !blk_queue_discard(q)) { - char buf[BDEVNAME_SIZE]; - DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.", - bdevname(pt->data_dev->bdev, buf)); - pool->pf.discard_passdown = 0; - } - } + set_pool_mode(pool, new_mode); return 0; } @@ -1889,9 +1916,9 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti) static void pool_features_init(struct pool_features *pf) { pf->mode = PM_WRITE; - pf->zero_new_blocks = 1; - pf->discard_enabled = 1; - pf->discard_passdown = 1; + pf->zero_new_blocks = true; + pf->discard_enabled = true; + pf->discard_passdown = true; } static void __pool_destroy(struct pool *pool) @@ -2119,13 +2146,13 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, argc--; if (!strcasecmp(arg_name, "skip_block_zeroing")) - pf->zero_new_blocks = 0; + pf->zero_new_blocks = false; else if (!strcasecmp(arg_name, "ignore_discard")) - pf->discard_enabled = 0; + pf->discard_enabled = false; else if (!strcasecmp(arg_name, "no_discard_passdown")) - pf->discard_passdown = 0; + pf->discard_passdown = false; else if (!strcasecmp(arg_name, "read_only")) pf->mode = PM_READ_ONLY; @@ -2259,8 +2286,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) pt->metadata_dev = metadata_dev; pt->data_dev = data_dev; pt->low_water_blocks = low_water_blocks; - pt->pf = pf; + pt->adjusted_pf = pt->requested_pf = pf; ti->num_flush_requests = 1; + /* * Only need to enable discards if the pool should pass * them down to the data device. 
The thin device's discard @@ -2268,12 +2296,14 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv) */ if (pf.discard_enabled && pf.discard_passdown) { ti->num_discard_requests = 1; + /* * Setting 'discards_supported' circumvents the normal * stacking of discard limits (this keeps the pool and * thin devices' discard limits consistent). */ ti->discards_supported = true; + ti->discard_zeroes_data_unsupported = true; } ti->private = pt; @@ -2703,7 +2733,7 @@ static int pool_status(struct dm_target *ti, status_type_t type, format_dev_t(buf2, pt->data_dev->bdev->bd_dev), (unsigned long)pool->sectors_per_block, (unsigned long long)pt->low_water_blocks); - emit_flags(&pt->pf, result, sz, maxlen); + emit_flags(&pt->requested_pf, result, sz, maxlen); break; } @@ -2732,20 +2762,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm, return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); } -static void set_discard_limits(struct pool *pool, struct queue_limits *limits) +static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits) { - /* - * FIXME: these limits may be incompatible with the pool's data device - */ + struct pool *pool = pt->pool; + struct queue_limits *data_limits; + limits->max_discard_sectors = pool->sectors_per_block; /* - * This is just a hint, and not enforced. We have to cope with - * bios that cover a block partially. A discard that spans a block - * boundary is not sent to this target. + * discard_granularity is just a hint, and not enforced. */ - limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; - limits->discard_zeroes_data = pool->pf.zero_new_blocks; + if (pt->adjusted_pf.discard_passdown) { + data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits; + limits->discard_granularity = data_limits->discard_granularity; + } else + limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; } static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) @@ -2755,15 +2786,25 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits) blk_limits_io_min(limits, 0); blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); - if (pool->pf.discard_enabled) - set_discard_limits(pool, limits); + + /* + * pt->adjusted_pf is a staging area for the actual features to use. + * They get transferred to the live pool in bind_control_target() + * called from pool_preresume(). + */ + if (!pt->adjusted_pf.discard_enabled) + return; + + disable_passdown_if_not_supported(pt); + + set_discard_limits(pt, limits); } static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -3042,19 +3083,19 @@ static int thin_iterate_devices(struct dm_target *ti, return 0; } +/* + * A thin device always inherits its queue limits from its pool. 
+ */ static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits) { struct thin_c *tc = ti->private; - struct pool *pool = tc->pool; - blk_limits_io_min(limits, 0); - blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); - set_discard_limits(pool, limits); + *limits = bdev_get_queue(tc->pool_dev->bdev)->limits; } static struct target_type thin_target = { .name = "thin", - .version = {1, 3, 0}, + .version = {1, 4, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, diff --git a/trunk/drivers/md/dm-verity.c b/trunk/drivers/md/dm-verity.c index 254d19268ad2..892ae2766aa6 100644 --- a/trunk/drivers/md/dm-verity.c +++ b/trunk/drivers/md/dm-verity.c @@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) v->hash_dev_block_bits = ffs(num) - 1; if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 || - num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) != - (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) { + (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) + >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) { ti->error = "Invalid data blocks"; r = -EINVAL; goto bad; @@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) } if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 || - num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) != - (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) { + (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) + >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) { ti->error = "Invalid hash start"; r = -EINVAL; goto bad; diff --git a/trunk/drivers/md/dm.c b/trunk/drivers/md/dm.c index 4e09b6ff5b49..67ffa391edcf 100644 --- a/trunk/drivers/md/dm.c +++ b/trunk/drivers/md/dm.c @@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped) { int r = error; struct dm_rq_target_io *tio = clone->end_io_data; - dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io; + dm_request_endio_fn rq_end_io = NULL; - if (mapped && rq_end_io) - r = rq_end_io(tio->ti, clone, error, &tio->info); + if (tio->ti) { + rq_end_io = tio->ti->type->rq_end_io; + + if (mapped && rq_end_io) + r = rq_end_io(tio->ti, clone, error, &tio->info); + } if (r <= 0) /* The target wants to complete the I/O */ @@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone, int r, requeued = 0; struct dm_rq_target_io *tio = clone->end_io_data; - /* - * Hold the md reference here for the in-flight I/O. - * We can't rely on the reference count by device opener, - * because the device may be closed during the request completion - * when all bios are completed. - * See the comment in rq_completed() too. - */ - dm_get(md); - tio->ti = ti; r = ti->type->map_rq(ti, clone, &tio->info); switch (r) { @@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone, return requeued; } +static struct request *dm_start_request(struct mapped_device *md, struct request *orig) +{ + struct request *clone; + + blk_start_request(orig); + clone = orig->special; + atomic_inc(&md->pending[rq_data_dir(clone)]); + + /* + * Hold the md reference here for the in-flight I/O. + * We can't rely on the reference count by device opener, + * because the device may be closed during the request completion + * when all bios are completed. + * See the comment in rq_completed() too. + */ + dm_get(md); + + return clone; +} + /* * q->request_fn for request-based dm. * Called with the queue lock held. 
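/*
 * Sketch of the overflow check adopted by dm-verity above: when a 64-bit
 * block count is converted to sectors by shifting, shifting the result
 * back must reproduce the original value; if it does not, the number does
 * not fit in sector_t (which can be narrower than 64 bits on some
 * configurations).  The helper name is invented.
 */
#include <linux/types.h>

static bool example_blocks_fit_in_sectors(unsigned long long blocks,
					  unsigned int block_to_sector_shift)
{
	sector_t sectors = (sector_t)(blocks << block_to_sector_shift);

	return (sectors >> block_to_sector_shift) == blocks;
}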
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q) pos = blk_rq_pos(rq); ti = dm_table_find_target(map, pos); - BUG_ON(!dm_target_is_valid(ti)); + if (!dm_target_is_valid(ti)) { + /* + * Must perform setup, that dm_done() requires, + * before calling dm_kill_unmapped_request + */ + DMERR_LIMIT("request attempted access beyond the end of device"); + clone = dm_start_request(md, rq); + dm_kill_unmapped_request(clone, -EIO); + continue; + } if (ti->type->busy && ti->type->busy(ti)) goto delay_and_out; - blk_start_request(rq); - clone = rq->special; - atomic_inc(&md->pending[rq_data_dir(clone)]); + clone = dm_start_request(md, rq); spin_unlock(q->queue_lock); if (map_request(ti, clone, md)) @@ -1684,8 +1706,6 @@ static void dm_request_fn(struct request_queue *q) blk_delay_queue(q, HZ / 10); out: dm_table_put(map); - - return; } int dm_underlying_device_busy(struct request_queue *q) @@ -2409,7 +2429,7 @@ static void dm_queue_flush(struct mapped_device *md) */ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { - struct dm_table *map = ERR_PTR(-EINVAL); + struct dm_table *live_map, *map = ERR_PTR(-EINVAL); struct queue_limits limits; int r; @@ -2419,6 +2439,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) if (!dm_suspended_md(md)) goto out; + /* + * If the new table has no data devices, retain the existing limits. + * This helps multipath with queue_if_no_path if all paths disappear, + * then new I/O is queued based on these limits, and then some paths + * reappear. + */ + if (dm_table_has_no_data_devices(table)) { + live_map = dm_get_live_table(md); + if (live_map) + limits = md->queue->limits; + dm_table_put(live_map); + } + r = dm_calculate_queue_limits(table, &limits); if (r) { map = ERR_PTR(r); diff --git a/trunk/drivers/md/dm.h b/trunk/drivers/md/dm.h index 52eef493d266..6a99fefaa743 100644 --- a/trunk/drivers/md/dm.h +++ b/trunk/drivers/md/dm.h @@ -54,6 +54,7 @@ void dm_table_event_callback(struct dm_table *t, void (*fn)(void *), void *context); struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index); struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector); +bool dm_table_has_no_data_devices(struct dm_table *table); int dm_calculate_queue_limits(struct dm_table *table, struct queue_limits *limits); void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, diff --git a/trunk/drivers/md/md.c b/trunk/drivers/md/md.c index fcd098794d37..308e87b417e0 100644 --- a/trunk/drivers/md/md.c +++ b/trunk/drivers/md/md.c @@ -1108,8 +1108,11 @@ static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor ret = 0; } rdev->sectors = rdev->sb_start; - /* Limit to 4TB as metadata cannot record more than that */ - if (rdev->sectors >= (2ULL << 32)) + /* Limit to 4TB as metadata cannot record more than that. + * (not needed for Linear and RAID0 as metadata doesn't + * record this size) + */ + if (rdev->sectors >= (2ULL << 32) && sb->level >= 1) rdev->sectors = (2ULL << 32) - 2; if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) @@ -1400,7 +1403,7 @@ super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) /* Limit to 4TB as metadata cannot record more than that. * 4TB == 2^32 KB, or 2*2^32 sectors. 
*/ - if (num_sectors >= (2ULL << 32)) + if (num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) num_sectors = (2ULL << 32) - 2; md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, rdev->sb_page); @@ -7616,6 +7619,8 @@ static int remove_and_add_spares(struct mddev *mddev) } } } + if (removed) + set_bit(MD_CHANGE_DEVS, &mddev->flags); return spares; } @@ -7629,9 +7634,11 @@ static void reap_sync_thread(struct mddev *mddev) !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { /* success...*/ /* activate any spares */ - if (mddev->pers->spare_active(mddev)) + if (mddev->pers->spare_active(mddev)) { sysfs_notify(&mddev->kobj, NULL, "degraded"); + set_bit(MD_CHANGE_DEVS, &mddev->flags); + } } if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && mddev->pers->finish_reshape) diff --git a/trunk/drivers/md/raid10.c b/trunk/drivers/md/raid10.c index de5ed6fd8806..0138a727c1f3 100644 --- a/trunk/drivers/md/raid10.c +++ b/trunk/drivers/md/raid10.c @@ -659,7 +659,11 @@ static int raid10_mergeable_bvec(struct request_queue *q, max = biovec->bv_len; if (mddev->merge_check_needed) { - struct r10bio r10_bio; + struct { + struct r10bio r10_bio; + struct r10dev devs[conf->copies]; + } on_stack; + struct r10bio *r10_bio = &on_stack.r10_bio; int s; if (conf->reshape_progress != MaxSector) { /* Cannot give any guidance during reshape */ @@ -667,18 +671,18 @@ static int raid10_mergeable_bvec(struct request_queue *q, return biovec->bv_len; return 0; } - r10_bio.sector = sector; - raid10_find_phys(conf, &r10_bio); + r10_bio->sector = sector; + raid10_find_phys(conf, r10_bio); rcu_read_lock(); for (s = 0; s < conf->copies; s++) { - int disk = r10_bio.devs[s].devnum; + int disk = r10_bio->devs[s].devnum; struct md_rdev *rdev = rcu_dereference( conf->mirrors[disk].rdev); if (rdev && !test_bit(Faulty, &rdev->flags)) { struct request_queue *q = bdev_get_queue(rdev->bdev); if (q->merge_bvec_fn) { - bvm->bi_sector = r10_bio.devs[s].addr + bvm->bi_sector = r10_bio->devs[s].addr + rdev->data_offset; bvm->bi_bdev = rdev->bdev; max = min(max, q->merge_bvec_fn( @@ -690,7 +694,7 @@ static int raid10_mergeable_bvec(struct request_queue *q, struct request_queue *q = bdev_get_queue(rdev->bdev); if (q->merge_bvec_fn) { - bvm->bi_sector = r10_bio.devs[s].addr + bvm->bi_sector = r10_bio->devs[s].addr + rdev->data_offset; bvm->bi_bdev = rdev->bdev; max = min(max, q->merge_bvec_fn( @@ -1508,14 +1512,16 @@ static int _enough(struct r10conf *conf, struct geom *geo, int ignore) do { int n = conf->copies; int cnt = 0; + int this = first; while (n--) { - if (conf->mirrors[first].rdev && - first != ignore) + if (conf->mirrors[this].rdev && + this != ignore) cnt++; - first = (first+1) % geo->raid_disks; + this = (this+1) % geo->raid_disks; } if (cnt == 0) return 0; + first = (first + geo->near_copies) % geo->raid_disks; } while (first != 0); return 1; } @@ -4414,14 +4420,18 @@ static int handle_reshape_read_error(struct mddev *mddev, { /* Use sync reads to get the blocks from somewhere else */ int sectors = r10_bio->sectors; - struct r10bio r10b; struct r10conf *conf = mddev->private; + struct { + struct r10bio r10_bio; + struct r10dev devs[conf->copies]; + } on_stack; + struct r10bio *r10b = &on_stack.r10_bio; int slot = 0; int idx = 0; struct bio_vec *bvec = r10_bio->master_bio->bi_io_vec; - r10b.sector = r10_bio->sector; - __raid10_find_phys(&conf->prev, &r10b); + r10b->sector = r10_bio->sector; + __raid10_find_phys(&conf->prev, r10b); while (sectors) { int s = sectors; @@ -4432,7 +4442,7 @@ static int 
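/*
 * Sketch of the on-stack allocation trick used by the raid10 hunks above:
 * the bio descriptor ends in a flexible per-copy array, so a bare on-stack
 * instance has no storage for those entries.  Wrapping it together with a
 * run-time-sized array (a GCC extension the kernel relies on, with the
 * tail member laid out immediately after the descriptor) reserves that
 * storage on the stack.  The types below are simplified stand-ins.
 */
struct example_dev_slot {
	int devnum;
	unsigned long long addr;
};

struct example_bio {
	unsigned long long sector;
	/* ... */
	struct example_dev_slot devs[0];	/* sized by the allocator */
};

static void example_use_on_stack(int copies)
{
	struct {
		struct example_bio bio;
		struct example_dev_slot devs[copies];	/* tail storage */
	} on_stack;
	struct example_bio *bio = &on_stack.bio;
	int i;

	bio->sector = 0;
	for (i = 0; i < copies; i++)
		bio->devs[i].devnum = i;	/* lands in on_stack.devs[] */
}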
handle_reshape_read_error(struct mddev *mddev, s = PAGE_SIZE >> 9; while (!success) { - int d = r10b.devs[slot].devnum; + int d = r10b->devs[slot].devnum; struct md_rdev *rdev = conf->mirrors[d].rdev; sector_t addr; if (rdev == NULL || @@ -4440,7 +4450,7 @@ static int handle_reshape_read_error(struct mddev *mddev, !test_bit(In_sync, &rdev->flags)) goto failed; - addr = r10b.devs[slot].addr + idx * PAGE_SIZE; + addr = r10b->devs[slot].addr + idx * PAGE_SIZE; success = sync_page_io(rdev, addr, s << 9, diff --git a/trunk/drivers/md/raid10.h b/trunk/drivers/md/raid10.h index 007c2c68dd83..1054cf602345 100644 --- a/trunk/drivers/md/raid10.h +++ b/trunk/drivers/md/raid10.h @@ -110,7 +110,7 @@ struct r10bio { * We choose the number when they are allocated. * We sometimes need an extra bio to write to the replacement. */ - struct { + struct r10dev { struct bio *bio; union { struct bio *repl_bio; /* used for resync and diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c index adda94df5eb2..0689173fd9f5 100644 --- a/trunk/drivers/md/raid5.c +++ b/trunk/drivers/md/raid5.c @@ -393,6 +393,8 @@ static int calc_degraded(struct r5conf *conf) degraded = 0; for (i = 0; i < conf->previous_raid_disks; i++) { struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(Faulty, &rdev->flags)) + rdev = rcu_dereference(conf->disks[i].replacement); if (!rdev || test_bit(Faulty, &rdev->flags)) degraded++; else if (test_bit(In_sync, &rdev->flags)) @@ -417,6 +419,8 @@ static int calc_degraded(struct r5conf *conf) degraded2 = 0; for (i = 0; i < conf->raid_disks; i++) { struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); + if (rdev && test_bit(Faulty, &rdev->flags)) + rdev = rcu_dereference(conf->disks[i].replacement); if (!rdev || test_bit(Faulty, &rdev->flags)) degraded2++; else if (test_bit(In_sync, &rdev->flags)) @@ -1587,6 +1591,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) #ifdef CONFIG_MULTICORE_RAID456 init_waitqueue_head(&nsh->ops.wait_for_ops); #endif + spin_lock_init(&nsh->stripe_lock); list_add(&nsh->lru, &newstripes); } @@ -4192,7 +4197,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); - if ((bi->bi_rw & REQ_NOIDLE) && + if ((bi->bi_rw & REQ_SYNC) && !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); release_stripe_plug(mddev, sh); diff --git a/trunk/drivers/media/dvb/siano/smsusb.c b/trunk/drivers/media/dvb/siano/smsusb.c index 664e460f247b..aac622200e99 100644 --- a/trunk/drivers/media/dvb/siano/smsusb.c +++ b/trunk/drivers/media/dvb/siano/smsusb.c @@ -481,7 +481,7 @@ static int smsusb_resume(struct usb_interface *intf) return 0; } -static const struct usb_device_id smsusb_id_table[] __devinitconst = { +static const struct usb_device_id smsusb_id_table[] = { { USB_DEVICE(0x187f, 0x0010), .driver_info = SMS1XXX_BOARD_SIANO_STELLAR }, { USB_DEVICE(0x187f, 0x0100), diff --git a/trunk/drivers/media/radio/radio-shark.c b/trunk/drivers/media/radio/radio-shark.c index d0b6bb507634..72ded29728bb 100644 --- a/trunk/drivers/media/radio/radio-shark.c +++ b/trunk/drivers/media/radio/radio-shark.c @@ -35,6 +35,11 @@ #include #include +#if defined(CONFIG_LEDS_CLASS) || \ + (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK_MODULE)) +#define SHARK_USE_LEDS 1 +#endif + /* * Version Information */ @@ -56,44 +61,18 @@ MODULE_LICENSE("GPL"); enum { BLUE_LED, 
BLUE_PULSE_LED, RED_LED, NO_LEDS }; -static void shark_led_set_blue(struct led_classdev *led_cdev, - enum led_brightness value); -static void shark_led_set_blue_pulse(struct led_classdev *led_cdev, - enum led_brightness value); -static void shark_led_set_red(struct led_classdev *led_cdev, - enum led_brightness value); - -static const struct led_classdev shark_led_templates[NO_LEDS] = { - [BLUE_LED] = { - .name = "%s:blue:", - .brightness = LED_OFF, - .max_brightness = 127, - .brightness_set = shark_led_set_blue, - }, - [BLUE_PULSE_LED] = { - .name = "%s:blue-pulse:", - .brightness = LED_OFF, - .max_brightness = 255, - .brightness_set = shark_led_set_blue_pulse, - }, - [RED_LED] = { - .name = "%s:red:", - .brightness = LED_OFF, - .max_brightness = 1, - .brightness_set = shark_led_set_red, - }, -}; - struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct snd_tea575x tea; +#ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][32]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; +#endif u8 *transfer_buffer; u32 last_val; @@ -175,20 +154,13 @@ static struct snd_tea575x_ops shark_tea_ops = { .read_val = shark_read_val, }; +#ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; - /* - * We use the v4l2_dev lock and registered bit to ensure the device - * does not get unplugged and unreffed while we're running. - */ - mutex_lock(&shark->tea.mutex); - if (!video_is_registered(&shark->tea.vd)) - goto leave; - for (i = 0; i < 3; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) continue; @@ -208,8 +180,6 @@ static void shark_led_work(struct work_struct *work) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } -leave: - mutex_unlock(&shark->tea.mutex); } static void shark_led_set_blue(struct led_classdev *led_cdev, @@ -245,19 +215,78 @@ static void shark_led_set_red(struct led_classdev *led_cdev, schedule_work(&shark->led_work); } +static const struct led_classdev shark_led_templates[NO_LEDS] = { + [BLUE_LED] = { + .name = "%s:blue:", + .brightness = LED_OFF, + .max_brightness = 127, + .brightness_set = shark_led_set_blue, + }, + [BLUE_PULSE_LED] = { + .name = "%s:blue-pulse:", + .brightness = LED_OFF, + .max_brightness = 255, + .brightness_set = shark_led_set_blue_pulse, + }, + [RED_LED] = { + .name = "%s:red:", + .brightness = LED_OFF, + .max_brightness = 1, + .brightness_set = shark_led_set_red, + }, +}; + +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + int i, retval; + + INIT_WORK(&shark->led_work, shark_led_work); + for (i = 0; i < NO_LEDS; i++) { + shark->leds[i] = shark_led_templates[i]; + snprintf(shark->led_names[i], sizeof(shark->led_names[0]), + shark->leds[i].name, shark->v4l2_dev.name); + shark->leds[i].name = shark->led_names[i]; + retval = led_classdev_register(dev, &shark->leds[i]); + if (retval) { + v4l2_err(&shark->v4l2_dev, + "couldn't register led: %s\n", + shark->led_names[i]); + return retval; + } + } + return 0; +} + +static void shark_unregister_leds(struct shark_device *shark) +{ + int i; + + for (i = 0; i < NO_LEDS; i++) + led_classdev_unregister(&shark->leds[i]); + + cancel_work_sync(&shark->led_work); +} +#else +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + v4l2_warn(&shark->v4l2_dev, + "CONFIG_LED_CLASS not enabled, LED support 
disabled\n"); + return 0; +} +static inline void shark_unregister_leds(struct shark_device *shark) { } +#endif + static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - int i; mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); snd_tea575x_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); - for (i = 0; i < NO_LEDS; i++) - led_classdev_unregister(&shark->leds[i]); + shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } @@ -266,7 +295,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - cancel_work_sync(&shark->led_work); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); @@ -276,7 +304,7 @@ static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; - int i, retval = -ENOMEM; + int retval = -ENOMEM; shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) @@ -286,17 +314,13 @@ static int usb_shark_probe(struct usb_interface *intf, if (!shark->transfer_buffer) goto err_alloc_buffer; - /* - * Work around a bug in usbhid/hid-core.c, where it leaves a dangling - * pointer in intfdata causing v4l2-device.c to not set it. Which - * results in usb_shark_disconnect() referencing the dangling pointer - * - * REMOVE (as soon as the above bug is fixed, patch submitted) - */ - usb_set_intfdata(intf, NULL); + v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); + + retval = shark_register_leds(shark, &intf->dev); + if (retval) + goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; - v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); @@ -320,32 +344,13 @@ static int usb_shark_probe(struct usb_interface *intf, goto err_init_tea; } - INIT_WORK(&shark->led_work, shark_led_work); - for (i = 0; i < NO_LEDS; i++) { - shark->leds[i] = shark_led_templates[i]; - snprintf(shark->led_names[i], sizeof(shark->led_names[0]), - shark->leds[i].name, shark->v4l2_dev.name); - shark->leds[i].name = shark->led_names[i]; - /* - * We don't fail the probe if we fail to register the leds, - * because once we've called snd_tea575x_init, the /dev/radio0 - * node may be opened from userspace holding a reference to us! - * - * Note we cannot register the leds first instead as - * shark_led_work depends on the v4l2 mutex and registered bit. 
- */ - retval = led_classdev_register(&intf->dev, &shark->leds[i]); - if (retval) - v4l2_err(&shark->v4l2_dev, - "couldn't register led: %s\n", - shark->led_names[i]); - } - return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: + shark_unregister_leds(shark); +err_reg_leds: kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); diff --git a/trunk/drivers/media/radio/radio-shark2.c b/trunk/drivers/media/radio/radio-shark2.c index b9575de3e7e8..7b4efdfaae28 100644 --- a/trunk/drivers/media/radio/radio-shark2.c +++ b/trunk/drivers/media/radio/radio-shark2.c @@ -35,6 +35,11 @@ #include #include "radio-tea5777.h" +#if defined(CONFIG_LEDS_CLASS) || \ + (defined(CONFIG_LEDS_CLASS_MODULE) && defined(CONFIG_RADIO_SHARK2_MODULE)) +#define SHARK_USE_LEDS 1 +#endif + MODULE_AUTHOR("Hans de Goede "); MODULE_DESCRIPTION("Griffin radioSHARK2, USB radio receiver driver"); MODULE_LICENSE("GPL"); @@ -43,7 +48,6 @@ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-1)"); - #define SHARK_IN_EP 0x83 #define SHARK_OUT_EP 0x05 @@ -54,36 +58,18 @@ MODULE_PARM_DESC(debug, "Debug level (0-1)"); enum { BLUE_LED, RED_LED, NO_LEDS }; -static void shark_led_set_blue(struct led_classdev *led_cdev, - enum led_brightness value); -static void shark_led_set_red(struct led_classdev *led_cdev, - enum led_brightness value); - -static const struct led_classdev shark_led_templates[NO_LEDS] = { - [BLUE_LED] = { - .name = "%s:blue:", - .brightness = LED_OFF, - .max_brightness = 127, - .brightness_set = shark_led_set_blue, - }, - [RED_LED] = { - .name = "%s:red:", - .brightness = LED_OFF, - .max_brightness = 1, - .brightness_set = shark_led_set_red, - }, -}; - struct shark_device { struct usb_device *usbdev; struct v4l2_device v4l2_dev; struct radio_tea5777 tea; +#ifdef SHARK_USE_LEDS struct work_struct led_work; struct led_classdev leds[NO_LEDS]; char led_names[NO_LEDS][32]; atomic_t brightness[NO_LEDS]; unsigned long brightness_new; +#endif u8 *transfer_buffer; }; @@ -161,18 +147,12 @@ static struct radio_tea5777_ops shark_tea_ops = { .read_reg = shark_read_reg, }; +#ifdef SHARK_USE_LEDS static void shark_led_work(struct work_struct *work) { struct shark_device *shark = container_of(work, struct shark_device, led_work); int i, res, brightness, actual_len; - /* - * We use the v4l2_dev lock and registered bit to ensure the device - * does not get unplugged and unreffed while we're running. 
- */ - mutex_lock(&shark->tea.mutex); - if (!video_is_registered(&shark->tea.vd)) - goto leave; for (i = 0; i < 2; i++) { if (!test_and_clear_bit(i, &shark->brightness_new)) @@ -191,8 +171,6 @@ static void shark_led_work(struct work_struct *work) v4l2_err(&shark->v4l2_dev, "set LED %s error: %d\n", shark->led_names[i], res); } -leave: - mutex_unlock(&shark->tea.mutex); } static void shark_led_set_blue(struct led_classdev *led_cdev, @@ -217,19 +195,72 @@ static void shark_led_set_red(struct led_classdev *led_cdev, schedule_work(&shark->led_work); } +static const struct led_classdev shark_led_templates[NO_LEDS] = { + [BLUE_LED] = { + .name = "%s:blue:", + .brightness = LED_OFF, + .max_brightness = 127, + .brightness_set = shark_led_set_blue, + }, + [RED_LED] = { + .name = "%s:red:", + .brightness = LED_OFF, + .max_brightness = 1, + .brightness_set = shark_led_set_red, + }, +}; + +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + int i, retval; + + INIT_WORK(&shark->led_work, shark_led_work); + for (i = 0; i < NO_LEDS; i++) { + shark->leds[i] = shark_led_templates[i]; + snprintf(shark->led_names[i], sizeof(shark->led_names[0]), + shark->leds[i].name, shark->v4l2_dev.name); + shark->leds[i].name = shark->led_names[i]; + retval = led_classdev_register(dev, &shark->leds[i]); + if (retval) { + v4l2_err(&shark->v4l2_dev, + "couldn't register led: %s\n", + shark->led_names[i]); + return retval; + } + } + return 0; +} + +static void shark_unregister_leds(struct shark_device *shark) +{ + int i; + + for (i = 0; i < NO_LEDS; i++) + led_classdev_unregister(&shark->leds[i]); + + cancel_work_sync(&shark->led_work); +} +#else +static int shark_register_leds(struct shark_device *shark, struct device *dev) +{ + v4l2_warn(&shark->v4l2_dev, + "CONFIG_LED_CLASS not enabled, LED support disabled\n"); + return 0; +} +static inline void shark_unregister_leds(struct shark_device *shark) { } +#endif + static void usb_shark_disconnect(struct usb_interface *intf) { struct v4l2_device *v4l2_dev = usb_get_intfdata(intf); struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - int i; mutex_lock(&shark->tea.mutex); v4l2_device_disconnect(&shark->v4l2_dev); radio_tea5777_exit(&shark->tea); mutex_unlock(&shark->tea.mutex); - for (i = 0; i < NO_LEDS; i++) - led_classdev_unregister(&shark->leds[i]); + shark_unregister_leds(shark); v4l2_device_put(&shark->v4l2_dev); } @@ -238,7 +269,6 @@ static void usb_shark_release(struct v4l2_device *v4l2_dev) { struct shark_device *shark = v4l2_dev_to_shark(v4l2_dev); - cancel_work_sync(&shark->led_work); v4l2_device_unregister(&shark->v4l2_dev); kfree(shark->transfer_buffer); kfree(shark); @@ -248,7 +278,7 @@ static int usb_shark_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct shark_device *shark; - int i, retval = -ENOMEM; + int retval = -ENOMEM; shark = kzalloc(sizeof(struct shark_device), GFP_KERNEL); if (!shark) @@ -258,17 +288,13 @@ static int usb_shark_probe(struct usb_interface *intf, if (!shark->transfer_buffer) goto err_alloc_buffer; - /* - * Work around a bug in usbhid/hid-core.c, where it leaves a dangling - * pointer in intfdata causing v4l2-device.c to not set it. 
Which - * results in usb_shark_disconnect() referencing the dangling pointer - * - * REMOVE (as soon as the above bug is fixed, patch submitted) - */ - usb_set_intfdata(intf, NULL); + v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); + + retval = shark_register_leds(shark, &intf->dev); + if (retval) + goto err_reg_leds; shark->v4l2_dev.release = usb_shark_release; - v4l2_device_set_name(&shark->v4l2_dev, DRV_NAME, &shark_instance); retval = v4l2_device_register(&intf->dev, &shark->v4l2_dev); if (retval) { v4l2_err(&shark->v4l2_dev, "couldn't register v4l2_device\n"); @@ -292,32 +318,13 @@ static int usb_shark_probe(struct usb_interface *intf, goto err_init_tea; } - INIT_WORK(&shark->led_work, shark_led_work); - for (i = 0; i < NO_LEDS; i++) { - shark->leds[i] = shark_led_templates[i]; - snprintf(shark->led_names[i], sizeof(shark->led_names[0]), - shark->leds[i].name, shark->v4l2_dev.name); - shark->leds[i].name = shark->led_names[i]; - /* - * We don't fail the probe if we fail to register the leds, - * because once we've called radio_tea5777_init, the /dev/radio0 - * node may be opened from userspace holding a reference to us! - * - * Note we cannot register the leds first instead as - * shark_led_work depends on the v4l2 mutex and registered bit. - */ - retval = led_classdev_register(&intf->dev, &shark->leds[i]); - if (retval) - v4l2_err(&shark->v4l2_dev, - "couldn't register led: %s\n", - shark->led_names[i]); - } - return 0; err_init_tea: v4l2_device_unregister(&shark->v4l2_dev); err_reg_dev: + shark_unregister_leds(shark); +err_reg_leds: kfree(shark->transfer_buffer); err_alloc_buffer: kfree(shark); diff --git a/trunk/drivers/media/radio/si470x/radio-si470x-common.c b/trunk/drivers/media/radio/si470x/radio-si470x-common.c index 9e38132afec6..9bb65e170d99 100644 --- a/trunk/drivers/media/radio/si470x/radio-si470x-common.c +++ b/trunk/drivers/media/radio/si470x/radio-si470x-common.c @@ -151,6 +151,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 0, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 87500 * 16, @@ -162,6 +163,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 1, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 76000 * 16, @@ -173,6 +175,7 @@ static const struct v4l2_frequency_band bands[] = { .index = 2, .capability = V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO | + V4L2_TUNER_CAP_FREQ_BANDS | V4L2_TUNER_CAP_HWSEEK_BOUNDED | V4L2_TUNER_CAP_HWSEEK_WRAP, .rangelow = 76000 * 16, diff --git a/trunk/drivers/media/radio/si470x/radio-si470x-i2c.c b/trunk/drivers/media/radio/si470x/radio-si470x-i2c.c index 643a6ff7c5d0..f867f04cccc9 100644 --- a/trunk/drivers/media/radio/si470x/radio-si470x-i2c.c +++ b/trunk/drivers/media/radio/si470x/radio-si470x-i2c.c @@ -225,8 +225,9 @@ int si470x_vidioc_querycap(struct file *file, void *priv, { strlcpy(capability->driver, DRIVER_NAME, sizeof(capability->driver)); strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); - capability->capabilities = V4L2_CAP_HW_FREQ_SEEK | - V4L2_CAP_TUNER | V4L2_CAP_RADIO; + capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | + V4L2_CAP_TUNER | V4L2_CAP_RADIO | 
V4L2_CAP_RDS_CAPTURE; + capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/trunk/drivers/media/radio/si470x/radio-si470x-usb.c b/trunk/drivers/media/radio/si470x/radio-si470x-usb.c index 146be4263ea1..be076f7181e7 100644 --- a/trunk/drivers/media/radio/si470x/radio-si470x-usb.c +++ b/trunk/drivers/media/radio/si470x/radio-si470x-usb.c @@ -531,7 +531,7 @@ int si470x_vidioc_querycap(struct file *file, void *priv, strlcpy(capability->card, DRIVER_CARD, sizeof(capability->card)); usb_make_path(radio->usbdev, capability->bus_info, sizeof(capability->bus_info)); - capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | + capability->device_caps = V4L2_CAP_HW_FREQ_SEEK | V4L2_CAP_READWRITE | V4L2_CAP_TUNER | V4L2_CAP_RADIO | V4L2_CAP_RDS_CAPTURE; capability->capabilities = capability->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; diff --git a/trunk/drivers/media/rc/Kconfig b/trunk/drivers/media/rc/Kconfig index 5180390be7ab..8be57634ba60 100644 --- a/trunk/drivers/media/rc/Kconfig +++ b/trunk/drivers/media/rc/Kconfig @@ -261,6 +261,7 @@ config IR_WINBOND_CIR config IR_IGUANA tristate "IguanaWorks USB IR Transceiver" + depends on USB_ARCH_HAS_HCD depends on RC_CORE select USB ---help--- diff --git a/trunk/drivers/media/video/gspca/jl2005bcd.c b/trunk/drivers/media/video/gspca/jl2005bcd.c index cf9d9fca5b84..234777116e5f 100644 --- a/trunk/drivers/media/video/gspca/jl2005bcd.c +++ b/trunk/drivers/media/video/gspca/jl2005bcd.c @@ -512,7 +512,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const __devinitdata struct usb_device_id device_table[] = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0227)}, {} }; diff --git a/trunk/drivers/media/video/gspca/spca506.c b/trunk/drivers/media/video/gspca/spca506.c index 969bb5a4cd93..bab01c86c315 100644 --- a/trunk/drivers/media/video/gspca/spca506.c +++ b/trunk/drivers/media/video/gspca/spca506.c @@ -579,7 +579,7 @@ static const struct sd_desc sd_desc = { }; /* -- module initialisation -- */ -static const struct usb_device_id device_table[] __devinitconst = { +static const struct usb_device_id device_table[] = { {USB_DEVICE(0x06e1, 0xa190)}, /*fixme: may be IntelPCCameraPro BRIDGE_SPCA505 {USB_DEVICE(0x0733, 0x0430)}, */ diff --git a/trunk/drivers/media/video/mem2mem_testdev.c b/trunk/drivers/media/video/mem2mem_testdev.c index 7efe9ad7acc7..0b91a5cd38eb 100644 --- a/trunk/drivers/media/video/mem2mem_testdev.c +++ b/trunk/drivers/media/video/mem2mem_testdev.c @@ -431,7 +431,7 @@ static int vidioc_querycap(struct file *file, void *priv, strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1); strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1); strlcpy(cap->bus_info, MEM2MEM_NAME, sizeof(cap->bus_info)); - cap->capabilities = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; + cap->device_caps = V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING; cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS; return 0; } diff --git a/trunk/drivers/media/video/mx1_camera.c b/trunk/drivers/media/video/mx1_camera.c index d2e6f82ecfac..560a65aa7038 100644 --- a/trunk/drivers/media/video/mx1_camera.c +++ b/trunk/drivers/media/video/mx1_camera.c @@ -403,7 +403,7 @@ static void mx1_camera_activate(struct mx1_camera_dev *pcdev) dev_dbg(pcdev->icd->parent, "Activate device\n"); - clk_enable(pcdev->clk); + clk_prepare_enable(pcdev->clk); /* enable CSI before doing anything else */ __raw_writel(csicr1, pcdev->base + CSICR1); @@ -422,7 +422,7 @@ static void 
mx1_camera_deactivate(struct mx1_camera_dev *pcdev) /* Disable all CSI interface */ __raw_writel(0x00, pcdev->base + CSICR1); - clk_disable(pcdev->clk); + clk_disable_unprepare(pcdev->clk); } /* diff --git a/trunk/drivers/media/video/mx2_camera.c b/trunk/drivers/media/video/mx2_camera.c index 637bde8aca28..ac175406e582 100644 --- a/trunk/drivers/media/video/mx2_camera.c +++ b/trunk/drivers/media/video/mx2_camera.c @@ -272,7 +272,7 @@ struct mx2_camera_dev { struct device *dev; struct soc_camera_host soc_host; struct soc_camera_device *icd; - struct clk *clk_csi, *clk_emma; + struct clk *clk_csi, *clk_emma_ahb, *clk_emma_ipg; unsigned int irq_csi, irq_emma; void __iomem *base_csi, *base_emma; @@ -407,7 +407,7 @@ static void mx2_camera_deactivate(struct mx2_camera_dev *pcdev) { unsigned long flags; - clk_disable(pcdev->clk_csi); + clk_disable_unprepare(pcdev->clk_csi); writel(0, pcdev->base_csi + CSICR1); if (cpu_is_mx27()) { writel(0, pcdev->base_emma + PRP_CNTL); @@ -435,7 +435,7 @@ static int mx2_camera_add_device(struct soc_camera_device *icd) if (pcdev->icd) return -EBUSY; - ret = clk_enable(pcdev->clk_csi); + ret = clk_prepare_enable(pcdev->clk_csi); if (ret < 0) return ret; @@ -1633,23 +1633,34 @@ static int __devinit mx27_camera_emma_init(struct mx2_camera_dev *pcdev) goto exit_iounmap; } - pcdev->clk_emma = clk_get(NULL, "emma"); - if (IS_ERR(pcdev->clk_emma)) { - err = PTR_ERR(pcdev->clk_emma); + pcdev->clk_emma_ipg = clk_get(pcdev->dev, "emma-ipg"); + if (IS_ERR(pcdev->clk_emma_ipg)) { + err = PTR_ERR(pcdev->clk_emma_ipg); goto exit_free_irq; } - clk_enable(pcdev->clk_emma); + clk_prepare_enable(pcdev->clk_emma_ipg); + + pcdev->clk_emma_ahb = clk_get(pcdev->dev, "emma-ahb"); + if (IS_ERR(pcdev->clk_emma_ahb)) { + err = PTR_ERR(pcdev->clk_emma_ahb); + goto exit_clk_emma_ipg_put; + } + + clk_prepare_enable(pcdev->clk_emma_ahb); err = mx27_camera_emma_prp_reset(pcdev); if (err) - goto exit_clk_emma_put; + goto exit_clk_emma_ahb_put; return err; -exit_clk_emma_put: - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); +exit_clk_emma_ahb_put: + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); +exit_clk_emma_ipg_put: + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); exit_free_irq: free_irq(pcdev->irq_emma, pcdev); exit_iounmap: @@ -1685,7 +1696,7 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev) goto exit; } - pcdev->clk_csi = clk_get(&pdev->dev, NULL); + pcdev->clk_csi = clk_get(&pdev->dev, "ahb"); if (IS_ERR(pcdev->clk_csi)) { dev_err(&pdev->dev, "Could not get csi clock\n"); err = PTR_ERR(pcdev->clk_csi); @@ -1785,8 +1796,10 @@ static int __devinit mx2_camera_probe(struct platform_device *pdev) eallocctx: if (cpu_is_mx27()) { free_irq(pcdev->irq_emma, pcdev); - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); iounmap(pcdev->base_emma); release_mem_region(pcdev->res_emma->start, resource_size(pcdev->res_emma)); } @@ -1825,8 +1838,10 @@ static int __devexit mx2_camera_remove(struct platform_device *pdev) iounmap(pcdev->base_csi); if (cpu_is_mx27()) { - clk_disable(pcdev->clk_emma); - clk_put(pcdev->clk_emma); + clk_disable_unprepare(pcdev->clk_emma_ipg); + clk_put(pcdev->clk_emma_ipg); + clk_disable_unprepare(pcdev->clk_emma_ahb); + clk_put(pcdev->clk_emma_ahb); iounmap(pcdev->base_emma); res = pcdev->res_emma; release_mem_region(res->start, 
resource_size(res)); diff --git a/trunk/drivers/media/video/mx3_camera.c b/trunk/drivers/media/video/mx3_camera.c index f13643d31353..af2297dd49c8 100644 --- a/trunk/drivers/media/video/mx3_camera.c +++ b/trunk/drivers/media/video/mx3_camera.c @@ -61,15 +61,9 @@ #define MAX_VIDEO_MEM 16 -enum csi_buffer_state { - CSI_BUF_NEEDS_INIT, - CSI_BUF_PREPARED, -}; - struct mx3_camera_buffer { /* common v4l buffer stuff -- must be first */ struct vb2_buffer vb; - enum csi_buffer_state state; struct list_head queue; /* One descriptot per scatterlist (per frame) */ @@ -285,7 +279,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) goto error; } - if (buf->state == CSI_BUF_NEEDS_INIT) { + if (!buf->txd) { sg_dma_address(sg) = vb2_dma_contig_plane_dma_addr(vb, 0); sg_dma_len(sg) = new_size; @@ -298,7 +292,6 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb) txd->callback_param = txd; txd->callback = mx3_cam_dma_done; - buf->state = CSI_BUF_PREPARED; buf->txd = txd; } else { txd = buf->txd; @@ -385,7 +378,6 @@ static void mx3_videobuf_release(struct vb2_buffer *vb) /* Doesn't hurt also if the list is empty */ list_del_init(&buf->queue); - buf->state = CSI_BUF_NEEDS_INIT; if (txd) { buf->txd = NULL; @@ -405,13 +397,13 @@ static int mx3_videobuf_init(struct vb2_buffer *vb) struct mx3_camera_dev *mx3_cam = ici->priv; struct mx3_camera_buffer *buf = to_mx3_vb(vb); - /* This is for locking debugging only */ - INIT_LIST_HEAD(&buf->queue); - sg_init_table(&buf->sg, 1); + if (!buf->txd) { + /* This is for locking debugging only */ + INIT_LIST_HEAD(&buf->queue); + sg_init_table(&buf->sg, 1); - buf->state = CSI_BUF_NEEDS_INIT; - - mx3_cam->buf_total += vb2_plane_size(vb, 0); + mx3_cam->buf_total += vb2_plane_size(vb, 0); + } return 0; } diff --git a/trunk/drivers/media/video/soc_camera.c b/trunk/drivers/media/video/soc_camera.c index b03ffecb7438..1bde255e45df 100644 --- a/trunk/drivers/media/video/soc_camera.c +++ b/trunk/drivers/media/video/soc_camera.c @@ -171,7 +171,8 @@ static int soc_camera_try_fmt(struct soc_camera_device *icd, dev_dbg(icd->pdev, "TRY_FMT(%c%c%c%c, %ux%u)\n", pixfmtstr(pix->pixelformat), pix->width, pix->height); - if (!(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { + if (pix->pixelformat != V4L2_PIX_FMT_JPEG && + !(ici->capabilities & SOCAM_HOST_CAP_STRIDE)) { pix->bytesperline = 0; pix->sizeimage = 0; } diff --git a/trunk/drivers/media/video/soc_mediabus.c b/trunk/drivers/media/video/soc_mediabus.c index 89dce097a827..a397812635d6 100644 --- a/trunk/drivers/media/video/soc_mediabus.c +++ b/trunk/drivers/media/video/soc_mediabus.c @@ -378,6 +378,9 @@ EXPORT_SYMBOL(soc_mbus_samples_per_pixel); s32 soc_mbus_bytes_per_line(u32 width, const struct soc_mbus_pixelfmt *mf) { + if (mf->fourcc == V4L2_PIX_FMT_JPEG) + return 0; + if (mf->layout != SOC_MBUS_LAYOUT_PACKED) return width * mf->bits_per_sample / 8; @@ -400,6 +403,9 @@ EXPORT_SYMBOL(soc_mbus_bytes_per_line); s32 soc_mbus_image_size(const struct soc_mbus_pixelfmt *mf, u32 bytes_per_line, u32 height) { + if (mf->fourcc == V4L2_PIX_FMT_JPEG) + return 0; + if (mf->layout == SOC_MBUS_LAYOUT_PACKED) return bytes_per_line * height; diff --git a/trunk/drivers/media/video/uvc/uvc_queue.c b/trunk/drivers/media/video/uvc/uvc_queue.c index 9288fbd5001b..5577381b5bf0 100644 --- a/trunk/drivers/media/video/uvc/uvc_queue.c +++ b/trunk/drivers/media/video/uvc/uvc_queue.c @@ -338,6 +338,7 @@ struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) { buf->error = 0; 
buf->state = UVC_BUF_STATE_QUEUED; + buf->bytesused = 0; vb2_set_plane_payload(&buf->buf, 0, 0); return buf; } diff --git a/trunk/drivers/media/video/v4l2-ioctl.c b/trunk/drivers/media/video/v4l2-ioctl.c index c3b7b5f59b32..6bc47fc82fe2 100644 --- a/trunk/drivers/media/video/v4l2-ioctl.c +++ b/trunk/drivers/media/video/v4l2-ioctl.c @@ -402,8 +402,10 @@ static void v4l_print_hw_freq_seek(const void *arg, bool write_only) { const struct v4l2_hw_freq_seek *p = arg; - pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u\n", - p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing); + pr_cont("tuner=%u, type=%u, seek_upward=%u, wrap_around=%u, spacing=%u, " + "rangelow=%u, rangehigh=%u\n", + p->tuner, p->type, p->seek_upward, p->wrap_around, p->spacing, + p->rangelow, p->rangehigh); } static void v4l_print_requestbuffers(const void *arg, bool write_only) @@ -1853,6 +1855,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, .type = type, }; + if (p->index) + return -EINVAL; err = ops->vidioc_g_tuner(file, fh, &t); if (err) return err; @@ -1870,6 +1874,8 @@ static int v4l_enum_freq_bands(const struct v4l2_ioctl_ops *ops, if (type != V4L2_TUNER_RADIO) return -EINVAL; + if (p->index) + return -EINVAL; err = ops->vidioc_g_modulator(file, fh, &m); if (err) return err; diff --git a/trunk/drivers/mfd/88pm800.c b/trunk/drivers/mfd/88pm800.c index b67a3018b136..ce229ea933d1 100644 --- a/trunk/drivers/mfd/88pm800.c +++ b/trunk/drivers/mfd/88pm800.c @@ -470,7 +470,8 @@ static int __devinit device_800_init(struct pm80x_chip *chip, ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0], - ARRAY_SIZE(onkey_devs), &onkey_resources[0], 0); + ARRAY_SIZE(onkey_devs), &onkey_resources[0], 0, + NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add onkey subdev\n"); goto out_dev; @@ -481,7 +482,7 @@ static int __devinit device_800_init(struct pm80x_chip *chip, rtc_devs[0].platform_data = pdata->rtc; rtc_devs[0].pdata_size = sizeof(struct pm80x_rtc_pdata); ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0], - ARRAY_SIZE(rtc_devs), NULL, 0); + ARRAY_SIZE(rtc_devs), NULL, 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add rtc subdev\n"); goto out_dev; diff --git a/trunk/drivers/mfd/88pm805.c b/trunk/drivers/mfd/88pm805.c index 6146583589f6..c20a31136f04 100644 --- a/trunk/drivers/mfd/88pm805.c +++ b/trunk/drivers/mfd/88pm805.c @@ -216,7 +216,8 @@ static int __devinit device_805_init(struct pm80x_chip *chip) } ret = mfd_add_devices(chip->dev, 0, &codec_devs[0], - ARRAY_SIZE(codec_devs), &codec_resources[0], 0); + ARRAY_SIZE(codec_devs), &codec_resources[0], 0, + NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add codec subdev\n"); goto out_codec; diff --git a/trunk/drivers/mfd/88pm860x-core.c b/trunk/drivers/mfd/88pm860x-core.c index d09918cf1b15..b73f033b2c60 100644 --- a/trunk/drivers/mfd/88pm860x-core.c +++ b/trunk/drivers/mfd/88pm860x-core.c @@ -637,7 +637,7 @@ static void __devinit device_bk_init(struct pm860x_chip *chip, bk_devs[i].resources = &bk_resources[j]; ret = mfd_add_devices(chip->dev, 0, &bk_devs[i], 1, - &bk_resources[j], 0); + &bk_resources[j], 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add " "backlight subdev\n"); @@ -672,7 +672,7 @@ static void __devinit device_led_init(struct pm860x_chip *chip, led_devs[i].resources = &led_resources[j], ret = mfd_add_devices(chip->dev, 0, &led_devs[i], 1, - &led_resources[j], 0); + &led_resources[j], 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add " "led subdev\n"); @@ -709,7 +709,7 @@ static 
void __devinit device_regulator_init(struct pm860x_chip *chip, regulator_devs[i].resources = ®ulator_resources[seq]; ret = mfd_add_devices(chip->dev, 0, ®ulator_devs[i], 1, - ®ulator_resources[seq], 0); + ®ulator_resources[seq], 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add regulator subdev\n"); goto out; @@ -733,7 +733,7 @@ static void __devinit device_rtc_init(struct pm860x_chip *chip, rtc_devs[0].resources = &rtc_resources[0]; ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0], ARRAY_SIZE(rtc_devs), &rtc_resources[0], - chip->irq_base); + chip->irq_base, NULL); if (ret < 0) dev_err(chip->dev, "Failed to add rtc subdev\n"); } @@ -752,7 +752,7 @@ static void __devinit device_touch_init(struct pm860x_chip *chip, touch_devs[0].resources = &touch_resources[0]; ret = mfd_add_devices(chip->dev, 0, &touch_devs[0], ARRAY_SIZE(touch_devs), &touch_resources[0], - chip->irq_base); + chip->irq_base, NULL); if (ret < 0) dev_err(chip->dev, "Failed to add touch subdev\n"); } @@ -770,7 +770,7 @@ static void __devinit device_power_init(struct pm860x_chip *chip, power_devs[0].num_resources = ARRAY_SIZE(battery_resources); power_devs[0].resources = &battery_resources[0], ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1, - &battery_resources[0], chip->irq_base); + &battery_resources[0], chip->irq_base, NULL); if (ret < 0) dev_err(chip->dev, "Failed to add battery subdev\n"); @@ -779,7 +779,7 @@ static void __devinit device_power_init(struct pm860x_chip *chip, power_devs[1].num_resources = ARRAY_SIZE(charger_resources); power_devs[1].resources = &charger_resources[0], ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1, - &charger_resources[0], chip->irq_base); + &charger_resources[0], chip->irq_base, NULL); if (ret < 0) dev_err(chip->dev, "Failed to add charger subdev\n"); @@ -788,7 +788,7 @@ static void __devinit device_power_init(struct pm860x_chip *chip, power_devs[2].num_resources = ARRAY_SIZE(preg_resources); power_devs[2].resources = &preg_resources[0], ret = mfd_add_devices(chip->dev, 0, &power_devs[2], 1, - &preg_resources[0], chip->irq_base); + &preg_resources[0], chip->irq_base, NULL); if (ret < 0) dev_err(chip->dev, "Failed to add preg subdev\n"); } @@ -802,7 +802,7 @@ static void __devinit device_onkey_init(struct pm860x_chip *chip, onkey_devs[0].resources = &onkey_resources[0], ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0], ARRAY_SIZE(onkey_devs), &onkey_resources[0], - chip->irq_base); + chip->irq_base, NULL); if (ret < 0) dev_err(chip->dev, "Failed to add onkey subdev\n"); } @@ -815,7 +815,8 @@ static void __devinit device_codec_init(struct pm860x_chip *chip, codec_devs[0].num_resources = ARRAY_SIZE(codec_resources); codec_devs[0].resources = &codec_resources[0], ret = mfd_add_devices(chip->dev, 0, &codec_devs[0], - ARRAY_SIZE(codec_devs), &codec_resources[0], 0); + ARRAY_SIZE(codec_devs), &codec_resources[0], 0, + NULL); if (ret < 0) dev_err(chip->dev, "Failed to add codec subdev\n"); } diff --git a/trunk/drivers/mfd/Kconfig b/trunk/drivers/mfd/Kconfig index d1facef28a60..b1a146205c08 100644 --- a/trunk/drivers/mfd/Kconfig +++ b/trunk/drivers/mfd/Kconfig @@ -395,7 +395,8 @@ config MFD_TC6387XB config MFD_TC6393XB bool "Support Toshiba TC6393XB" - depends on GPIOLIB && ARM && HAVE_CLK + depends on ARM && HAVE_CLK + select GPIOLIB select MFD_CORE select MFD_TMIO help diff --git a/trunk/drivers/mfd/aat2870-core.c b/trunk/drivers/mfd/aat2870-core.c index 44a3fdbadef4..f1beb4971f87 100644 --- a/trunk/drivers/mfd/aat2870-core.c +++ b/trunk/drivers/mfd/aat2870-core.c @@ 
-424,7 +424,7 @@ static int aat2870_i2c_probe(struct i2c_client *client, } ret = mfd_add_devices(aat2870->dev, 0, aat2870_devs, - ARRAY_SIZE(aat2870_devs), NULL, 0); + ARRAY_SIZE(aat2870_devs), NULL, 0, NULL); if (ret != 0) { dev_err(aat2870->dev, "Failed to add subdev: %d\n", ret); goto out_disable; diff --git a/trunk/drivers/mfd/ab3100-core.c b/trunk/drivers/mfd/ab3100-core.c index 78fca2902c8d..01781ae5d0d7 100644 --- a/trunk/drivers/mfd/ab3100-core.c +++ b/trunk/drivers/mfd/ab3100-core.c @@ -946,7 +946,7 @@ static int __devinit ab3100_probe(struct i2c_client *client, } err = mfd_add_devices(&client->dev, 0, ab3100_devs, - ARRAY_SIZE(ab3100_devs), NULL, 0); + ARRAY_SIZE(ab3100_devs), NULL, 0, NULL); ab3100_setup_debugfs(ab3100); diff --git a/trunk/drivers/mfd/ab8500-core.c b/trunk/drivers/mfd/ab8500-core.c index 626b4ecaf647..47adf800024e 100644 --- a/trunk/drivers/mfd/ab8500-core.c +++ b/trunk/drivers/mfd/ab8500-core.c @@ -1418,25 +1418,25 @@ static int __devinit ab8500_probe(struct platform_device *pdev) ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs, ARRAY_SIZE(abx500_common_devs), NULL, - ab8500->irq_base); + ab8500->irq_base, ab8500->domain); if (ret) goto out_freeirq; if (is_ab9540(ab8500)) ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs, ARRAY_SIZE(ab9540_devs), NULL, - ab8500->irq_base); + ab8500->irq_base, ab8500->domain); else ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs, ARRAY_SIZE(ab8500_devs), NULL, - ab8500->irq_base); + ab8500->irq_base, ab8500->domain); if (ret) goto out_freeirq; if (is_ab9540(ab8500) || is_ab8505(ab8500)) ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs, ARRAY_SIZE(ab9540_ab8505_devs), NULL, - ab8500->irq_base); + ab8500->irq_base, ab8500->domain); if (ret) goto out_freeirq; @@ -1444,7 +1444,7 @@ static int __devinit ab8500_probe(struct platform_device *pdev) /* Add battery management devices */ ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs, ARRAY_SIZE(ab8500_bm_devs), NULL, - ab8500->irq_base); + ab8500->irq_base, ab8500->domain); if (ret) dev_err(ab8500->dev, "error adding bm devices\n"); } diff --git a/trunk/drivers/mfd/ab8500-gpadc.c b/trunk/drivers/mfd/ab8500-gpadc.c index 866f95960b4b..29d72a259c85 100644 --- a/trunk/drivers/mfd/ab8500-gpadc.c +++ b/trunk/drivers/mfd/ab8500-gpadc.c @@ -342,7 +342,7 @@ int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel) /* * Delay might be needed for ABB8500 cut 3.0, if not, remove - * when hardware will be availible + * when hardware will be available */ msleep(1); break; diff --git a/trunk/drivers/mfd/arizona-core.c b/trunk/drivers/mfd/arizona-core.c index c7983e862549..1b48f2094806 100644 --- a/trunk/drivers/mfd/arizona-core.c +++ b/trunk/drivers/mfd/arizona-core.c @@ -316,7 +316,7 @@ int __devinit arizona_dev_init(struct arizona *arizona) } ret = mfd_add_devices(arizona->dev, -1, early_devs, - ARRAY_SIZE(early_devs), NULL, 0); + ARRAY_SIZE(early_devs), NULL, 0, NULL); if (ret != 0) { dev_err(dev, "Failed to add early children: %d\n", ret); return ret; @@ -516,11 +516,11 @@ int __devinit arizona_dev_init(struct arizona *arizona) switch (arizona->type) { case WM5102: ret = mfd_add_devices(arizona->dev, -1, wm5102_devs, - ARRAY_SIZE(wm5102_devs), NULL, 0); + ARRAY_SIZE(wm5102_devs), NULL, 0, NULL); break; case WM5110: ret = mfd_add_devices(arizona->dev, -1, wm5110_devs, - ARRAY_SIZE(wm5102_devs), NULL, 0); + ARRAY_SIZE(wm5102_devs), NULL, 0, NULL); break; } diff --git a/trunk/drivers/mfd/asic3.c b/trunk/drivers/mfd/asic3.c index 383421bf5760..62f0883a7630 100644 --- 
a/trunk/drivers/mfd/asic3.c +++ b/trunk/drivers/mfd/asic3.c @@ -913,18 +913,19 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, if (pdata->clock_rate) { ds1wm_pdata.clock_rate = pdata->clock_rate; ret = mfd_add_devices(&pdev->dev, pdev->id, - &asic3_cell_ds1wm, 1, mem, asic->irq_base); + &asic3_cell_ds1wm, 1, mem, asic->irq_base, NULL); if (ret < 0) goto out; } if (mem_sdio && (irq >= 0)) { ret = mfd_add_devices(&pdev->dev, pdev->id, - &asic3_cell_mmc, 1, mem_sdio, irq); + &asic3_cell_mmc, 1, mem_sdio, irq, NULL); if (ret < 0) goto out; } + ret = 0; if (pdata->leds) { int i; @@ -933,7 +934,7 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, asic3_cell_leds[i].pdata_size = sizeof(pdata->leds[i]); } ret = mfd_add_devices(&pdev->dev, 0, - asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0); + asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0, NULL); } out: diff --git a/trunk/drivers/mfd/cs5535-mfd.c b/trunk/drivers/mfd/cs5535-mfd.c index 3419e726de47..2b282133c725 100644 --- a/trunk/drivers/mfd/cs5535-mfd.c +++ b/trunk/drivers/mfd/cs5535-mfd.c @@ -149,7 +149,7 @@ static int __devinit cs5535_mfd_probe(struct pci_dev *pdev, } err = mfd_add_devices(&pdev->dev, -1, cs5535_mfd_cells, - ARRAY_SIZE(cs5535_mfd_cells), NULL, 0); + ARRAY_SIZE(cs5535_mfd_cells), NULL, 0, NULL); if (err) { dev_err(&pdev->dev, "MFD add devices failed: %d\n", err); goto err_disable; diff --git a/trunk/drivers/mfd/da9052-core.c b/trunk/drivers/mfd/da9052-core.c index 2544910e1fd6..a0a62b24621b 100644 --- a/trunk/drivers/mfd/da9052-core.c +++ b/trunk/drivers/mfd/da9052-core.c @@ -803,7 +803,7 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id) dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret); ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info, - ARRAY_SIZE(da9052_subdev_info), NULL, 0); + ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL); if (ret) goto err; diff --git a/trunk/drivers/mfd/davinci_voicecodec.c b/trunk/drivers/mfd/davinci_voicecodec.c index 4e2af2cb2d26..45e83a68641b 100644 --- a/trunk/drivers/mfd/davinci_voicecodec.c +++ b/trunk/drivers/mfd/davinci_voicecodec.c @@ -129,7 +129,7 @@ static int __init davinci_vc_probe(struct platform_device *pdev) cell->pdata_size = sizeof(*davinci_vc); ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, - DAVINCI_VC_CELLS, NULL, 0); + DAVINCI_VC_CELLS, NULL, 0, NULL); if (ret != 0) { dev_err(&pdev->dev, "fail to register client devices\n"); goto fail4; diff --git a/trunk/drivers/mfd/db8500-prcmu.c b/trunk/drivers/mfd/db8500-prcmu.c index 7040a0081130..0e63cdd9b52a 100644 --- a/trunk/drivers/mfd/db8500-prcmu.c +++ b/trunk/drivers/mfd/db8500-prcmu.c @@ -3010,7 +3010,7 @@ static int __devinit db8500_prcmu_probe(struct platform_device *pdev) prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs, - ARRAY_SIZE(db8500_prcmu_devs), NULL, 0); + ARRAY_SIZE(db8500_prcmu_devs), NULL, 0, NULL); if (err) { pr_err("prcmu: Failed to add subdevices\n"); return err; diff --git a/trunk/drivers/mfd/ezx-pcap.c b/trunk/drivers/mfd/ezx-pcap.c index 43a76c41cfcc..db662e2dcfa5 100644 --- a/trunk/drivers/mfd/ezx-pcap.c +++ b/trunk/drivers/mfd/ezx-pcap.c @@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work) } local_irq_enable(); ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr); - } while (gpio_get_value(irq_to_gpio(pcap->spi->irq))); + } while (gpio_get_value(pdata->gpio)); } static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc) diff --git 
a/trunk/drivers/mfd/htc-pasic3.c b/trunk/drivers/mfd/htc-pasic3.c index 04c7093d6499..9e5453d21a68 100644 --- a/trunk/drivers/mfd/htc-pasic3.c +++ b/trunk/drivers/mfd/htc-pasic3.c @@ -168,7 +168,7 @@ static int __init pasic3_probe(struct platform_device *pdev) /* the first 5 PASIC3 registers control the DS1WM */ ds1wm_resources[0].end = (5 << asic->bus_shift) - 1; ret = mfd_add_devices(&pdev->dev, pdev->id, - &ds1wm_cell, 1, r, irq); + &ds1wm_cell, 1, r, irq, NULL); if (ret < 0) dev_warn(dev, "failed to register DS1WM\n"); } @@ -176,7 +176,8 @@ static int __init pasic3_probe(struct platform_device *pdev) if (pdata && pdata->led_pdata) { led_cell.platform_data = pdata->led_pdata; led_cell.pdata_size = sizeof(struct pasic3_leds_machinfo); - ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0); + ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, + 0, NULL); if (ret < 0) dev_warn(dev, "failed to register LED device\n"); } diff --git a/trunk/drivers/mfd/intel_msic.c b/trunk/drivers/mfd/intel_msic.c index 59df5584cb58..266bdc5bd96d 100644 --- a/trunk/drivers/mfd/intel_msic.c +++ b/trunk/drivers/mfd/intel_msic.c @@ -344,13 +344,13 @@ static int __devinit intel_msic_init_devices(struct intel_msic *msic) continue; ret = mfd_add_devices(&pdev->dev, -1, &msic_devs[i], 1, NULL, - pdata->irq[i]); + pdata->irq[i], NULL); if (ret) goto fail; } ret = mfd_add_devices(&pdev->dev, 0, msic_other_devs, - ARRAY_SIZE(msic_other_devs), NULL, 0); + ARRAY_SIZE(msic_other_devs), NULL, 0, NULL); if (ret) goto fail; diff --git a/trunk/drivers/mfd/janz-cmodio.c b/trunk/drivers/mfd/janz-cmodio.c index 2ea99989551a..965c4801df8a 100644 --- a/trunk/drivers/mfd/janz-cmodio.c +++ b/trunk/drivers/mfd/janz-cmodio.c @@ -147,7 +147,7 @@ static int __devinit cmodio_probe_submodules(struct cmodio_device *priv) } return mfd_add_devices(&pdev->dev, 0, priv->cells, - num_probed, NULL, pdev->irq); + num_probed, NULL, pdev->irq, NULL); } /* diff --git a/trunk/drivers/mfd/jz4740-adc.c b/trunk/drivers/mfd/jz4740-adc.c index 87662a17dec6..c6b6d7dda517 100644 --- a/trunk/drivers/mfd/jz4740-adc.c +++ b/trunk/drivers/mfd/jz4740-adc.c @@ -287,7 +287,8 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) writeb(0xff, adc->base + JZ_REG_ADC_CTRL); ret = mfd_add_devices(&pdev->dev, 0, jz4740_adc_cells, - ARRAY_SIZE(jz4740_adc_cells), mem_base, irq_base); + ARRAY_SIZE(jz4740_adc_cells), mem_base, + irq_base, NULL); if (ret < 0) goto err_clk_put; diff --git a/trunk/drivers/mfd/lm3533-core.c b/trunk/drivers/mfd/lm3533-core.c index 0b2879b87fd9..24212f45b201 100644 --- a/trunk/drivers/mfd/lm3533-core.c +++ b/trunk/drivers/mfd/lm3533-core.c @@ -393,7 +393,8 @@ static int __devinit lm3533_device_als_init(struct lm3533 *lm3533) lm3533_als_devs[0].platform_data = pdata->als; lm3533_als_devs[0].pdata_size = sizeof(*pdata->als); - ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, 0); + ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, + 0, NULL); if (ret) { dev_err(lm3533->dev, "failed to add ALS device\n"); return ret; @@ -422,7 +423,7 @@ static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533) } ret = mfd_add_devices(lm3533->dev, 0, lm3533_bl_devs, - pdata->num_backlights, NULL, 0); + pdata->num_backlights, NULL, 0, NULL); if (ret) { dev_err(lm3533->dev, "failed to add backlight devices\n"); return ret; @@ -451,7 +452,7 @@ static int __devinit lm3533_device_led_init(struct lm3533 *lm3533) } ret = mfd_add_devices(lm3533->dev, 0, lm3533_led_devs, - pdata->num_leds, NULL, 0); + 
pdata->num_leds, NULL, 0, NULL); if (ret) { dev_err(lm3533->dev, "failed to add LED devices\n"); return ret; diff --git a/trunk/drivers/mfd/lpc_ich.c b/trunk/drivers/mfd/lpc_ich.c index 027cc8f86132..092ad4b44b6d 100644 --- a/trunk/drivers/mfd/lpc_ich.c +++ b/trunk/drivers/mfd/lpc_ich.c @@ -750,7 +750,7 @@ static int __devinit lpc_ich_init_gpio(struct pci_dev *dev, lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id); ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO], - 1, NULL, 0); + 1, NULL, 0, NULL); gpio_done: if (acpi_conflict) @@ -765,7 +765,6 @@ static int __devinit lpc_ich_init_wdt(struct pci_dev *dev, u32 base_addr_cfg; u32 base_addr; int ret; - bool acpi_conflict = false; struct resource *res; /* Setup power management base register */ @@ -780,20 +779,11 @@ static int __devinit lpc_ich_init_wdt(struct pci_dev *dev, res = wdt_io_res(ICH_RES_IO_TCO); res->start = base_addr + ACPIBASE_TCO_OFF; res->end = base_addr + ACPIBASE_TCO_END; - ret = acpi_check_resource_conflict(res); - if (ret) { - acpi_conflict = true; - goto wdt_done; - } res = wdt_io_res(ICH_RES_IO_SMI); res->start = base_addr + ACPIBASE_SMI_OFF; res->end = base_addr + ACPIBASE_SMI_END; - ret = acpi_check_resource_conflict(res); - if (ret) { - acpi_conflict = true; - goto wdt_done; - } + lpc_ich_enable_acpi_space(dev); /* @@ -813,21 +803,13 @@ static int __devinit lpc_ich_init_wdt(struct pci_dev *dev, res = wdt_mem_res(ICH_RES_MEM_GCS); res->start = base_addr + ACPIBASE_GCS_OFF; res->end = base_addr + ACPIBASE_GCS_END; - ret = acpi_check_resource_conflict(res); - if (ret) { - acpi_conflict = true; - goto wdt_done; - } } lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id); ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT], - 1, NULL, 0); + 1, NULL, 0, NULL); wdt_done: - if (acpi_conflict) - pr_warn("Resource conflict(s) found affecting %s\n", - lpc_ich_cells[LPC_WDT].name); return ret; } diff --git a/trunk/drivers/mfd/lpc_sch.c b/trunk/drivers/mfd/lpc_sch.c index 9f20abc5e393..f6b9c5c96b24 100644 --- a/trunk/drivers/mfd/lpc_sch.c +++ b/trunk/drivers/mfd/lpc_sch.c @@ -127,7 +127,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev, lpc_sch_cells[i].id = id->device; ret = mfd_add_devices(&dev->dev, 0, - lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, 0); + lpc_sch_cells, ARRAY_SIZE(lpc_sch_cells), NULL, + 0, NULL); if (ret) goto out_dev; @@ -153,7 +154,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev, tunnelcreek_cells[i].id = id->device; ret = mfd_add_devices(&dev->dev, 0, tunnelcreek_cells, - ARRAY_SIZE(tunnelcreek_cells), NULL, 0); + ARRAY_SIZE(tunnelcreek_cells), NULL, + 0, NULL); } return ret; diff --git a/trunk/drivers/mfd/max77686.c b/trunk/drivers/mfd/max77686.c index c03e12b51924..d9e24c849a00 100644 --- a/trunk/drivers/mfd/max77686.c +++ b/trunk/drivers/mfd/max77686.c @@ -126,7 +126,7 @@ static int max77686_i2c_probe(struct i2c_client *i2c, max77686_irq_init(max77686); ret = mfd_add_devices(max77686->dev, -1, max77686_devs, - ARRAY_SIZE(max77686_devs), NULL, 0); + ARRAY_SIZE(max77686_devs), NULL, 0, NULL); if (ret < 0) goto err_mfd; diff --git a/trunk/drivers/mfd/max77693-irq.c b/trunk/drivers/mfd/max77693-irq.c index 2b403569e0a6..1029d018c739 100644 --- a/trunk/drivers/mfd/max77693-irq.c +++ b/trunk/drivers/mfd/max77693-irq.c @@ -137,6 +137,9 @@ static void max77693_irq_mask(struct irq_data *data) const struct max77693_irq_data *irq_data = irq_to_max77693_irq(max77693, data->irq); + if (irq_data->group >= MAX77693_IRQ_GROUP_NR) + return; + if (irq_data->group >= MUIC_INT1 
&& irq_data->group <= MUIC_INT3) max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask; else @@ -149,6 +152,9 @@ static void max77693_irq_unmask(struct irq_data *data) const struct max77693_irq_data *irq_data = irq_to_max77693_irq(max77693, data->irq); + if (irq_data->group >= MAX77693_IRQ_GROUP_NR) + return; + if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3) max77693->irq_masks_cur[irq_data->group] |= irq_data->mask; else @@ -200,7 +206,7 @@ static irqreturn_t max77693_irq_thread(int irq, void *data) if (irq_src & MAX77693_IRQSRC_MUIC) /* MUIC INT1 ~ INT3 */ - max77693_bulk_read(max77693->regmap, MAX77693_MUIC_REG_INT1, + max77693_bulk_read(max77693->regmap_muic, MAX77693_MUIC_REG_INT1, MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]); /* Apply masking */ @@ -255,7 +261,8 @@ int max77693_irq_init(struct max77693_dev *max77693) { struct irq_domain *domain; int i; - int ret; + int ret = 0; + u8 intsrc_mask; mutex_init(&max77693->irqlock); @@ -287,19 +294,38 @@ int max77693_irq_init(struct max77693_dev *max77693) &max77693_irq_domain_ops, max77693); if (!domain) { dev_err(max77693->dev, "could not create irq domain\n"); - return -ENODEV; + ret = -ENODEV; + goto err_irq; } max77693->irq_domain = domain; + /* Unmask max77693 interrupt */ + ret = max77693_read_reg(max77693->regmap, + MAX77693_PMIC_REG_INTSRC_MASK, &intsrc_mask); + if (ret < 0) { + dev_err(max77693->dev, "fail to read PMIC register\n"); + goto err_irq; + } + + intsrc_mask &= ~(MAX77693_IRQSRC_CHG); + intsrc_mask &= ~(MAX77693_IRQSRC_FLASH); + intsrc_mask &= ~(MAX77693_IRQSRC_MUIC); + ret = max77693_write_reg(max77693->regmap, + MAX77693_PMIC_REG_INTSRC_MASK, intsrc_mask); + if (ret < 0) { + dev_err(max77693->dev, "fail to write PMIC register\n"); + goto err_irq; + } + ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "max77693-irq", max77693); - if (ret) dev_err(max77693->dev, "Failed to request IRQ %d: %d\n", max77693->irq, ret); - return 0; +err_irq: + return ret; } void max77693_irq_exit(struct max77693_dev *max77693) diff --git a/trunk/drivers/mfd/max77693.c b/trunk/drivers/mfd/max77693.c index a1811cb50ec7..cc5155e20494 100644 --- a/trunk/drivers/mfd/max77693.c +++ b/trunk/drivers/mfd/max77693.c @@ -152,6 +152,20 @@ static int max77693_i2c_probe(struct i2c_client *i2c, max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC); i2c_set_clientdata(max77693->haptic, max77693); + /* + * Initialize register map for MUIC device because use regmap-muic + * instance of MUIC device when irq of max77693 is initialized + * before call max77693-muic probe() function. 
+ */ + max77693->regmap_muic = devm_regmap_init_i2c(max77693->muic, + &max77693_regmap_config); + if (IS_ERR(max77693->regmap_muic)) { + ret = PTR_ERR(max77693->regmap_muic); + dev_err(max77693->dev, + "failed to allocate register map: %d\n", ret); + goto err_regmap; + } + ret = max77693_irq_init(max77693); if (ret < 0) goto err_irq; @@ -159,7 +173,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c, pm_runtime_set_active(max77693->dev); ret = mfd_add_devices(max77693->dev, -1, max77693_devs, - ARRAY_SIZE(max77693_devs), NULL, 0); + ARRAY_SIZE(max77693_devs), NULL, 0, NULL); if (ret < 0) goto err_mfd; diff --git a/trunk/drivers/mfd/max8925-core.c b/trunk/drivers/mfd/max8925-core.c index 825a7f06d9ba..ee53757beca7 100644 --- a/trunk/drivers/mfd/max8925-core.c +++ b/trunk/drivers/mfd/max8925-core.c @@ -598,7 +598,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip, ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0], ARRAY_SIZE(rtc_devs), - &rtc_resources[0], chip->irq_base); + &rtc_resources[0], chip->irq_base, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add rtc subdev\n"); goto out; @@ -606,7 +606,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip, ret = mfd_add_devices(chip->dev, 0, &onkey_devs[0], ARRAY_SIZE(onkey_devs), - &onkey_resources[0], 0); + &onkey_resources[0], 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add onkey subdev\n"); goto out_dev; @@ -615,7 +615,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip, if (pdata) { ret = mfd_add_devices(chip->dev, 0, ®ulator_devs[0], ARRAY_SIZE(regulator_devs), - ®ulator_resources[0], 0); + ®ulator_resources[0], 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add regulator subdev\n"); goto out_dev; @@ -625,7 +625,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip, if (pdata && pdata->backlight) { ret = mfd_add_devices(chip->dev, 0, &backlight_devs[0], ARRAY_SIZE(backlight_devs), - &backlight_resources[0], 0); + &backlight_resources[0], 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add backlight subdev\n"); goto out_dev; @@ -635,7 +635,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip, if (pdata && pdata->power) { ret = mfd_add_devices(chip->dev, 0, &power_devs[0], ARRAY_SIZE(power_devs), - &power_supply_resources[0], 0); + &power_supply_resources[0], 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add power supply " "subdev\n"); @@ -646,7 +646,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip, if (pdata && pdata->touch) { ret = mfd_add_devices(chip->dev, 0, &touch_devs[0], ARRAY_SIZE(touch_devs), - &touch_resources[0], 0); + &touch_resources[0], 0, NULL); if (ret < 0) { dev_err(chip->dev, "Failed to add touch subdev\n"); goto out_dev; diff --git a/trunk/drivers/mfd/max8997.c b/trunk/drivers/mfd/max8997.c index 10b629c245b6..f123517065ec 100644 --- a/trunk/drivers/mfd/max8997.c +++ b/trunk/drivers/mfd/max8997.c @@ -160,7 +160,7 @@ static int max8997_i2c_probe(struct i2c_client *i2c, mfd_add_devices(max8997->dev, -1, max8997_devs, ARRAY_SIZE(max8997_devs), - NULL, 0); + NULL, 0, NULL); /* * TODO: enable others (flash, muic, rtc, battery, ...) 
and diff --git a/trunk/drivers/mfd/max8998.c b/trunk/drivers/mfd/max8998.c index 6ef56d28c056..d7218cc90945 100644 --- a/trunk/drivers/mfd/max8998.c +++ b/trunk/drivers/mfd/max8998.c @@ -161,13 +161,13 @@ static int max8998_i2c_probe(struct i2c_client *i2c, switch (id->driver_data) { case TYPE_LP3974: ret = mfd_add_devices(max8998->dev, -1, - lp3974_devs, ARRAY_SIZE(lp3974_devs), - NULL, 0); + lp3974_devs, ARRAY_SIZE(lp3974_devs), + NULL, 0, NULL); break; case TYPE_MAX8998: ret = mfd_add_devices(max8998->dev, -1, - max8998_devs, ARRAY_SIZE(max8998_devs), - NULL, 0); + max8998_devs, ARRAY_SIZE(max8998_devs), + NULL, 0, NULL); break; default: ret = -EINVAL; diff --git a/trunk/drivers/mfd/mc13xxx-core.c b/trunk/drivers/mfd/mc13xxx-core.c index b801dc72f041..1ec79b54bd2f 100644 --- a/trunk/drivers/mfd/mc13xxx-core.c +++ b/trunk/drivers/mfd/mc13xxx-core.c @@ -612,7 +612,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx, if (!cell.name) return -ENOMEM; - return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0); + return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0, NULL); } static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format) diff --git a/trunk/drivers/mfd/mfd-core.c b/trunk/drivers/mfd/mfd-core.c index 0c3a01cde2f7..f8b77711ad2d 100644 --- a/trunk/drivers/mfd/mfd-core.c +++ b/trunk/drivers/mfd/mfd-core.c @@ -74,12 +74,11 @@ static int mfd_platform_add_cell(struct platform_device *pdev, static int mfd_add_device(struct device *parent, int id, const struct mfd_cell *cell, struct resource *mem_base, - int irq_base) + int irq_base, struct irq_domain *domain) { struct resource *res; struct platform_device *pdev; struct device_node *np = NULL; - struct irq_domain *domain = NULL; int ret = -ENOMEM; int r; @@ -97,7 +96,6 @@ static int mfd_add_device(struct device *parent, int id, for_each_child_of_node(parent->of_node, np) { if (of_device_is_compatible(np, cell->of_compatible)) { pdev->dev.of_node = np; - domain = irq_find_host(parent->of_node); break; } } @@ -177,7 +175,7 @@ static int mfd_add_device(struct device *parent, int id, int mfd_add_devices(struct device *parent, int id, struct mfd_cell *cells, int n_devs, struct resource *mem_base, - int irq_base) + int irq_base, struct irq_domain *domain) { int i; int ret = 0; @@ -191,7 +189,8 @@ int mfd_add_devices(struct device *parent, int id, for (i = 0; i < n_devs; i++) { atomic_set(&cnts[i], 0); cells[i].usage_count = &cnts[i]; - ret = mfd_add_device(parent, id, cells + i, mem_base, irq_base); + ret = mfd_add_device(parent, id, cells + i, mem_base, + irq_base, domain); if (ret) break; } @@ -247,7 +246,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) for (i = 0; i < n_clones; i++) { cell_entry.name = clones[i]; /* don't give up if a single call fails; just report error */ - if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0)) + if (mfd_add_device(pdev->dev.parent, -1, &cell_entry, NULL, 0, + NULL)) dev_err(dev, "failed to create platform device '%s'\n", clones[i]); } diff --git a/trunk/drivers/mfd/palmas.c b/trunk/drivers/mfd/palmas.c index c4a69f193a1d..a345f9bb7b47 100644 --- a/trunk/drivers/mfd/palmas.c +++ b/trunk/drivers/mfd/palmas.c @@ -453,7 +453,8 @@ static int __devinit palmas_i2c_probe(struct i2c_client *i2c, ret = mfd_add_devices(palmas->dev, -1, children, ARRAY_SIZE(palmas_children), - NULL, regmap_irq_chip_get_base(palmas->irq_data)); + NULL, regmap_irq_chip_get_base(palmas->irq_data), + NULL); kfree(children); if (ret < 0) diff --git 
a/trunk/drivers/mfd/rc5t583.c b/trunk/drivers/mfd/rc5t583.c index cdc1df7fa0e9..ff61efc76ce2 100644 --- a/trunk/drivers/mfd/rc5t583.c +++ b/trunk/drivers/mfd/rc5t583.c @@ -281,7 +281,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c, if (i2c->irq) { ret = rc5t583_irq_init(rc5t583, i2c->irq, pdata->irq_base); - /* Still continue with waring if irq init fails */ + /* Still continue with warning, if irq init fails */ if (ret) dev_warn(&i2c->dev, "IRQ init failed: %d\n", ret); else @@ -289,7 +289,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c, } ret = mfd_add_devices(rc5t583->dev, -1, rc5t583_subdevs, - ARRAY_SIZE(rc5t583_subdevs), NULL, 0); + ARRAY_SIZE(rc5t583_subdevs), NULL, 0, NULL); if (ret) { dev_err(&i2c->dev, "add mfd devices failed: %d\n", ret); goto err_add_devs; diff --git a/trunk/drivers/mfd/rdc321x-southbridge.c b/trunk/drivers/mfd/rdc321x-southbridge.c index 685d61e431ad..fbabc3cbe350 100644 --- a/trunk/drivers/mfd/rdc321x-southbridge.c +++ b/trunk/drivers/mfd/rdc321x-southbridge.c @@ -1,5 +1,5 @@ /* - * RDC321x MFD southbrige driver + * RDC321x MFD southbridge driver * * Copyright (C) 2007-2010 Florian Fainelli * Copyright (C) 2010 Bernhard Loos @@ -87,7 +87,8 @@ static int __devinit rdc321x_sb_probe(struct pci_dev *pdev, rdc321x_wdt_pdata.sb_pdev = pdev; return mfd_add_devices(&pdev->dev, -1, - rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), NULL, 0); + rdc321x_sb_cells, ARRAY_SIZE(rdc321x_sb_cells), + NULL, 0, NULL); } static void __devexit rdc321x_sb_remove(struct pci_dev *pdev) diff --git a/trunk/drivers/mfd/sec-core.c b/trunk/drivers/mfd/sec-core.c index 2988efde11eb..49d361a618d0 100644 --- a/trunk/drivers/mfd/sec-core.c +++ b/trunk/drivers/mfd/sec-core.c @@ -141,19 +141,19 @@ static int sec_pmic_probe(struct i2c_client *i2c, switch (sec_pmic->device_type) { case S5M8751X: ret = mfd_add_devices(sec_pmic->dev, -1, s5m8751_devs, - ARRAY_SIZE(s5m8751_devs), NULL, 0); + ARRAY_SIZE(s5m8751_devs), NULL, 0, NULL); break; case S5M8763X: ret = mfd_add_devices(sec_pmic->dev, -1, s5m8763_devs, - ARRAY_SIZE(s5m8763_devs), NULL, 0); + ARRAY_SIZE(s5m8763_devs), NULL, 0, NULL); break; case S5M8767X: ret = mfd_add_devices(sec_pmic->dev, -1, s5m8767_devs, - ARRAY_SIZE(s5m8767_devs), NULL, 0); + ARRAY_SIZE(s5m8767_devs), NULL, 0, NULL); break; case S2MPS11X: ret = mfd_add_devices(sec_pmic->dev, -1, s2mps11_devs, - ARRAY_SIZE(s2mps11_devs), NULL, 0); + ARRAY_SIZE(s2mps11_devs), NULL, 0, NULL); break; default: /* If this happens the probe function is problem */ diff --git a/trunk/drivers/mfd/sta2x11-mfd.c b/trunk/drivers/mfd/sta2x11-mfd.c index d31fed07aefb..d35da6820bea 100644 --- a/trunk/drivers/mfd/sta2x11-mfd.c +++ b/trunk/drivers/mfd/sta2x11-mfd.c @@ -407,7 +407,7 @@ static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev, sta2x11_mfd_bar0, ARRAY_SIZE(sta2x11_mfd_bar0), &pdev->resource[0], - 0); + 0, NULL); if (err) { dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err); goto err_disable; @@ -417,7 +417,7 @@ static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev, sta2x11_mfd_bar1, ARRAY_SIZE(sta2x11_mfd_bar1), &pdev->resource[1], - 0); + 0, NULL); if (err) { dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err); goto err_disable; diff --git a/trunk/drivers/mfd/stmpe.c b/trunk/drivers/mfd/stmpe.c index 2dd8d49cb30b..c94f521f392c 100644 --- a/trunk/drivers/mfd/stmpe.c +++ b/trunk/drivers/mfd/stmpe.c @@ -962,7 +962,7 @@ static int __devinit stmpe_add_device(struct stmpe *stmpe, struct mfd_cell *cell, int irq) { return 
mfd_add_devices(stmpe->dev, stmpe->pdata->id, cell, 1, - NULL, stmpe->irq_base + irq); + NULL, stmpe->irq_base + irq, NULL); } static int __devinit stmpe_devices_init(struct stmpe *stmpe) diff --git a/trunk/drivers/mfd/t7l66xb.c b/trunk/drivers/mfd/t7l66xb.c index 2d9e8799e733..b32940ec9034 100644 --- a/trunk/drivers/mfd/t7l66xb.c +++ b/trunk/drivers/mfd/t7l66xb.c @@ -388,7 +388,7 @@ static int t7l66xb_probe(struct platform_device *dev) ret = mfd_add_devices(&dev->dev, dev->id, t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells), - iomem, t7l66xb->irq_base); + iomem, t7l66xb->irq_base, NULL); if (!ret) return 0; diff --git a/trunk/drivers/mfd/tc3589x.c b/trunk/drivers/mfd/tc3589x.c index 048bf0532a09..b56ba6b43294 100644 --- a/trunk/drivers/mfd/tc3589x.c +++ b/trunk/drivers/mfd/tc3589x.c @@ -262,8 +262,8 @@ static int __devinit tc3589x_device_init(struct tc3589x *tc3589x) if (blocks & TC3589x_BLOCK_GPIO) { ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_gpio, - ARRAY_SIZE(tc3589x_dev_gpio), NULL, - tc3589x->irq_base); + ARRAY_SIZE(tc3589x_dev_gpio), NULL, + tc3589x->irq_base, NULL); if (ret) { dev_err(tc3589x->dev, "failed to add gpio child\n"); return ret; @@ -273,8 +273,8 @@ static int __devinit tc3589x_device_init(struct tc3589x *tc3589x) if (blocks & TC3589x_BLOCK_KEYPAD) { ret = mfd_add_devices(tc3589x->dev, -1, tc3589x_dev_keypad, - ARRAY_SIZE(tc3589x_dev_keypad), NULL, - tc3589x->irq_base); + ARRAY_SIZE(tc3589x_dev_keypad), NULL, + tc3589x->irq_base, NULL); if (ret) { dev_err(tc3589x->dev, "failed to keypad child\n"); return ret; diff --git a/trunk/drivers/mfd/tc6387xb.c b/trunk/drivers/mfd/tc6387xb.c index d20a284ad4ba..413c891102f8 100644 --- a/trunk/drivers/mfd/tc6387xb.c +++ b/trunk/drivers/mfd/tc6387xb.c @@ -192,7 +192,7 @@ static int __devinit tc6387xb_probe(struct platform_device *dev) printk(KERN_INFO "Toshiba tc6387xb initialised\n"); ret = mfd_add_devices(&dev->dev, dev->id, tc6387xb_cells, - ARRAY_SIZE(tc6387xb_cells), iomem, irq); + ARRAY_SIZE(tc6387xb_cells), iomem, irq, NULL); if (!ret) return 0; diff --git a/trunk/drivers/mfd/tc6393xb.c b/trunk/drivers/mfd/tc6393xb.c index 9612264f0e6d..dcab026fcbb2 100644 --- a/trunk/drivers/mfd/tc6393xb.c +++ b/trunk/drivers/mfd/tc6393xb.c @@ -700,8 +700,8 @@ static int __devinit tc6393xb_probe(struct platform_device *dev) tc6393xb_cells[TC6393XB_CELL_FB].pdata_size = sizeof(*tcpd->fb_data); ret = mfd_add_devices(&dev->dev, dev->id, - tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), - iomem, tcpd->irq_base); + tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), + iomem, tcpd->irq_base, NULL); if (!ret) return 0; diff --git a/trunk/drivers/mfd/ti-ssp.c b/trunk/drivers/mfd/ti-ssp.c index 4fb0e6c8e8fe..7c3675a74f93 100644 --- a/trunk/drivers/mfd/ti-ssp.c +++ b/trunk/drivers/mfd/ti-ssp.c @@ -412,7 +412,7 @@ static int __devinit ti_ssp_probe(struct platform_device *pdev) cells[id].data_size = data->pdata_size; } - error = mfd_add_devices(dev, 0, cells, 2, NULL, 0); + error = mfd_add_devices(dev, 0, cells, 2, NULL, 0, NULL); if (error < 0) { dev_err(dev, "cannot add mfd cells\n"); goto error_enable; diff --git a/trunk/drivers/mfd/timberdale.c b/trunk/drivers/mfd/timberdale.c index a447f4ec11fb..cccc626c83c8 100644 --- a/trunk/drivers/mfd/timberdale.c +++ b/trunk/drivers/mfd/timberdale.c @@ -757,25 +757,25 @@ static int __devinit timb_probe(struct pci_dev *dev, err = mfd_add_devices(&dev->dev, -1, timberdale_cells_bar0_cfg0, ARRAY_SIZE(timberdale_cells_bar0_cfg0), - &dev->resource[0], msix_entries[0].vector); + &dev->resource[0], msix_entries[0].vector, 
NULL); break; case TIMB_HW_VER1: err = mfd_add_devices(&dev->dev, -1, timberdale_cells_bar0_cfg1, ARRAY_SIZE(timberdale_cells_bar0_cfg1), - &dev->resource[0], msix_entries[0].vector); + &dev->resource[0], msix_entries[0].vector, NULL); break; case TIMB_HW_VER2: err = mfd_add_devices(&dev->dev, -1, timberdale_cells_bar0_cfg2, ARRAY_SIZE(timberdale_cells_bar0_cfg2), - &dev->resource[0], msix_entries[0].vector); + &dev->resource[0], msix_entries[0].vector, NULL); break; case TIMB_HW_VER3: err = mfd_add_devices(&dev->dev, -1, timberdale_cells_bar0_cfg3, ARRAY_SIZE(timberdale_cells_bar0_cfg3), - &dev->resource[0], msix_entries[0].vector); + &dev->resource[0], msix_entries[0].vector, NULL); break; default: dev_err(&dev->dev, "Uknown IP setup: %d.%d.%d\n", @@ -792,7 +792,7 @@ static int __devinit timb_probe(struct pci_dev *dev, err = mfd_add_devices(&dev->dev, 0, timberdale_cells_bar1, ARRAY_SIZE(timberdale_cells_bar1), - &dev->resource[1], msix_entries[0].vector); + &dev->resource[1], msix_entries[0].vector, NULL); if (err) { dev_err(&dev->dev, "mfd_add_devices failed: %d\n", err); goto err_mfd2; @@ -803,7 +803,7 @@ static int __devinit timb_probe(struct pci_dev *dev, ((priv->fw.config & TIMB_HW_VER_MASK) == TIMB_HW_VER3)) { err = mfd_add_devices(&dev->dev, 1, timberdale_cells_bar2, ARRAY_SIZE(timberdale_cells_bar2), - &dev->resource[2], msix_entries[0].vector); + &dev->resource[2], msix_entries[0].vector, NULL); if (err) { dev_err(&dev->dev, "mfd_add_devices failed: %d\n", err); goto err_mfd2; diff --git a/trunk/drivers/mfd/tps6105x.c b/trunk/drivers/mfd/tps6105x.c index a293b978e27c..14051bdc714b 100644 --- a/trunk/drivers/mfd/tps6105x.c +++ b/trunk/drivers/mfd/tps6105x.c @@ -188,7 +188,7 @@ static int __devinit tps6105x_probe(struct i2c_client *client, } ret = mfd_add_devices(&client->dev, 0, tps6105x_cells, - ARRAY_SIZE(tps6105x_cells), NULL, 0); + ARRAY_SIZE(tps6105x_cells), NULL, 0, NULL); if (ret) goto fail; diff --git a/trunk/drivers/mfd/tps6507x.c b/trunk/drivers/mfd/tps6507x.c index 33ba7723c967..1b203499c744 100644 --- a/trunk/drivers/mfd/tps6507x.c +++ b/trunk/drivers/mfd/tps6507x.c @@ -100,7 +100,7 @@ static int tps6507x_i2c_probe(struct i2c_client *i2c, ret = mfd_add_devices(tps6507x->dev, -1, tps6507x_devs, ARRAY_SIZE(tps6507x_devs), - NULL, 0); + NULL, 0, NULL); if (ret < 0) goto err; diff --git a/trunk/drivers/mfd/tps65090.c b/trunk/drivers/mfd/tps65090.c index 80e24f4b47bf..50fd87c87a1c 100644 --- a/trunk/drivers/mfd/tps65090.c +++ b/trunk/drivers/mfd/tps65090.c @@ -292,7 +292,7 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client, } ret = mfd_add_devices(tps65090->dev, -1, tps65090s, - ARRAY_SIZE(tps65090s), NULL, 0); + ARRAY_SIZE(tps65090s), NULL, 0, NULL); if (ret) { dev_err(&client->dev, "add mfd devices failed with err: %d\n", ret); diff --git a/trunk/drivers/mfd/tps65217.c b/trunk/drivers/mfd/tps65217.c index 61c097a98f5d..a95e9421b735 100644 --- a/trunk/drivers/mfd/tps65217.c +++ b/trunk/drivers/mfd/tps65217.c @@ -24,11 +24,18 @@ #include #include #include -#include +#include +#include #include #include +static struct mfd_cell tps65217s[] = { + { + .name = "tps65217-pmic", + }, +}; + /** * tps65217_reg_read: Read a single tps65217 register. 
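Every mfd_add_devices() call touched by the hunks in this region gains a trailing NULL. The helper has evidently grown one extra final parameter (from the call sites it looks like an optional IRQ-domain pointer), and callers that have no domain to supply simply pass NULL, as the tps65217 conversion in this file does for its single "tps65217-pmic" cell. A minimal sketch of the resulting calling convention, assuming the new argument really is an optional IRQ domain and using made-up names (example_cells, example_register_cells, "example-pmic"):

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mfd/core.h>

static struct mfd_cell example_cells[] = {
	{ .name = "example-pmic" },		/* hypothetical cell name */
};

static int example_register_cells(struct device *dev)
{
	/* Register one child cell; the final NULL is the new argument. */
	return mfd_add_devices(dev, -1, example_cells,
			       ARRAY_SIZE(example_cells),
			       NULL,	/* no shared memory resource */
			       0,	/* no IRQ base */
			       NULL);	/* no IRQ domain supplied */
}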
* @@ -133,83 +140,48 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg, } EXPORT_SYMBOL_GPL(tps65217_clear_bits); -#ifdef CONFIG_OF -static struct of_regulator_match reg_matches[] = { - { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 }, - { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 }, - { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 }, - { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 }, - { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 }, - { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 }, - { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 }, -}; - -static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client) -{ - struct device_node *node = client->dev.of_node; - struct tps65217_board *pdata; - struct device_node *regs; - int count = ARRAY_SIZE(reg_matches); - int ret, i; - - regs = of_find_node_by_name(node, "regulators"); - if (!regs) - return NULL; - - ret = of_regulator_match(&client->dev, regs, reg_matches, count); - of_node_put(regs); - if ((ret < 0) || (ret > count)) - return NULL; - - count = ret; - pdata = devm_kzalloc(&client->dev, count * sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return NULL; - - for (i = 0; i < count; i++) { - if (!reg_matches[i].init_data || !reg_matches[i].of_node) - continue; - - pdata->tps65217_init_data[i] = reg_matches[i].init_data; - pdata->of_node[i] = reg_matches[i].of_node; - } - - return pdata; -} - -static struct of_device_id tps65217_of_match[] = { - { .compatible = "ti,tps65217", }, - { }, -}; -#else -static struct tps65217_board *tps65217_parse_dt(struct i2c_client *client) -{ - return NULL; -} -#endif - static struct regmap_config tps65217_regmap_config = { .reg_bits = 8, .val_bits = 8, }; +static const struct of_device_id tps65217_of_match[] = { + { .compatible = "ti,tps65217", .data = (void *)TPS65217 }, + { /* sentinel */ }, +}; + static int __devinit tps65217_probe(struct i2c_client *client, const struct i2c_device_id *ids) { struct tps65217 *tps; - struct regulator_init_data *reg_data; - struct tps65217_board *pdata = client->dev.platform_data; - int i, ret; unsigned int version; + unsigned int chip_id = ids->driver_data; + const struct of_device_id *match; + int ret; - if (!pdata && client->dev.of_node) - pdata = tps65217_parse_dt(client); + if (client->dev.of_node) { + match = of_match_device(tps65217_of_match, &client->dev); + if (!match) { + dev_err(&client->dev, + "Failed to find matching dt id\n"); + return -EINVAL; + } + chip_id = (unsigned int)match->data; + } + + if (!chip_id) { + dev_err(&client->dev, "id is null.\n"); + return -ENODEV; + } tps = devm_kzalloc(&client->dev, sizeof(*tps), GFP_KERNEL); if (!tps) return -ENOMEM; - tps->pdata = pdata; + i2c_set_clientdata(client, tps); + tps->dev = &client->dev; + tps->id = chip_id; + tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config); if (IS_ERR(tps->regmap)) { ret = PTR_ERR(tps->regmap); @@ -218,8 +190,12 @@ static int __devinit tps65217_probe(struct i2c_client *client, return ret; } - i2c_set_clientdata(client, tps); - tps->dev = &client->dev; + ret = mfd_add_devices(tps->dev, -1, tps65217s, + ARRAY_SIZE(tps65217s), NULL, 0, NULL); + if (ret < 0) { + dev_err(tps->dev, "mfd_add_devices failed: %d\n", ret); + return ret; + } ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version); if (ret < 0) { @@ -232,41 +208,21 @@ static int __devinit tps65217_probe(struct i2c_client *client, (version & TPS65217_CHIPID_CHIP_MASK) >> 4, version & TPS65217_CHIPID_REV_MASK); - for 
(i = 0; i < TPS65217_NUM_REGULATOR; i++) { - struct platform_device *pdev; - - pdev = platform_device_alloc("tps65217-pmic", i); - if (!pdev) { - dev_err(tps->dev, "Cannot create regulator %d\n", i); - continue; - } - - pdev->dev.parent = tps->dev; - pdev->dev.of_node = pdata->of_node[i]; - reg_data = pdata->tps65217_init_data[i]; - platform_device_add_data(pdev, reg_data, sizeof(*reg_data)); - tps->regulator_pdev[i] = pdev; - - platform_device_add(pdev); - } - return 0; } static int __devexit tps65217_remove(struct i2c_client *client) { struct tps65217 *tps = i2c_get_clientdata(client); - int i; - for (i = 0; i < TPS65217_NUM_REGULATOR; i++) - platform_device_unregister(tps->regulator_pdev[i]); + mfd_remove_devices(tps->dev); return 0; } static const struct i2c_device_id tps65217_id_table[] = { - {"tps65217", 0xF0}, - {/* end of list */} + {"tps65217", TPS65217}, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, tps65217_id_table); diff --git a/trunk/drivers/mfd/tps6586x.c b/trunk/drivers/mfd/tps6586x.c index 353c34812120..345960ca2fd8 100644 --- a/trunk/drivers/mfd/tps6586x.c +++ b/trunk/drivers/mfd/tps6586x.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -346,6 +347,7 @@ static int __devinit tps6586x_add_subdevs(struct tps6586x *tps6586x, #ifdef CONFIG_OF static struct of_regulator_match tps6586x_matches[] = { + { .name = "sys", .driver_data = (void *)TPS6586X_ID_SYS }, { .name = "sm0", .driver_data = (void *)TPS6586X_ID_SM_0 }, { .name = "sm1", .driver_data = (void *)TPS6586X_ID_SM_1 }, { .name = "sm2", .driver_data = (void *)TPS6586X_ID_SM_2 }, @@ -369,6 +371,7 @@ static struct tps6586x_platform_data *tps6586x_parse_dt(struct i2c_client *clien struct tps6586x_platform_data *pdata; struct tps6586x_subdev_info *devs; struct device_node *regs; + const char *sys_rail_name = NULL; unsigned int count; unsigned int i, j; int err; @@ -391,12 +394,22 @@ static struct tps6586x_platform_data *tps6586x_parse_dt(struct i2c_client *clien return NULL; for (i = 0, j = 0; i < num && j < count; i++) { + struct regulator_init_data *reg_idata; + if (!tps6586x_matches[i].init_data) continue; + reg_idata = tps6586x_matches[i].init_data; devs[j].name = "tps6586x-regulator"; devs[j].platform_data = tps6586x_matches[i].init_data; devs[j].id = (int)tps6586x_matches[i].driver_data; + if (devs[j].id == TPS6586X_ID_SYS) + sys_rail_name = reg_idata->constraints.name; + + if ((devs[j].id == TPS6586X_ID_LDO_5) || + (devs[j].id == TPS6586X_ID_LDO_RTC)) + reg_idata->supply_regulator = sys_rail_name; + devs[j].of_node = tps6586x_matches[i].of_node; j++; } @@ -493,7 +506,8 @@ static int __devinit tps6586x_i2c_probe(struct i2c_client *client, } ret = mfd_add_devices(tps6586x->dev, -1, - tps6586x_cell, ARRAY_SIZE(tps6586x_cell), NULL, 0); + tps6586x_cell, ARRAY_SIZE(tps6586x_cell), + NULL, 0, NULL); if (ret < 0) { dev_err(&client->dev, "mfd_add_devices failed: %d\n", ret); goto err_mfd_add; diff --git a/trunk/drivers/mfd/tps65910.c b/trunk/drivers/mfd/tps65910.c index 1c563792c777..d3ce4d569deb 100644 --- a/trunk/drivers/mfd/tps65910.c +++ b/trunk/drivers/mfd/tps65910.c @@ -254,7 +254,7 @@ static __devinit int tps65910_i2c_probe(struct i2c_client *i2c, ret = mfd_add_devices(tps65910->dev, -1, tps65910s, ARRAY_SIZE(tps65910s), - NULL, 0); + NULL, 0, NULL); if (ret < 0) { dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret); return ret; diff --git a/trunk/drivers/mfd/tps65911-comparator.c b/trunk/drivers/mfd/tps65911-comparator.c index 5a62e6bf89ae..0b6e361432c4 100644 --- 
a/trunk/drivers/mfd/tps65911-comparator.c +++ b/trunk/drivers/mfd/tps65911-comparator.c @@ -136,7 +136,7 @@ static __devinit int tps65911_comparator_probe(struct platform_device *pdev) ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold); if (ret < 0) { - dev_err(&pdev->dev, "cannot set COMP2 theshold\n"); + dev_err(&pdev->dev, "cannot set COMP2 threshold\n"); return ret; } diff --git a/trunk/drivers/mfd/tps65912-core.c b/trunk/drivers/mfd/tps65912-core.c index 74fd8cb5f372..4658b5bdcd84 100644 --- a/trunk/drivers/mfd/tps65912-core.c +++ b/trunk/drivers/mfd/tps65912-core.c @@ -146,7 +146,7 @@ int tps65912_device_init(struct tps65912 *tps65912) ret = mfd_add_devices(tps65912->dev, -1, tps65912s, ARRAY_SIZE(tps65912s), - NULL, 0); + NULL, 0, NULL); if (ret < 0) goto err; diff --git a/trunk/drivers/mfd/twl4030-audio.c b/trunk/drivers/mfd/twl4030-audio.c index 838ce4eb444e..77c9acb14583 100644 --- a/trunk/drivers/mfd/twl4030-audio.c +++ b/trunk/drivers/mfd/twl4030-audio.c @@ -223,7 +223,7 @@ static int __devinit twl4030_audio_probe(struct platform_device *pdev) if (childs) ret = mfd_add_devices(&pdev->dev, pdev->id, audio->cells, - childs, NULL, 0); + childs, NULL, 0, NULL); else { dev_err(&pdev->dev, "No platform data found for childs\n"); ret = -ENODEV; diff --git a/trunk/drivers/mfd/twl6040-core.c b/trunk/drivers/mfd/twl6040-core.c index b0fad0ffca56..3dca5c195a20 100644 --- a/trunk/drivers/mfd/twl6040-core.c +++ b/trunk/drivers/mfd/twl6040-core.c @@ -632,7 +632,7 @@ static int __devinit twl6040_probe(struct i2c_client *client, } ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children, - NULL, 0); + NULL, 0, NULL); if (ret) goto mfd_err; diff --git a/trunk/drivers/mfd/vx855.c b/trunk/drivers/mfd/vx855.c index 872aff21e4be..b9a636d44c7f 100644 --- a/trunk/drivers/mfd/vx855.c +++ b/trunk/drivers/mfd/vx855.c @@ -102,7 +102,7 @@ static __devinit int vx855_probe(struct pci_dev *pdev, vx855_gpio_resources[1].end = vx855_gpio_resources[1].start + 3; ret = mfd_add_devices(&pdev->dev, -1, vx855_cells, ARRAY_SIZE(vx855_cells), - NULL, 0); + NULL, 0, NULL); /* we always return -ENODEV here in order to enable other * drivers like old, not-yet-platform_device ported i2c-viapro */ diff --git a/trunk/drivers/mfd/wl1273-core.c b/trunk/drivers/mfd/wl1273-core.c index f39b756df561..86e0e4309fc2 100644 --- a/trunk/drivers/mfd/wl1273-core.c +++ b/trunk/drivers/mfd/wl1273-core.c @@ -241,7 +241,7 @@ static int __devinit wl1273_core_probe(struct i2c_client *client, __func__, children); r = mfd_add_devices(&client->dev, -1, core->cells, - children, NULL, 0); + children, NULL, 0, NULL); if (r) goto err; diff --git a/trunk/drivers/mfd/wm831x-core.c b/trunk/drivers/mfd/wm831x-core.c index 946698fd2dc6..301731035940 100644 --- a/trunk/drivers/mfd/wm831x-core.c +++ b/trunk/drivers/mfd/wm831x-core.c @@ -1813,27 +1813,27 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8310: ret = mfd_add_devices(wm831x->dev, wm831x_num, wm8310_devs, ARRAY_SIZE(wm8310_devs), - NULL, 0); + NULL, 0, NULL); break; case WM8311: ret = mfd_add_devices(wm831x->dev, wm831x_num, wm8311_devs, ARRAY_SIZE(wm8311_devs), - NULL, 0); + NULL, 0, NULL); if (!pdata || !pdata->disable_touch) mfd_add_devices(wm831x->dev, wm831x_num, touch_devs, ARRAY_SIZE(touch_devs), - NULL, 0); + NULL, 0, NULL); break; case WM8312: ret = mfd_add_devices(wm831x->dev, wm831x_num, wm8312_devs, ARRAY_SIZE(wm8312_devs), - NULL, 0); + NULL, 0, NULL); if (!pdata || !pdata->disable_touch) mfd_add_devices(wm831x->dev, 
wm831x_num, touch_devs, ARRAY_SIZE(touch_devs), - NULL, 0); + NULL, 0, NULL); break; case WM8320: @@ -1842,7 +1842,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) case WM8326: ret = mfd_add_devices(wm831x->dev, wm831x_num, wm8320_devs, ARRAY_SIZE(wm8320_devs), - NULL, 0); + NULL, 0, NULL); break; default: @@ -1867,7 +1867,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) if (ret & WM831X_XTAL_ENA) { ret = mfd_add_devices(wm831x->dev, wm831x_num, rtc_devs, ARRAY_SIZE(rtc_devs), - NULL, 0); + NULL, 0, NULL); if (ret != 0) { dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret); goto err_irq; @@ -1880,7 +1880,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) /* Treat errors as non-critical */ ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs, ARRAY_SIZE(backlight_devs), NULL, - 0); + 0, NULL); if (ret < 0) dev_err(wm831x->dev, "Failed to add backlight: %d\n", ret); diff --git a/trunk/drivers/mfd/wm8400-core.c b/trunk/drivers/mfd/wm8400-core.c index 4b7d378551d5..639ca359242f 100644 --- a/trunk/drivers/mfd/wm8400-core.c +++ b/trunk/drivers/mfd/wm8400-core.c @@ -70,7 +70,7 @@ static int wm8400_register_codec(struct wm8400 *wm8400) .pdata_size = sizeof(*wm8400), }; - return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0); + return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0, NULL); } /* diff --git a/trunk/drivers/mfd/wm8994-core.c b/trunk/drivers/mfd/wm8994-core.c index eec74aa55fdf..2febf88cfce8 100644 --- a/trunk/drivers/mfd/wm8994-core.c +++ b/trunk/drivers/mfd/wm8994-core.c @@ -414,7 +414,7 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq) ret = mfd_add_devices(wm8994->dev, -1, wm8994_regulator_devs, ARRAY_SIZE(wm8994_regulator_devs), - NULL, 0); + NULL, 0, NULL); if (ret != 0) { dev_err(wm8994->dev, "Failed to add children: %d\n", ret); goto err; @@ -648,7 +648,7 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq) ret = mfd_add_devices(wm8994->dev, -1, wm8994_devs, ARRAY_SIZE(wm8994_devs), - NULL, 0); + NULL, 0, NULL); if (ret != 0) { dev_err(wm8994->dev, "Failed to add children: %d\n", ret); goto err_irq; diff --git a/trunk/drivers/mfd/wm8994-irq.c b/trunk/drivers/mfd/wm8994-irq.c index 0aac4aff17a5..a050e56a9bbd 100644 --- a/trunk/drivers/mfd/wm8994-irq.c +++ b/trunk/drivers/mfd/wm8994-irq.c @@ -135,6 +135,7 @@ static struct regmap_irq_chip wm8994_irq_chip = { .status_base = WM8994_INTERRUPT_STATUS_1, .mask_base = WM8994_INTERRUPT_STATUS_1_MASK, .ack_base = WM8994_INTERRUPT_STATUS_1, + .runtime_pm = true, }; int wm8994_irq_init(struct wm8994 *wm8994) diff --git a/trunk/drivers/misc/mei/interrupt.c b/trunk/drivers/misc/mei/interrupt.c index c6ffbbe5a6c0..d78c05e693f7 100644 --- a/trunk/drivers/misc/mei/interrupt.c +++ b/trunk/drivers/misc/mei/interrupt.c @@ -1253,7 +1253,7 @@ static int mei_irq_thread_write_handler(struct mei_io_list *cmpl_list, if (dev->wd_timeout) *slots -= mei_data2slots(MEI_START_WD_DATA_SIZE); else - *slots -= mei_data2slots(MEI_START_WD_DATA_SIZE); + *slots -= mei_data2slots(MEI_WD_PARAMS_SIZE); } } if (dev->stop) diff --git a/trunk/drivers/misc/mei/main.c b/trunk/drivers/misc/mei/main.c index 092330208869..7422c7652845 100644 --- a/trunk/drivers/misc/mei/main.c +++ b/trunk/drivers/misc/mei/main.c @@ -924,6 +924,27 @@ static struct miscdevice mei_misc_device = { .minor = MISC_DYNAMIC_MINOR, }; +/** + * mei_quirk_probe - probe for devices that doesn't valid ME interface + * @pdev: PCI device structure + * @ent: 
entry into pci_device_table + * + * returns true if ME Interface is valid, false otherwise + */ +static bool __devinit mei_quirk_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + u32 reg; + if (ent->device == MEI_DEV_ID_PBG_1) { + pci_read_config_dword(pdev, 0x48, ®); + /* make sure that bit 9 is up and bit 10 is down */ + if ((reg & 0x600) == 0x200) { + dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n"); + return false; + } + } + return true; +} /** * mei_probe - Device Initialization Routine * @@ -939,6 +960,12 @@ static int __devinit mei_probe(struct pci_dev *pdev, int err; mutex_lock(&mei_mutex); + + if (!mei_quirk_probe(pdev, ent)) { + err = -ENODEV; + goto end; + } + if (mei_device) { err = -EEXIST; goto end; diff --git a/trunk/drivers/misc/sgi-xp/xpc_uv.c b/trunk/drivers/misc/sgi-xp/xpc_uv.c index 87b251ab6ec5..b9e2000969f0 100644 --- a/trunk/drivers/misc/sgi-xp/xpc_uv.c +++ b/trunk/drivers/misc/sgi-xp/xpc_uv.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include #include #include #include @@ -59,6 +61,8 @@ static struct xpc_heartbeat_uv *xpc_heartbeat_uv; XPC_NOTIFY_MSG_SIZE_UV) #define XPC_NOTIFY_IRQ_NAME "xpc_notify" +static int xpc_mq_node = -1; + static struct xpc_gru_mq_uv *xpc_activate_mq_uv; static struct xpc_gru_mq_uv *xpc_notify_mq_uv; @@ -109,11 +113,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) #if defined CONFIG_X86_64 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, UV_AFFINITY_CPU); - if (mq->irq < 0) { - dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", - -mq->irq); + if (mq->irq < 0) return mq->irq; - } mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset); @@ -238,8 +239,9 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, mq->mmr_blade = uv_cpu_to_blade_id(cpu); nid = cpu_to_node(cpu); - page = alloc_pages_exact_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, - pg_order); + page = alloc_pages_exact_node(nid, + GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " "bytes of memory on nid=%d for GRU mq\n", mq_size, nid); @@ -1731,9 +1733,50 @@ static struct xpc_arch_operations xpc_arch_ops_uv = { .notify_senders_of_disconnect = xpc_notify_senders_of_disconnect_uv, }; +static int +xpc_init_mq_node(int nid) +{ + int cpu; + + get_online_cpus(); + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_activate_mq_uv = + xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, nid, + XPC_ACTIVATE_IRQ_NAME, + xpc_handle_activate_IRQ_uv); + if (!IS_ERR(xpc_activate_mq_uv)) + break; + } + if (IS_ERR(xpc_activate_mq_uv)) { + put_online_cpus(); + return PTR_ERR(xpc_activate_mq_uv); + } + + for_each_cpu(cpu, cpumask_of_node(nid)) { + xpc_notify_mq_uv = + xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, nid, + XPC_NOTIFY_IRQ_NAME, + xpc_handle_notify_IRQ_uv); + if (!IS_ERR(xpc_notify_mq_uv)) + break; + } + if (IS_ERR(xpc_notify_mq_uv)) { + xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); + put_online_cpus(); + return PTR_ERR(xpc_notify_mq_uv); + } + + put_online_cpus(); + return 0; +} + int xpc_init_uv(void) { + int nid; + int ret = 0; + xpc_arch_ops = xpc_arch_ops_uv; if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) { @@ -1742,21 +1785,21 @@ xpc_init_uv(void) return -E2BIG; } - xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, - XPC_ACTIVATE_IRQ_NAME, - xpc_handle_activate_IRQ_uv); - if (IS_ERR(xpc_activate_mq_uv)) - return PTR_ERR(xpc_activate_mq_uv); + 
if (xpc_mq_node < 0) + for_each_online_node(nid) { + ret = xpc_init_mq_node(nid); - xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, - XPC_NOTIFY_IRQ_NAME, - xpc_handle_notify_IRQ_uv); - if (IS_ERR(xpc_notify_mq_uv)) { - xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); - return PTR_ERR(xpc_notify_mq_uv); - } + if (!ret) + break; + } + else + ret = xpc_init_mq_node(xpc_mq_node); - return 0; + if (ret < 0) + dev_err(xpc_part, "xpc_init_mq_node() returned error=%d\n", + -ret); + + return ret; } void @@ -1765,3 +1808,6 @@ xpc_exit_uv(void) xpc_destroy_gru_mq_uv(xpc_notify_mq_uv); xpc_destroy_gru_mq_uv(xpc_activate_mq_uv); } + +module_param(xpc_mq_node, int, 0); +MODULE_PARM_DESC(xpc_mq_node, "Node number on which to allocate message queues."); diff --git a/trunk/drivers/misc/ti-st/st_ll.c b/trunk/drivers/misc/ti-st/st_ll.c index 1ff460a8e9c7..93b4d67cc4a3 100644 --- a/trunk/drivers/misc/ti-st/st_ll.c +++ b/trunk/drivers/misc/ti-st/st_ll.c @@ -87,7 +87,7 @@ static void ll_device_want_to_wakeup(struct st_data_s *st_data) /* communicate to platform about chip wakeup */ kim_data = st_data->kim_data; pdata = kim_data->kim_pdev->dev.platform_data; - if (pdata->chip_asleep) + if (pdata->chip_awake) pdata->chip_awake(NULL); } diff --git a/trunk/drivers/mmc/card/block.c b/trunk/drivers/mmc/card/block.c index f1c84decb192..172a768036d8 100644 --- a/trunk/drivers/mmc/card/block.c +++ b/trunk/drivers/mmc/card/block.c @@ -1411,7 +1411,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE) + if (req->cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); @@ -1716,6 +1717,7 @@ static int mmc_add_disk(struct mmc_blk_data *md) #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { @@ -1752,6 +1754,28 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + /* + * On these Samsung MoviNAND parts, performing secure erase or + * secure trim can result in unrecoverable corruption due to a + * firmware bug. 
+ */ + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + END_FIXUP }; diff --git a/trunk/drivers/mmc/core/sdio.c b/trunk/drivers/mmc/core/sdio.c index d4619e2ec030..2273ce6b6c1a 100644 --- a/trunk/drivers/mmc/core/sdio.c +++ b/trunk/drivers/mmc/core/sdio.c @@ -641,7 +641,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, /* * If the host and card support UHS-I mode request the card * to switch to 1.8V signaling level. No 1.8v signalling if - * UHS mode is not enabled to maintain compatibilty and some + * UHS mode is not enabled to maintain compatibility and some * systems that claim 1.8v signalling in fact do not support * it. */ diff --git a/trunk/drivers/mmc/host/at91_mci.c b/trunk/drivers/mmc/host/at91_mci.c index efdb81d21c44..74bed0fc23e7 100644 --- a/trunk/drivers/mmc/host/at91_mci.c +++ b/trunk/drivers/mmc/host/at91_mci.c @@ -356,7 +356,7 @@ static void at91_mci_handle_transmitted(struct at91mci_host *host) } /* - * Update bytes tranfered count during a write operation + * Update bytes transfered count during a write operation */ static void at91_mci_update_bytes_xfered(struct at91mci_host *host) { diff --git a/trunk/drivers/mmc/host/atmel-mci.c b/trunk/drivers/mmc/host/atmel-mci.c index 322412cec4ee..852d5fbda630 100644 --- a/trunk/drivers/mmc/host/atmel-mci.c +++ b/trunk/drivers/mmc/host/atmel-mci.c @@ -81,6 +81,7 @@ struct atmel_mci_caps { bool has_bad_data_ordering; bool need_reset_after_xfer; bool need_blksz_mul_4; + bool need_notbusy_for_read_ops; }; struct atmel_mci_dma { @@ -1021,7 +1022,7 @@ static void atmci_stop_transfer(struct atmel_mci *host) } /* - * Stop data transfer because error(s) occured. + * Stop data transfer because error(s) occurred. 
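The block.c hunks above blacklist a set of Samsung MoviNAND eMMC parts by CID product name and then consult that quirk before honouring a secure discard. Pulled together for readability (the types and helpers are the MMC core's own; only the function name example_issue_discard is invented here), the resulting decision path looks roughly like this:

/*
 * Consolidated sketch of the behaviour added in mmc/card/block.c: a
 * REQ_SECURE request is only issued as a secure discard when the card
 * has not been flagged with MMC_QUIRK_SEC_ERASE_TRIM_BROKEN; otherwise
 * it falls back to a plain discard so the affected MoviNAND parts are
 * never asked to secure-erase or secure-trim.
 */
static int example_issue_discard(struct mmc_queue *mq, struct request *req,
				 struct mmc_card *card)
{
	if (req->cmd_flags & REQ_SECURE &&
	    !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
		return mmc_blk_issue_secdiscard_rq(mq, req);

	return mmc_blk_issue_discard_rq(mq, req);
}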
*/ static void atmci_stop_transfer_pdc(struct atmel_mci *host) { @@ -1625,7 +1626,8 @@ static void atmci_tasklet_func(unsigned long priv) __func__); atmci_set_completed(host, EVENT_XFER_COMPLETE); - if (host->data->flags & MMC_DATA_WRITE) { + if (host->caps.need_notbusy_for_read_ops || + (host->data->flags & MMC_DATA_WRITE)) { atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); state = STATE_WAITING_NOTBUSY; } else if (host->mrq->stop) { @@ -2218,6 +2220,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) host->caps.has_bad_data_ordering = 1; host->caps.need_reset_after_xfer = 1; host->caps.need_blksz_mul_4 = 1; + host->caps.need_notbusy_for_read_ops = 0; /* keep only major version number */ switch (version & 0xf00) { @@ -2238,6 +2241,7 @@ static void __init atmci_get_cap(struct atmel_mci *host) case 0x200: host->caps.has_rwproof = 1; host->caps.need_blksz_mul_4 = 0; + host->caps.need_notbusy_for_read_ops = 1; case 0x100: host->caps.has_bad_data_ordering = 0; host->caps.need_reset_after_xfer = 0; diff --git a/trunk/drivers/mmc/host/bfin_sdh.c b/trunk/drivers/mmc/host/bfin_sdh.c index 03666174ca48..a17dd7363ceb 100644 --- a/trunk/drivers/mmc/host/bfin_sdh.c +++ b/trunk/drivers/mmc/host/bfin_sdh.c @@ -49,13 +49,6 @@ #define bfin_write_SDH_CFG bfin_write_RSI_CFG #endif -struct dma_desc_array { - unsigned long start_addr; - unsigned short cfg; - unsigned short x_count; - short x_modify; -} __packed; - struct sdh_host { struct mmc_host *mmc; spinlock_t lock; diff --git a/trunk/drivers/mmc/host/dw_mmc.c b/trunk/drivers/mmc/host/dw_mmc.c index 72dc3cde646d..af40d227bece 100644 --- a/trunk/drivers/mmc/host/dw_mmc.c +++ b/trunk/drivers/mmc/host/dw_mmc.c @@ -627,6 +627,7 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) { struct dw_mci *host = slot->host; u32 div; + u32 clk_en_a; if (slot->clock != host->current_speed) { div = host->bus_hz / slot->clock; @@ -659,9 +660,11 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); - /* enable clock */ - mci_writel(host, CLKENA, ((SDMMC_CLKEN_ENABLE | - SDMMC_CLKEN_LOW_PWR) << slot->id)); + /* enable clock; only low power if no SDIO */ + clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; + if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) + clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; + mci_writel(host, CLKENA, clk_en_a); /* inform CIU */ mci_send_cmd(slot, @@ -862,6 +865,30 @@ static int dw_mci_get_cd(struct mmc_host *mmc) return present; } +/* + * Disable lower power mode. + * + * Low power mode will stop the card clock when idle. According to the + * description of the CLKENA register we should disable low power mode + * for SDIO cards if we need SDIO interrupts to work. + * + * This function is fast if low power mode is already disabled. + */ +static void dw_mci_disable_low_power(struct dw_mci_slot *slot) +{ + struct dw_mci *host = slot->host; + u32 clk_en_a; + const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; + + clk_en_a = mci_readl(host, CLKENA); + + if (clk_en_a & clken_low_pwr) { + mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | + SDMMC_CMD_PRV_DAT_WAIT, 0); + } +} + static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) { struct dw_mci_slot *slot = mmc_priv(mmc); @@ -871,6 +898,14 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) /* Enable/disable Slot Specific SDIO interrupt */ int_mask = mci_readl(host, INTMASK); if (enb) { + /* + * Turn off low power mode if it was enabled. 
This is a bit of + * a heavy operation and we disable / enable IRQs a lot, so + * we'll leave low power mode disabled and it will get + * re-enabled again in dw_mci_setup_bus(). + */ + dw_mci_disable_low_power(slot); + mci_writel(host, INTMASK, (int_mask | SDMMC_INT_SDIO(slot->id))); } else { @@ -1429,22 +1464,10 @@ static void dw_mci_read_data_pio(struct dw_mci *host) nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_RXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ data->bytes_xfered += nbytes; @@ -1497,23 +1520,10 @@ static void dw_mci_write_data_pio(struct dw_mci *host) nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_TXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ data->bytes_xfered += nbytes; @@ -1547,12 +1557,11 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) { struct dw_mci *host = dev_id; - u32 status, pending; + u32 pending; unsigned int pass_count = 0; int i; do { - status = mci_readl(host, RINTSTS); pending = mci_readl(host, MINTSTS); /* read-only mask reg */ /* @@ -1570,7 +1579,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_CMD_ERROR_FLAGS) { mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); - host->cmd_status = status; + host->cmd_status = pending; smp_wmb(); set_bit(EVENT_CMD_COMPLETE, &host->pending_events); } @@ -1578,18 +1587,16 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_DATA_ERROR_FLAGS) { /* if there is an error report DATA_ERROR */ mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); - host->data_status = status; + host->data_status = pending; smp_wmb(); set_bit(EVENT_DATA_ERROR, &host->pending_events); - if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | - SDMMC_INT_SBE | SDMMC_INT_EBE))) - tasklet_schedule(&host->tasklet); + tasklet_schedule(&host->tasklet); } if (pending & SDMMC_INT_DATA_OVER) { mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); if (!host->data_status) - host->data_status = status; + host->data_status = pending; smp_wmb(); if (host->dir_status == DW_MCI_RECV_STATUS) { if (host->sg != NULL) @@ -1613,7 +1620,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & SDMMC_INT_CMD_DONE) { mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); - dw_mci_cmd_interrupt(host, status); + dw_mci_cmd_interrupt(host, pending); } if (pending & SDMMC_INT_CD) { diff --git a/trunk/drivers/mmc/host/mxs-mmc.c b/trunk/drivers/mmc/host/mxs-mmc.c index a51f9309ffbb..ad3fcea1269e 100644 --- a/trunk/drivers/mmc/host/mxs-mmc.c +++ b/trunk/drivers/mmc/host/mxs-mmc.c @@ -285,11 +285,11 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) writel(stat & 
MXS_MMC_IRQ_BITS, host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR); + spin_unlock(&host->lock); + if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) mmc_signal_sdio_irq(host->mmc); - spin_unlock(&host->lock); - if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) cmd->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) @@ -644,11 +644,6 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); writel(BM_SSP_CTRL1_SDIO_IRQ_EN, host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET); - - if (readl(host->base + HW_SSP_STATUS(host)) & - BM_SSP_STATUS_SDIO_IRQ) - mmc_signal_sdio_irq(host->mmc); - } else { writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); @@ -657,6 +652,11 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) } spin_unlock_irqrestore(&host->lock, flags); + + if (enable && readl(host->base + HW_SSP_STATUS(host)) & + BM_SSP_STATUS_SDIO_IRQ) + mmc_signal_sdio_irq(host->mmc); + } static const struct mmc_host_ops mxs_mmc_ops = { diff --git a/trunk/drivers/mmc/host/omap.c b/trunk/drivers/mmc/host/omap.c index 50e08f03aa65..a5999a74496a 100644 --- a/trunk/drivers/mmc/host/omap.c +++ b/trunk/drivers/mmc/host/omap.c @@ -668,7 +668,7 @@ mmc_omap_clk_timer(unsigned long data) static void mmc_omap_xfer_data(struct mmc_omap_host *host, int write) { - int n; + int n, nwords; if (host->buffer_bytes_left == 0) { host->sg_idx++; @@ -678,15 +678,23 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) n = 64; if (n > host->buffer_bytes_left) n = host->buffer_bytes_left; + + nwords = n / 2; + nwords += n & 1; /* handle odd number of bytes to transfer */ + host->buffer_bytes_left -= n; host->total_bytes_left -= n; host->data->bytes_xfered += n; if (write) { - __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } else { - __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } + + host->buffer += nwords; } static inline void mmc_omap_report_irq(u16 status) diff --git a/trunk/drivers/mmc/host/omap_hsmmc.c b/trunk/drivers/mmc/host/omap_hsmmc.c index 3a09f93cc3b6..686e256764c8 100644 --- a/trunk/drivers/mmc/host/omap_hsmmc.c +++ b/trunk/drivers/mmc/host/omap_hsmmc.c @@ -447,7 +447,7 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) OMAP_HSMMC_WRITE(host->base, SYSCTL, OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) - dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); + dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n"); } static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, diff --git a/trunk/drivers/mmc/host/sdhci-esdhc-imx.c b/trunk/drivers/mmc/host/sdhci-esdhc-imx.c index e23f8134591c..32f4a070551f 100644 --- a/trunk/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/trunk/drivers/mmc/host/sdhci-esdhc-imx.c @@ -315,7 +315,7 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) new_val = val & (SDHCI_CTRL_LED | \ SDHCI_CTRL_4BITBUS | \ SDHCI_CTRL_D3CD); - /* ensure the endianess */ + /* ensure the endianness */ new_val |= ESDHC_HOST_CONTROL_LE; /* bits 8&9 are reserved on mx25 */ if (!is_imx25_esdhc(imx_data)) { diff --git a/trunk/drivers/mmc/host/sdhci-esdhc.h b/trunk/drivers/mmc/host/sdhci-esdhc.h index b97b2f5dafdb..d25f9ab9a54d 
100644 --- a/trunk/drivers/mmc/host/sdhci-esdhc.h +++ b/trunk/drivers/mmc/host/sdhci-esdhc.h @@ -48,14 +48,14 @@ static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) int div = 1; u32 temp; + if (clock == 0) + goto out; + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - if (clock == 0) - goto out; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) pre_div *= 2; diff --git a/trunk/drivers/mmc/host/vub300.c b/trunk/drivers/mmc/host/vub300.c index 3135a1a5d75d..58eab9ac1d01 100644 --- a/trunk/drivers/mmc/host/vub300.c +++ b/trunk/drivers/mmc/host/vub300.c @@ -806,7 +806,7 @@ static void command_res_completed(struct urb *urb) * we suspect a buggy USB host controller */ } else if (!vub300->data) { - /* this means that the command (typically CMD52) suceeded */ + /* this means that the command (typically CMD52) succeeded */ } else if (vub300->resp.common.header_type != 0x02) { /* * this is an error response from the VUB300 chip diff --git a/trunk/drivers/mtd/maps/uclinux.c b/trunk/drivers/mtd/maps/uclinux.c index cfff454f628b..c3bb304eca07 100644 --- a/trunk/drivers/mtd/maps/uclinux.c +++ b/trunk/drivers/mtd/maps/uclinux.c @@ -19,14 +19,13 @@ #include #include #include +#include /****************************************************************************/ -extern char _ebss; - struct map_info uclinux_ram_map = { .name = "RAM", - .phys = (unsigned long)&_ebss, + .phys = (unsigned long)__bss_stop, .size = 0, }; diff --git a/trunk/drivers/mtd/mtdchar.c b/trunk/drivers/mtd/mtdchar.c index f2f482bec573..a6e74514e662 100644 --- a/trunk/drivers/mtd/mtdchar.c +++ b/trunk/drivers/mtd/mtdchar.c @@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file, } #endif +static inline unsigned long get_vm_size(struct vm_area_struct *vma) +{ + return vma->vm_end - vma->vm_start; +} + +static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) +{ + return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT; +} + +/* + * Set a new vm offset. + * + * Verify that the incoming offset really works as a page offset, + * and that the offset and size fit in a resource_size_t. + */ +static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off) +{ + pgoff_t pgoff = off >> PAGE_SHIFT; + if (off != (resource_size_t) pgoff << PAGE_SHIFT) + return -EINVAL; + if (off + get_vm_size(vma) - 1 < off) + return -EINVAL; + vma->vm_pgoff = pgoff; + return 0; +} + /* * set up a mapping for shared memory segments */ @@ -1132,20 +1159,29 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma) struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; struct map_info *map = mtd->priv; - unsigned long start; - unsigned long off; - u32 len; + resource_size_t start, off; + unsigned long len, vma_len; if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { - off = vma->vm_pgoff << PAGE_SHIFT; + off = get_vm_offset(vma); start = map->phys; len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); start &= PAGE_MASK; - if ((vma->vm_end - vma->vm_start + off) > len) + vma_len = get_vm_size(vma); + + /* Overflow in off+len? */ + if (vma_len + off < off) + return -EINVAL; + /* Does it fit in the mapping? */ + if (vma_len + off > len) return -EINVAL; off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; + /* Did that overflow? 
*/ + if (off < start) + return -EINVAL; + if (set_vm_offset(vma, off) < 0) + return -EINVAL; vma->vm_flags |= VM_IO | VM_RESERVED; #ifdef pgprot_noncached diff --git a/trunk/drivers/mtd/nand/Kconfig b/trunk/drivers/mtd/nand/Kconfig index 31bb7e5b504a..8ca417614c57 100644 --- a/trunk/drivers/mtd/nand/Kconfig +++ b/trunk/drivers/mtd/nand/Kconfig @@ -480,7 +480,7 @@ config MTD_NAND_NANDSIM config MTD_NAND_GPMI_NAND bool "GPMI NAND Flash Controller driver" - depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q) + depends on MTD_NAND && MXS_DMA help Enables NAND Flash support for IMX23 or IMX28. The GPMI controller is very powerful, with the help of BCH diff --git a/trunk/drivers/mtd/nand/omap2.c b/trunk/drivers/mtd/nand/omap2.c index e9309b3659e7..ac4fd756eda3 100644 --- a/trunk/drivers/mtd/nand/omap2.c +++ b/trunk/drivers/mtd/nand/omap2.c @@ -1245,7 +1245,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) goto out_release_mem_region; } else { struct dma_slave_config cfg; - int rc; memset(&cfg, 0, sizeof(cfg)); cfg.src_addr = info->phys_base; @@ -1254,10 +1253,10 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; cfg.src_maxburst = 16; cfg.dst_maxburst = 16; - rc = dmaengine_slave_config(info->dma, &cfg); - if (rc) { + err = dmaengine_slave_config(info->dma, &cfg); + if (err) { dev_err(&pdev->dev, "DMA engine slave config failed: %d\n", - rc); + err); goto out_release_mem_region; } info->nand.read_buf = omap_read_buf_dma_pref; diff --git a/trunk/drivers/mtd/ubi/vtbl.c b/trunk/drivers/mtd/ubi/vtbl.c index 437bc193e170..568307cc7caf 100644 --- a/trunk/drivers/mtd/ubi/vtbl.c +++ b/trunk/drivers/mtd/ubi/vtbl.c @@ -340,7 +340,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'. 
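The two vtbl.c hunks that follow switch kfree(new_aeb) to kmem_cache_free(). The attach-info entries are carved out of a dedicated slab cache (ai->aeb_slab_cache), so they have to be handed back to that same cache; kfree() is only valid for kmalloc()'d memory. A minimal sketch of the pairing, with made-up helper names and the allocation side stated as an assumption about the surrounding attach code rather than something shown in this patch:

/* Obtain an attach entry from the cache (assumed allocation path). */
static struct ubi_ainf_peb *example_get_aeb(struct ubi_attach_info *ai)
{
	return kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
}

/* Matching release -- what the hunks use in place of kfree(). */
static void example_put_aeb(struct ubi_attach_info *ai,
			    struct ubi_ainf_peb *aeb)
{
	kmem_cache_free(ai->aeb_slab_cache, aeb);
}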
*/ err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0); - kfree(new_aeb); + kmem_cache_free(ai->aeb_slab_cache, new_aeb); ubi_free_vid_hdr(ubi, vid_hdr); return err; @@ -353,7 +353,7 @@ static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai, list_add(&new_aeb->u.list, &ai->erase); goto retry; } - kfree(new_aeb); + kmem_cache_free(ai->aeb_slab_cache, new_aeb); out_free: ubi_free_vid_hdr(ubi, vid_hdr); return err; diff --git a/trunk/drivers/net/appletalk/cops.c b/trunk/drivers/net/appletalk/cops.c index 545c09ed9079..cff6f023c03a 100644 --- a/trunk/drivers/net/appletalk/cops.c +++ b/trunk/drivers/net/appletalk/cops.c @@ -996,9 +996,7 @@ static int __init cops_module_init(void) printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n", cardname); cops_dev = cops_probe(-1); - if (IS_ERR(cops_dev)) - return PTR_ERR(cops_dev); - return 0; + return PTR_RET(cops_dev); } static void __exit cops_module_exit(void) diff --git a/trunk/drivers/net/appletalk/ltpc.c b/trunk/drivers/net/appletalk/ltpc.c index 0910dce3996d..b5782cdf0bca 100644 --- a/trunk/drivers/net/appletalk/ltpc.c +++ b/trunk/drivers/net/appletalk/ltpc.c @@ -1243,9 +1243,7 @@ static int __init ltpc_module_init(void) "ltpc: Autoprobing is not recommended for modules\n"); dev_ltpc = ltpc_probe(); - if (IS_ERR(dev_ltpc)) - return PTR_ERR(dev_ltpc); - return 0; + return PTR_RET(dev_ltpc); } module_init(ltpc_module_init); #endif diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c index 6fae5f3ec7f6..d688a8af432c 100644 --- a/trunk/drivers/net/bonding/bond_main.c +++ b/trunk/drivers/net/bonding/bond_main.c @@ -398,7 +398,7 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping; - if (unlikely(netpoll_tx_running(slave_dev))) + if (unlikely(netpoll_tx_running(bond->dev))) bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); else dev_queue_xmit(skb); @@ -1235,12 +1235,12 @@ static inline int slave_enable_netpoll(struct slave *slave) struct netpoll *np; int err = 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); + np = kzalloc(sizeof(*np), GFP_ATOMIC); err = -ENOMEM; if (!np) goto out; - err = __netpoll_setup(np, slave->dev); + err = __netpoll_setup(np, slave->dev, GFP_ATOMIC); if (err) { kfree(np); goto out; @@ -1257,9 +1257,7 @@ static inline void slave_disable_netpoll(struct slave *slave) return; slave->np = NULL; - synchronize_rcu_bh(); - __netpoll_cleanup(np); - kfree(np); + __netpoll_free_rcu(np); } static inline bool slave_dev_support_netpoll(struct net_device *slave_dev) { @@ -1292,7 +1290,7 @@ static void bond_netpoll_cleanup(struct net_device *bond_dev) read_unlock(&bond->lock); } -static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int bond_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, gfp_t gfp) { struct bonding *bond = netdev_priv(dev); struct slave *slave; diff --git a/trunk/drivers/net/can/janz-ican3.c b/trunk/drivers/net/can/janz-ican3.c index 98ee43819911..7edadee487ba 100644 --- a/trunk/drivers/net/can/janz-ican3.c +++ b/trunk/drivers/net/can/janz-ican3.c @@ -1391,7 +1391,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id) */ static int ican3_reset_module(struct ican3_dev *mod) { - u8 val = 1 << mod->num; unsigned long start; u8 runold, runnew; @@ -1405,8 +1404,7 @@ static int ican3_reset_module(struct ican3_dev *mod) runold = ioread8(mod->dpm + 
TARGET_RUNNING); /* reset the module */ - iowrite8(val, &mod->ctrl->reset_assert); - iowrite8(val, &mod->ctrl->reset_deassert); + iowrite8(0x00, &mod->dpmctrl->hwreset); /* wait until the module has finished resetting and is running */ start = jiffies; diff --git a/trunk/drivers/net/can/mcp251x.c b/trunk/drivers/net/can/mcp251x.c index a580db29e503..26e7129332ab 100644 --- a/trunk/drivers/net/can/mcp251x.c +++ b/trunk/drivers/net/can/mcp251x.c @@ -83,6 +83,11 @@ #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n)) #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94) #define INSTRUCTION_RESET 0xC0 +#define RTS_TXB0 0x01 +#define RTS_TXB1 0x02 +#define RTS_TXB2 0x04 +#define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07)) + /* MPC251x registers */ #define CANSTAT 0x0e @@ -397,6 +402,7 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, int tx_buf_idx) { + struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev); u32 sid, eid, exide, rtr; u8 buf[SPI_TRANSFER_BUF_LEN]; @@ -418,7 +424,10 @@ static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc; memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc); mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx); - mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ); + + /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ + priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); + mcp251x_spi_trans(priv->spi, 1); } static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, diff --git a/trunk/drivers/net/can/sja1000/sja1000_platform.c b/trunk/drivers/net/can/sja1000/sja1000_platform.c index 4f50145f6483..662c5f7eb0c5 100644 --- a/trunk/drivers/net/can/sja1000/sja1000_platform.c +++ b/trunk/drivers/net/can/sja1000/sja1000_platform.c @@ -109,7 +109,9 @@ static int sp_probe(struct platform_device *pdev) priv = netdev_priv(dev); dev->irq = res_irq->start; - priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED); + priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; + if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) + priv->irq_flags |= IRQF_SHARED; priv->reg_base = addr; /* The CAN clock frequency is half the oscillator clock frequency */ priv->can.clock.freq = pdata->osc_freq / 2; diff --git a/trunk/drivers/net/can/softing/softing_fw.c b/trunk/drivers/net/can/softing/softing_fw.c index 310596175676..b595d3422b9f 100644 --- a/trunk/drivers/net/can/softing/softing_fw.c +++ b/trunk/drivers/net/can/softing/softing_fw.c @@ -150,7 +150,7 @@ int softing_load_fw(const char *file, struct softing *card, const uint8_t *mem, *end, *dat; uint16_t type, len; uint32_t addr; - uint8_t *buf = NULL; + uint8_t *buf = NULL, *new_buf; int buflen = 0; int8_t type_end = 0; @@ -199,11 +199,12 @@ int softing_load_fw(const char *file, struct softing *card, if (len > buflen) { /* align buflen */ buflen = (len + (1024-1)) & ~(1024-1); - buf = krealloc(buf, buflen, GFP_KERNEL); - if (!buf) { + new_buf = krealloc(buf, buflen, GFP_KERNEL); + if (!new_buf) { ret = -ENOMEM; goto failed; } + buf = new_buf; } /* verify record data */ memcpy_fromio(buf, &dpram[addr + offset], len); diff --git a/trunk/drivers/net/can/ti_hecc.c b/trunk/drivers/net/can/ti_hecc.c index 527dbcf95335..9ded21e79db5 100644 --- a/trunk/drivers/net/can/ti_hecc.c +++ b/trunk/drivers/net/can/ti_hecc.c @@ -984,12 +984,12 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev) struct net_device *ndev = 
platform_get_drvdata(pdev); struct ti_hecc_priv *priv = netdev_priv(ndev); + unregister_candev(ndev); clk_disable(priv->clk); clk_put(priv->clk); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iounmap(priv->base); release_mem_region(res->start, resource_size(res)); - unregister_candev(ndev); free_candev(ndev); platform_set_drvdata(pdev, NULL); diff --git a/trunk/drivers/net/cris/eth_v10.c b/trunk/drivers/net/cris/eth_v10.c index f0c8bd54ce29..021d69c5d9bc 100644 --- a/trunk/drivers/net/cris/eth_v10.c +++ b/trunk/drivers/net/cris/eth_v10.c @@ -1712,7 +1712,7 @@ e100_set_network_leds(int active) static void e100_netpoll(struct net_device* netdev) { - e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev); } #endif diff --git a/trunk/drivers/net/ethernet/3com/typhoon.c b/trunk/drivers/net/ethernet/3com/typhoon.c index b15366635147..bb9670f29b59 100644 --- a/trunk/drivers/net/ethernet/3com/typhoon.c +++ b/trunk/drivers/net/ethernet/3com/typhoon.c @@ -364,7 +364,7 @@ typhoon_inc_rxfree_index(u32 *index, const int count) static inline void typhoon_inc_tx_index(u32 *index, const int count) { - /* if we start using the Hi Tx ring, this needs updateing */ + /* if we start using the Hi Tx ring, this needs updating */ typhoon_inc_index(index, count, TXLO_ENTRIES); } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2.c b/trunk/drivers/net/ethernet/broadcom/bnx2.c index 79cebd8525ce..e48312f2305d 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2.c @@ -8564,7 +8564,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; error: - iounmap(bp->regview); + pci_iounmap(pdev, bp->regview); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index 77bcd4cb4ffb..eac25236856c 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h @@ -1278,7 +1278,7 @@ struct bnx2x { #define BNX2X_FW_RX_ALIGN_START (1UL << BNX2X_RX_ALIGN_SHIFT) #define BNX2X_FW_RX_ALIGN_END \ - max(1UL << BNX2X_RX_ALIGN_SHIFT, \ + max_t(u64, 1UL << BNX2X_RX_ALIGN_SHIFT, \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define BNX2X_PXP_DRAM_ALIGN (BNX2X_RX_ALIGN_SHIFT - 5) @@ -1458,7 +1458,7 @@ struct bnx2x { int fw_stats_req_sz; /* - * FW statistics data shortcut (points at the begining of + * FW statistics data shortcut (points at the beginning of * fw_stats buffer + fw_stats_req_sz). */ struct bnx2x_fw_stats_data *fw_stats_data; @@ -1708,9 +1708,6 @@ struct bnx2x_func_init_params { continue; \ else -#define for_each_napi_rx_queue(bp, var) \ - for ((var) = 0; (var) < bp->num_napi_queues; (var)++) - /* Skip OOO FP */ #define for_each_tx_queue(bp, var) \ for ((var) = 0; (var) < BNX2X_NUM_QUEUES(bp); (var)++) \ diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index e879e19eb0d6..e8e97a7d1d06 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -662,14 +662,16 @@ void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe, struct bnx2x_fastpath *fp, struct bnx2x_eth_q_stats *qstats) { - /* Do nothing if no IP/L4 csum validation was done */ - + /* Do nothing if no L4 csum validation was done. + * We do not check whether IP csum was validated. 
For IPv4 we assume + * that if the card got as far as validating the L4 csum, it also + * validated the IP csum. IPv6 has no IP csum. + */ if (cqe->fast_path_cqe.status_flags & - (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG | - ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)) + ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) return; - /* If both IP/L4 validation were done, check if an error was found. */ + /* If L4 validation was done, check if an error was found. */ if (cqe->fast_path_cqe.type_error_flags & (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG | @@ -2046,6 +2048,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) */ bnx2x_setup_tc(bp->dev, bp->max_cos); + /* Add all NAPI objects */ + bnx2x_add_all_napi(bp); bnx2x_napi_enable(bp); /* set pf load just before approaching the MCP */ @@ -2408,6 +2412,8 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode) /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); /* Release IRQs */ bnx2x_free_irq(bp); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index dfa757e74296..dfd86a55f1dc 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -710,17 +710,15 @@ static inline u16 bnx2x_tx_avail(struct bnx2x *bp, prod = txdata->tx_bd_prod; cons = txdata->tx_bd_cons; - /* NUM_TX_RINGS = number of "next-page" entries - It will be used as a threshold */ - used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS; + used = SUB_S16(prod, cons); #ifdef BNX2X_STOP_ON_ERROR WARN_ON(used < 0); - WARN_ON(used > bp->tx_ring_size); - WARN_ON((bp->tx_ring_size - used) > MAX_TX_AVAIL); + WARN_ON(used > txdata->tx_ring_size); + WARN_ON((txdata->tx_ring_size - used) > MAX_TX_AVAIL); #endif - return (s16)(bp->tx_ring_size) - used; + return (s16)(txdata->tx_ring_size) - used; } static inline int bnx2x_tx_queue_has_work(struct bnx2x_fp_txdata *txdata) @@ -792,7 +790,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp) bp->num_napi_queues = bp->num_queues; /* Add NAPI objects */ - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), bnx2x_poll, BNX2X_NAPI_WEIGHT); } @@ -801,7 +799,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp) { int i; - for_each_napi_rx_queue(bp, i) + for_each_rx_queue(bp, i) netif_napi_del(&bnx2x_fp(bp, i, napi)); } @@ -1088,6 +1086,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp, txdata->txq_index = txq_index; txdata->tx_cons_sb = tx_cons_sb; txdata->parent_fp = fp; + txdata->tx_ring_size = IS_FCOE_FP(fp) ? 
MAX_TX_AVAIL : bp->tx_ring_size; DP(NETIF_MSG_IFUP, "created tx data cid %d, txq %d\n", txdata->cid, txdata->txq_index); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h index 3e4cff9b1ebe..b926f58e983b 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dump.h @@ -401,11 +401,11 @@ static const struct reg_addr reg_addrs[] = { { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE }, { 0x78000, 8192, RI_E3E3B0_OFFLINE }, - { 0x85000, 3, RI_ALL_ONLINE }, - { 0x8501c, 7, RI_ALL_ONLINE }, - { 0x85048, 1, RI_ALL_ONLINE }, - { 0x85200, 32, RI_ALL_ONLINE }, - { 0xb0000, 16384, RI_E1H_ONLINE }, + { 0x85000, 3, RI_ALL_OFFLINE }, + { 0x8501c, 7, RI_ALL_OFFLINE }, + { 0x85048, 1, RI_ALL_OFFLINE }, + { 0x85200, 32, RI_ALL_OFFLINE }, + { 0xb0000, 16384, RI_E1H_OFFLINE }, { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2E3E3B0_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE }, @@ -581,17 +581,12 @@ static const struct reg_addr reg_addrs[] = { { 0x140188, 3, RI_E1E1HE2E3_ONLINE }, { 0x140194, 13, RI_ALL_ONLINE }, { 0x140200, 6, RI_E1E1HE2E3_ONLINE }, - { 0x140220, 4, RI_E2E3_ONLINE }, - { 0x140240, 4, RI_E2E3_ONLINE }, { 0x140260, 4, RI_E2E3_ONLINE }, { 0x140280, 4, RI_E2E3_ONLINE }, - { 0x1402a0, 4, RI_E2E3_ONLINE }, - { 0x1402c0, 4, RI_E2E3_ONLINE }, { 0x1402e0, 2, RI_E2E3_ONLINE }, { 0x1402e8, 2, RI_E2E3E3B0_ONLINE }, { 0x1402f0, 9, RI_E2E3_ONLINE }, { 0x140314, 44, RI_E3B0_ONLINE }, - { 0x1403d0, 70, RI_E3B0_ONLINE }, { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE }, { 0x14c000, 4, RI_E1E1H_ONLINE }, @@ -704,7 +699,6 @@ static const struct reg_addr reg_addrs[] = { { 0x180398, 1, RI_E2E3E3B0_ONLINE }, { 0x1803a0, 5, RI_E2E3E3B0_ONLINE }, { 0x1803b4, 2, RI_E3E3B0_ONLINE }, - { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE }, { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, @@ -800,9 +794,9 @@ static const struct reg_addr reg_addrs[] = { { 0x1b905c, 1, RI_E3E3B0_ONLINE }, { 0x1b9064, 1, RI_E3B0_ONLINE }, { 0x1b9080, 10, RI_E3B0_ONLINE }, - { 0x1b9400, 14, RI_E2E3E3B0_ONLINE }, - { 0x1b943c, 19, RI_E2E3E3B0_ONLINE }, - { 0x1b9490, 10, RI_E2E3E3B0_ONLINE }, + { 0x1b9400, 14, RI_E2E3E3B0_OFFLINE }, + { 0x1b943c, 19, RI_E2E3E3B0_OFFLINE }, + { 0x1b9490, 10, RI_E2E3E3B0_OFFLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1HE2E3E3B0_ONLINE }, @@ -814,7 +808,6 @@ static const struct reg_addr reg_addrs[] = { { 0x200398, 1, RI_E2E3E3B0_ONLINE }, { 0x2003a0, 1, RI_E2E3E3B0_ONLINE }, { 0x2003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, @@ -921,7 +914,6 @@ static const struct reg_addr reg_addrs[] = { { 0x280398, 1, RI_E2E3E3B0_ONLINE }, { 0x2803a0, 1, RI_E2E3E3B0_ONLINE }, { 0x2803a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, @@ -1031,7 +1023,6 @@ static const struct reg_addr reg_addrs[] = { { 0x300398, 1, RI_E2E3E3B0_ONLINE }, { 0x3003a0, 1, RI_E2E3E3B0_ONLINE }, { 0x3003a8, 2, RI_E2E3E3B0_ONLINE }, - { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE }, { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c 
b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index fc4e0e3885b0..ebf40cd7aa10 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -775,7 +775,7 @@ static void bnx2x_get_regs(struct net_device *dev, struct bnx2x *bp = netdev_priv(dev); struct dump_hdr dump_hdr = {0}; - regs->version = 0; + regs->version = 1; memset(p, 0, regs->len); if (!netif_running(bp->dev)) @@ -1587,6 +1587,12 @@ static int bnx2x_set_pauseparam(struct net_device *dev, bp->link_params.req_flow_ctrl[cfg_idx] = BNX2X_FLOW_CTRL_AUTO; } + bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_NONE; + if (epause->rx_pause) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_RX; + + if (epause->tx_pause) + bp->link_params.req_fc_auto_adv |= BNX2X_FLOW_CTRL_TX; } DP(BNX2X_MSG_ETHTOOL, @@ -2888,11 +2894,9 @@ static void bnx2x_get_channels(struct net_device *dev, */ static void bnx2x_change_num_queues(struct bnx2x *bp, int num_rss) { - bnx2x_del_all_napi(bp); bnx2x_disable_msi(bp); BNX2X_NUM_QUEUES(bp) = num_rss + NON_ETH_CONTEXT_USE; bnx2x_set_int_mode(bp); - bnx2x_add_all_napi(bp); } /** diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c index f4beb46c4709..b046beb435b2 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c @@ -2667,9 +2667,11 @@ int bnx2x_update_pfc(struct link_params *params, return bnx2x_status; DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n"); - if (CHIP_IS_E3(bp)) - bnx2x_update_pfc_xmac(params, vars, 0); - else { + + if (CHIP_IS_E3(bp)) { + if (vars->mac_type == MAC_TYPE_XMAC) + bnx2x_update_pfc_xmac(params, vars, 0); + } else { val = REG_RD(bp, MISC_REG_RESET_REG_2); if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port)) @@ -5432,7 +5434,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, switch (speed_mask) { case GP_STATUS_10M: vars->line_speed = SPEED_10; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_10TFD; else vars->link_status |= LINK_10THD; @@ -5440,7 +5442,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_100M: vars->line_speed = SPEED_100; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_100TXFD; else vars->link_status |= LINK_100TXHD; @@ -5449,7 +5451,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_1G: case GP_STATUS_1G_KX: vars->line_speed = SPEED_1000; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_1000TFD; else vars->link_status |= LINK_1000THD; @@ -5457,7 +5459,7 @@ static int bnx2x_get_link_speed_duplex(struct bnx2x_phy *phy, case GP_STATUS_2_5G: vars->line_speed = SPEED_2500; - if (vars->duplex == DUPLEX_FULL) + if (is_duplex == DUPLEX_FULL) vars->link_status |= LINK_2500TFD; else vars->link_status |= LINK_2500THD; @@ -5531,6 +5533,7 @@ static int bnx2x_link_settings_status(struct bnx2x_phy *phy, if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) { if (SINGLE_MEDIA_DIRECT(params)) { + vars->duplex = duplex; bnx2x_flow_ctrl_resolve(phy, params, vars, gp_status); if (phy->req_line_speed == SPEED_AUTO_NEG) bnx2x_xgxs_an_resolve(phy, params, vars, @@ -5625,6 +5628,7 @@ static int bnx2x_warpcore_read_status(struct bnx2x_phy *phy, LINK_STATUS_PARALLEL_DETECTION_USED; } bnx2x_ext_phy_resolve_fc(phy, params, vars); + 
vars->duplex = duplex; } } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index dd451c3dd83d..0875ecfe3372 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -4041,20 +4041,6 @@ static bool bnx2x_get_load_status(struct bnx2x *bp, int engine) return val != 0; } -/* - * Reset the load status for the current engine. - */ -static void bnx2x_clear_load_status(struct bnx2x *bp) -{ - u32 val; - u32 mask = (BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK : - BNX2X_PATH0_LOAD_CNT_MASK); - bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); - val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG); - REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~mask)); - bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG); -} - static void _print_next_block(int idx, const char *blk) { pr_cont("%s%s", idx ? ", " : "", blk); @@ -7575,8 +7561,14 @@ int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac, } rc = bnx2x_config_vlan_mac(bp, &ramrod_param); - if (rc < 0) + + if (rc == -EEXIST) { + DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc); + /* do not treat adding same MAC as error */ + rc = 0; + } else if (rc < 0) BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del")); + return rc; } @@ -8441,6 +8433,8 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) /* Disable HW interrupts, NAPI */ bnx2x_netif_stop(bp, 1); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); /* Release IRQs */ bnx2x_free_irq(bp); @@ -9384,32 +9378,24 @@ static int __devinit bnx2x_prev_mark_path(struct bnx2x *bp) return rc; } -static bool __devinit bnx2x_can_flr(struct bnx2x *bp) -{ - int pos; - u32 cap; - struct pci_dev *dev = bp->pdev; - - pos = pci_pcie_cap(dev); - if (!pos) - return false; - - pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap); - if (!(cap & PCI_EXP_DEVCAP_FLR)) - return false; - - return true; -} - static int __devinit bnx2x_do_flr(struct bnx2x *bp) { int i, pos; u16 status; struct pci_dev *dev = bp->pdev; - /* probe the capability first */ - if (bnx2x_can_flr(bp)) - return -ENOTTY; + + if (CHIP_IS_E1x(bp)) { + BNX2X_DEV_INFO("FLR not supported in E1/E1H\n"); + return -EINVAL; + } + + /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */ + if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { + BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", + bp->common.bc_ver); + return -EINVAL; + } pos = pci_pcie_cap(dev); if (!pos) @@ -9429,12 +9415,8 @@ static int __devinit bnx2x_do_flr(struct bnx2x *bp) "transaction is not cleared; proceeding with reset anyway\n"); clear: - if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) { - BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n", - bp->common.bc_ver); - return -EINVAL; - } + BNX2X_DEV_INFO("Initiating FLR\n"); bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0); return 0; @@ -9454,8 +9436,21 @@ static int __devinit bnx2x_prev_unload_uncommon(struct bnx2x *bp) * the one required, then FLR will be sufficient to clean any residue * left by previous driver */ - if (bnx2x_test_firmware_version(bp, false) && bnx2x_can_flr(bp)) - return bnx2x_do_flr(bp); + rc = bnx2x_test_firmware_version(bp, false); + + if (!rc) { + /* fw version is good */ + BNX2X_DEV_INFO("FW version matches our own. 
Attempting FLR\n"); + rc = bnx2x_do_flr(bp); + } + + if (!rc) { + /* FLR was performed */ + BNX2X_DEV_INFO("FLR successful\n"); + return 0; + } + + BNX2X_DEV_INFO("Could not FLR\n"); /* Close the MCP request, return failure*/ rc = bnx2x_prev_mcp_done(bp); @@ -9836,12 +9831,13 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp) } #ifdef CONFIG_PCI_MSI - /* - * It's expected that number of CAM entries for this functions is equal - * to the number evaluated based on the MSI-X table size. We want a - * harsh warning if these values are different! + /* Due to new PF resource allocation by MFW T7.4 and above, it's + * optional that number of CAM entries will not be equal to the value + * advertised in PCI. + * Driver should use the minimal value of both as the actual status + * block count */ - WARN_ON(bp->igu_sb_cnt != igu_sb_cnt); + bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt); #endif if (igu_sb_cnt == 0) @@ -10305,13 +10301,11 @@ static void __devinit bnx2x_get_fcoe_info(struct bnx2x *bp) dev_info.port_hw_config[port]. fcoe_wwn_node_name_lower); } else if (!IS_MF_SD(bp)) { - u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg); - /* * Read the WWN info only if the FCoE feature is enabled for * this function. */ - if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) + if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp)) bnx2x_get_ext_wwn_info(bp, func); } else if (IS_MF_FCOE_SD(bp)) @@ -11084,7 +11078,14 @@ static int bnx2x_set_uc_list(struct bnx2x *bp) netdev_for_each_uc_addr(ha, dev) { rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true, BNX2X_UC_LIST_MAC, &ramrod_flags); - if (rc < 0) { + if (rc == -EEXIST) { + DP(BNX2X_MSG_SP, + "Failed to schedule ADD operations: %d\n", rc); + /* do not treat adding same MAC as error */ + rc = 0; + + } else if (rc < 0) { + BNX2X_ERR("Failed to schedule ADD operations: %d\n", rc); return rc; @@ -11242,10 +11243,12 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) static void poll_bnx2x(struct net_device *dev) { struct bnx2x *bp = netdev_priv(dev); + int i; - disable_irq(bp->pdev->irq); - bnx2x_interrupt(bp->pdev->irq, dev); - enable_irq(bp->pdev->irq); + for_each_eth_queue(bp, i) { + struct bnx2x_fastpath *fp = &bp->fp[i]; + napi_schedule(&bnx2x_fp(bp, fp->index, napi)); + } } #endif @@ -11427,9 +11430,6 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev, if (!chip_is_e1x) REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); - /* Reset the load counter */ - bnx2x_clear_load_status(bp); - dev->watchdog_timeo = TX_TIMEOUT; dev->netdev_ops = &bnx2x_netdev_ops; @@ -11915,9 +11915,6 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev, */ bnx2x_set_int_mode(bp); - /* Add all NAPI objects */ - bnx2x_add_all_napi(bp); - rc = register_netdev(dev); if (rc) { dev_err(&pdev->dev, "Cannot register net device\n"); @@ -11992,9 +11989,6 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev) unregister_netdev(dev); - /* Delete all NAPI objects */ - bnx2x_del_all_napi(bp); - /* Power on: we can't let PCI layer write to us while we are in D3 */ bnx2x_set_power_state(bp, PCI_D0); @@ -12041,6 +12035,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp) bnx2x_tx_disable(bp); bnx2x_netif_stop(bp, 0); + /* Delete all NAPI objects */ + bnx2x_del_all_napi(bp); del_timer_sync(&bp->timer); diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c index 734fd87cd990..62f754bd0dfe 100644 --- 
a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c @@ -2485,6 +2485,7 @@ static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp, break; default: + kfree(new_cmd); BNX2X_ERR("Unknown command: %d\n", cmd); return -EINVAL; } diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h index f83e033da6da..acf2fe4ca608 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h @@ -1321,7 +1321,7 @@ void bnx2x_init_mcast_obj(struct bnx2x *bp, * the current command will be enqueued to the tail of the * pending commands list. * - * Return: 0 is operation was sucessfull and there are no pending completions, + * Return: 0 is operation was successfull and there are no pending completions, * negative if there were errors, positive if there are pending * completions. */ diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 332db64dd5be..a1d0446b39b3 100644 --- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c @@ -101,6 +101,11 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) if (CHIP_REV_IS_SLOW(bp)) return; + /* Update MCP's statistics if possible */ + if (bp->func_stx) + memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, + sizeof(bp->func_stats)); + /* loader */ if (bp->executer_idx) { int loader_idx = PMF_DMAE_C(bp); @@ -128,8 +133,6 @@ static void bnx2x_hw_stats_post(struct bnx2x *bp) } else if (bp->func_stx) { *stats_comp = 0; - memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, - sizeof(bp->func_stats)); bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); } } @@ -1151,9 +1154,11 @@ static void bnx2x_stats_update(struct bnx2x *bp) if (bp->port.pmf) bnx2x_hw_stats_update(bp); - if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { - BNX2X_ERR("storm stats were not updated for 3 times\n"); - bnx2x_panic(); + if (bnx2x_storm_stats_update(bp)) { + if (bp->stats_pending++ == 3) { + BNX2X_ERR("storm stats were not updated for 3 times\n"); + bnx2x_panic(); + } return; } diff --git a/trunk/drivers/net/ethernet/cadence/at91_ether.c b/trunk/drivers/net/ethernet/cadence/at91_ether.c index 77884191a8c6..4e980a7886fb 100644 --- a/trunk/drivers/net/ethernet/cadence/at91_ether.c +++ b/trunk/drivers/net/ethernet/cadence/at91_ether.c @@ -1086,7 +1086,7 @@ static int __init at91ether_probe(struct platform_device *pdev) /* Clock */ lp->ether_clk = clk_get(&pdev->dev, "ether_clk"); if (IS_ERR(lp->ether_clk)) { - res = -ENODEV; + res = PTR_ERR(lp->ether_clk); goto err_ioumap; } clk_enable(lp->ether_clk); diff --git a/trunk/drivers/net/ethernet/cirrus/cs89x0.c b/trunk/drivers/net/ethernet/cirrus/cs89x0.c index 845b2020f291..138446957786 100644 --- a/trunk/drivers/net/ethernet/cirrus/cs89x0.c +++ b/trunk/drivers/net/ethernet/cirrus/cs89x0.c @@ -1243,6 +1243,7 @@ static void set_multicast_list(struct net_device *dev) { struct net_local *lp = netdev_priv(dev); unsigned long flags; + u16 cfg; spin_lock_irqsave(&lp->lock, flags); if (dev->flags & IFF_PROMISC) @@ -1260,11 +1261,10 @@ static void set_multicast_list(struct net_device *dev) /* in promiscuous mode, we accept errored packets, * so we have to enable interrupts on them also */ - writereg(dev, PP_RxCFG, - (lp->curr_rx_cfg | - (lp->rx_mode == RX_ALL_ACCEPT) - ? 
(RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL) - : 0)); + cfg = lp->curr_rx_cfg; + if (lp->rx_mode == RX_ALL_ACCEPT) + cfg |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL; + writereg(dev, PP_RxCFG, cfg); spin_unlock_irqrestore(&lp->lock, flags); } diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c index 7fac97b4bb59..8c63d06ab12b 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -259,7 +259,7 @@ int be_process_mcc(struct be_adapter *adapter) int num = 0, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; - spin_lock_bh(&adapter->mcc_cq_lock); + spin_lock(&adapter->mcc_cq_lock); while ((compl = be_mcc_compl_get(adapter))) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) { /* Interpret flags as an async trailer */ @@ -280,7 +280,7 @@ int be_process_mcc(struct be_adapter *adapter) if (num) be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); - spin_unlock_bh(&adapter->mcc_cq_lock); + spin_unlock(&adapter->mcc_cq_lock); return status; } @@ -295,7 +295,9 @@ static int be_mcc_wait_compl(struct be_adapter *adapter) if (be_error(adapter)) return -EIO; + local_bh_disable(); status = be_process_mcc(adapter); + local_bh_enable(); if (atomic_read(&mcc_obj->q.used) == 0) break; diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c index c60de89b6669..78b8aa8069f0 100644 --- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c +++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c @@ -1948,7 +1948,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter) if (adapter->num_rx_qs != MAX_RX_QS) dev_info(&adapter->pdev->dev, - "Created only %d receive queues", adapter->num_rx_qs); + "Created only %d receive queues\n", adapter->num_rx_qs); return 0; } @@ -3763,7 +3763,9 @@ static void be_worker(struct work_struct *work) /* when interrupts are not yet enabled, just reap any pending * mcc completions */ if (!netif_running(adapter->netdev)) { + local_bh_disable(); be_process_mcc(adapter); + local_bh_enable(); goto reschedule; } diff --git a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c index 0f2d1a710909..151453309401 100644 --- a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c +++ b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c @@ -174,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c index 55bb867258e6..cdf702a59485 100644 --- a/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c +++ b/trunk/drivers/net/ethernet/freescale/fs_enet/mii-fec.c @@ -137,8 +137,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start); fec->fecp = ioremap(res.start, resource_size(&res)); - if (!fec->fecp) + if (!fec->fecp) { + ret = -ENOMEM; goto out_fec; + } if (get_bus_freq) { clock = get_bus_freq(ofdev->dev.of_node); @@ -172,8 +174,10 @@ static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev) new_bus->phy_mask = ~0; 
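The cs89x0 set_multicast_list() hunk above replaces a conditional that was buried inside the writereg() argument list; the original expression mis-parses because '|' binds more tightly than '?:', so the base RX configuration bits were folded into the condition instead of kept in the written value. A minimal standalone sketch of the mis-parse and of the fixed form (the flag values below are stand-ins, not the real PP_RxCFG bits):

#include <stdio.h>
#include <stdint.h>

#define RX_OK_ENBL          0x0100   /* made-up stand-ins for PP_RxCFG bits */
#define RX_CRC_ERROR_ENBL   0x1000
#define RX_RUNT_ENBL        0x2000
#define RX_EXTRA_DATA_ENBL  0x4000

int main(void)
{
	uint16_t curr_rx_cfg = RX_OK_ENBL;   /* bits that must always be kept */
	int accept_all = 0;                  /* not in accept-all mode */

	/* Original form: '|' binds tighter than '?:', so this is really
	 * (curr_rx_cfg | accept_all) ? ERR_BITS : 0 -- the base config is
	 * dropped and only the error-enable bits (or nothing) survive. */
	uint16_t buggy = curr_rx_cfg | accept_all
			 ? (RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL)
			 : 0;

	/* Fixed form, as in the patch: start from the base config and OR in
	 * the extra bits only when accepting all packets. */
	uint16_t fixed = curr_rx_cfg;
	if (accept_all)
		fixed |= RX_CRC_ERROR_ENBL | RX_RUNT_ENBL | RX_EXTRA_DATA_ENBL;

	printf("buggy = 0x%04x, fixed = 0x%04x\n",
	       (unsigned)buggy, (unsigned)fixed);
	return 0;
}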
new_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); - if (!new_bus->irq) + if (!new_bus->irq) { + ret = -ENOMEM; goto out_unmap_regs; + } new_bus->parent = &ofdev->dev; dev_set_drvdata(&ofdev->dev, new_bus); diff --git a/trunk/drivers/net/ethernet/freescale/gianfar.c b/trunk/drivers/net/ethernet/freescale/gianfar.c index 4605f7246687..d3233f59a82e 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar.c @@ -1041,7 +1041,7 @@ static int gfar_probe(struct platform_device *ofdev) if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) { dev->hw_features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; - dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; + dev->features |= NETIF_F_HW_VLAN_RX; } if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) { diff --git a/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c b/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c index 8971921cc1c8..ab6762caa957 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1773,6 +1773,7 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, } int gfar_phc_index = -1; +EXPORT_SYMBOL(gfar_phc_index); static int gfar_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) diff --git a/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c b/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c index c08e5d40fecb..0daa66b8eca0 100644 --- a/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c +++ b/trunk/drivers/net/ethernet/freescale/gianfar_ptp.c @@ -515,7 +515,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) err = PTR_ERR(etsects->clock); goto no_clock; } - gfar_phc_clock = ptp_clock_index(etsects->clock); + gfar_phc_index = ptp_clock_index(etsects->clock); dev_set_drvdata(&dev->dev, etsects); @@ -539,7 +539,7 @@ static int gianfar_ptp_remove(struct platform_device *dev) gfar_write(&etsects->regs->tmr_temask, 0); gfar_write(&etsects->regs->tmr_ctrl, 0); - gfar_phc_clock = -1; + gfar_phc_index = -1; ptp_clock_unregister(etsects->clock); iounmap(etsects->regs); release_resource(etsects->rsrc); diff --git a/trunk/drivers/net/ethernet/i825xx/znet.c b/trunk/drivers/net/ethernet/i825xx/znet.c index bd1f1ef91e19..ba4e0cea3506 100644 --- a/trunk/drivers/net/ethernet/i825xx/znet.c +++ b/trunk/drivers/net/ethernet/i825xx/znet.c @@ -139,8 +139,11 @@ struct znet_private { /* Only one can be built-in;-> */ static struct net_device *znet_dev; +#define NETIDBLK_MAGIC "NETIDBLK" +#define NETIDBLK_MAGIC_SIZE 8 + struct netidblk { - char magic[8]; /* The magic number (string) "NETIDBLK" */ + char magic[NETIDBLK_MAGIC_SIZE]; /* The magic number (string) "NETIDBLK" */ unsigned char netid[8]; /* The physical station address */ char nettype, globalopt; char vendor[8]; /* The machine vendor and product name. */ @@ -373,14 +376,16 @@ static int __init znet_probe (void) struct znet_private *znet; struct net_device *dev; char *p; + char *plast = phys_to_virt(0x100000 - NETIDBLK_MAGIC_SIZE); int err = -ENOMEM; /* This code scans the region 0xf0000 to 0xfffff for a "NETIDBLK". 
*/ - for(p = (char *)phys_to_virt(0xf0000); p < (char *)phys_to_virt(0x100000); p++) - if (*p == 'N' && strncmp(p, "NETIDBLK", 8) == 0) + for(p = (char *)phys_to_virt(0xf0000); p <= plast; p++) + if (*p == 'N' && + strncmp(p, NETIDBLK_MAGIC, NETIDBLK_MAGIC_SIZE) == 0) break; - if (p >= (char *)phys_to_virt(0x100000)) { + if (p > plast) { if (znet_debug > 1) printk(KERN_INFO "No Z-Note ethernet adaptor found.\n"); return -ENODEV; diff --git a/trunk/drivers/net/ethernet/ibm/ibmveth.c b/trunk/drivers/net/ethernet/ibm/ibmveth.c index 9010cea68bc3..b68d28a130e6 100644 --- a/trunk/drivers/net/ethernet/ibm/ibmveth.c +++ b/trunk/drivers/net/ethernet/ibm/ibmveth.c @@ -472,14 +472,9 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter) } if (adapter->rx_queue.queue_addr != NULL) { - if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) { - dma_unmap_single(dev, - adapter->rx_queue.queue_dma, - adapter->rx_queue.queue_len, - DMA_BIDIRECTIONAL); - adapter->rx_queue.queue_dma = DMA_ERROR_CODE; - } - kfree(adapter->rx_queue.queue_addr); + dma_free_coherent(dev, adapter->rx_queue.queue_len, + adapter->rx_queue.queue_addr, + adapter->rx_queue.queue_dma); adapter->rx_queue.queue_addr = NULL; } @@ -556,10 +551,13 @@ static int ibmveth_open(struct net_device *netdev) goto err_out; } + dev = &adapter->vdev->dev; + adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries; - adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, - GFP_KERNEL); + adapter->rx_queue.queue_addr = + dma_alloc_coherent(dev, adapter->rx_queue.queue_len, + &adapter->rx_queue.queue_dma, GFP_KERNEL); if (!adapter->rx_queue.queue_addr) { netdev_err(netdev, "unable to allocate rx queue pages\n"); @@ -567,19 +565,13 @@ static int ibmveth_open(struct net_device *netdev) goto err_out; } - dev = &adapter->vdev->dev; - adapter->buffer_list_dma = dma_map_single(dev, adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL); adapter->filter_list_dma = dma_map_single(dev, adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL); - adapter->rx_queue.queue_dma = dma_map_single(dev, - adapter->rx_queue.queue_addr, - adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL); if ((dma_mapping_error(dev, adapter->buffer_list_dma)) || - (dma_mapping_error(dev, adapter->filter_list_dma)) || - (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) { + (dma_mapping_error(dev, adapter->filter_list_dma))) { netdev_err(netdev, "unable to map filter or buffer list " "pages\n"); rc = -ENOMEM; diff --git a/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c b/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c index 3bfbb8df8989..bde337ee1a34 100644 --- a/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/trunk/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3149,6 +3149,17 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, return NETDEV_TX_OK; } + /* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN, + * packets may get corrupted during padding by HW. + * To WA this issue, pad all small packets manually. 
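The e1000_xmit_frame() hunk here pads any frame shorter than ETH_ZLEN before handing it to the hardware, because the patch notes that PCI/PCI-X parts may corrupt the padding they add themselves. A standalone sketch of the same padding on a plain byte buffer (ETH_ZLEN is the 60-byte Ethernet minimum excluding FCS; the frame contents are made up):

#include <stdio.h>
#include <string.h>

#define ETH_ZLEN 60   /* minimum Ethernet frame length, excluding FCS */

/* Zero-pad buf (current length *len, capacity cap) up to ETH_ZLEN,
 * mirroring what skb_pad()/skb_set_tail_pointer() do in the patch. */
static int pad_to_min(unsigned char *buf, size_t *len, size_t cap)
{
	if (*len >= ETH_ZLEN)
		return 0;
	if (cap < ETH_ZLEN)
		return -1;              /* caller would drop the frame */
	memset(buf + *len, 0, ETH_ZLEN - *len);
	*len = ETH_ZLEN;
	return 0;
}

int main(void)
{
	unsigned char frame[128] = { 0xde, 0xad, 0xbe, 0xef }; /* made-up data */
	size_t len = 42;

	if (pad_to_min(frame, &len, sizeof(frame)) == 0)
		printf("frame length after padding: %zu\n", len);
	return 0;
}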
+ */ + if (skb->len < ETH_ZLEN) { + if (skb_pad(skb, ETH_ZLEN - skb->len)) + return NETDEV_TX_OK; + skb->len = ETH_ZLEN; + skb_set_tail_pointer(skb, ETH_ZLEN); + } + mss = skb_shinfo(skb)->gso_size; /* The controller does a simple calculation to * make sure there is enough room in the FIFO before diff --git a/trunk/drivers/net/ethernet/intel/e1000e/82571.c b/trunk/drivers/net/ethernet/intel/e1000e/82571.c index 0b3bade957fd..080c89093feb 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/82571.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/82571.c @@ -999,7 +999,7 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active) **/ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) { - u32 ctrl, ctrl_ext, eecd; + u32 ctrl, ctrl_ext, eecd, tctl; s32 ret_val; /* @@ -1014,7 +1014,9 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw) ew32(IMC, 0xffffffff); ew32(RCTL, 0); - ew32(TCTL, E1000_TCTL_PSP); + tctl = er32(TCTL); + tctl &= ~E1000_TCTL_EN; + ew32(TCTL, tctl); e1e_flush(); usleep_range(10000, 20000); @@ -1601,10 +1603,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw) * auto-negotiation in the TXCW register and disable * forced link in the Device Control register in an * attempt to auto-negotiate with our link partner. - * If the partner code word is null, stop forcing - * and restart auto negotiation. */ - if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) { + if (rxcw & E1000_RXCW_C) { /* Enable autoneg, and unforce link up */ ew32(TXCW, mac->txcw); ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); diff --git a/trunk/drivers/net/ethernet/intel/e1000e/e1000.h b/trunk/drivers/net/ethernet/intel/e1000e/e1000.h index cd153326c3cf..cb3356c9af80 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/trunk/drivers/net/ethernet/intel/e1000e/e1000.h @@ -310,6 +310,7 @@ struct e1000_adapter { */ struct e1000_ring *tx_ring /* One per active queue */ ____cacheline_aligned_in_smp; + u32 tx_fifo_limit; struct napi_struct napi; diff --git a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c index 95b245310f17..d01a099475a1 100644 --- a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c @@ -178,6 +178,24 @@ static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo) pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]); } +static void e1000e_dump_ps_pages(struct e1000_adapter *adapter, + struct e1000_buffer *bi) +{ + int i; + struct e1000_ps_page *ps_page; + + for (i = 0; i < adapter->rx_ps_pages; i++) { + ps_page = &bi->ps_pages[i]; + + if (ps_page->page) { + pr_info("packet dump for ps_page %d:\n", i); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, + 16, 1, page_address(ps_page->page), + PAGE_SIZE, true); + } + } +} + /* * e1000e_dump - Print registers, Tx-ring and Rx-ring */ @@ -299,10 +317,10 @@ static void e1000e_dump(struct e1000_adapter *adapter) (unsigned long long)buffer_info->time_stamp, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), - buffer_info->length, true); + 16, 1, buffer_info->skb->data, + buffer_info->skb->len, true); } /* Print Rx Ring Summary */ @@ -381,10 +399,8 @@ static void e1000e_dump(struct e1000_adapter *adapter) buffer_info->skb, next_desc); if (netif_msg_pktdata(adapter)) - print_hex_dump(KERN_INFO, "", - 
DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt(buffer_info->dma), - adapter->rx_ps_bsize0, true); + e1000e_dump_ps_pages(adapter, + buffer_info); } } break; @@ -444,12 +460,12 @@ static void e1000e_dump(struct e1000_adapter *adapter) (unsigned long long)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) + if (netif_msg_pktdata(adapter) && + buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt - (buffer_info->dma), + buffer_info->skb->data, adapter->rx_buffer_len, true); } @@ -3500,6 +3516,15 @@ void e1000e_reset(struct e1000_adapter *adapter) break; } + /* + * Alignment of Tx data is on an arbitrary byte boundary with the + * maximum size per Tx descriptor limited only to the transmit + * allocation of the packet buffer minus 96 bytes with an upper + * limit of 24KB due to receive synchronization limitations. + */ + adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96, + 24 << 10); + /* * Disable Adaptive Interrupt Moderation if 2 full packets cannot * fit in receive buffer. @@ -4769,12 +4794,9 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb) return 1; } -#define E1000_MAX_PER_TXD 8192 -#define E1000_MAX_TXD_PWR 12 - static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb, unsigned int first, unsigned int max_per_txd, - unsigned int nr_frags, unsigned int mss) + unsigned int nr_frags) { struct e1000_adapter *adapter = tx_ring->adapter; struct pci_dev *pdev = adapter->pdev; @@ -5007,20 +5029,19 @@ static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size) { + BUG_ON(size > tx_ring->count); + if (e1000_desc_unused(tx_ring) >= size) return 0; return __e1000_maybe_stop_tx(tx_ring, size); } -#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1) static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_ring *tx_ring = adapter->tx_ring; unsigned int first; - unsigned int max_per_txd = E1000_MAX_PER_TXD; - unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; unsigned int tx_flags = 0; unsigned int len = skb_headlen(skb); unsigned int nr_frags; @@ -5040,18 +5061,8 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, } mss = skb_shinfo(skb)->gso_size; - /* - * The controller does a simple calculation to - * make sure there is enough room in the FIFO before - * initiating the DMA for each buffer. The calc is: - * 4 = ceil(buffer len/mss). To make sure we don't - * overrun the FIFO, adjust the max buffer len if mss - * drops. 
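The e1000e_reset() hunk above derives adapter->tx_fifo_limit from the PBA register: the upper 16 bits give the Tx packet-buffer allocation in KB, 96 bytes are reserved, and the result is capped at 24KB; the per-packet descriptor counts later in the patch are then computed with DIV_ROUND_UP against this limit. A standalone sketch of that arithmetic (the sample PBA value is made up):

#include <stdio.h>
#include <stdint.h>

static uint32_t min_u32(uint32_t a, uint32_t b)
{
	return a < b ? a : b;
}

int main(void)
{
	/* Hypothetical PBA register value: upper 16 bits = Tx allocation in
	 * KB (here 20KB), lower 16 bits = Rx allocation. */
	uint32_t pba = (20u << 16) | 12u;

	/* As in the patch: ((PBA >> 16) << 10) converts the Tx allocation
	 * from KB to bytes, 96 bytes are reserved, and the limit is capped
	 * at 24KB. */
	uint32_t tx_fifo_limit = min_u32(((pba >> 16) << 10) - 96,
					 24u << 10);

	printf("tx_fifo_limit = %u bytes\n", tx_fifo_limit);
	return 0;
}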
- */ if (mss) { u8 hdr_len; - max_per_txd = min(mss << 2, max_per_txd); - max_txd_pwr = fls(max_per_txd) - 1; /* * TSO Workaround for 82571/2/3 Controllers -- if skb->data @@ -5081,12 +5092,12 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, count++; count++; - count += TXD_USE_COUNT(len, max_txd_pwr); + count += DIV_ROUND_UP(len, adapter->tx_fifo_limit); nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) - count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]), - max_txd_pwr); + count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]), + adapter->tx_fifo_limit); if (adapter->hw.mac.tx_pkt_filtering) e1000_transfer_dhcp_info(adapter, skb); @@ -5128,15 +5139,18 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb, tx_flags |= E1000_TX_FLAGS_NO_FCS; /* if count is 0 then mapping error has occurred */ - count = e1000_tx_map(tx_ring, skb, first, max_per_txd, nr_frags, mss); + count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit, + nr_frags); if (count) { skb_tx_timestamp(skb); netdev_sent_queue(netdev, skb->len); e1000_tx_queue(tx_ring, tx_flags, count); /* Make sure there is space in the ring for the next send. */ - e1000_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 2); - + e1000_maybe_stop_tx(tx_ring, + (MAX_SKB_FRAGS * + DIV_ROUND_UP(PAGE_SIZE, + adapter->tx_fifo_limit) + 2)); } else { dev_kfree_skb_any(skb); tx_ring->buffer_info[first].time_stamp = 0; @@ -6311,8 +6325,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, adapter->hw.phy.autoneg_advertised = 0x2f; /* ring size defaults */ - adapter->rx_ring->count = 256; - adapter->tx_ring->count = 256; + adapter->rx_ring->count = E1000_DEFAULT_RXD; + adapter->tx_ring->count = E1000_DEFAULT_TXD; /* * Initial Wake on LAN setting - If APM wake is enabled in diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c index 5e84eaac48c1..ba994fb4cec6 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -254,6 +254,14 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) */ size += NVM_WORD_SIZE_BASE_SHIFT; + /* + * Check for invalid size + */ + if ((hw->mac.type == e1000_82576) && (size > 15)) { + pr_notice("The NVM size is not valid, defaulting to 32K\n"); + size = 15; + } + nvm->word_size = 1 << size; if (hw->mac.type < e1000_i210) { nvm->opcode_bits = 8; @@ -281,14 +289,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw) } else nvm->type = e1000_nvm_flash_hw; - /* - * Check for invalid size - */ - if ((hw->mac.type == e1000_82576) && (size > 15)) { - pr_notice("The NVM size is not valid, defaulting to 32K\n"); - size = 15; - } - /* NVM Function Pointers */ switch (hw->mac.type) { case e1000_82580: diff --git a/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h b/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h index 10efcd88dca0..28394bea5253 100644 --- a/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/trunk/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -156,8 +156,12 @@ : (0x0E018 + ((_n) * 0x40))) #define E1000_TXDCTL(_n) ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \ : (0x0E028 + ((_n) * 0x40))) -#define E1000_DCA_TXCTRL(_n) (0x03814 + (_n << 8)) -#define E1000_DCA_RXCTRL(_n) (0x02814 + (_n << 8)) +#define E1000_RXCTL(_n) ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \ + (0x0C014 + ((_n) * 0x40))) +#define E1000_DCA_RXCTRL(_n) E1000_RXCTL(_n) +#define E1000_TXCTL(_n) ((_n) < 4 ? 
(0x03814 + ((_n) * 0x100)) : \ + (0x0E014 + ((_n) * 0x40))) +#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n) #define E1000_TDWBAL(_n) ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \ : (0x0E038 + ((_n) * 0x40))) #define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \ diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c index a19c84cad0e9..70591117051b 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -209,8 +209,8 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) /* When SoL/IDER sessions are active, autoneg/speed/duplex * cannot be changed */ if (igb_check_reset_block(hw)) { - dev_err(&adapter->pdev->dev, "Cannot change link " - "characteristics when SoL/IDER is active.\n"); + dev_err(&adapter->pdev->dev, + "Cannot change link characteristics when SoL/IDER is active.\n"); return -EINVAL; } @@ -1089,8 +1089,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data, wr32(reg, (_test[pat] & write)); val = rd32(reg) & mask; if (val != (_test[pat] & write & mask)) { - dev_err(&adapter->pdev->dev, "pattern test reg %04X " - "failed: got 0x%08X expected 0x%08X\n", + dev_err(&adapter->pdev->dev, + "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", reg, val, (_test[pat] & write & mask)); *data = reg; return 1; @@ -1108,8 +1108,8 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data, wr32(reg, write & mask); val = rd32(reg); if ((write & mask) != (val & mask)) { - dev_err(&adapter->pdev->dev, "set/check reg %04X test failed:" - " got 0x%08X expected 0x%08X\n", reg, + dev_err(&adapter->pdev->dev, + "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg, (val & mask), (write & mask)); *data = reg; return 1; @@ -1171,8 +1171,9 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data) wr32(E1000_STATUS, toggle); after = rd32(E1000_STATUS) & toggle; if (value != after) { - dev_err(&adapter->pdev->dev, "failed STATUS register test " - "got: 0x%08X expected: 0x%08X\n", after, value); + dev_err(&adapter->pdev->dev, + "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); *data = 1; return 1; } @@ -1497,6 +1498,9 @@ static int igb_integrated_phy_loopback(struct igb_adapter *adapter) break; } + /* add small delay to avoid loopback test failure */ + msleep(50); + /* force 1000, set loopback */ igb_write_phy_reg(hw, PHY_CONTROL, 0x4140); @@ -1777,16 +1781,14 @@ static int igb_loopback_test(struct igb_adapter *adapter, u64 *data) * sessions are active */ if (igb_check_reset_block(&adapter->hw)) { dev_err(&adapter->pdev->dev, - "Cannot do PHY loopback test " - "when SoL/IDER is active.\n"); + "Cannot do PHY loopback test when SoL/IDER is active.\n"); *data = 0; goto out; } if ((adapter->hw.mac.type == e1000_i210) - || (adapter->hw.mac.type == e1000_i210)) { + || (adapter->hw.mac.type == e1000_i211)) { dev_err(&adapter->pdev->dev, - "Loopback test not supported " - "on this part at this time.\n"); + "Loopback test not supported on this part at this time.\n"); *data = 0; goto out; } diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_main.c b/trunk/drivers/net/ethernet/intel/igb/igb_main.c index b7c2d5050572..48cc4fb1a307 100644 --- a/trunk/drivers/net/ethernet/intel/igb/igb_main.c +++ b/trunk/drivers/net/ethernet/intel/igb/igb_main.c @@ -462,10 +462,10 @@ static void igb_dump(struct igb_adapter *adapter) (u64)buffer_info->time_stamp, buffer_info->skb, 
next_desc); - if (netif_msg_pktdata(adapter) && buffer_info->dma != 0) + if (netif_msg_pktdata(adapter) && buffer_info->skb) print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, - 16, 1, phys_to_virt(buffer_info->dma), + 16, 1, buffer_info->skb->data, buffer_info->length, true); } } @@ -547,18 +547,17 @@ static void igb_dump(struct igb_adapter *adapter) (u64)buffer_info->dma, buffer_info->skb, next_desc); - if (netif_msg_pktdata(adapter)) { + if (netif_msg_pktdata(adapter) && + buffer_info->dma && buffer_info->skb) { print_hex_dump(KERN_INFO, "", - DUMP_PREFIX_ADDRESS, - 16, 1, - phys_to_virt(buffer_info->dma), - IGB_RX_HDR_LEN, true); + DUMP_PREFIX_ADDRESS, + 16, 1, buffer_info->skb->data, + IGB_RX_HDR_LEN, true); print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 1, - phys_to_virt( - buffer_info->page_dma + - buffer_info->page_offset), + page_address(buffer_info->page) + + buffer_info->page_offset, PAGE_SIZE/2, true); } } diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c index 50fc137501da..18bf08c9d7a4 100644 --- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c @@ -804,12 +804,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { /* Set KX4/KX/KR support according to speed requested */ autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); - if (speed & IXGBE_LINK_SPEED_10GB_FULL) + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) autoc |= IXGBE_AUTOC_KX4_SUPP; if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) && (hw->phy.smart_speed_active == false)) autoc |= IXGBE_AUTOC_KR_SUPP; + } if (speed & IXGBE_LINK_SPEED_1GB_FULL) autoc |= IXGBE_AUTOC_KX_SUPP; } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c index f32e70300770..5aba5ecdf1e2 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -614,8 +614,8 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud /* If source MAC is equal to our own MAC and not performing * the selftest or flb disabled - drop the packet */ if (s_mac == priv->mac && - (!(dev->features & NETIF_F_LOOPBACK) || - !priv->validate_loopback)) + !((dev->features & NETIF_F_LOOPBACK) || + priv->validate_loopback)) goto next; /* diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 019d856b1334..10bba09c44ea 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -164,7 +164,6 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, ring->cons = 0xffffffff; ring->last_nr_txbb = 1; ring->poll_cnt = 0; - ring->blocked = 0; memset(ring->tx_info, 0, ring->size * sizeof(struct mlx4_en_tx_info)); memset(ring->buf, 0, ring->buf_size); @@ -365,14 +364,13 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq) ring->cons += txbbs_skipped; netdev_tx_completed_queue(ring->tx_queue, packets, bytes); - /* Wakeup Tx queue if this ring stopped it */ - if (unlikely(ring->blocked)) { - if ((u32) (ring->prod - ring->cons) <= - ring->size - HEADROOM - MAX_DESC_TXBBS) { - ring->blocked = 0; - netif_tx_wake_queue(ring->tx_queue); - priv->port_stats.wake_queue++; - } + /* + * Wakeup Tx queue if this stopped, and 
at least 1 packet + * was completed + */ + if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { + netif_tx_wake_queue(ring->tx_queue); + priv->port_stats.wake_queue++; } } @@ -592,7 +590,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) ring->size - HEADROOM - MAX_DESC_TXBBS)) { /* every full Tx ring stops queue */ netif_tx_stop_queue(ring->tx_queue); - ring->blocked = 1; priv->port_stats.queue_stopped++; return NETDEV_TX_BUSY; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c index 88b7b3e75ab1..31d02649be41 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -227,9 +227,10 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } -int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) { - int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); + u32 i = (obj & (table->num_obj - 1)) / + (MLX4_TABLE_CHUNK_SIZE / table->obj_size); int ret = 0; mutex_lock(&table->mutex); @@ -262,16 +263,18 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) return ret; } -void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) { - int i; + u32 i; + u64 offset; i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); mutex_lock(&table->mutex); if (--table->icm[i]->refcount == 0) { - mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE, + offset = (u64) i * MLX4_TABLE_CHUNK_SIZE; + mlx4_UNMAP_ICM(dev, table->virt + offset, MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE); mlx4_free_icm(dev, table->icm[i], table->coherent); table->icm[i] = NULL; @@ -280,9 +283,11 @@ void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj) mutex_unlock(&table->mutex); } -void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle) +void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, + dma_addr_t *dma_handle) { - int idx, offset, dma_offset, i; + int offset, dma_offset, i; + u64 idx; struct mlx4_icm_chunk *chunk; struct mlx4_icm *icm; struct page *page = NULL; @@ -292,7 +297,7 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han mutex_lock(&table->mutex); - idx = (obj & (table->num_obj - 1)) * table->obj_size; + idx = (u64) (obj & (table->num_obj - 1)) * table->obj_size; icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE]; dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE; @@ -326,10 +331,11 @@ void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_han } int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, - int start, int end) + u32 start, u32 end) { int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size; - int i, err; + int err; + u32 i; for (i = start; i <= end; i += inc) { err = mlx4_table_get(dev, table, i); @@ -349,22 +355,23 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, } void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, - int start, int end) + u32 start, u32 end) { - int i; + u32 i; for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size) mlx4_table_put(dev, table, i); } int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, - 
u64 virt, int obj_size, int nobj, int reserved, + u64 virt, int obj_size, u32 nobj, int reserved, int use_lowmem, int use_coherent) { int obj_per_chunk; int num_icm; unsigned chunk_size; int i; + u64 size; obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size; num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk; @@ -380,10 +387,12 @@ int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, table->coherent = use_coherent; mutex_init(&table->mutex); + size = (u64) nobj * obj_size; for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { chunk_size = MLX4_TABLE_CHUNK_SIZE; - if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size) - chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE); + if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size) + chunk_size = PAGE_ALIGN(size - + i * MLX4_TABLE_CHUNK_SIZE); table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT, (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.h b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.h index 19e4efc0b342..dee67fa39107 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -71,17 +71,17 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask, int coherent); void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); -int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); -void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj); +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); +void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, - int start, int end); + u32 start, u32 end); void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, - int start, int end); + u32 start, u32 end); int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table, - u64 virt, int obj_size, int nobj, int reserved, + u64 virt, int obj_size, u32 nobj, int reserved, int use_lowmem, int use_coherent); void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table); -void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle); +void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle); static inline void mlx4_icm_first(struct mlx4_icm *icm, struct mlx4_icm_iter *iter) diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c index 48d0e90194cb..2f816c6aed72 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/main.c @@ -157,9 +157,6 @@ int mlx4_check_port_params(struct mlx4_dev *dev, "on this HCA, aborting.\n"); return -EINVAL; } - if (port_type[i] == MLX4_PORT_TYPE_ETH && - port_type[i + 1] == MLX4_PORT_TYPE_IB) - return -EINVAL; } } @@ -1237,13 +1234,13 @@ static int mlx4_init_hca(struct mlx4_dev *dev) mlx4_info(dev, "non-primary physical function, skipping.\n"); else mlx4_err(dev, "QUERY_FW command failed, aborting.\n"); - goto unmap_bf; + return err; } err = mlx4_load_fw(dev); if (err) { mlx4_err(dev, "Failed to start FW, aborting.\n"); - goto unmap_bf; + return err; } mlx4_cfg.log_pg_sz_m = 1; @@ -1307,7 +1304,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_init_slave(dev); if (err) { mlx4_err(dev, "Failed to initialize slave\n"); - goto unmap_bf; + 
return err; } err = mlx4_slave_cap(dev); @@ -1327,7 +1324,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_QUERY_ADAPTER(dev, &adapter); if (err) { mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n"); - goto err_close; + goto unmap_bf; } priv->eq_table.inta_pin = adapter.inta_pin; @@ -1335,6 +1332,9 @@ static int mlx4_init_hca(struct mlx4_dev *dev) return 0; +unmap_bf: + unmap_bf_area(dev); + err_close: mlx4_close_hca(dev); @@ -1347,8 +1347,6 @@ static int mlx4_init_hca(struct mlx4_dev *dev) mlx4_UNMAP_FA(dev); mlx4_free_icm(dev, priv->fw.fw_icm, 0); } -unmap_bf: - unmap_bf_area(dev); return err; } @@ -1999,7 +1997,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) } slave_start: - if (mlx4_cmd_init(dev)) { + err = mlx4_cmd_init(dev); + if (err) { mlx4_err(dev, "Failed to init command interface, aborting.\n"); goto err_sriov; } diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c b/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c index 4ec3835e1bc2..e151c21baf2b 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -137,11 +137,11 @@ static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, return err; } -static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num, +static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port, enum mlx4_steer_type steer, u32 qpn) { - struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num]; + struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1]; struct mlx4_promisc_qp *pqp; list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) { @@ -182,7 +182,7 @@ static int new_steering_entry(struct mlx4_dev *dev, u8 port, /* If the given qpn is also a promisc qp, * it should be inserted to duplicates list */ - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, port, steer, qpn); if (pqp) { dqp = kmalloc(sizeof *dqp, GFP_KERNEL); if (!dqp) { @@ -256,7 +256,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port, s_steer = &mlx4_priv(dev)->steer[port - 1]; - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, port, steer, qpn); if (!pqp) return 0; /* nothing to do */ @@ -302,7 +302,7 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port, s_steer = &mlx4_priv(dev)->steer[port - 1]; /* if qp is not promisc, it cannot be duplicated */ - if (!get_promisc_qp(dev, 0, steer, qpn)) + if (!get_promisc_qp(dev, port, steer, qpn)) return false; /* The qp is promisc qp so it is a duplicate on this index @@ -352,7 +352,7 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port, members_count = be32_to_cpu(mgm->members_count) & 0xffffff; for (i = 0; i < members_count; i++) { qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK; - if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) { + if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) { /* the qp is not promisc, the entry can't be removed */ goto out; } @@ -398,7 +398,7 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, mutex_lock(&priv->mcg_table.mutex); - if (get_promisc_qp(dev, 0, steer, qpn)) { + if (get_promisc_qp(dev, port, steer, qpn)) { err = 0; /* Noting to do, already exists */ goto out_mutex; } @@ -432,8 +432,10 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port, if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) { /* Entry already exists, add to duplicates */ dqp = kmalloc(sizeof *dqp, GFP_KERNEL); - if (!dqp) + if (!dqp) { + err = 
-ENOMEM; goto out_mailbox; + } dqp->qpn = qpn; list_add_tail(&dqp->list, &entry->duplicates); found = true; @@ -501,7 +503,7 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port, s_steer = &mlx4_priv(dev)->steer[port - 1]; mutex_lock(&priv->mcg_table.mutex); - pqp = get_promisc_qp(dev, 0, steer, qpn); + pqp = get_promisc_qp(dev, port, steer, qpn); if (unlikely(!pqp)) { mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn); /* nothing to do */ @@ -648,13 +650,6 @@ static int find_entry(struct mlx4_dev *dev, u8 port, return err; } -struct mlx4_net_trans_rule_hw_ctrl { - __be32 ctrl; - __be32 vf_vep_port; - __be32 qpn; - __be32 reserved; -}; - static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, struct mlx4_net_trans_rule_hw_ctrl *hw) { @@ -678,87 +673,18 @@ static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, hw->qpn = cpu_to_be32(ctrl->qpn); } -struct mlx4_net_trans_rule_hw_ib { - u8 size; - u8 rsvd1; - __be16 id; - u32 rsvd2; - __be32 qpn; - __be32 qpn_mask; - u8 dst_gid[16]; - u8 dst_gid_msk[16]; -} __packed; - -struct mlx4_net_trans_rule_hw_eth { - u8 size; - u8 rsvd; - __be16 id; - u8 rsvd1[6]; - u8 dst_mac[6]; - u16 rsvd2; - u8 dst_mac_msk[6]; - u16 rsvd3; - u8 src_mac[6]; - u16 rsvd4; - u8 src_mac_msk[6]; - u8 rsvd5; - u8 ether_type_enable; - __be16 ether_type; - __be16 vlan_id_msk; - __be16 vlan_id; -} __packed; - -struct mlx4_net_trans_rule_hw_tcp_udp { - u8 size; - u8 rsvd; - __be16 id; - __be16 rsvd1[3]; - __be16 dst_port; - __be16 rsvd2; - __be16 dst_port_msk; - __be16 rsvd3; - __be16 src_port; - __be16 rsvd4; - __be16 src_port_msk; -} __packed; - -struct mlx4_net_trans_rule_hw_ipv4 { - u8 size; - u8 rsvd; - __be16 id; - __be32 rsvd1; - __be32 dst_ip; - __be32 dst_ip_msk; - __be32 src_ip; - __be32 src_ip_msk; -} __packed; - -struct _rule_hw { - union { - struct { - u8 size; - u8 rsvd; - __be16 id; - }; - struct mlx4_net_trans_rule_hw_eth eth; - struct mlx4_net_trans_rule_hw_ib ib; - struct mlx4_net_trans_rule_hw_ipv4 ipv4; - struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; - }; +const u16 __sw_id_hw[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, + [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, + [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, + [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, + [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 }; static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, struct _rule_hw *rule_hw) { - static const u16 __sw_id_hw[] = { - [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, - [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, - [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, - [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, - [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, - [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 - }; - static const size_t __rule_hw_sz[] = { [MLX4_NET_TRANS_RULE_ID_ETH] = sizeof(struct mlx4_net_trans_rule_hw_eth), diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 59ebc0339638..dba69d98734a 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -249,7 +249,7 @@ struct mlx4_bitmap { struct mlx4_buddy { unsigned long **bits; unsigned int *num_free; - int max_order; + u32 max_order; spinlock_t lock; }; @@ -258,7 +258,7 @@ struct mlx4_icm; struct mlx4_icm_table { u64 virt; int num_icm; - int num_obj; + u32 num_obj; int obj_size; int lowmem; int coherent; @@ -690,6 +690,82 @@ struct mlx4_steer { struct list_head steer_entries[MLX4_NUM_STEERS]; }; +struct mlx4_net_trans_rule_hw_ctrl { + 
__be32 ctrl; + __be32 vf_vep_port; + __be32 qpn; + __be32 reserved; +}; + +struct mlx4_net_trans_rule_hw_ib { + u8 size; + u8 rsvd1; + __be16 id; + u32 rsvd2; + __be32 qpn; + __be32 qpn_mask; + u8 dst_gid[16]; + u8 dst_gid_msk[16]; +} __packed; + +struct mlx4_net_trans_rule_hw_eth { + u8 size; + u8 rsvd; + __be16 id; + u8 rsvd1[6]; + u8 dst_mac[6]; + u16 rsvd2; + u8 dst_mac_msk[6]; + u16 rsvd3; + u8 src_mac[6]; + u16 rsvd4; + u8 src_mac_msk[6]; + u8 rsvd5; + u8 ether_type_enable; + __be16 ether_type; + __be16 vlan_id_msk; + __be16 vlan_id; +} __packed; + +struct mlx4_net_trans_rule_hw_tcp_udp { + u8 size; + u8 rsvd; + __be16 id; + __be16 rsvd1[3]; + __be16 dst_port; + __be16 rsvd2; + __be16 dst_port_msk; + __be16 rsvd3; + __be16 src_port; + __be16 rsvd4; + __be16 src_port_msk; +} __packed; + +struct mlx4_net_trans_rule_hw_ipv4 { + u8 size; + u8 rsvd; + __be16 id; + __be32 rsvd1; + __be32 dst_ip; + __be32 dst_ip_msk; + __be32 src_ip; + __be32 src_ip_msk; +} __packed; + +struct _rule_hw { + union { + struct { + u8 size; + u8 rsvd; + __be16 id; + }; + struct mlx4_net_trans_rule_hw_eth eth; + struct mlx4_net_trans_rule_hw_ib ib; + struct mlx4_net_trans_rule_hw_ipv4 ipv4; + struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; + }; +}; + struct mlx4_priv { struct mlx4_dev dev; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 5f1ab105debc..9d27e42264e2 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -248,7 +248,6 @@ struct mlx4_en_tx_ring { u32 doorbell_qpn; void *buf; u16 poll_cnt; - int blocked; struct mlx4_en_tx_info *tx_info; u8 *bounce_buf; u32 last_nr_txbb; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c b/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c index af55b7ce5341..c202d3ad2a0e 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -37,6 +37,7 @@ #include #include #include +#include #include @@ -120,7 +121,7 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) buddy->max_order = max_order; spin_lock_init(&buddy->lock); - buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), + buddy->bits = kcalloc(buddy->max_order + 1, sizeof (long *), GFP_KERNEL); buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free, GFP_KERNEL); @@ -129,10 +130,12 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) for (i = 0; i <= buddy->max_order; ++i) { s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); - if (!buddy->bits[i]) - goto err_out_free; - bitmap_zero(buddy->bits[i], 1 << (buddy->max_order - i)); + buddy->bits[i] = kcalloc(s, sizeof (long), GFP_KERNEL | __GFP_NOWARN); + if (!buddy->bits[i]) { + buddy->bits[i] = vzalloc(s * sizeof(long)); + if (!buddy->bits[i]) + goto err_out_free; + } } set_bit(0, buddy->bits[buddy->max_order]); @@ -142,7 +145,10 @@ static int mlx4_buddy_init(struct mlx4_buddy *buddy, int max_order) err_out_free: for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + if (buddy->bits[i] && is_vmalloc_addr(buddy->bits[i])) + vfree(buddy->bits[i]); + else + kfree(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -156,7 +162,10 @@ static void mlx4_buddy_cleanup(struct mlx4_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + if (is_vmalloc_addr(buddy->bits[i])) + vfree(buddy->bits[i]); + else + 
kfree(buddy->bits[i]); kfree(buddy->bits); kfree(buddy->num_free); @@ -668,7 +677,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) return err; err = mlx4_buddy_init(&mr_table->mtt_buddy, - ilog2(dev->caps.num_mtts / + ilog2((u32)dev->caps.num_mtts / (1 << log_mtts_per_seg))); if (err) goto err_buddy; @@ -678,7 +687,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev) mlx4_alloc_mtt_range(dev, fls(dev->caps.reserved_mtts - 1)); if (priv->reserved_mtts < 0) { - mlx4_warn(dev, "MTT table of order %d is too small.\n", + mlx4_warn(dev, "MTT table of order %u is too small.\n", mr_table->mtt_buddy.max_order); err = -ENOMEM; goto err_reserve_mtts; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c b/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c index 9ee4725363d5..8e0c3cc2a1ec 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -76,7 +76,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, u64 size; u64 start; int type; - int num; + u32 num; int log_num; }; @@ -105,7 +105,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, si_meminfo(&si); request->num_mtt = roundup_pow_of_two(max_t(unsigned, request->num_mtt, - min(1UL << 31, + min(1UL << (31 - log_mtts_per_seg), si.totalram >> (log_mtts_per_seg - 1)))); profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz; diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 94ceddd17ab2..293c9e820c49 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -42,6 +42,7 @@ #include #include #include +#include #include "mlx4.h" #include "fw.h" @@ -2776,18 +2777,133 @@ int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave, return err; } +/* + * MAC validation for Flow Steering rules. + * VF can attach rules only with a mac address which is assigned to it. + */ +static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header, + struct list_head *rlist) +{ + struct mac_res *res, *tmp; + __be64 be_mac; + + /* make sure it isn't multicast or broadcast mac*/ + if (!is_multicast_ether_addr(eth_header->eth.dst_mac) && + !is_broadcast_ether_addr(eth_header->eth.dst_mac)) { + list_for_each_entry_safe(res, tmp, rlist, list) { + be_mac = cpu_to_be64(res->mac << 16); + if (!memcmp(&be_mac, eth_header->eth.dst_mac, ETH_ALEN)) + return 0; + } + pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n", + eth_header->eth.dst_mac, slave); + return -EINVAL; + } + return 0; +} + +/* + * In case of missing eth header, append eth header with a MAC address + * assigned to the VF. 
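The comment on validate_eth_header_mac() above captures the rule being enforced: a VF may only install steering rules whose unicast destination MAC is one of the MACs recorded for that VF, while multicast and broadcast destinations are not checked against the list. Below is a minimal standalone sketch of that check, assuming a plain uint64_t array stands in for the slave's RES_MAC list and that each entry holds the MAC in its low 48 bits, as in the driver.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

static int is_multicast(const uint8_t *mac)
{
        return mac[0] & 0x01;           /* broadcast is a multicast address too */
}

/* Expand the low 48 bits of 'mac' into network byte order, matching the
 * first ETH_ALEN bytes of cpu_to_be64(mac << 16) in the driver. */
static void mac_to_bytes(uint64_t mac, uint8_t out[ETH_ALEN])
{
        int i;

        for (i = 0; i < ETH_ALEN; i++)
                out[i] = (mac >> (8 * (ETH_ALEN - 1 - i))) & 0xff;
}

/* Return 0 if the rule's destination MAC is acceptable for this VF, -1 otherwise. */
static int validate_dst_mac(const uint8_t *dst_mac,
                            const uint64_t *vf_macs, int n_macs)
{
        uint8_t bytes[ETH_ALEN];
        int i;

        if (is_multicast(dst_mac))      /* multicast/broadcast: not validated */
                return 0;
        for (i = 0; i < n_macs; i++) {
                mac_to_bytes(vf_macs[i], bytes);
                if (!memcmp(bytes, dst_mac, ETH_ALEN))
                        return 0;
        }
        return -1;                      /* unicast MAC not owned by the VF */
}

int main(void)
{
        uint64_t owned[] = { 0x001122334455ULL };
        uint8_t good[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        uint8_t bad[ETH_ALEN]  = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };

        printf("good: %d, bad: %d\n",
               validate_dst_mac(good, owned, 1),
               validate_dst_mac(bad, owned, 1));
        return 0;
}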
+ */ +static int add_eth_header(struct mlx4_dev *dev, int slave, + struct mlx4_cmd_mailbox *inbox, + struct list_head *rlist, int header_id) +{ + struct mac_res *res, *tmp; + u8 port; + struct mlx4_net_trans_rule_hw_ctrl *ctrl; + struct mlx4_net_trans_rule_hw_eth *eth_header; + struct mlx4_net_trans_rule_hw_ipv4 *ip_header; + struct mlx4_net_trans_rule_hw_tcp_udp *l4_header; + __be64 be_mac = 0; + __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16); + + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + port = be32_to_cpu(ctrl->vf_vep_port) & 0xff; + eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1); + + /* Clear a space in the inbox for eth header */ + switch (header_id) { + case MLX4_NET_TRANS_RULE_ID_IPV4: + ip_header = + (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1); + memmove(ip_header, eth_header, + sizeof(*ip_header) + sizeof(*l4_header)); + break; + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *) + (eth_header + 1); + memmove(l4_header, eth_header, sizeof(*l4_header)); + break; + default: + return -EINVAL; + } + list_for_each_entry_safe(res, tmp, rlist, list) { + if (port == res->port) { + be_mac = cpu_to_be64(res->mac << 16); + break; + } + } + if (!be_mac) { + pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n", + port); + return -EINVAL; + } + + memset(eth_header, 0, sizeof(*eth_header)); + eth_header->size = sizeof(*eth_header) >> 2; + eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]); + memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN); + memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN); + + return 0; + +} + int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_vhcr *vhcr, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd) { + + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; + struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC]; int err; + struct mlx4_net_trans_rule_hw_ctrl *ctrl; + struct _rule_hw *rule_header; + int header_id; if (dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) return -EOPNOTSUPP; + ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf; + rule_header = (struct _rule_hw *)(ctrl + 1); + header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id)); + + switch (header_id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + if (validate_eth_header_mac(slave, rule_header, rlist)) + return -EINVAL; + break; + case MLX4_NET_TRANS_RULE_ID_IPV4: + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n"); + if (add_eth_header(dev, slave, inbox, rlist, header_id)) + return -EINVAL; + vhcr->in_modifier += + sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2; + break; + default: + pr_err("Corrupted mailbox.\n"); + return -EINVAL; + } + err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, vhcr->in_modifier, 0, MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c b/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c index 802498293528..34ee09bae36e 100644 --- a/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c +++ b/trunk/drivers/net/ethernet/mellanox/mlx4/sense.c @@ -80,20 +80,6 @@ void mlx4_do_sense_ports(struct mlx4_dev *dev, stype[i - 1] = defaults[i - 1]; } - /* - * Adjust port configuration: - * If port 1 sensed nothing 
and port 2 is IB, set both as IB - * If port 2 sensed nothing and port 1 is Eth, set both as Eth - */ - if (stype[0] == MLX4_PORT_TYPE_ETH) { - for (i = 1; i < dev->caps.num_ports; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_ETH; - } - if (stype[dev->caps.num_ports - 1] == MLX4_PORT_TYPE_IB) { - for (i = 0; i < dev->caps.num_ports - 1; i++) - stype[i] = stype[i] ? stype[i] : MLX4_PORT_TYPE_IB; - } - /* * If sensed nothing, remain in current configuration. */ diff --git a/trunk/drivers/net/ethernet/nxp/lpc_eth.c b/trunk/drivers/net/ethernet/nxp/lpc_eth.c index 4069edab229e..53743f7a2ca9 100644 --- a/trunk/drivers/net/ethernet/nxp/lpc_eth.c +++ b/trunk/drivers/net/ethernet/nxp/lpc_eth.c @@ -346,28 +346,15 @@ static phy_interface_t lpc_phy_interface_mode(struct device *dev) "phy-mode", NULL); if (mode && !strcmp(mode, "mii")) return PHY_INTERFACE_MODE_MII; - return PHY_INTERFACE_MODE_RMII; } - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_MII_SUPPORT - return PHY_INTERFACE_MODE_MII; -#else return PHY_INTERFACE_MODE_RMII; -#endif } static bool use_iram_for_net(struct device *dev) { if (dev && dev->of_node) return of_property_read_bool(dev->of_node, "use-iram"); - - /* non-DT */ -#ifdef CONFIG_ARCH_LPC32XX_IRAM_FOR_NET - return true; -#else return false; -#endif } /* Receive Status information word */ diff --git a/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c b/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c index c42bbb16cdae..a688a2ddcfd6 100644 --- a/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c +++ b/trunk/drivers/net/ethernet/octeon/octeon_mgmt.c @@ -722,10 +722,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev) octeon_mgmt_adjust_link, 0, PHY_INTERFACE_MODE_MII); - if (IS_ERR(p->phydev)) { - p->phydev = NULL; + if (!p->phydev) return -1; - } phy_start_aneg(p->phydev); diff --git a/trunk/drivers/net/ethernet/pasemi/pasemi_mac.c b/trunk/drivers/net/ethernet/pasemi/pasemi_mac.c index e559dfa06d6a..6fa74d530e44 100644 --- a/trunk/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/trunk/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev) phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0, PHY_INTERFACE_MODE_SGMII); - if (IS_ERR(phydev)) { + if (!phydev) { printk(KERN_ERR "%s: Could not attach to phy\n", dev->name); - return PTR_ERR(phydev); + return -ENODEV; } mac->phydev = phydev; diff --git a/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 342b3a79bd0f..a77c558d8f40 100644 --- a/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/trunk/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1378,6 +1378,10 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter) struct pci_dev *root = pdev->bus->self; u32 aer_pos; + /* root bus? 
*/ + if (!root) + return; + if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM && adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP) return; diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c index b8ead696141e..2a179d087207 100644 --- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c +++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c @@ -15,7 +15,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter) do { /* give atleast 1ms for firmware to respond */ - msleep(1); + mdelay(1); if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT) return QLCNIC_CDRP_RSP_TIMEOUT; @@ -601,7 +601,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter) qlcnic_fw_cmd_destroy_tx_ctx(adapter); /* Allow dma queues to drain after context reset */ - msleep(20); + mdelay(20); } } diff --git a/trunk/drivers/net/ethernet/renesas/Kconfig b/trunk/drivers/net/ethernet/renesas/Kconfig index 46df3a04030c..24c2305d7948 100644 --- a/trunk/drivers/net/ethernet/renesas/Kconfig +++ b/trunk/drivers/net/ethernet/renesas/Kconfig @@ -8,7 +8,7 @@ config SH_ETH (CPU_SUBTYPE_SH7710 || CPU_SUBTYPE_SH7712 || \ CPU_SUBTYPE_SH7763 || CPU_SUBTYPE_SH7619 || \ CPU_SUBTYPE_SH7724 || CPU_SUBTYPE_SH7734 || \ - CPU_SUBTYPE_SH7757 || ARCH_R8A7740) + CPU_SUBTYPE_SH7757 || ARCH_R8A7740 || ARCH_R8A7779) select CRC32 select NET_CORE select MII @@ -18,4 +18,4 @@ config SH_ETH Renesas SuperH Ethernet device driver. This driver supporting CPUs are: - SH7619, SH7710, SH7712, SH7724, SH7734, SH7763, SH7757, - and R8A7740. + R8A7740 and R8A7779. diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.c b/trunk/drivers/net/ethernet/renesas/sh_eth.c index af0b867a6cf6..bad8f2eec9b4 100644 --- a/trunk/drivers/net/ethernet/renesas/sh_eth.c +++ b/trunk/drivers/net/ethernet/renesas/sh_eth.c @@ -78,7 +78,7 @@ static void sh_eth_select_mii(struct net_device *ndev) #endif /* There is CPU dependent code */ -#if defined(CONFIG_CPU_SUBTYPE_SH7724) +#if defined(CONFIG_CPU_SUBTYPE_SH7724) || defined(CONFIG_ARCH_R8A7779) #define SH_ETH_RESET_DEFAULT 1 static void sh_eth_set_duplex(struct net_device *ndev) { @@ -93,13 +93,18 @@ static void sh_eth_set_duplex(struct net_device *ndev) static void sh_eth_set_rate(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); + unsigned int bits = ECMR_RTM; + +#if defined(CONFIG_ARCH_R8A7779) + bits |= ECMR_ELB; +#endif switch (mdp->speed) { case 10: /* 10BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~bits, ECMR); break; case 100:/* 100BASE */ - sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR); + sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | bits, ECMR); break; default: break; diff --git a/trunk/drivers/net/ethernet/seeq/sgiseeq.c b/trunk/drivers/net/ethernet/seeq/sgiseeq.c index bb8c8222122b..4d15bf413bdc 100644 --- a/trunk/drivers/net/ethernet/seeq/sgiseeq.c +++ b/trunk/drivers/net/ethernet/seeq/sgiseeq.c @@ -751,6 +751,7 @@ static int __devinit sgiseeq_probe(struct platform_device *pdev) sp->srings = sr; sp->rx_desc = sp->srings->rxvector; sp->tx_desc = sp->srings->txvector; + spin_lock_init(&sp->tx_lock); /* A couple calculations now, saves many cycles later. 
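The sh_eth_set_rate() hunk above folds the extra bit needed on R8A7779 into the same read-modify-write that already toggled ECMR_RTM for 10/100 selection. Below is a small standalone model of that pattern; the register is simulated with a plain variable and the bit positions are invented for illustration (the real ECMR layout is defined in the driver headers).

#include <stdio.h>

/* Illustrative bit values only, not the real ECMR layout. */
#define ECMR_RTM (1u << 2)      /* 100 Mbit/s mode */
#define ECMR_ELB (1u << 3)      /* additional bit the patch also toggles on R8A7779 */

static unsigned int ecmr;       /* stands in for the memory-mapped ECMR register */

static void set_rate(int speed, int is_r8a7779)
{
        unsigned int bits = ECMR_RTM;

        if (is_r8a7779)
                bits |= ECMR_ELB;       /* this SoC wants both bits switched together */

        switch (speed) {
        case 10:                        /* 10BASE: clear the speed bits */
                ecmr &= ~bits;
                break;
        case 100:                       /* 100BASE: set the speed bits */
                ecmr |= bits;
                break;
        default:                        /* unknown speed: leave the register alone */
                break;
        }
}

int main(void)
{
        set_rate(100, 1);
        printf("ECMR after 100M on R8A7779: 0x%x\n", ecmr);
        set_rate(10, 1);
        printf("ECMR after 10M  on R8A7779: 0x%x\n", ecmr);
        return 0;
}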
*/ setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS); diff --git a/trunk/drivers/net/ethernet/sfc/efx.c b/trunk/drivers/net/ethernet/sfc/efx.c index 70554a1b2b02..65a8d49106a4 100644 --- a/trunk/drivers/net/ethernet/sfc/efx.c +++ b/trunk/drivers/net/ethernet/sfc/efx.c @@ -1503,6 +1503,11 @@ static int efx_probe_all(struct efx_nic *efx) goto fail2; } + BUILD_BUG_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_RXQ_MIN_ENT); + if (WARN_ON(EFX_DEFAULT_DMAQ_SIZE < EFX_TXQ_MIN_ENT(efx))) { + rc = -EINVAL; + goto fail3; + } efx->rxq_entries = efx->txq_entries = EFX_DEFAULT_DMAQ_SIZE; rc = efx_probe_filters(efx); @@ -2070,6 +2075,7 @@ static int efx_register_netdev(struct efx_nic *efx) net_dev->irq = efx->pci_dev->irq; net_dev->netdev_ops = &efx_netdev_ops; SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); + net_dev->gso_max_segs = EFX_TSO_MAX_SEGS; rtnl_lock(); diff --git a/trunk/drivers/net/ethernet/sfc/efx.h b/trunk/drivers/net/ethernet/sfc/efx.h index be8f9158a714..70755c97251a 100644 --- a/trunk/drivers/net/ethernet/sfc/efx.h +++ b/trunk/drivers/net/ethernet/sfc/efx.h @@ -30,6 +30,7 @@ extern netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); +extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); /* RX */ extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); @@ -52,10 +53,15 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); #define EFX_MAX_EVQ_SIZE 16384UL #define EFX_MIN_EVQ_SIZE 512UL -/* The smallest [rt]xq_entries that the driver supports. Callers of - * efx_wake_queue() assume that they can subsequently send at least one - * skb. Falcon/A1 may require up to three descriptors per skb_frag. */ -#define EFX_MIN_RING_SIZE (roundup_pow_of_two(2 * 3 * MAX_SKB_FRAGS)) +/* Maximum number of TCP segments we support for soft-TSO */ +#define EFX_TSO_MAX_SEGS 100 + +/* The smallest [rt]xq_entries that the driver supports. RX minimum + * is a bit arbitrary. For TX, we must have space for at least 2 + * TSO skbs. 
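The new floors replace the single EFX_MIN_RING_SIZE: RX gets a fixed minimum, while the TX minimum is derived from the worst-case descriptor count of one TSO skb (EFX_TXQ_MIN_ENT is 2 * efx_tx_max_skb_descs(), defined just below in the diff). Here is a standalone sketch of that arithmetic, using EFX_TSO_MAX_SEGS = 100 from the patch and treating MAX_SKB_FRAGS, the 5391 workaround and the page sizes as plain inputs with example values.

#include <stdio.h>

#define EFX_TSO_MAX_SEGS 100U   /* from the patch: max TCP segments for soft-TSO */

/* Mirrors the descriptor-count formula added in efx_tx_max_skb_descs(). */
static unsigned int tx_max_skb_descs(unsigned int max_skb_frags,
                                     int workaround_5391,
                                     unsigned int page_size,
                                     unsigned int efx_page_size,
                                     unsigned int gso_max_size)
{
        /* header + payload descriptor per segment, plus one per input fragment boundary */
        unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + max_skb_frags;

        if (workaround_5391)            /* possible extra descriptor per segment */
                max_descs += EFX_TSO_MAX_SEGS;

        if (page_size > efx_page_size) { /* PCIe page boundaries inside fragments */
                unsigned int per_page =
                        (gso_max_size + efx_page_size - 1) / efx_page_size;
                if (per_page < max_skb_frags)
                        per_page = max_skb_frags;
                max_descs += per_page;
        }
        return max_descs;
}

int main(void)
{
        /* Example numbers only: 17 frags, no workaround, 4K pages both sides, 64K GSO. */
        unsigned int descs = tx_max_skb_descs(17, 0, 4096, 4096, 65536);

        printf("worst-case descriptors per TSO skb: %u\n", descs);
        printf("TX ring floor (2 TSO skbs):         %u\n", 2 * descs);
        return 0;
}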
+ */ +#define EFX_RXQ_MIN_ENT 128U +#define EFX_TXQ_MIN_ENT(efx) (2 * efx_tx_max_skb_descs(efx)) /* Filters */ extern int efx_probe_filters(struct efx_nic *efx); diff --git a/trunk/drivers/net/ethernet/sfc/ethtool.c b/trunk/drivers/net/ethernet/sfc/ethtool.c index 10536f93b561..5faedd855b77 100644 --- a/trunk/drivers/net/ethernet/sfc/ethtool.c +++ b/trunk/drivers/net/ethernet/sfc/ethtool.c @@ -680,21 +680,27 @@ static int efx_ethtool_set_ringparam(struct net_device *net_dev, struct ethtool_ringparam *ring) { struct efx_nic *efx = netdev_priv(net_dev); + u32 txq_entries; if (ring->rx_mini_pending || ring->rx_jumbo_pending || ring->rx_pending > EFX_MAX_DMAQ_SIZE || ring->tx_pending > EFX_MAX_DMAQ_SIZE) return -EINVAL; - if (ring->rx_pending < EFX_MIN_RING_SIZE || - ring->tx_pending < EFX_MIN_RING_SIZE) { + if (ring->rx_pending < EFX_RXQ_MIN_ENT) { netif_err(efx, drv, efx->net_dev, - "TX and RX queues cannot be smaller than %ld\n", - EFX_MIN_RING_SIZE); + "RX queues cannot be smaller than %u\n", + EFX_RXQ_MIN_ENT); return -EINVAL; } - return efx_realloc_channels(efx, ring->rx_pending, ring->tx_pending); + txq_entries = max(ring->tx_pending, EFX_TXQ_MIN_ENT(efx)); + if (txq_entries != ring->tx_pending) + netif_warn(efx, drv, efx->net_dev, + "increasing TX queue size to minimum of %u\n", + txq_entries); + + return efx_realloc_channels(efx, ring->rx_pending, txq_entries); } static int efx_ethtool_set_pauseparam(struct net_device *net_dev, @@ -857,8 +863,8 @@ static int efx_ethtool_get_class_rule(struct efx_nic *efx, &ip_entry->ip4dst, &ip_entry->pdst); if (rc != 0) { rc = efx_filter_get_ipv4_full( - &spec, &proto, &ip_entry->ip4src, &ip_entry->psrc, - &ip_entry->ip4dst, &ip_entry->pdst); + &spec, &proto, &ip_entry->ip4dst, &ip_entry->pdst, + &ip_entry->ip4src, &ip_entry->psrc); EFX_WARN_ON_PARANOID(rc); ip_mask->ip4src = ~0; ip_mask->psrc = ~0; diff --git a/trunk/drivers/net/ethernet/sfc/tx.c b/trunk/drivers/net/ethernet/sfc/tx.c index 9b225a7769f7..18713436b443 100644 --- a/trunk/drivers/net/ethernet/sfc/tx.c +++ b/trunk/drivers/net/ethernet/sfc/tx.c @@ -119,6 +119,25 @@ efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr) return len; } +unsigned int efx_tx_max_skb_descs(struct efx_nic *efx) +{ + /* Header and payload descriptor for each output segment, plus + * one for every input fragment boundary within a segment + */ + unsigned int max_descs = EFX_TSO_MAX_SEGS * 2 + MAX_SKB_FRAGS; + + /* Possibly one more per segment for the alignment workaround */ + if (EFX_WORKAROUND_5391(efx)) + max_descs += EFX_TSO_MAX_SEGS; + + /* Possibly more for PCIe page boundaries within input fragments */ + if (PAGE_SIZE > EFX_PAGE_SIZE) + max_descs += max_t(unsigned int, MAX_SKB_FRAGS, + DIV_ROUND_UP(GSO_MAX_SIZE, EFX_PAGE_SIZE)); + + return max_descs; +} + /* * Add a socket buffer to a TX queue * diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h index e2d083228f3a..719be3912aa9 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/common.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __COMMON_H__ +#define __COMMON_H__ + #include #include #include @@ -366,3 +369,5 @@ extern void stmmac_set_mac(void __iomem *ioaddr, bool enable); extern void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); extern const struct stmmac_ring_mode_ops ring_mode_ops; + +#endif /* __COMMON_H__ */ diff --git 
a/trunk/drivers/net/ethernet/stmicro/stmmac/descs.h b/trunk/drivers/net/ethernet/stmicro/stmmac/descs.h index 9820ec842cc0..223adf95fd03 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/descs.h @@ -20,6 +20,10 @@ Author: Giuseppe Cavallaro *******************************************************************************/ + +#ifndef __DESCS_H__ +#define __DESCS_H__ + struct dma_desc { /* Receive descriptor */ union { @@ -166,3 +170,5 @@ enum tdes_csum_insertion { * is not calculated */ cic_full = 3, /* IP header and pseudoheader */ }; + +#endif /* __DESCS_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/trunk/drivers/net/ethernet/stmicro/stmmac/descs_com.h index dd8d6e19dff6..7ee9499a6e38 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/descs_com.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/descs_com.h @@ -27,6 +27,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __DESC_COM_H__ +#define __DESC_COM_H__ + #if defined(CONFIG_STMMAC_RING) static inline void ehn_desc_rx_set_on_ring_chain(struct dma_desc *p, int end) { @@ -124,3 +127,5 @@ static inline void norm_set_tx_desc_len(struct dma_desc *p, int len) p->des01.tx.buffer1_size = len; } #endif + +#endif /* __DESC_COM_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac100.h index 7c6d857a9cc7..2ec6aeae349e 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac100.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac100.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __DWMAC100_H__ +#define __DWMAC100_H__ + #include #include "common.h" @@ -119,3 +122,5 @@ enum ttc_control { #define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Couinter */ extern const struct stmmac_dma_ops dwmac100_dma_ops; + +#endif /* __DWMAC100_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h index f90fcb5f9573..0e4cacedc1f0 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h @@ -19,6 +19,8 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __DWMAC1000_H__ +#define __DWMAC1000_H__ #include #include "common.h" @@ -229,6 +231,7 @@ enum rtc_control { #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 /* Synopsys Core versions */ -#define DWMAC_CORE_3_40 34 +#define DWMAC_CORE_3_40 0x34 extern const struct stmmac_dma_ops dwmac1000_dma_ops; +#endif /* __DWMAC1000_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index e678ce39d014..e49c9a0fd6ff 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __DWMAC_DMA_H__ +#define __DWMAC_DMA_H__ + /* DMA CRS Control and Status Register Mapping */ #define DMA_BUS_MODE 0x00001000 /* Bus Mode */ #define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */ @@ -109,3 +112,5 @@ extern void dwmac_dma_start_rx(void __iomem *ioaddr); extern void dwmac_dma_stop_rx(void __iomem *ioaddr); extern int 
dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x); + +#endif /* __DWMAC_DMA_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc.h b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc.h index a38352024cb8..67995ef25251 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc.h @@ -22,6 +22,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __MMC_H__ +#define __MMC_H__ + /* MMC control register */ /* When set, all counter are reset */ #define MMC_CNTRL_COUNTER_RESET 0x1 @@ -129,3 +132,5 @@ struct stmmac_counters { extern void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode); extern void dwmac_mmc_intr_all_mask(void __iomem *ioaddr); extern void dwmac_mmc_read(void __iomem *ioaddr, struct stmmac_counters *mmc); + +#endif /* __MMC_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c index c07cfe989f6e..0c74a702d461 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c @@ -33,7 +33,7 @@ #define MMC_TX_INTR 0x00000108 /* MMC TX Interrupt */ #define MMC_RX_INTR_MASK 0x0000010c /* MMC Interrupt Mask */ #define MMC_TX_INTR_MASK 0x00000110 /* MMC Interrupt Mask */ -#define MMC_DEFAUL_MASK 0xffffffff +#define MMC_DEFAULT_MASK 0xffffffff /* MMC TX counter registers */ @@ -147,8 +147,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode) /* To mask all all interrupts.*/ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr) { - writel(MMC_DEFAUL_MASK, ioaddr + MMC_RX_INTR_MASK); - writel(MMC_DEFAUL_MASK, ioaddr + MMC_TX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK); + writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK); } /* This reads the MAC core counters (if actaully supported). 
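Most of the stmmac header changes in this patch simply add include guards (__COMMON_H__, __DESCS_H__, __DWMAC_DMA_H__, __MMC_H__ and so on). The sketch below, with a made-up guard name, simulates the same header text being seen twice in one translation unit to show what the guard buys: the second copy compiles to nothing instead of redefining the types.

/* example.c: the same guarded header text pulled in twice */
#ifndef __EXAMPLE_H__
#define __EXAMPLE_H__
struct example_stats {
        unsigned long rx;
        unsigned long tx;
};
#endif /* __EXAMPLE_H__ */

/* second "inclusion" of the same text: the guard turns it into a no-op */
#ifndef __EXAMPLE_H__
#define __EXAMPLE_H__
struct example_stats {          /* would be a redefinition error without the guard */
        unsigned long rx;
        unsigned long tx;
};
#endif /* __EXAMPLE_H__ */

#include <stdio.h>

int main(void)
{
        struct example_stats s = { 1, 2 };

        printf("%lu %lu\n", s.rx, s.tx);
        return 0;
}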
diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h index f2d3665430ad..e872e1da3137 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -20,6 +20,9 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __STMMAC_H__ +#define __STMMAC_H__ + #define STMMAC_RESOURCE_NAME "stmmaceth" #define DRV_MODULE_VERSION "March_2012" @@ -166,3 +169,5 @@ static inline void stmmac_unregister_pci(void) { } #endif /* CONFIG_STMMAC_PCI */ + +#endif /* __STMMAC_H__ */ diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index fd8882f9602a..3be88331d17a 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1066,7 +1066,7 @@ static int stmmac_open(struct net_device *dev) } else priv->tm->enable = 1; #endif - clk_enable(priv->stmmac_clk); + clk_prepare_enable(priv->stmmac_clk); stmmac_check_ether_addr(priv); @@ -1188,7 +1188,7 @@ static int stmmac_open(struct net_device *dev) if (priv->phydev) phy_disconnect(priv->phydev); - clk_disable(priv->stmmac_clk); + clk_disable_unprepare(priv->stmmac_clk); return ret; } @@ -1246,7 +1246,7 @@ static int stmmac_release(struct net_device *dev) #ifdef CONFIG_STMMAC_DEBUG_FS stmmac_exit_fs(); #endif - clk_disable(priv->stmmac_clk); + clk_disable_unprepare(priv->stmmac_clk); return 0; } @@ -2077,7 +2077,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device, goto error_netdev_register; } - priv->stmmac_clk = clk_get(priv->device, NULL); + priv->stmmac_clk = clk_get(priv->device, STMMAC_RESOURCE_NAME); if (IS_ERR(priv->stmmac_clk)) { pr_warning("%s: warning: cannot get CSR clock\n", __func__); goto error_clk_get; @@ -2178,7 +2178,7 @@ int stmmac_suspend(struct net_device *ndev) else { stmmac_set_mac(priv->ioaddr, false); /* Disable clock in case of PWM is off */ - clk_disable(priv->stmmac_clk); + clk_disable_unprepare(priv->stmmac_clk); } spin_unlock_irqrestore(&priv->lock, flags); return 0; @@ -2203,7 +2203,7 @@ int stmmac_resume(struct net_device *ndev) priv->hw->mac->pmt(priv->ioaddr, 0); else /* enable the clk prevously disabled */ - clk_enable(priv->stmmac_clk); + clk_prepare_enable(priv->stmmac_clk); netif_device_attach(ndev); diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index cd01ee7ecef1..b93245c11995 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -74,7 +74,7 @@ static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, * the necessary resources and invokes the main to init * the net device, register the mdio bus etc. 
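The stmmac_main.c hunks above switch from clk_enable()/clk_disable() to clk_prepare_enable()/clk_disable_unprepare(), which combine the possibly sleeping prepare/unprepare step of the common clock framework with the enable/disable step. Below is a rough sketch of the pairing only, with the clock calls stubbed out so it is self-contained; the real API lives in linux/clk.h and does considerably more.

#include <stdio.h>

/* Stubbed stand-ins for the kernel clock API, for illustration only. */
struct clk { int prepared; int enabled; };

static int clk_prepare_enable(struct clk *c)
{
        c->prepared = 1;        /* may sleep in the real framework */
        c->enabled = 1;         /* must never happen without a prior prepare */
        return 0;
}

static void clk_disable_unprepare(struct clk *c)
{
        c->enabled = 0;         /* undo both steps, in reverse order */
        c->prepared = 0;
}

static struct clk stmmac_clk;

int main(void)
{
        clk_prepare_enable(&stmmac_clk);        /* was clk_enable() before the patch */
        printf("open:    prepared=%d enabled=%d\n",
               stmmac_clk.prepared, stmmac_clk.enabled);

        clk_disable_unprepare(&stmmac_clk);     /* was clk_disable() before the patch */
        printf("release: prepared=%d enabled=%d\n",
               stmmac_clk.prepared, stmmac_clk.enabled);
        return 0;
}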
*/ -static int stmmac_pltfr_probe(struct platform_device *pdev) +static int __devinit stmmac_pltfr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c index 2a0e1abde7e7..4ccd4e2977b7 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.c @@ -97,19 +97,19 @@ static struct clk *timer_clock; static void stmmac_tmu_start(unsigned int new_freq) { clk_set_rate(timer_clock, new_freq); - clk_enable(timer_clock); + clk_prepare_enable(timer_clock); } static void stmmac_tmu_stop(void) { - clk_disable(timer_clock); + clk_disable_unprepare(timer_clock); } int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) { timer_clock = clk_get(NULL, TMU_CHANNEL); - if (timer_clock == NULL) + if (IS_ERR(timer_clock)) return -1; if (tmu2_register_user(stmmac_timer_handler, (void *)dev) < 0) { @@ -126,7 +126,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm) int stmmac_close_ext_timer(void) { - clk_disable(timer_clock); + clk_disable_unprepare(timer_clock); tmu2_unregister_user(); clk_put(timer_clock); return 0; diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h index 6863590d184b..aea9b14cdfbe 100644 --- a/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h +++ b/trunk/drivers/net/ethernet/stmicro/stmmac/stmmac_timer.h @@ -21,6 +21,8 @@ Author: Giuseppe Cavallaro *******************************************************************************/ +#ifndef __STMMAC_TIMER_H__ +#define __STMMAC_TIMER_H__ struct stmmac_timer { void (*timer_start) (unsigned int new_freq); @@ -40,3 +42,5 @@ void stmmac_schedule(struct net_device *dev); extern int tmu2_register_user(void *fnt, void *data); extern void tmu2_unregister_user(void); #endif + +#endif /* __STMMAC_TIMER_H__ */ diff --git a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c index 3b5c4571b55e..d15c888e9df8 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_cpdma.c +++ b/trunk/drivers/net/ethernet/ti/davinci_cpdma.c @@ -538,11 +538,12 @@ EXPORT_SYMBOL_GPL(cpdma_chan_create); int cpdma_chan_destroy(struct cpdma_chan *chan) { - struct cpdma_ctlr *ctlr = chan->ctlr; + struct cpdma_ctlr *ctlr; unsigned long flags; if (!chan) return -EINVAL; + ctlr = chan->ctlr; spin_lock_irqsave(&ctlr->lock, flags); if (chan->state != CPDMA_STATE_IDLE) diff --git a/trunk/drivers/net/ethernet/ti/davinci_mdio.c b/trunk/drivers/net/ethernet/ti/davinci_mdio.c index cd7ee204e94a..a9ca4a03d31b 100644 --- a/trunk/drivers/net/ethernet/ti/davinci_mdio.c +++ b/trunk/drivers/net/ethernet/ti/davinci_mdio.c @@ -394,8 +394,10 @@ static int __devexit davinci_mdio_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct davinci_mdio_data *data = dev_get_drvdata(dev); - if (data->bus) + if (data->bus) { + mdiobus_unregister(data->bus); mdiobus_free(data->bus); + } if (data->clk) clk_put(data->clk); diff --git a/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c b/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c index 482648fcf0b6..98934bdf6acf 100644 --- a/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/trunk/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1003,6 +1003,7 @@ static int ixp4xx_nway_reset(struct net_device *dev) } int ixp46x_phc_index = -1; +EXPORT_SYMBOL_GPL(ixp46x_phc_index); 
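The davinci_cpdma hunk above fixes a use-before-check: chan->ctlr was read in the variable's initializer, ahead of the !chan test meant to guard it. A small standalone illustration of the before/after shapes follows; the struct names are simplified stand-ins for the driver's types.

#include <stdio.h>

struct ctlr { int id; };
struct chan { struct ctlr *ctlr; };

/* Buggy shape (pre-patch): chan is dereferenced in the initializer,
 * so a NULL argument crashes before the guard ever runs. */
static int destroy_buggy(struct chan *chan)
{
        struct ctlr *ctlr = chan->ctlr;         /* NULL dereference when chan == NULL */

        if (!chan)
                return -1;
        (void)ctlr;
        return 0;
}

/* Fixed shape (post-patch): declare first, check, then dereference. */
static int destroy_fixed(struct chan *chan)
{
        struct ctlr *ctlr;

        if (!chan)
                return -1;
        ctlr = chan->ctlr;
        (void)ctlr;
        return 0;
}

int main(void)
{
        printf("fixed, NULL arg: %d\n", destroy_fixed(NULL));
        /* destroy_buggy(NULL) would dereference NULL; deliberately not called. */
        (void)destroy_buggy;
        return 0;
}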
static int ixp4xx_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) diff --git a/trunk/drivers/net/fddi/skfp/pmf.c b/trunk/drivers/net/fddi/skfp/pmf.c index 24d8566cfd8b..441b4dc79450 100644 --- a/trunk/drivers/net/fddi/skfp/pmf.c +++ b/trunk/drivers/net/fddi/skfp/pmf.c @@ -673,7 +673,7 @@ void smt_add_para(struct s_smc *smc, struct s_pcon *pcon, u_short para, sm_pm_get_ls(smc,port_to_mib(smc,port))) ; break ; case SMT_P_REASON : - * (u_long *) to = 0 ; + *(u32 *)to = 0 ; sp_len = 4 ; goto sp_done ; case SMT_P1033 : /* time stamp */ diff --git a/trunk/drivers/net/hyperv/netvsc.c b/trunk/drivers/net/hyperv/netvsc.c index 6cee2917eb02..4a1a5f58fa73 100644 --- a/trunk/drivers/net/hyperv/netvsc.c +++ b/trunk/drivers/net/hyperv/netvsc.c @@ -383,13 +383,6 @@ int netvsc_device_remove(struct hv_device *device) unsigned long flags; net_device = hv_get_drvdata(device); - spin_lock_irqsave(&device->channel->inbound_lock, flags); - net_device->destroy = true; - spin_unlock_irqrestore(&device->channel->inbound_lock, flags); - - /* Wait for all send completions */ - wait_event(net_device->wait_drain, - atomic_read(&net_device->num_outstanding_sends) == 0); netvsc_disconnect_vsp(net_device); diff --git a/trunk/drivers/net/hyperv/rndis_filter.c b/trunk/drivers/net/hyperv/rndis_filter.c index e5d6146937fa..1e88a1095934 100644 --- a/trunk/drivers/net/hyperv/rndis_filter.c +++ b/trunk/drivers/net/hyperv/rndis_filter.c @@ -718,6 +718,9 @@ static void rndis_filter_halt_device(struct rndis_device *dev) { struct rndis_request *request; struct rndis_halt_request *halt; + struct netvsc_device *nvdev = dev->net_dev; + struct hv_device *hdev = nvdev->dev; + ulong flags; /* Attempt to do a rndis device halt */ request = get_rndis_request(dev, RNDIS_MSG_HALT, @@ -735,6 +738,14 @@ static void rndis_filter_halt_device(struct rndis_device *dev) dev->state = RNDIS_DEV_UNINITIALIZED; cleanup: + spin_lock_irqsave(&hdev->channel->inbound_lock, flags); + nvdev->destroy = true; + spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags); + + /* Wait for all send completions */ + wait_event(nvdev->wait_drain, + atomic_read(&nvdev->num_outstanding_sends) == 0); + if (request) put_rndis_request(dev, request); return; diff --git a/trunk/drivers/net/irda/bfin_sir.c b/trunk/drivers/net/irda/bfin_sir.c index a561ae44a9ac..c6a0299aa9f9 100644 --- a/trunk/drivers/net/irda/bfin_sir.c +++ b/trunk/drivers/net/irda/bfin_sir.c @@ -158,7 +158,7 @@ static int bfin_sir_set_speed(struct bfin_sir_port *port, int speed) /* If not add the 'RPOLC', we can't catch the receive interrupt. * It's related with the HW layout and the IR transiver. */ - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); return ret; } @@ -432,7 +432,7 @@ static void bfin_sir_shutdown(struct bfin_sir_port *port, struct net_device *dev bfin_sir_stop_rx(port); val = UART_GET_GCTL(port); - val &= ~(UCEN | IREN | RPOLC); + val &= ~(UCEN | UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); #ifdef CONFIG_SIR_BFIN_DMA @@ -518,10 +518,10 @@ static void bfin_sir_send_work(struct work_struct *work) * reset all the UART. 
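The skfp pmf.c change above stores the 4-byte SMT_P_REASON value through a u32 pointer instead of a u_long pointer; sp_len is 4, and on LP64 targets unsigned long is 8 bytes, so the old cast zeroed 4 bytes beyond the parameter. A quick standalone check of the difference, using a made-up 8-byte buffer in place of the SMT parameter area:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* 8 bytes of storage: a 4-byte field followed by 4 bytes of other data */
        union { unsigned char b[8]; uint32_t w[2]; } u;

        memset(u.b, 0xaa, sizeof(u.b));

        u.w[0] = 0;     /* the 4-byte store the patch switches to: *(u32 *)to = 0 */

        printf("sizeof(unsigned long) = %zu, sizeof(uint32_t) = %zu\n",
               sizeof(unsigned long), sizeof(uint32_t));
        printf("bytes after the 4-byte field: %02x %02x %02x %02x\n",
               u.b[4], u.b[5], u.b[6], u.b[7]);
        /* With "*(unsigned long *)u.b = 0" on an LP64 machine these four bytes
         * would read back as 00 00 00 00, i.e. the neighbouring data is clobbered. */
        return 0;
}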
*/ val = UART_GET_GCTL(port); - val &= ~(IREN | RPOLC); + val &= ~(UMOD_MASK | RPOLC); UART_PUT_GCTL(port, val); SSYNC(); - val |= IREN | RPOLC; + val |= UMOD_IRDA | RPOLC; UART_PUT_GCTL(port, val); SSYNC(); /* bfin_sir_set_speed(port, self->speed); */ diff --git a/trunk/drivers/net/irda/ks959-sir.c b/trunk/drivers/net/irda/ks959-sir.c index 824e2a93fe8a..5f3aeac3f86d 100644 --- a/trunk/drivers/net/irda/ks959-sir.c +++ b/trunk/drivers/net/irda/ks959-sir.c @@ -542,6 +542,7 @@ static int ks959_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { + err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } diff --git a/trunk/drivers/net/irda/ksdazzle-sir.c b/trunk/drivers/net/irda/ksdazzle-sir.c index 5a278ab83c2f..2d4b6a1ab202 100644 --- a/trunk/drivers/net/irda/ksdazzle-sir.c +++ b/trunk/drivers/net/irda/ksdazzle-sir.c @@ -436,6 +436,7 @@ static int ksdazzle_net_open(struct net_device *netdev) sprintf(hwname, "usb#%d", kingsun->usbdev->devnum); kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname); if (!kingsun->irlap) { + err = -ENOMEM; dev_err(&kingsun->usbdev->dev, "irlap_open failed\n"); goto free_mem; } diff --git a/trunk/drivers/net/irda/sh_sir.c b/trunk/drivers/net/irda/sh_sir.c index 256eddf1f75a..795109425568 100644 --- a/trunk/drivers/net/irda/sh_sir.c +++ b/trunk/drivers/net/irda/sh_sir.c @@ -280,7 +280,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate) } clk = clk_get(NULL, "irda_clk"); - if (!clk) { + if (IS_ERR(clk)) { dev_err(dev, "can not get irda_clk\n"); return -EIO; } diff --git a/trunk/drivers/net/macvtap.c b/trunk/drivers/net/macvtap.c index 0737bd4d1669..0f0f9ce3a776 100644 --- a/trunk/drivers/net/macvtap.c +++ b/trunk/drivers/net/macvtap.c @@ -94,7 +94,8 @@ static int get_slot(struct macvlan_dev *vlan, struct macvtap_queue *q) int i; for (i = 0; i < MAX_MACVTAP_QUEUES; i++) { - if (rcu_dereference(vlan->taps[i]) == q) + if (rcu_dereference_protected(vlan->taps[i], + lockdep_is_held(&macvtap_lock)) == q) return i; } diff --git a/trunk/drivers/net/netconsole.c b/trunk/drivers/net/netconsole.c index f9347ea3d381..b3321129a83c 100644 --- a/trunk/drivers/net/netconsole.c +++ b/trunk/drivers/net/netconsole.c @@ -640,15 +640,9 @@ static int netconsole_netdev_event(struct notifier_block *this, * rtnl_lock already held */ if (nt->np.dev) { - spin_unlock_irqrestore( - &target_list_lock, - flags); __netpoll_cleanup(&nt->np); - spin_lock_irqsave(&target_list_lock, - flags); dev_put(nt->np.dev); nt->np.dev = NULL; - netconsole_target_put(nt); } nt->enabled = 0; stopped = true; diff --git a/trunk/drivers/net/phy/bcm87xx.c b/trunk/drivers/net/phy/bcm87xx.c index 2346b38b9837..799789518e87 100644 --- a/trunk/drivers/net/phy/bcm87xx.c +++ b/trunk/drivers/net/phy/bcm87xx.c @@ -229,3 +229,5 @@ static void __exit bcm87xx_exit(void) ARRAY_SIZE(bcm87xx_driver)); } module_exit(bcm87xx_exit); + +MODULE_LICENSE("GPL"); diff --git a/trunk/drivers/net/phy/mdio-mux-gpio.c b/trunk/drivers/net/phy/mdio-mux-gpio.c index e0cc4ef33dee..eefe49e8713c 100644 --- a/trunk/drivers/net/phy/mdio-mux-gpio.c +++ b/trunk/drivers/net/phy/mdio-mux-gpio.c @@ -101,7 +101,6 @@ static int __devinit mdio_mux_gpio_probe(struct platform_device *pdev) n--; gpio_free(s->gpio[n]); } - devm_kfree(&pdev->dev, s); return r; } diff --git a/trunk/drivers/net/phy/mdio-mux.c b/trunk/drivers/net/phy/mdio-mux.c index 5c120189ec86..4d4d25efc1e1 100644 --- 
a/trunk/drivers/net/phy/mdio-mux.c +++ b/trunk/drivers/net/phy/mdio-mux.c @@ -132,7 +132,7 @@ int mdio_mux_init(struct device *dev, pb->mii_bus = parent_bus; ret_val = -ENODEV; - for_each_child_of_node(dev->of_node, child_bus_node) { + for_each_available_child_of_node(dev->of_node, child_bus_node) { u32 v; r = of_property_read_u32(child_bus_node, "reg", &v); diff --git a/trunk/drivers/net/phy/micrel.c b/trunk/drivers/net/phy/micrel.c index cf287e0eb408..2165d5fdb8c0 100644 --- a/trunk/drivers/net/phy/micrel.c +++ b/trunk/drivers/net/phy/micrel.c @@ -21,6 +21,12 @@ #include #include +/* Operation Mode Strap Override */ +#define MII_KSZPHY_OMSO 0x16 +#define KSZPHY_OMSO_B_CAST_OFF (1 << 9) +#define KSZPHY_OMSO_RMII_OVERRIDE (1 << 1) +#define KSZPHY_OMSO_MII_OVERRIDE (1 << 0) + /* general Interrupt control/status reg in vendor specific block. */ #define MII_KSZPHY_INTCS 0x1B #define KSZPHY_INTCS_JABBER (1 << 15) @@ -101,6 +107,13 @@ static int kszphy_config_init(struct phy_device *phydev) return 0; } +static int ksz8021_config_init(struct phy_device *phydev) +{ + const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE; + phy_write(phydev, MII_KSZPHY_OMSO, val); + return 0; +} + static int ks8051_config_init(struct phy_device *phydev) { int regval; @@ -128,9 +141,22 @@ static struct phy_driver ksphy_driver[] = { .config_intr = ks8737_config_intr, .driver = { .owner = THIS_MODULE,}, }, { - .phy_id = PHY_ID_KS8041, + .phy_id = PHY_ID_KSZ8021, + .phy_id_mask = 0x00ffffff, + .name = "Micrel KSZ8021", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | + SUPPORTED_Asym_Pause), + .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, + .config_init = ksz8021_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = kszphy_ack_interrupt, + .config_intr = kszphy_config_intr, + .driver = { .owner = THIS_MODULE,}, +}, { + .phy_id = PHY_ID_KSZ8041, .phy_id_mask = 0x00fffff0, - .name = "Micrel KS8041", + .name = "Micrel KSZ8041", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, @@ -141,9 +167,9 @@ static struct phy_driver ksphy_driver[] = { .config_intr = kszphy_config_intr, .driver = { .owner = THIS_MODULE,}, }, { - .phy_id = PHY_ID_KS8051, + .phy_id = PHY_ID_KSZ8051, .phy_id_mask = 0x00fffff0, - .name = "Micrel KS8051", + .name = "Micrel KSZ8051", .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, @@ -154,8 +180,8 @@ static struct phy_driver ksphy_driver[] = { .config_intr = kszphy_config_intr, .driver = { .owner = THIS_MODULE,}, }, { - .phy_id = PHY_ID_KS8001, - .name = "Micrel KS8001 or KS8721", + .phy_id = PHY_ID_KSZ8001, + .name = "Micrel KSZ8001 or KS8721", .phy_id_mask = 0x00ffffff, .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT, @@ -201,10 +227,11 @@ MODULE_LICENSE("GPL"); static struct mdio_device_id __maybe_unused micrel_tbl[] = { { PHY_ID_KSZ9021, 0x000ffffe }, - { PHY_ID_KS8001, 0x00ffffff }, + { PHY_ID_KSZ8001, 0x00ffffff }, { PHY_ID_KS8737, 0x00fffff0 }, - { PHY_ID_KS8041, 0x00fffff0 }, - { PHY_ID_KS8051, 0x00fffff0 }, + { PHY_ID_KSZ8021, 0x00ffffff }, + { PHY_ID_KSZ8041, 0x00fffff0 }, + { PHY_ID_KSZ8051, 0x00fffff0 }, { } }; diff --git a/trunk/drivers/net/phy/smsc.c b/trunk/drivers/net/phy/smsc.c index 6d6192316b30..88e3991464e7 100644 --- a/trunk/drivers/net/phy/smsc.c +++ b/trunk/drivers/net/phy/smsc.c @@ -56,6 +56,32 @@ static int 
smsc_phy_config_init(struct phy_device *phydev) return smsc_phy_ack_interrupt (phydev); } +static int lan87xx_config_init(struct phy_device *phydev) +{ + /* + * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on + * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due + * to a bug on the chip. + * + * When the system is powered on with the network cable being + * disconnected all the way until after ifconfig ethX up is + * issued for the LAN port with this PHY, connecting the cable + * afterwards does not cause LINK change detection, while the + * expected behavior is the Link UP being detected. + */ + int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); + if (rc < 0) + return rc; + + rc &= ~MII_LAN83C185_EDPWRDOWN; + + rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc); + if (rc < 0) + return rc; + + return smsc_phy_ack_interrupt(phydev); +} + static int lan911x_config_init(struct phy_device *phydev) { return smsc_phy_ack_interrupt(phydev); @@ -162,7 +188,7 @@ static struct phy_driver smsc_phy_driver[] = { /* basic functions */ .config_aneg = genphy_config_aneg, .read_status = genphy_read_status, - .config_init = smsc_phy_config_init, + .config_init = lan87xx_config_init, /* IRQ related */ .ack_interrupt = smsc_phy_ack_interrupt, diff --git a/trunk/drivers/net/ppp/pppoe.c b/trunk/drivers/net/ppp/pppoe.c index cbf7047decc0..20f31d0d1536 100644 --- a/trunk/drivers/net/ppp/pppoe.c +++ b/trunk/drivers/net/ppp/pppoe.c @@ -570,7 +570,7 @@ static int pppoe_release(struct socket *sock) po = pppox_sk(sk); - if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { + if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) { dev_put(po->pppoe_dev); po->pppoe_dev = NULL; } diff --git a/trunk/drivers/net/ppp/pptp.c b/trunk/drivers/net/ppp/pptp.c index 1c98321b56cc..162464fe86bf 100644 --- a/trunk/drivers/net/ppp/pptp.c +++ b/trunk/drivers/net/ppp/pptp.c @@ -189,7 +189,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) if (sk_pppox(po)->sk_state & PPPOX_DEAD) goto tx_error; - rt = ip_route_output_ports(&init_net, &fl4, NULL, + rt = ip_route_output_ports(sock_net(sk), &fl4, NULL, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, IPPROTO_GRE, @@ -468,7 +468,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr, po->chan.private = sk; po->chan.ops = &pptp_chan_ops; - rt = ip_route_output_ports(&init_net, &fl4, sk, + rt = ip_route_output_ports(sock_net(sk), &fl4, sk, opt->dst_addr.sin_addr.s_addr, opt->src_addr.sin_addr.s_addr, 0, 0, diff --git a/trunk/drivers/net/team/team.c b/trunk/drivers/net/team/team.c index 87707ab39430..f8cd61f449a4 100644 --- a/trunk/drivers/net/team/team.c +++ b/trunk/drivers/net/team/team.c @@ -795,16 +795,17 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, + gfp_t gfp) { struct netpoll *np; int err; - np = kzalloc(sizeof(*np), GFP_KERNEL); + np = kzalloc(sizeof(*np), gfp); if (!np) return -ENOMEM; - err = __netpoll_setup(np, port->dev); + err = __netpoll_setup(np, port->dev, gfp); if (err) { kfree(np); return err; @@ -833,7 +834,8 @@ static struct netpoll_info *team_netpoll_info(struct team *team) } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team *team, struct team_port *port, + gfp_t 
gfp) { return 0; } @@ -846,7 +848,7 @@ static struct netpoll_info *team_netpoll_info(struct team *team) } #endif -static void __team_port_change_check(struct team_port *port, bool linkup); +static void __team_port_change_port_added(struct team_port *port, bool linkup); static int team_port_add(struct team *team, struct net_device *port_dev) { @@ -913,7 +915,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) } if (team_netpoll_info(team)) { - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(team, port, GFP_KERNEL); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -946,7 +948,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev) team_port_enable(team, port); list_add_tail_rcu(&port->list, &team->port_list); __team_compute_features(team); - __team_port_change_check(port, !!netif_carrier_ok(port_dev)); + __team_port_change_port_added(port, !!netif_carrier_ok(port_dev)); __team_options_change_check(team); netdev_info(dev, "Port device %s added\n", portname); @@ -981,6 +983,8 @@ static int team_port_add(struct team *team, struct net_device *port_dev) return err; } +static void __team_port_change_port_removed(struct team_port *port); + static int team_port_del(struct team *team, struct net_device *port_dev) { struct net_device *dev = team->dev; @@ -997,8 +1001,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev) __team_option_inst_mark_removed_port(team, port); __team_options_change_check(team); __team_option_inst_del_port(team, port); - port->removed = true; - __team_port_change_check(port, false); + __team_port_change_port_removed(port); team_port_disable(team, port); list_del_rcu(&port->list); netdev_rx_handler_unregister(port_dev); @@ -1443,7 +1446,7 @@ static void team_netpoll_cleanup(struct net_device *dev) } static int team_netpoll_setup(struct net_device *dev, - struct netpoll_info *npifo) + struct netpoll_info *npifo, gfp_t gfp) { struct team *team = netdev_priv(dev); struct team_port *port; @@ -1451,7 +1454,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(team, port, gfp); if (err) { __team_netpoll_cleanup(team); break; @@ -1650,8 +1653,8 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, &team_nl_family, 0, TEAM_CMD_NOOP); - if (IS_ERR(hdr)) { - err = PTR_ERR(hdr); + if (!hdr) { + err = -EMSGSIZE; goto err_msg_put; } @@ -1845,8 +1848,8 @@ static int team_nl_send_options_get(struct team *team, u32 pid, u32 seq, hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI, TEAM_CMD_OPTIONS_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); + if (!hdr) + return -EMSGSIZE; if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; @@ -2065,8 +2068,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb, hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, TEAM_CMD_PORT_LIST_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); + if (!hdr) + return -EMSGSIZE; if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex)) goto nla_put_failure; @@ -2249,13 +2252,11 @@ static void __team_options_change_check(struct team *team) } /* rtnl lock is held */ -static void __team_port_change_check(struct team_port *port, bool linkup) + +static void __team_port_change_send(struct 
team_port *port, bool linkup) { int err; - if (!port->removed && port->state.linkup == linkup) - return; - port->changed = true; port->state.linkup = linkup; team_refresh_port_linkup(port); @@ -2280,6 +2281,23 @@ static void __team_port_change_check(struct team_port *port, bool linkup) } +static void __team_port_change_check(struct team_port *port, bool linkup) +{ + if (port->state.linkup != linkup) + __team_port_change_send(port, linkup); +} + +static void __team_port_change_port_added(struct team_port *port, bool linkup) +{ + __team_port_change_send(port, linkup); +} + +static void __team_port_change_port_removed(struct team_port *port) +{ + port->removed = true; + __team_port_change_send(port, false); +} + static void team_port_change_check(struct team_port *port, bool linkup) { struct team *team = port->team; diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c index 926d4db5cb38..3a16d4fdaa05 100644 --- a/trunk/drivers/net/tun.c +++ b/trunk/drivers/net/tun.c @@ -187,7 +187,6 @@ static void __tun_detach(struct tun_struct *tun) netif_tx_lock_bh(tun->dev); netif_carrier_off(tun->dev); tun->tfile = NULL; - tun->socket.file = NULL; netif_tx_unlock_bh(tun->dev); /* Drop read queue */ diff --git a/trunk/drivers/net/usb/asix_devices.c b/trunk/drivers/net/usb/asix_devices.c index 4fd48df6b989..32e31c5c5dc6 100644 --- a/trunk/drivers/net/usb/asix_devices.c +++ b/trunk/drivers/net/usb/asix_devices.c @@ -961,6 +961,10 @@ static const struct usb_device_id products [] = { // DLink DUB-E100 H/W Ver B1 Alternate USB_DEVICE (0x2001, 0x3c05), .driver_info = (unsigned long) &ax88772_info, +}, { + // DLink DUB-E100 H/W Ver C1 + USB_DEVICE (0x2001, 0x1a02), + .driver_info = (unsigned long) &ax88772_info, }, { // Linksys USB1000 USB_DEVICE (0x1737, 0x0039), diff --git a/trunk/drivers/net/usb/cdc-phonet.c b/trunk/drivers/net/usb/cdc-phonet.c index 64610048ce87..7d78669000d7 100644 --- a/trunk/drivers/net/usb/cdc-phonet.c +++ b/trunk/drivers/net/usb/cdc-phonet.c @@ -232,6 +232,7 @@ static int usbpn_open(struct net_device *dev) struct urb *req = usb_alloc_urb(0, GFP_KERNEL); if (!req || rx_submit(pnd, req, GFP_KERNEL | __GFP_COLD)) { + usb_free_urb(req); usbpn_close(dev); return -ENOMEM; } diff --git a/trunk/drivers/net/usb/cdc_ncm.c b/trunk/drivers/net/usb/cdc_ncm.c index f4ce5957df32..4cd582a4f625 100644 --- a/trunk/drivers/net/usb/cdc_ncm.c +++ b/trunk/drivers/net/usb/cdc_ncm.c @@ -1225,6 +1225,26 @@ static const struct usb_device_id cdc_devs[] = { .driver_info = (unsigned long) &wwan_info, }, + /* Dell branded MBM devices like DW5550 */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x413c, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + + /* Toshiba branded MBM devices */ + { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_VENDOR, + .idVendor = 0x0930, + .bInterfaceClass = USB_CLASS_COMM, + .bInterfaceSubClass = USB_CDC_SUBCLASS_NCM, + .bInterfaceProtocol = USB_CDC_PROTO_NONE, + .driver_info = (unsigned long) &wwan_info, + }, + /* Generic CDC-NCM devices */ { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE), diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c index 2ea126a16d79..3543c9e57824 100644 --- a/trunk/drivers/net/usb/qmi_wwan.c +++ b/trunk/drivers/net/usb/qmi_wwan.c @@ -247,30 +247,12 @@ static int qmi_wwan_bind(struct usbnet *dev, 
struct usb_interface *intf) */ static int qmi_wwan_bind_shared(struct usbnet *dev, struct usb_interface *intf) { - int rv; struct qmi_wwan_state *info = (void *)&dev->data; - /* ZTE makes devices where the interface descriptors and endpoint - * configurations of two or more interfaces are identical, even - * though the functions are completely different. If set, then - * driver_info->data is a bitmap of acceptable interface numbers - * allowing us to bind to one such interface without binding to - * all of them - */ - if (dev->driver_info->data && - !test_bit(intf->cur_altsetting->desc.bInterfaceNumber, &dev->driver_info->data)) { - dev_info(&intf->dev, "not on our whitelist - ignored"); - rv = -ENODEV; - goto err; - } - /* control and data is shared */ info->control = intf; info->data = intf; - rv = qmi_wwan_register_subdriver(dev); - -err: - return rv; + return qmi_wwan_register_subdriver(dev); } static void qmi_wwan_unbind(struct usbnet *dev, struct usb_interface *intf) @@ -315,7 +297,7 @@ static int qmi_wwan_suspend(struct usb_interface *intf, pm_message_t message) if (ret < 0) goto err; - if (info->subdriver && info->subdriver->suspend) + if (intf == info->control && info->subdriver && info->subdriver->suspend) ret = info->subdriver->suspend(intf, message); if (ret < 0) usbnet_resume(intf); @@ -328,13 +310,14 @@ static int qmi_wwan_resume(struct usb_interface *intf) struct usbnet *dev = usb_get_intfdata(intf); struct qmi_wwan_state *info = (void *)&dev->data; int ret = 0; + bool callsub = (intf == info->control && info->subdriver && info->subdriver->resume); - if (info->subdriver && info->subdriver->resume) + if (callsub) ret = info->subdriver->resume(intf); if (ret < 0) goto err; ret = usbnet_resume(intf); - if (ret < 0 && info->subdriver && info->subdriver->resume && info->subdriver->suspend) + if (ret < 0 && callsub && info->subdriver->suspend) info->subdriver->suspend(intf, PMSG_SUSPEND); err: return ret; @@ -356,217 +339,71 @@ static const struct driver_info qmi_wwan_shared = { .manage_power = qmi_wwan_manage_power, }; -static const struct driver_info qmi_wwan_force_int0 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(0), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int1 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(1), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int2 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(2), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int3 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(3), /* interface whitelist bitmap */ -}; - -static const struct driver_info qmi_wwan_force_int4 = { - .description = "Qualcomm WWAN/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(4), /* interface whitelist bitmap */ -}; - -/* Sierra Wireless provide equally useless interface descriptors - * 
Devices in QMI mode can be switched between two different - * configurations: - * a) USB interface #8 is QMI/wwan - * b) USB interfaces #8, #19 and #20 are QMI/wwan - * - * Both configurations provide a number of other interfaces (serial++), - * some of which have the same endpoint configuration as we expect, so - * a whitelist or blacklist is necessary. - * - * FIXME: The below whitelist should include BIT(20). It does not - * because I cannot get it to work... - */ -static const struct driver_info qmi_wwan_sierra = { - .description = "Sierra Wireless wwan/QMI device", - .flags = FLAG_WWAN, - .bind = qmi_wwan_bind_shared, - .unbind = qmi_wwan_unbind, - .manage_power = qmi_wwan_manage_power, - .data = BIT(8) | BIT(19), /* interface whitelist bitmap */ -}; - #define HUAWEI_VENDOR_ID 0x12D1 +/* map QMI/wwan function by a fixed interface number */ +#define QMI_FIXED_INTF(vend, prod, num) \ + USB_DEVICE_INTERFACE_NUMBER(vend, prod, num), \ + .driver_info = (unsigned long)&qmi_wwan_shared + /* Gobi 1000 QMI/wwan interface number is 3 according to qcserial */ #define QMI_GOBI1K_DEVICE(vend, prod) \ - USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_force_int3 + QMI_FIXED_INTF(vend, prod, 3) -/* Gobi 2000 and Gobi 3000 QMI/wwan interface number is 0 according to qcserial */ +/* Gobi 2000/3000 QMI/wwan interface number is 0 according to qcserial */ #define QMI_GOBI_DEVICE(vend, prod) \ - USB_DEVICE(vend, prod), \ - .driver_info = (unsigned long)&qmi_wwan_force_int0 + QMI_FIXED_INTF(vend, prod, 0) static const struct usb_device_id products[] = { + /* 1. CDC ECM like devices match on the control interface */ { /* Huawei E392, E398 and possibly others sharing both device id and more... */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 9, /* CDC Ethernet *control* interface */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 9), .driver_info = (unsigned long)&qmi_wwan_info, }, { /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 57, /* CDC Ethernet *control* interface */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 57), .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Huawei E392, E398 and possibly others in "Windows mode" - * using a combined control and data interface without any CDC - * functional descriptors - */ - .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = HUAWEI_VENDOR_ID, - .bInterfaceClass = USB_CLASS_VENDOR_SPEC, - .bInterfaceSubClass = 1, - .bInterfaceProtocol = 17, - .driver_info = (unsigned long)&qmi_wwan_shared, + + /* 2. 
Combined interface devices matching on class+protocol */ + { /* Huawei E367 and possibly others in "Windows mode" */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 7), + .driver_info = (unsigned long)&qmi_wwan_info, }, - { /* Pantech UML290 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x106c, - .idProduct = 0x3718, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xf0, - .bInterfaceProtocol = 0xff, + { /* Huawei E392, E398 and possibly others in "Windows mode" */ + USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, USB_CLASS_VENDOR_SPEC, 1, 17), .driver_info = (unsigned long)&qmi_wwan_shared, }, - { /* ZTE MF820D */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0167, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE MF821D */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0326, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3520-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0055, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int1, - }, - { /* ZTE (Vodafone) K3565-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0063, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3570-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1008, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3571-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1010, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K3765-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x2002, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE (Vodafone) K4505-Z */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x0104, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int4, - }, - { /* ZTE MF60 */ - .match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x19d2, - .idProduct = 0x1402, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_force_int2, + { /* Pantech UML290, P4200 and more */ + USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf0, 0xff), + .driver_info = (unsigned long)&qmi_wwan_shared, }, - { /* Sierra Wireless MC77xx in QMI mode */ - 
.match_flags = USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_INT_INFO, - .idVendor = 0x1199, - .idProduct = 0x68a2, - .bInterfaceClass = 0xff, - .bInterfaceSubClass = 0xff, - .bInterfaceProtocol = 0xff, - .driver_info = (unsigned long)&qmi_wwan_sierra, + { /* Pantech UML290 - newer firmware */ + USB_VENDOR_AND_INTERFACE_INFO(0x106c, USB_CLASS_VENDOR_SPEC, 0xf1, 0xff), + .driver_info = (unsigned long)&qmi_wwan_shared, }, - /* Gobi 1000 devices */ + /* 3. Combined interface devices matching on interface number */ + {QMI_FIXED_INTF(0x19d2, 0x0055, 1)}, /* ZTE (Vodafone) K3520-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0063, 4)}, /* ZTE (Vodafone) K3565-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0104, 4)}, /* ZTE (Vodafone) K4505-Z */ + {QMI_FIXED_INTF(0x19d2, 0x0157, 5)}, /* ZTE MF683 */ + {QMI_FIXED_INTF(0x19d2, 0x0167, 4)}, /* ZTE MF820D */ + {QMI_FIXED_INTF(0x19d2, 0x0326, 4)}, /* ZTE MF821D */ + {QMI_FIXED_INTF(0x19d2, 0x1008, 4)}, /* ZTE (Vodafone) K3570-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1010, 4)}, /* ZTE (Vodafone) K3571-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1018, 3)}, /* ZTE (Vodafone) K5006-Z */ + {QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */ + {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ + {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ + {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */ + {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ + + /* 4. Gobi 1000 devices */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ {QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */ - {QMI_GOBI1K_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ {QMI_GOBI1K_DEVICE(0x04da, 0x250d)}, /* Panasonic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x413c, 0x8172)}, /* Dell Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ @@ -579,9 +416,11 @@ static const struct usb_device_id products[] = { {QMI_GOBI1K_DEVICE(0x05c6, 0x9222)}, /* Generic Gobi Modem device */ {QMI_GOBI1K_DEVICE(0x05c6, 0x9009)}, /* Generic Gobi Modem device */ - /* Gobi 2000 and 3000 devices */ + /* 5. 
Gobi 2000 and 3000 devices */ {QMI_GOBI_DEVICE(0x413c, 0x8186)}, /* Dell Gobi 2000 Modem device (N0218, VU936) */ + {QMI_GOBI_DEVICE(0x413c, 0x8194)}, /* Dell Gobi 3000 Composite */ {QMI_GOBI_DEVICE(0x05c6, 0x920b)}, /* Generic Gobi 2000 Modem device */ + {QMI_GOBI_DEVICE(0x05c6, 0x920d)}, /* Gobi 3000 Composite */ {QMI_GOBI_DEVICE(0x05c6, 0x9225)}, /* Sony Gobi 2000 Modem device (N0279, VU730) */ {QMI_GOBI_DEVICE(0x05c6, 0x9245)}, /* Samsung Gobi 2000 Modem device (VL176) */ {QMI_GOBI_DEVICE(0x03f0, 0x251d)}, /* HP Gobi 2000 Modem device (VP412) */ @@ -589,6 +428,8 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x05c6, 0x9265)}, /* Asus Gobi 2000 Modem device (VR305) */ {QMI_GOBI_DEVICE(0x05c6, 0x9235)}, /* Top Global Gobi 2000 Modem device (VR306) */ {QMI_GOBI_DEVICE(0x05c6, 0x9275)}, /* iRex Technologies Gobi 2000 Modem device (VR307) */ + {QMI_GOBI_DEVICE(0x1199, 0x68a5)}, /* Sierra Wireless Modem */ + {QMI_GOBI_DEVICE(0x1199, 0x68a9)}, /* Sierra Wireless Modem */ {QMI_GOBI_DEVICE(0x1199, 0x9001)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9002)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9003)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ @@ -600,11 +441,17 @@ static const struct usb_device_id products[] = { {QMI_GOBI_DEVICE(0x1199, 0x9009)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x900a)}, /* Sierra Wireless Gobi 2000 Modem device (VT773) */ {QMI_GOBI_DEVICE(0x1199, 0x9011)}, /* Sierra Wireless Gobi 2000 Modem device (MC8305) */ + {QMI_FIXED_INTF(0x1199, 0x9011, 5)}, /* alternate interface number!? */ {QMI_GOBI_DEVICE(0x16d8, 0x8002)}, /* CMDTech Gobi 2000 Modem device (VU922) */ {QMI_GOBI_DEVICE(0x05c6, 0x9205)}, /* Gobi 2000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9013)}, /* Sierra Wireless Gobi 3000 Modem device (MC8355) */ + {QMI_GOBI_DEVICE(0x03f0, 0x371d)}, /* HP un2430 Mobile Broadband Module */ {QMI_GOBI_DEVICE(0x1199, 0x9015)}, /* Sierra Wireless Gobi 3000 Modem device */ {QMI_GOBI_DEVICE(0x1199, 0x9019)}, /* Sierra Wireless Gobi 3000 Modem device */ + {QMI_GOBI_DEVICE(0x1199, 0x901b)}, /* Sierra Wireless MC7770 */ + {QMI_GOBI_DEVICE(0x12d1, 0x14f1)}, /* Sony Gobi 3000 Composite */ + {QMI_GOBI_DEVICE(0x1410, 0xa021)}, /* Foxconn Gobi 3000 Modem device (Novatel E396) */ + { } /* END */ }; MODULE_DEVICE_TABLE(usb, products); diff --git a/trunk/drivers/net/usb/sierra_net.c b/trunk/drivers/net/usb/sierra_net.c index d75d1f56becf..8e22417fa6c1 100644 --- a/trunk/drivers/net/usb/sierra_net.c +++ b/trunk/drivers/net/usb/sierra_net.c @@ -68,15 +68,8 @@ static atomic_t iface_counter = ATOMIC_INIT(0); */ #define SIERRA_NET_USBCTL_BUF_LEN 1024 -/* list of interface numbers - used for constructing interface lists */ -struct sierra_net_iface_info { - const u32 infolen; /* number of interface numbers on list */ - const u8 *ifaceinfo; /* pointer to the array holding the numbers */ -}; - struct sierra_net_info_data { u16 rx_urb_size; - struct sierra_net_iface_info whitelist; }; /* Private data structure */ @@ -637,21 +630,6 @@ static int sierra_net_change_mtu(struct net_device *net, int new_mtu) return usbnet_change_mtu(net, new_mtu); } -static int is_whitelisted(const u8 ifnum, - const struct sierra_net_iface_info *whitelist) -{ - if (whitelist) { - const u8 *list = whitelist->ifaceinfo; - int i; - - for (i = 0; i < whitelist->infolen; i++) { - if (list[i] == ifnum) - return 1; - } - } - return 0; -} - static int sierra_net_get_fw_attr(struct 
usbnet *dev, u16 *datap) { int result = 0; @@ -678,7 +656,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap) return -EIO; } - *datap = *attrdata; + *datap = le16_to_cpu(*attrdata); kfree(attrdata); return result; @@ -706,11 +684,6 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) dev_dbg(&dev->udev->dev, "%s", __func__); ifacenum = intf->cur_altsetting->desc.bInterfaceNumber; - /* We only accept certain interfaces */ - if (!is_whitelisted(ifacenum, &data->whitelist)) { - dev_dbg(&dev->udev->dev, "Ignoring interface: %d", ifacenum); - return -ENODEV; - } numendpoints = intf->cur_altsetting->desc.bNumEndpoints; /* We have three endpoints, bulk in and out, and a status */ if (numendpoints != 3) { @@ -945,13 +918,8 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb, return NULL; } -static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; static const struct sierra_net_info_data sierra_net_info_data_direct_ip = { .rx_urb_size = 8 * 1024, - .whitelist = { - .infolen = ARRAY_SIZE(sierra_net_ifnum_list), - .ifaceinfo = sierra_net_ifnum_list - } }; static const struct driver_info sierra_net_info_direct_ip = { @@ -965,15 +933,19 @@ static const struct driver_info sierra_net_info_direct_ip = { .data = (unsigned long)&sierra_net_info_data_direct_ip, }; +#define DIRECT_IP_DEVICE(vend, prod) \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 7), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 10), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip}, \ + {USB_DEVICE_INTERFACE_NUMBER(vend, prod, 11), \ + .driver_info = (unsigned long)&sierra_net_info_direct_ip} + static const struct usb_device_id products[] = { - {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, - {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, - {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, - {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ - .driver_info = (unsigned long) &sierra_net_info_direct_ip}, + DIRECT_IP_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ + DIRECT_IP_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */ + DIRECT_IP_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */ + DIRECT_IP_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */ {}, /* last item */ }; diff --git a/trunk/drivers/net/usb/smsc75xx.c b/trunk/drivers/net/usb/smsc75xx.c index f5ab6e613ec8..376143e8a1aa 100644 --- a/trunk/drivers/net/usb/smsc75xx.c +++ b/trunk/drivers/net/usb/smsc75xx.c @@ -1253,6 +1253,7 @@ static struct usb_driver smsc75xx_driver = { .probe = usbnet_probe, .suspend = usbnet_suspend, .resume = usbnet_resume, + .reset_resume = usbnet_resume, .disconnect = usbnet_disconnect, .disable_hub_initiated_lpm = 1, }; diff --git a/trunk/drivers/net/usb/usbnet.c b/trunk/drivers/net/usb/usbnet.c index 8531c1caac28..fc9f578a1e25 100644 --- a/trunk/drivers/net/usb/usbnet.c +++ b/trunk/drivers/net/usb/usbnet.c @@ -1201,19 +1201,26 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb, } EXPORT_SYMBOL_GPL(usbnet_start_xmit); -static void rx_alloc_submit(struct usbnet *dev, gfp_t flags) +static int rx_alloc_submit(struct usbnet *dev, gfp_t flags) { struct urb *urb; int i; + int ret = 0; /* don't refill the queue all at once */ for (i = 0; i < 10 
&& dev->rxq.qlen < RX_QLEN(dev); i++) { urb = usb_alloc_urb(0, flags); if (urb != NULL) { - if (rx_submit(dev, urb, flags) == -ENOLINK) - return; + ret = rx_submit(dev, urb, flags); + if (ret) + goto err; + } else { + ret = -ENOMEM; + goto err; } } +err: + return ret; } /*-------------------------------------------------------------------------*/ @@ -1257,7 +1264,8 @@ static void usbnet_bh (unsigned long param) int temp = dev->rxq.qlen; if (temp < RX_QLEN(dev)) { - rx_alloc_submit(dev, GFP_ATOMIC); + if (rx_alloc_submit(dev, GFP_ATOMIC) == -ENOLINK) + return; if (temp != dev->rxq.qlen) netif_dbg(dev, link, dev->net, "rxqlen %d --> %d\n", @@ -1573,7 +1581,7 @@ int usbnet_resume (struct usb_interface *intf) netif_device_present(dev->net) && !timer_pending(&dev->delay) && !test_bit(EVENT_RX_HALT, &dev->flags)) - rx_alloc_submit(dev, GFP_KERNEL); + rx_alloc_submit(dev, GFP_NOIO); if (!(dev->txq.qlen >= TX_QLEN(dev))) netif_tx_wake_all_queues(dev->net); diff --git a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c index 93e0cfb739b8..ce9d4f2c9776 100644 --- a/trunk/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/trunk/drivers/net/vmxnet3/vmxnet3_drv.c @@ -3019,6 +3019,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, netdev->watchdog_timeo = 5 * HZ; INIT_WORK(&adapter->work, vmxnet3_reset_work); + set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); if (adapter->intr.type == VMXNET3_IT_MSIX) { int i; @@ -3043,7 +3044,6 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_register; } - set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); vmxnet3_check_link(adapter, false); atomic_inc(&devices_found); return 0; diff --git a/trunk/drivers/net/wan/dscc4.c b/trunk/drivers/net/wan/dscc4.c index 9eb6479306d6..ef36cafd44b7 100644 --- a/trunk/drivers/net/wan/dscc4.c +++ b/trunk/drivers/net/wan/dscc4.c @@ -774,14 +774,15 @@ static int __devinit dscc4_init_one(struct pci_dev *pdev, } /* Global interrupt queue */ writel((u32)(((IRQ_RING_SIZE >> 5) - 1) << 20), ioaddr + IQLENR1); + + rc = -ENOMEM; + priv->iqcfg = (__le32 *) pci_alloc_consistent(pdev, IRQ_RING_SIZE*sizeof(__le32), &priv->iqcfg_dma); if (!priv->iqcfg) goto err_free_irq_5; writel(priv->iqcfg_dma, ioaddr + IQCFG); - rc = -ENOMEM; - /* * SCC 0-3 private rx/tx irq structures * IQRX/TXi needs to be set soon. Learned it the hard way... 
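As an illustrative sketch only (not part of the patch): the qmi_wwan and sierra_net hunks above can drop their interface whitelists because USB_DEVICE_INTERFACE_NUMBER() lets the match table itself select one interface of a composite device, so bind() no longer has to reject the unwanted ones. A minimal, hypothetical usbnet-style table built on the same pattern follows; the vendor/product IDs, the table name and the driver_info contents are invented for illustration.

#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb/usbnet.h>

static const struct driver_info example_wwan_info = {
	.description = "example wwan function",
};

static const struct usb_device_id example_ids[] = {
	/* bind only to interface #4 of the hypothetical device 19d2:ffff */
	{ USB_DEVICE_INTERFACE_NUMBER(0x19d2, 0xffff, 4),
	  .driver_info = (unsigned long)&example_wwan_info },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_ids);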
diff --git a/trunk/drivers/net/wan/ixp4xx_hss.c b/trunk/drivers/net/wan/ixp4xx_hss.c index aaaca9aa2293..3f575afd8cfc 100644 --- a/trunk/drivers/net/wan/ixp4xx_hss.c +++ b/trunk/drivers/net/wan/ixp4xx_hss.c @@ -10,6 +10,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include #include #include #include diff --git a/trunk/drivers/net/wimax/i2400m/fw.c b/trunk/drivers/net/wimax/i2400m/fw.c index 283237f6f074..def12b38cbf7 100644 --- a/trunk/drivers/net/wimax/i2400m/fw.c +++ b/trunk/drivers/net/wimax/i2400m/fw.c @@ -326,8 +326,10 @@ int i2400m_barker_db_init(const char *_options) unsigned barker; options_orig = kstrdup(_options, GFP_KERNEL); - if (options_orig == NULL) + if (options_orig == NULL) { + result = -ENOMEM; goto error_parse; + } options = options_orig; while ((token = strsep(&options, ",")) != NULL) { diff --git a/trunk/drivers/net/wireless/at76c50x-usb.c b/trunk/drivers/net/wireless/at76c50x-usb.c index efc162e0b511..88b8d64c90f1 100644 --- a/trunk/drivers/net/wireless/at76c50x-usb.c +++ b/trunk/drivers/net/wireless/at76c50x-usb.c @@ -342,7 +342,7 @@ static int at76_dfu_get_status(struct usb_device *udev, return ret; } -static u8 at76_dfu_get_state(struct usb_device *udev, u8 *state) +static int at76_dfu_get_state(struct usb_device *udev, u8 *state) { int ret; diff --git a/trunk/drivers/net/wireless/ath/ath5k/base.c b/trunk/drivers/net/wireless/ath/ath5k/base.c index 8c4c040a47b8..2aab20ee9f38 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/base.c +++ b/trunk/drivers/net/wireless/ath/ath5k/base.c @@ -2056,9 +2056,7 @@ ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf) void ath5k_beacon_config(struct ath5k_hw *ah) { - unsigned long flags; - - spin_lock_irqsave(&ah->block, flags); + spin_lock_bh(&ah->block); ah->bmisscount = 0; ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA); @@ -2085,7 +2083,7 @@ ath5k_beacon_config(struct ath5k_hw *ah) ath5k_hw_set_imr(ah, ah->imask); mmiowb(); - spin_unlock_irqrestore(&ah->block, flags); + spin_unlock_bh(&ah->block); } static void ath5k_tasklet_beacon(unsigned long data) diff --git a/trunk/drivers/net/wireless/ath/ath5k/eeprom.c b/trunk/drivers/net/wireless/ath/ath5k/eeprom.c index 4026c906cc7b..b7e0258887e7 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath5k/eeprom.c @@ -1482,7 +1482,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode) case AR5K_EEPROM_MODE_11A: offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version); rate_pcal_info = ee->ee_rate_tpwr_a; - ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_CHAN; + ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN; break; case AR5K_EEPROM_MODE_11B: offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version); diff --git a/trunk/drivers/net/wireless/ath/ath5k/eeprom.h b/trunk/drivers/net/wireless/ath/ath5k/eeprom.h index dc2bcfeadeb4..94a9bbea6874 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/eeprom.h +++ b/trunk/drivers/net/wireless/ath/ath5k/eeprom.h @@ -182,6 +182,7 @@ #define AR5K_EEPROM_EEP_DELTA 10 #define AR5K_EEPROM_N_MODES 3 #define AR5K_EEPROM_N_5GHZ_CHAN 10 +#define AR5K_EEPROM_N_5GHZ_RATE_CHAN 8 #define AR5K_EEPROM_N_2GHZ_CHAN 3 #define AR5K_EEPROM_N_2GHZ_CHAN_2413 4 #define AR5K_EEPROM_N_2GHZ_CHAN_MAX 4 diff --git a/trunk/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/trunk/drivers/net/wireless/ath/ath5k/mac80211-ops.c index 260e7dc7f751..d56453e43d7e 100644 --- a/trunk/drivers/net/wireless/ath/ath5k/mac80211-ops.c +++ b/trunk/drivers/net/wireless/ath/ath5k/mac80211-ops.c @@ 
-254,7 +254,6 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ath5k_vif *avf = (void *)vif->drv_priv; struct ath5k_hw *ah = hw->priv; struct ath_common *common = ath5k_hw_common(ah); - unsigned long flags; mutex_lock(&ah->lock); @@ -300,9 +299,9 @@ ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } if (changes & BSS_CHANGED_BEACON) { - spin_lock_irqsave(&ah->block, flags); + spin_lock_bh(&ah->block); ath5k_beacon_update(hw, vif); - spin_unlock_irqrestore(&ah->block, flags); + spin_unlock_bh(&ah->block); } if (changes & BSS_CHANGED_BEACON_ENABLED) diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c index 2588848f4a82..d066f2516e47 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c @@ -2982,6 +2982,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah, case EEP_RX_MASK: return pBase->txrxMask & 0xf; case EEP_PAPRD: + if (AR_SREV_9462(ah)) + return false; + if (!ah->config.enable_paprd); + return false; return !!(pBase->featureEnable & BIT(5)); case EEP_CHAIN_MASK_REDUCE: return (pBase->miscConfiguration >> 0x3) & 0x1; diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/trunk/drivers/net/wireless/ath/ath9k/ar9003_paprd.c index 2c9f7d7ed4cc..0ed3846f9cbb 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_paprd.c +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_paprd.c @@ -142,6 +142,7 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah) }; int training_power; int i, val; + u32 am2pm_mask = ah->paprd_ratemask; if (IS_CHAN_2GHZ(ah->curchan)) training_power = ar9003_get_training_power_2g(ah); @@ -158,10 +159,13 @@ static int ar9003_paprd_setup_single_table(struct ath_hw *ah) } ah->paprd_training_power = training_power; + if (AR_SREV_9330(ah)) + am2pm_mask = 0; + REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK, ah->paprd_ratemask); REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK, - ah->paprd_ratemask); + am2pm_mask); REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, ah->paprd_ratemask_ht40); @@ -782,6 +786,102 @@ int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain) } EXPORT_SYMBOL(ar9003_paprd_setup_gain_table); +static bool ar9003_paprd_retrain_pa_in(struct ath_hw *ah, + struct ath9k_hw_cal_data *caldata, + int chain) +{ + u32 *pa_in = caldata->pa_table[chain]; + int capdiv_offset, quick_drop_offset; + int capdiv2g, quick_drop; + int count = 0; + int i; + + if (!AR_SREV_9485(ah) && !AR_SREV_9330(ah)) + return false; + + capdiv2g = REG_READ_FIELD(ah, AR_PHY_65NM_CH0_TXRF3, + AR_PHY_65NM_CH0_TXRF3_CAPDIV2G); + + quick_drop = REG_READ_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP); + + if (quick_drop) + quick_drop -= 0x40; + + for (i = 0; i < NUM_BIN + 1; i++) { + if (pa_in[i] == 1400) + count++; + } + + if (AR_SREV_9485(ah)) { + if (pa_in[23] < 800) { + capdiv_offset = (int)((1000 - pa_in[23] + 75) / 150); + capdiv2g += capdiv_offset; + if (capdiv2g > 7) { + capdiv2g = 7; + if (pa_in[23] < 600) { + quick_drop++; + if (quick_drop > 0) + quick_drop = 0; + } + } + } else if (pa_in[23] == 1400) { + quick_drop_offset = min_t(int, count / 3, 2); + quick_drop += quick_drop_offset; + capdiv2g += quick_drop_offset / 2; + + if (capdiv2g > 7) + capdiv2g = 7; + + if (quick_drop > 0) { + quick_drop = 0; + capdiv2g -= quick_drop_offset; + if (capdiv2g < 0) + capdiv2g = 0; + } + } 
else { + return false; + } + } else if (AR_SREV_9330(ah)) { + if (pa_in[23] < 1000) { + capdiv_offset = (1000 - pa_in[23]) / 100; + capdiv2g += capdiv_offset; + if (capdiv_offset > 3) { + capdiv_offset = 1; + quick_drop--; + } + + capdiv2g += capdiv_offset; + if (capdiv2g > 6) + capdiv2g = 6; + if (quick_drop < -4) + quick_drop = -4; + } else if (pa_in[23] == 1400) { + if (count > 3) { + quick_drop++; + capdiv2g -= count / 4; + if (quick_drop > -2) + quick_drop = -2; + } else { + capdiv2g--; + } + + if (capdiv2g < 0) + capdiv2g = 0; + } else { + return false; + } + } + + REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_TXRF3, + AR_PHY_65NM_CH0_TXRF3_CAPDIV2G, capdiv2g); + REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, + AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, + quick_drop); + + return true; +} + int ar9003_paprd_create_curve(struct ath_hw *ah, struct ath9k_hw_cal_data *caldata, int chain) { @@ -817,6 +917,9 @@ int ar9003_paprd_create_curve(struct ath_hw *ah, if (!create_pa_curve(data_L, data_U, pa_table, small_signal_gain)) status = -2; + if (ar9003_paprd_retrain_pa_in(ah, caldata, chain)) + status = -EINPROGRESS; + REG_CLR_BIT(ah, AR_PHY_PAPRD_TRAINER_STAT1, AR_PHY_PAPRD_TRAINER_STAT1_PAPRD_TRAIN_DONE); diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h index 7bfbaf065a43..84d3d4956861 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h +++ b/trunk/drivers/net/wireless/ath/ath9k/ar9003_phy.h @@ -625,6 +625,10 @@ #define AR_PHY_AIC_CTRL_4_B0 (AR_SM_BASE + 0x4c0) #define AR_PHY_AIC_STAT_2_B0 (AR_SM_BASE + 0x4cc) +#define AR_PHY_65NM_CH0_TXRF3 0x16048 +#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G 0x0000001e +#define AR_PHY_65NM_CH0_TXRF3_CAPDIV2G_S 1 + #define AR_PHY_65NM_CH0_SYNTH4 0x1608c #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT (AR_SREV_9462(ah) ? 0x00000001 : 0x00000002) #define AR_PHY_SYNTH4_LONG_SHIFT_SELECT_S (AR_SREV_9462(ah) ? 
0 : 1) diff --git a/trunk/drivers/net/wireless/ath/ath9k/debug.c b/trunk/drivers/net/wireless/ath/ath9k/debug.c index 68b643c8943c..c8ef30127adb 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/debug.c +++ b/trunk/drivers/net/wireless/ath/ath9k/debug.c @@ -1577,6 +1577,8 @@ int ath9k_init_debug(struct ath_hw *ah) sc->debug.debugfs_phy, sc, &fops_tx_chainmask); debugfs_create_file("disable_ani", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_disable_ani); + debugfs_create_bool("paprd", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, + &sc->sc_ah->config.enable_paprd); debugfs_create_file("regidx", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, sc, &fops_regidx); debugfs_create_file("regval", S_IRUSR | S_IWUSR, sc->debug.debugfs_phy, diff --git a/trunk/drivers/net/wireless/ath/ath9k/gpio.c b/trunk/drivers/net/wireless/ath/ath9k/gpio.c index bacdb8fb4ef4..9f83f71742a5 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/gpio.c +++ b/trunk/drivers/net/wireless/ath/ath9k/gpio.c @@ -341,7 +341,8 @@ void ath9k_btcoex_stop_gen_timer(struct ath_softc *sc) { struct ath_btcoex *btcoex = &sc->btcoex; - ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer); + if (btcoex->hw_timer_enabled) + ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer); } u16 ath9k_btcoex_aggr_limit(struct ath_softc *sc, u32 max_4ms_framelen) diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.c b/trunk/drivers/net/wireless/ath/ath9k/hw.c index cfa91ab7acf8..4faf0a395876 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.c +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.c @@ -463,9 +463,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah) ah->config.spurchans[i][1] = AR_NO_SPUR; } - /* PAPRD needs some more work to be enabled */ - ah->config.paprd_disable = 1; - ah->config.rx_intr_mitigation = true; ah->config.pcieSerDesWrite = true; @@ -730,6 +727,7 @@ int ath9k_hw_init(struct ath_hw *ah) case AR9300_DEVID_QCA955X: case AR9300_DEVID_AR9580: case AR9300_DEVID_AR9462: + case AR9485_DEVID_AR1111: break; default: if (common->bus_ops->ath_bus_type == ATH_USB) @@ -977,9 +975,6 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, else imr_reg |= AR_IMR_TXOK; - if (opmode == NL80211_IFTYPE_AP) - imr_reg |= AR_IMR_MIB; - ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_IMR, imr_reg); @@ -1777,6 +1772,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, /* Operating channel changed, reset channel calibration data */ memset(caldata, 0, sizeof(*caldata)); ath9k_init_nfcal_hist_buffer(ah, chan); + } else if (caldata) { + caldata->paprd_packet_sent = false; } ah->noise = ath9k_hw_getchan_noise(ah, chan); @@ -2500,9 +2497,6 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah) pCap->rx_status_len = sizeof(struct ar9003_rxs); pCap->tx_desc_len = sizeof(struct ar9003_txc); pCap->txs_len = sizeof(struct ar9003_txs); - if (!ah->config.paprd_disable && - ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) - pCap->hw_caps |= ATH9K_HW_CAP_PAPRD; } else { pCap->tx_desc_len = sizeof(struct ath_desc); if (AR_SREV_9280_20(ah)) diff --git a/trunk/drivers/net/wireless/ath/ath9k/hw.h b/trunk/drivers/net/wireless/ath/ath9k/hw.h index dd0c146d81dc..de6968fc64f4 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/hw.h +++ b/trunk/drivers/net/wireless/ath/ath9k/hw.h @@ -49,6 +49,7 @@ #define AR9300_DEVID_AR9462 0x0034 #define AR9300_DEVID_AR9330 0x0035 #define AR9300_DEVID_QCA955X 0x0038 +#define AR9485_DEVID_AR1111 0x0037 #define AR5416_AR9100_DEVID 0x000b @@ -235,7 +236,6 @@ enum ath9k_hw_caps { ATH9K_HW_CAP_LDPC = BIT(6), ATH9K_HW_CAP_FASTCLOCK = 
BIT(7), ATH9K_HW_CAP_SGI_20 = BIT(8), - ATH9K_HW_CAP_PAPRD = BIT(9), ATH9K_HW_CAP_ANT_DIV_COMB = BIT(10), ATH9K_HW_CAP_2GHZ = BIT(11), ATH9K_HW_CAP_5GHZ = BIT(12), @@ -286,12 +286,12 @@ struct ath9k_ops_config { u8 pcie_clock_req; u32 pcie_waen; u8 analog_shiftreg; - u8 paprd_disable; u32 ofdm_trig_low; u32 ofdm_trig_high; u32 cck_trig_high; u32 cck_trig_low; u32 enable_ani; + u32 enable_paprd; int serialize_regmode; bool rx_intr_mitigation; bool tx_intr_mitigation; @@ -404,6 +404,7 @@ struct ath9k_hw_cal_data { int8_t iCoff; int8_t qCoff; bool rtt_done; + bool paprd_packet_sent; bool paprd_done; bool nfcal_pending; bool nfcal_interference; diff --git a/trunk/drivers/net/wireless/ath/ath9k/link.c b/trunk/drivers/net/wireless/ath/ath9k/link.c index d4549e9aac5c..7b88b9c39ccd 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/link.c +++ b/trunk/drivers/net/wireless/ath/ath9k/link.c @@ -254,8 +254,9 @@ void ath_paprd_calibrate(struct work_struct *work) int chain_ok = 0; int chain; int len = 1800; + int ret; - if (!caldata) + if (!caldata || !caldata->paprd_packet_sent || caldata->paprd_done) return; ath9k_ps_wakeup(sc); @@ -282,13 +283,6 @@ void ath_paprd_calibrate(struct work_struct *work) continue; chain_ok = 0; - - ath_dbg(common, CALIBRATE, - "Sending PAPRD frame for thermal measurement on chain %d\n", - chain); - if (!ath_paprd_send_frame(sc, skb, chain)) - goto fail_paprd; - ar9003_paprd_setup_gain_table(ah, chain); ath_dbg(common, CALIBRATE, @@ -302,7 +296,13 @@ void ath_paprd_calibrate(struct work_struct *work) break; } - if (ar9003_paprd_create_curve(ah, caldata, chain)) { + ret = ar9003_paprd_create_curve(ah, caldata, chain); + if (ret == -EINPROGRESS) { + ath_dbg(common, CALIBRATE, + "PAPRD curve on chain %d needs to be re-trained\n", + chain); + break; + } else if (ret) { ath_dbg(common, CALIBRATE, "PAPRD create curve failed on chain %d\n", chain); @@ -423,7 +423,7 @@ void ath_ani_calibrate(unsigned long data) cal_interval = min(cal_interval, (u32)short_cal_interval); mod_timer(&common->ani.timer, jiffies + msecs_to_jiffies(cal_interval)); - if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) { + if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD) && ah->caldata) { if (!ah->caldata->paprd_done) ieee80211_queue_work(sc->hw, &sc->paprd_work); else if (!ah->paprd_table_write_done) diff --git a/trunk/drivers/net/wireless/ath/ath9k/mac.c b/trunk/drivers/net/wireless/ath/ath9k/mac.c index 7990cd55599c..b42be910a83d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mac.c +++ b/trunk/drivers/net/wireless/ath/ath9k/mac.c @@ -773,15 +773,10 @@ bool ath9k_hw_intrpend(struct ath_hw *ah) } EXPORT_SYMBOL(ath9k_hw_intrpend); -void ath9k_hw_disable_interrupts(struct ath_hw *ah) +void ath9k_hw_kill_interrupts(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); - if (!(ah->imask & ATH9K_INT_GLOBAL)) - atomic_set(&ah->intr_ref_cnt, -1); - else - atomic_dec(&ah->intr_ref_cnt); - ath_dbg(common, INTERRUPT, "disable IER\n"); REG_WRITE(ah, AR_IER, AR_IER_DISABLE); (void) REG_READ(ah, AR_IER); @@ -793,6 +788,17 @@ void ath9k_hw_disable_interrupts(struct ath_hw *ah) (void) REG_READ(ah, AR_INTR_SYNC_ENABLE); } } +EXPORT_SYMBOL(ath9k_hw_kill_interrupts); + +void ath9k_hw_disable_interrupts(struct ath_hw *ah) +{ + if (!(ah->imask & ATH9K_INT_GLOBAL)) + atomic_set(&ah->intr_ref_cnt, -1); + else + atomic_dec(&ah->intr_ref_cnt); + + ath9k_hw_kill_interrupts(ah); +} EXPORT_SYMBOL(ath9k_hw_disable_interrupts); void ath9k_hw_enable_interrupts(struct ath_hw *ah) diff --git 
a/trunk/drivers/net/wireless/ath/ath9k/mac.h b/trunk/drivers/net/wireless/ath/ath9k/mac.h index 0eba36dca6f8..4a745e68dd94 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/mac.h +++ b/trunk/drivers/net/wireless/ath/ath9k/mac.h @@ -738,6 +738,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah); void ath9k_hw_set_interrupts(struct ath_hw *ah); void ath9k_hw_enable_interrupts(struct ath_hw *ah); void ath9k_hw_disable_interrupts(struct ath_hw *ah); +void ath9k_hw_kill_interrupts(struct ath_hw *ah); void ar9002_hw_attach_mac_ops(struct ath_hw *ah); diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c index 6049d8b82855..a22df749b8db 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/main.c +++ b/trunk/drivers/net/wireless/ath/ath9k/main.c @@ -462,8 +462,10 @@ irqreturn_t ath_isr(int irq, void *dev) if (!ath9k_hw_intrpend(ah)) return IRQ_NONE; - if(test_bit(SC_OP_HW_RESET, &sc->sc_flags)) + if (test_bit(SC_OP_HW_RESET, &sc->sc_flags)) { + ath9k_hw_kill_interrupts(ah); return IRQ_HANDLED; + } /* * Figure out the reason(s) for the interrupt. Note diff --git a/trunk/drivers/net/wireless/ath/ath9k/pci.c b/trunk/drivers/net/wireless/ath/ath9k/pci.c index 87b89d55e637..a978984d78a5 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/pci.c +++ b/trunk/drivers/net/wireless/ath/ath9k/pci.c @@ -37,6 +37,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */ { PCI_VDEVICE(ATHEROS, 0x0033) }, /* PCI-E AR9580 */ { PCI_VDEVICE(ATHEROS, 0x0034) }, /* PCI-E AR9462 */ + { PCI_VDEVICE(ATHEROS, 0x0037) }, /* PCI-E AR1111/AR9485 */ { 0 } }; @@ -320,6 +321,7 @@ static int ath_pci_suspend(struct device *device) * Otherwise the chip never moved to full sleep, * when no interface is up. 
*/ + ath9k_stop_btcoex(sc); ath9k_hw_disable(sc->sc_ah); ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); diff --git a/trunk/drivers/net/wireless/ath/ath9k/recv.c b/trunk/drivers/net/wireless/ath/ath9k/recv.c index 12aca02228c2..4480c0cc655f 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/recv.c +++ b/trunk/drivers/net/wireless/ath/ath9k/recv.c @@ -1044,7 +1044,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) struct ieee80211_hw *hw = sc->hw; struct ieee80211_hdr *hdr; int retval; - bool decrypt_error = false; struct ath_rx_status rs; enum ath9k_rx_qtype qtype; bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA); @@ -1066,6 +1065,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp) tsf_lower = tsf & 0xffffffff; do { + bool decrypt_error = false; /* If handling rx interrupt and flush is in progress => exit */ if (test_bit(SC_OP_RXFLUSH, &sc->sc_flags) && (flush == 0)) break; diff --git a/trunk/drivers/net/wireless/ath/ath9k/xmit.c b/trunk/drivers/net/wireless/ath/ath9k/xmit.c index 2c9da6b2ecb1..0d4155aec48d 100644 --- a/trunk/drivers/net/wireless/ath/ath9k/xmit.c +++ b/trunk/drivers/net/wireless/ath/ath9k/xmit.c @@ -2018,6 +2018,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, ath_dbg(common, XMIT, "TX complete: skb: %p\n", skb); + if (sc->sc_ah->caldata) + sc->sc_ah->caldata->paprd_packet_sent = true; + if (!(tx_flags & ATH_TX_ERROR)) /* Frame was ACKed */ tx_info->flags |= IEEE80211_TX_STAT_ACK; diff --git a/trunk/drivers/net/wireless/b43/Kconfig b/trunk/drivers/net/wireless/b43/Kconfig index 3876c7ea54f4..7a28d21ac389 100644 --- a/trunk/drivers/net/wireless/b43/Kconfig +++ b/trunk/drivers/net/wireless/b43/Kconfig @@ -34,8 +34,8 @@ config B43_BCMA config B43_BCMA_EXTRA bool "Hardware support that overlaps with the brcmsmac driver" depends on B43_BCMA - default n if BRCMSMAC || BRCMSMAC_MODULE - default y + default n if BRCMSMAC + default y config B43_SSB bool diff --git a/trunk/drivers/net/wireless/b43/main.c b/trunk/drivers/net/wireless/b43/main.c index b80352b308d5..a140165dfee0 100644 --- a/trunk/drivers/net/wireless/b43/main.c +++ b/trunk/drivers/net/wireless/b43/main.c @@ -2719,32 +2719,37 @@ static int b43_gpio_init(struct b43_wldev *dev) if (dev->dev->chip_id == 0x4301) { mask |= 0x0060; set |= 0x0060; + } else if (dev->dev->chip_id == 0x5354) { + /* Don't allow overtaking buttons GPIOs */ + set &= 0x2; /* 0x2 is LED GPIO on BCM5354 */ } - if (dev->dev->chip_id == 0x5354) - set &= 0xff02; + if (0 /* FIXME: conditional unknown */ ) { b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0100); - mask |= 0x0180; - set |= 0x0180; + /* BT Coexistance Input */ + mask |= 0x0080; + set |= 0x0080; + /* BT Coexistance Out */ + mask |= 0x0100; + set |= 0x0100; } if (dev->dev->bus_sprom->boardflags_lo & B43_BFL_PACTRL) { + /* PA is controlled by gpio 9, let ucode handle it */ b43_write16(dev, B43_MMIO_GPIO_MASK, b43_read16(dev, B43_MMIO_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } - if (dev->dev->core_rev >= 2) - mask |= 0x0010; /* FIXME: This is redundant. 
*/ switch (dev->dev->bus_type) { #ifdef CONFIG_B43_BCMA case B43_BUS_BCMA: bcma_cc_write32(&dev->dev->bdev->bus->drv_cc, BCMA_CC_GPIOCTL, (bcma_cc_read32(&dev->dev->bdev->bus->drv_cc, - BCMA_CC_GPIOCTL) & mask) | set); + BCMA_CC_GPIOCTL) & ~mask) | set); break; #endif #ifdef CONFIG_B43_SSB @@ -2753,7 +2758,7 @@ static int b43_gpio_init(struct b43_wldev *dev) if (gpiodev) ssb_write32(gpiodev, B43_GPIO_CONTROL, (ssb_read32(gpiodev, B43_GPIO_CONTROL) - & mask) | set); + & ~mask) | set); break; #endif } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c index 49765d34b4e0..7c4ee72f9d56 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c @@ -638,6 +638,8 @@ static int brcmf_sdio_pd_probe(struct platform_device *pdev) oobirq_entry = kzalloc(sizeof(struct brcmf_sdio_oobirq), GFP_KERNEL); + if (!oobirq_entry) + return -ENOMEM; oobirq_entry->irq = res->start; oobirq_entry->flags = res->flags & IRQF_TRIGGER_MASK; list_add_tail(&oobirq_entry->list, &oobirq_lh); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c index 2621dd3d7dcd..6f70953f0bad 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_common.c @@ -764,8 +764,11 @@ static void brcmf_c_arp_offload_set(struct brcmf_pub *drvr, int arp_mode) { char iovbuf[32]; int retcode; + __le32 arp_mode_le; - brcmf_c_mkiovar("arp_ol", (char *)&arp_mode, 4, iovbuf, sizeof(iovbuf)); + arp_mode_le = cpu_to_le32(arp_mode); + brcmf_c_mkiovar("arp_ol", (char *)&arp_mode_le, 4, iovbuf, + sizeof(iovbuf)); retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); retcode = retcode >= 0 ? 
0 : retcode; @@ -781,8 +784,11 @@ static void brcmf_c_arp_offload_enable(struct brcmf_pub *drvr, int arp_enable) { char iovbuf[32]; int retcode; + __le32 arp_enable_le; - brcmf_c_mkiovar("arpoe", (char *)&arp_enable, 4, + arp_enable_le = cpu_to_le32(arp_enable); + + brcmf_c_mkiovar("arpoe", (char *)&arp_enable_le, 4, iovbuf, sizeof(iovbuf)); retcode = brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); @@ -800,10 +806,10 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) char iovbuf[BRCMF_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */ char buf[128], *ptr; - u32 roaming = 1; - uint bcn_timeout = 3; - int scan_assoc_time = 40; - int scan_unassoc_time = 40; + __le32 roaming_le = cpu_to_le32(1); + __le32 bcn_timeout_le = cpu_to_le32(3); + __le32 scan_assoc_time_le = cpu_to_le32(40); + __le32 scan_unassoc_time_le = cpu_to_le32(40); int i; struct brcmf_bus_dcmd *cmdlst; struct list_head *cur, *q; @@ -829,14 +835,14 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) /* Setup timeout if Beacons are lost and roam is off to report link down */ - brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout, 4, iovbuf, + brcmf_c_mkiovar("bcn_timeout", (char *)&bcn_timeout_le, 4, iovbuf, sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); /* Enable/Disable build-in roaming to allowed ext supplicant to take of romaing */ - brcmf_c_mkiovar("roam_off", (char *)&roaming, 4, + brcmf_c_mkiovar("roam_off", (char *)&roaming_le, 4, iovbuf, sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_VAR, iovbuf, sizeof(iovbuf)); @@ -848,9 +854,9 @@ int brcmf_c_preinit_dcmds(struct brcmf_pub *drvr) sizeof(iovbuf)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_CHANNEL_TIME, - (char *)&scan_assoc_time, sizeof(scan_assoc_time)); + (char *)&scan_assoc_time_le, sizeof(scan_assoc_time_le)); brcmf_proto_cdc_set_dcmd(drvr, 0, BRCMF_C_SET_SCAN_UNASSOC_TIME, - (char *)&scan_unassoc_time, sizeof(scan_unassoc_time)); + (char *)&scan_unassoc_time_le, sizeof(scan_unassoc_time_le)); /* Set and enable ARP offload feature */ brcmf_c_arp_offload_set(drvr, BRCMF_ARPOL_MODE); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/usb.c index a299d42da8e7..58f89fa9c9f8 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/usb.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/usb.c @@ -519,7 +519,7 @@ static void brcmf_usb_tx_complete(struct urb *urb) else devinfo->bus_pub.bus->dstats.tx_errors++; - dev_kfree_skb(req->skb); + brcmu_pkt_buf_free_skb(req->skb); req->skb = NULL; brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req); @@ -540,7 +540,7 @@ static void brcmf_usb_rx_complete(struct urb *urb) devinfo->bus_pub.bus->dstats.rx_packets++; } else { devinfo->bus_pub.bus->dstats.rx_errors++; - dev_kfree_skb(skb); + brcmu_pkt_buf_free_skb(skb); brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); return; } @@ -550,13 +550,15 @@ static void brcmf_usb_rx_complete(struct urb *urb) if (brcmf_proto_hdrpull(devinfo->dev, &ifidx, skb) != 0) { brcmf_dbg(ERROR, "rx protocol error\n"); brcmu_pkt_buf_free_skb(skb); + brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); devinfo->bus_pub.bus->dstats.rx_errors++; } else { brcmf_rx_packet(devinfo->dev, ifidx, skb); brcmf_usb_rx_refill(devinfo, req); } } else { - dev_kfree_skb(skb); + brcmu_pkt_buf_free_skb(skb); + brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); } return; @@ -581,14 +583,13 @@ static void brcmf_usb_rx_refill(struct brcmf_usbdev_info *devinfo, 
usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->rx_pipe, skb->data, skb_tailroom(skb), brcmf_usb_rx_complete, req); - req->urb->transfer_flags |= URB_ZERO_PACKET; req->devinfo = devinfo; + brcmf_usb_enq(devinfo, &devinfo->rx_postq, req); ret = usb_submit_urb(req->urb, GFP_ATOMIC); - if (ret == 0) { - brcmf_usb_enq(devinfo, &devinfo->rx_postq, req); - } else { - dev_kfree_skb(req->skb); + if (ret) { + brcmf_usb_del_fromq(devinfo, req); + brcmu_pkt_buf_free_skb(req->skb); req->skb = NULL; brcmf_usb_enq(devinfo, &devinfo->rx_freeq, req); } @@ -683,23 +684,22 @@ static int brcmf_usb_tx(struct device *dev, struct sk_buff *skb) req = brcmf_usb_deq(devinfo, &devinfo->tx_freeq); if (!req) { + brcmu_pkt_buf_free_skb(skb); brcmf_dbg(ERROR, "no req to send\n"); return -ENOMEM; } - if (!req->urb) { - brcmf_dbg(ERROR, "no urb for req %p\n", req); - return -ENOBUFS; - } req->skb = skb; req->devinfo = devinfo; usb_fill_bulk_urb(req->urb, devinfo->usbdev, devinfo->tx_pipe, skb->data, skb->len, brcmf_usb_tx_complete, req); req->urb->transfer_flags |= URB_ZERO_PACKET; + brcmf_usb_enq(devinfo, &devinfo->tx_postq, req); ret = usb_submit_urb(req->urb, GFP_ATOMIC); - if (!ret) { - brcmf_usb_enq(devinfo, &devinfo->tx_postq, req); - } else { + if (ret) { + brcmf_dbg(ERROR, "brcmf_usb_tx usb_submit_urb FAILED\n"); + brcmf_usb_del_fromq(devinfo, req); + brcmu_pkt_buf_free_skb(req->skb); req->skb = NULL; brcmf_usb_enq(devinfo, &devinfo->tx_freeq, req); } diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c index 28c5fbb4af26..50b5553b6964 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c @@ -500,8 +500,10 @@ static void wl_iscan_prep(struct brcmf_scan_params_le *params_le, params_le->active_time = cpu_to_le32(-1); params_le->passive_time = cpu_to_le32(-1); params_le->home_time = cpu_to_le32(-1); - if (ssid && ssid->SSID_len) - memcpy(&params_le->ssid_le, ssid, sizeof(struct brcmf_ssid)); + if (ssid && ssid->SSID_len) { + params_le->ssid_le.SSID_len = cpu_to_le32(ssid->SSID_len); + memcpy(&params_le->ssid_le.SSID, ssid->SSID, ssid->SSID_len); + } } static s32 @@ -1876,16 +1878,17 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev, } if (test_bit(WL_STATUS_CONNECTED, &cfg_priv->status)) { - scb_val.val = cpu_to_le32(0); + memset(&scb_val, 0, sizeof(scb_val)); err = brcmf_exec_dcmd(ndev, BRCMF_C_GET_RSSI, &scb_val, sizeof(struct brcmf_scb_val_le)); - if (err) + if (err) { WL_ERR("Could not get rssi (%d)\n", err); - - rssi = le32_to_cpu(scb_val.val); - sinfo->filled |= STATION_INFO_SIGNAL; - sinfo->signal = rssi; - WL_CONN("RSSI %d dBm\n", rssi); + } else { + rssi = le32_to_cpu(scb_val.val); + sinfo->filled |= STATION_INFO_SIGNAL; + sinfo->signal = rssi; + WL_CONN("RSSI %d dBm\n", rssi); + } } done: diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c index 9a4c63f927cb..64a48f06d68b 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/channel.c @@ -77,7 +77,7 @@ NL80211_RRF_NO_IBSS) static const struct ieee80211_regdomain brcms_regdom_x2 = { - .n_reg_rules = 7, + .n_reg_rules = 6, .alpha2 = "X2", .reg_rules = { BRCM_2GHZ_2412_2462, @@ -382,9 +382,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, { struct brcms_c_info *wlc = wlc_cm->wlc; struct ieee80211_channel
*ch = wlc->pub->ieee_hw->conf.channel; - const struct ieee80211_reg_rule *reg_rule; struct txpwr_limits txpwr; - int ret; brcms_c_channel_reg_limits(wlc_cm, chanspec, &txpwr); @@ -393,8 +391,7 @@ brcms_c_channel_set_chanspec(struct brcms_cm_info *wlc_cm, u16 chanspec, ); /* set or restore gmode as required by regulatory */ - ret = freq_reg_info(wlc->wiphy, ch->center_freq, 0, &reg_rule); - if (!ret && (reg_rule->flags & NL80211_RRF_NO_OFDM)) + if (ch->flags & IEEE80211_CHAN_NO_OFDM) brcms_c_set_gmode(wlc, GMODE_LEGACY_B, false); else brcms_c_set_gmode(wlc, wlc->protection->gmode_user, false); diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c index 9e79d47e077f..a5edebeb0b4f 100644 --- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c +++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c @@ -121,7 +121,8 @@ static struct ieee80211_channel brcms_2ghz_chantable[] = { IEEE80211_CHAN_NO_HT40PLUS), CHAN2GHZ(14, 2484, IEEE80211_CHAN_PASSIVE_SCAN | IEEE80211_CHAN_NO_IBSS | - IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS) + IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS | + IEEE80211_CHAN_NO_OFDM) }; static struct ieee80211_channel brcms_5ghz_nphy_chantable[] = { @@ -1232,6 +1233,9 @@ uint brcms_reset(struct brcms_info *wl) /* dpc will not be rescheduled */ wl->resched = false; + /* inform publicly that interface is down */ + wl->pub->up = false; + return 0; } diff --git a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c index 95aa8e1683ec..83324b321652 100644 --- a/trunk/drivers/net/wireless/ipw2x00/ipw2100.c +++ b/trunk/drivers/net/wireless/ipw2x00/ipw2100.c @@ -2042,7 +2042,8 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) return; } len = ETH_ALEN; - ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &bssid, &len); + ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid, + &len); if (ret) { IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/trunk/drivers/net/wireless/iwlwifi/dvm/debugfs.c index 46782f1102ac..a47b306b522c 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/debugfs.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/debugfs.c @@ -124,6 +124,9 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file, const struct fw_img *img; size_t bufsz; + if (!iwl_is_ready_rf(priv)) + return -EAGAIN; + /* default is to dump the entire data segment */ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) { priv->dbgfs_sram_offset = 0x800000; diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c index 6fddd2785e6e..a82f46c10f5e 100644 --- a/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c +++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rs.c @@ -707,11 +707,14 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags, */ static bool rs_use_green(struct ieee80211_sta *sta) { - struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; - struct iwl_rxon_context *ctx = sta_priv->ctx; - - return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && - !(ctx->ht.non_gf_sta_present); + /* + * There's a bug somewhere in this code that causes the + * scaling to get stuck because GF+SGI can't be combined + * in SISO rates. Until we find that bug, disable GF, it + * has only limited benefit and we still interoperate with + * GF APs since we can always receive GF transmissions.
+ */ + return false; } /** diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h b/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h index d9694c58208c..4ffc18dc3a57 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/internal.h @@ -350,7 +350,7 @@ int iwl_queue_space(const struct iwl_queue *q); /***************************************************** * Error handling ******************************************************/ -int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display); +int iwl_dump_fh(struct iwl_trans *trans, char **buf); void iwl_dump_csr(struct iwl_trans *trans); /***************************************************** diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c b/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c index 39a6ca1f009c..d1a61ba6247a 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/rx.c @@ -555,7 +555,7 @@ static void iwl_irq_handle_error(struct iwl_trans *trans) } iwl_dump_csr(trans); - iwl_dump_fh(trans, NULL, false); + iwl_dump_fh(trans, NULL); iwl_op_mode_nic_error(trans->op_mode); } diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c index 939c2f78df58..dbeebef562d5 100644 --- a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c @@ -1442,6 +1442,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) return err; err_free_irq: + trans_pcie->irq_requested = false; free_irq(trans_pcie->irq, trans); error: iwl_free_isr_ict(trans); @@ -1649,13 +1650,9 @@ static const char *get_fh_string(int cmd) #undef IWL_CMD } -int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) +int iwl_dump_fh(struct iwl_trans *trans, char **buf) { int i; -#ifdef CONFIG_IWLWIFI_DEBUG - int pos = 0; - size_t bufsz = 0; -#endif static const u32 fh_tbl[] = { FH_RSCSR_CHNL0_STTS_WPTR_REG, FH_RSCSR_CHNL0_RBDCB_BASE_REG, @@ -1667,29 +1664,35 @@ int iwl_dump_fh(struct iwl_trans *trans, char **buf, bool display) FH_TSSR_TX_STATUS_REG, FH_TSSR_TX_ERROR_REG }; -#ifdef CONFIG_IWLWIFI_DEBUG - if (display) { - bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (buf) { + int pos = 0; + size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40; + *buf = kmalloc(bufsz, GFP_KERNEL); if (!*buf) return -ENOMEM; + pos += scnprintf(*buf + pos, bufsz - pos, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) pos += scnprintf(*buf + pos, bufsz - pos, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); - } + return pos; } #endif + IWL_ERR(trans, "FH register values:\n"); - for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) { + for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) IWL_ERR(trans, " %34s: 0X%08x\n", get_fh_string(fh_tbl[i]), iwl_read_direct32(trans, fh_tbl[i])); - } + return 0; } @@ -1982,11 +1985,11 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, size_t count, loff_t *ppos) { struct iwl_trans *trans = file->private_data; - char *buf; + char *buf = NULL; int pos = 0; ssize_t ret = -EFAULT; - ret = pos = iwl_dump_fh(trans, &buf, true); + ret = pos = iwl_dump_fh(trans, &buf); if (buf) { ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); diff --git a/trunk/drivers/net/wireless/libertas/cfg.c b/trunk/drivers/net/wireless/libertas/cfg.c index eb5de800ed90..1c10b542ab23 100644 --- a/trunk/drivers/net/wireless/libertas/cfg.c +++ 
b/trunk/drivers/net/wireless/libertas/cfg.c @@ -1254,6 +1254,7 @@ static int lbs_associate(struct lbs_private *priv, netif_tx_wake_all_queues(priv->dev); } + kfree(cmd); done: lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret); return ret; diff --git a/trunk/drivers/net/wireless/libertas/if_sdio.c b/trunk/drivers/net/wireless/libertas/if_sdio.c index 76caebaa4397..4cb234349fbf 100644 --- a/trunk/drivers/net/wireless/libertas/if_sdio.c +++ b/trunk/drivers/net/wireless/libertas/if_sdio.c @@ -1314,6 +1314,7 @@ static void if_sdio_remove(struct sdio_func *func) kfree(packet); } + kfree(card); lbs_deb_leave(LBS_DEB_SDIO); } @@ -1325,6 +1326,11 @@ static int if_sdio_suspend(struct device *dev) mmc_pm_flag_t flags = sdio_get_host_pm_caps(func); + /* If we're powered off anyway, just let the mmc layer remove the + * card. */ + if (!lbs_iface_active(card->priv)) + return -ENOSYS; + dev_info(dev, "%s: suspend: PM flags = 0x%x\n", sdio_func_id(func), flags); diff --git a/trunk/drivers/net/wireless/libertas/main.c b/trunk/drivers/net/wireless/libertas/main.c index 58048189bd24..fe1ea43c5149 100644 --- a/trunk/drivers/net/wireless/libertas/main.c +++ b/trunk/drivers/net/wireless/libertas/main.c @@ -571,7 +571,10 @@ static int lbs_thread(void *data) netdev_info(dev, "Timeout submitting command 0x%04x\n", le16_to_cpu(cmdnode->cmdbuf->command)); lbs_complete_command(priv, cmdnode, -ETIMEDOUT); - if (priv->reset_card) + + /* Reset card, but only when it isn't in the process + * of being shutdown anyway. */ + if (!dev->dismantle && priv->reset_card) priv->reset_card(priv); } priv->cmd_timed_out = 0; diff --git a/trunk/drivers/net/wireless/mwifiex/cmdevt.c b/trunk/drivers/net/wireless/mwifiex/cmdevt.c index c68adec3cc8b..565527aee0ea 100644 --- a/trunk/drivers/net/wireless/mwifiex/cmdevt.c +++ b/trunk/drivers/net/wireless/mwifiex/cmdevt.c @@ -170,7 +170,20 @@ static int mwifiex_dnld_cmd_to_fw(struct mwifiex_private *priv, cmd_code = le16_to_cpu(host_cmd->command); cmd_size = le16_to_cpu(host_cmd->size); - skb_trim(cmd_node->cmd_skb, cmd_size); + /* Adjust skb length */ + if (cmd_node->cmd_skb->len > cmd_size) + /* + * cmd_size is less than sizeof(struct host_cmd_ds_command). + * Trim off the unused portion. + */ + skb_trim(cmd_node->cmd_skb, cmd_size); + else if (cmd_node->cmd_skb->len < cmd_size) + /* + * cmd_size is larger than sizeof(struct host_cmd_ds_command) + * because we have appended custom IE TLV. Increase skb length + * accordingly. + */ + skb_put(cmd_node->cmd_skb, cmd_size - cmd_node->cmd_skb->len); do_gettimeofday(&tstamp); dev_dbg(adapter->dev, "cmd: DNLD_CMD: (%lu.%lu): %#x, act %#x, len %d," diff --git a/trunk/drivers/net/wireless/p54/p54usb.c b/trunk/drivers/net/wireless/p54/p54usb.c index 7f207b6e9552..effb044a8a9d 100644 --- a/trunk/drivers/net/wireless/p54/p54usb.c +++ b/trunk/drivers/net/wireless/p54/p54usb.c @@ -42,7 +42,7 @@ MODULE_FIRMWARE("isl3887usb"); * whenever you add a new device. 
*/ -static struct usb_device_id p54u_table[] __devinitdata = { +static struct usb_device_id p54u_table[] = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ diff --git a/trunk/drivers/net/wireless/rndis_wlan.c b/trunk/drivers/net/wireless/rndis_wlan.c index 241162e8111d..7a4ae9ee1c63 100644 --- a/trunk/drivers/net/wireless/rndis_wlan.c +++ b/trunk/drivers/net/wireless/rndis_wlan.c @@ -1803,6 +1803,7 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev, struct cfg80211_pmksa *pmksa, int max_pmkids) { + struct ndis_80211_pmkid *new_pmkids; int i, err, newlen; unsigned int count; @@ -1833,11 +1834,12 @@ static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev, /* add new pmkid */ newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]); - pmkids = krealloc(pmkids, newlen, GFP_KERNEL); - if (!pmkids) { + new_pmkids = krealloc(pmkids, newlen, GFP_KERNEL); + if (!new_pmkids) { err = -ENOMEM; goto error; } + pmkids = new_pmkids; pmkids->length = cpu_to_le32(newlen); pmkids->bssid_info_count = cpu_to_le32(count + 1); diff --git a/trunk/drivers/net/wireless/rt2x00/rt2400pci.c b/trunk/drivers/net/wireless/rt2x00/rt2400pci.c index 8b9dbd76a252..64328af496f5 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2400pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2400pci.c @@ -1611,6 +1611,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -1623,6 +1624,14 @@ static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); + rt2x00_set_field32(&reg, GPIOCSR_BIT8, 1); + rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); + /* * Initialize hw specifications. */ diff --git a/trunk/drivers/net/wireless/rt2x00/rt2400pci.h b/trunk/drivers/net/wireless/rt2x00/rt2400pci.h index d3a4a68cc439..7564ae992b73 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2400pci.h +++ b/trunk/drivers/net/wireless/rt2x00/rt2400pci.h @@ -670,6 +670,7 @@ #define GPIOCSR_BIT5 FIELD32(0x00000020) #define GPIOCSR_BIT6 FIELD32(0x00000040) #define GPIOCSR_BIT7 FIELD32(0x00000080) +#define GPIOCSR_BIT8 FIELD32(0x00000100) /* * BBPPCSR: BBP Pin control register. diff --git a/trunk/drivers/net/wireless/rt2x00/rt2500pci.c b/trunk/drivers/net/wireless/rt2x00/rt2500pci.c index d2cf8a4bc8b5..3de0406735f6 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2500pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2500pci.c @@ -1929,6 +1929,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -1941,6 +1942,14 @@ static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIOCSR, &reg); + rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1); + rt2x00pci_register_write(rt2x00dev, GPIOCSR, reg); + /* * Initialize hw specifications.
*/ diff --git a/trunk/drivers/net/wireless/rt2x00/rt2500usb.c b/trunk/drivers/net/wireless/rt2x00/rt2500usb.c index 3aae36bb0a9e..89fee311d8fd 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2500usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2500usb.c @@ -283,7 +283,7 @@ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) u16 reg; rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); - return rt2x00_get_field32(reg, MAC_CSR19_BIT7); + return rt2x00_get_field16(reg, MAC_CSR19_BIT7); } #ifdef CONFIG_RT2X00_LIB_LEDS @@ -1768,6 +1768,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u16 reg; /* * Allocate eeprom data. */ @@ -1780,6 +1781,14 @@ static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2500usb_register_read(rt2x00dev, MAC_CSR19, &reg); + rt2x00_set_field16(&reg, MAC_CSR19_BIT8, 0); + rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg); + /* * Initialize hw specifications. */ diff --git a/trunk/drivers/net/wireless/rt2x00/rt2500usb.h b/trunk/drivers/net/wireless/rt2x00/rt2500usb.h index b493306a7eed..196bd5103e4f 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2500usb.h +++ b/trunk/drivers/net/wireless/rt2x00/rt2500usb.h @@ -189,14 +189,15 @@ * MAC_CSR19: GPIO control register. */ #define MAC_CSR19 0x0426 -#define MAC_CSR19_BIT0 FIELD32(0x0001) -#define MAC_CSR19_BIT1 FIELD32(0x0002) -#define MAC_CSR19_BIT2 FIELD32(0x0004) -#define MAC_CSR19_BIT3 FIELD32(0x0008) -#define MAC_CSR19_BIT4 FIELD32(0x0010) -#define MAC_CSR19_BIT5 FIELD32(0x0020) -#define MAC_CSR19_BIT6 FIELD32(0x0040) -#define MAC_CSR19_BIT7 FIELD32(0x0080) +#define MAC_CSR19_BIT0 FIELD16(0x0001) +#define MAC_CSR19_BIT1 FIELD16(0x0002) +#define MAC_CSR19_BIT2 FIELD16(0x0004) +#define MAC_CSR19_BIT3 FIELD16(0x0008) +#define MAC_CSR19_BIT4 FIELD16(0x0010) +#define MAC_CSR19_BIT5 FIELD16(0x0020) +#define MAC_CSR19_BIT6 FIELD16(0x0040) +#define MAC_CSR19_BIT7 FIELD16(0x0080) +#define MAC_CSR19_BIT8 FIELD16(0x0100) /* * MAC_CSR20: LED control register. diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c index 88455b1b9fe0..b93516d832fb 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800lib.c @@ -221,6 +221,67 @@ static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, mutex_unlock(&rt2x00dev->csr_mutex); } +static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) +{ + u32 reg; + int i, count; + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); + if (rt2x00_get_field32(reg, WLAN_EN)) + return 0; + + rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); + rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); + rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); + rt2x00_set_field32(&reg, WLAN_EN, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + + udelay(REGISTER_BUSY_DELAY); + + count = 0; + do { + /* + * Check PLL_LD & XTAL_RDY.
+ */ + for (i = 0; i < REGISTER_BUSY_COUNT; i++) { + rt2800_register_read(rt2x00dev, CMB_CTRL, &reg); + if (rt2x00_get_field32(reg, PLL_LD) && + rt2x00_get_field32(reg, XTAL_RDY)) + break; + udelay(REGISTER_BUSY_DELAY); + } + + if (i >= REGISTER_BUSY_COUNT) { + + if (count >= 10) + return -EIO; + + rt2800_register_write(rt2x00dev, 0x58, 0x018); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x418); + udelay(REGISTER_BUSY_DELAY); + rt2800_register_write(rt2x00dev, 0x58, 0x618); + udelay(REGISTER_BUSY_DELAY); + count++; + } else { + count = 0; + } + + rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); + rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); + rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); + rt2x00_set_field32(&reg, WLAN_RESET, 1); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2x00_set_field32(&reg, WLAN_RESET, 0); + rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); + udelay(10); + rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); + } while (count != 0); + + return 0; +} + void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, const u8 command, const u8 token, const u8 arg0, const u8 arg1) @@ -400,6 +461,13 @@ int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, { unsigned int i; u32 reg; + int retval; + + if (rt2x00_rt(rt2x00dev, RT3290)) { + retval = rt2800_enable_wlan_rt3290(rt2x00dev); + if (retval) + return -EBUSY; + } /* * If driver doesn't wake up firmware here, @@ -4021,6 +4089,7 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) rt2800_register_write(rt2x00dev, LDO_CFG0, reg); msleep(1); rt2800_register_read(rt2x00dev, LDO_CFG0, &reg); + rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0); rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); rt2800_register_write(rt2x00dev, LDO_CFG0, reg); } diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c index 235376e9cb04..4765bbd654cd 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c @@ -980,69 +980,10 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) return rt2800_validate_eeprom(rt2x00dev); } -static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) -{ - u32 reg; - int i, count; - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); - if (rt2x00_get_field32(reg, WLAN_EN)) - return 0; - - rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); - rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); - rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); - rt2x00_set_field32(&reg, WLAN_EN, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - - udelay(REGISTER_BUSY_DELAY); - - count = 0; - do { - /* - * Check PLL_LD & XTAL_RDY.
- */ - for (i = 0; i < REGISTER_BUSY_COUNT; i++) { - rt2800_register_read(rt2x00dev, CMB_CTRL, &reg); - if (rt2x00_get_field32(reg, PLL_LD) && - rt2x00_get_field32(reg, XTAL_RDY)) - break; - udelay(REGISTER_BUSY_DELAY); - } - - if (i >= REGISTER_BUSY_COUNT) { - - if (count >= 10) - return -EIO; - - rt2800_register_write(rt2x00dev, 0x58, 0x018); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x418); - udelay(REGISTER_BUSY_DELAY); - rt2800_register_write(rt2x00dev, 0x58, 0x618); - udelay(REGISTER_BUSY_DELAY); - count++; - } else { - count = 0; - } - - rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL, &reg); - rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); - rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); - rt2x00_set_field32(&reg, WLAN_RESET, 1); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2x00_set_field32(&reg, WLAN_RESET, 0); - rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); - udelay(10); - rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); - } while (count != 0); - - return 0; -} static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -1055,6 +996,14 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1); + rt2x00pci_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); + /* * Initialize hw specifications. */ @@ -1062,17 +1011,6 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; - /* - * In probe phase call rt2800_enable_wlan_rt3290 to enable wlan - * clk for rt3290. That avoid the MCU fail in start phase. - */ - if (rt2x00_rt(rt2x00dev, RT3290)) { - retval = rt2800_enable_wlan_rt3290(rt2x00dev); - - if (retval) - return retval; - } - /* * This device has multiple filters for control frames * and has a separate filter for PS Poll frames. diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800usb.c b/trunk/drivers/net/wireless/rt2x00/rt2800usb.c index 6cf336595e25..6b4226b71618 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2800usb.c @@ -667,8 +667,16 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, skb_pull(entry->skb, RXINFO_DESC_SIZE); /* - * FIXME: we need to check for rx_pkt_len validity + * Check for rx_pkt_len validity. Return if invalid, leaving + * rxdesc->size zeroed out by the upper level. */ + if (unlikely(rx_pkt_len == 0 || + rx_pkt_len > entry->queue->data_size)) { + ERROR(entry->queue->rt2x00dev, + "Bad frame size %d, forcing to 0\n", rx_pkt_len); + return; + } + rxd = (__le32 *)(entry->skb->data + rx_pkt_len); /* @@ -736,6 +744,7 @@ static int rt2800usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -748,6 +757,14 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg); + rt2x00_set_field32(&reg, GPIO_CTRL_CFG_GPIOD_BIT2, 1); + rt2x00usb_register_write(rt2x00dev, GPIO_CTRL_CFG, reg); + /* * Initialize hw specifications.
*/ @@ -1157,6 +1174,8 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x1690, 0x0744) }, { USB_DEVICE(0x1690, 0x0761) }, { USB_DEVICE(0x1690, 0x0764) }, + /* ASUS */ + { USB_DEVICE(0x0b05, 0x179d) }, /* Cisco */ { USB_DEVICE(0x167b, 0x4001) }, /* EnGenius */ @@ -1222,7 +1241,6 @@ static struct usb_device_id rt2800usb_device_table[] = { { USB_DEVICE(0x0b05, 0x1760) }, { USB_DEVICE(0x0b05, 0x1761) }, { USB_DEVICE(0x0b05, 0x1790) }, - { USB_DEVICE(0x0b05, 0x179d) }, /* AzureWave */ { USB_DEVICE(0x13d3, 0x3262) }, { USB_DEVICE(0x13d3, 0x3284) }, diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c b/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c index a6b88bd4a1a5..3f07e36f462b 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c +++ b/trunk/drivers/net/wireless/rt2x00/rt2x00dev.c @@ -629,7 +629,7 @@ void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) */ if (unlikely(rxdesc.size == 0 || rxdesc.size > entry->queue->data_size)) { - WARNING(rt2x00dev, "Wrong frame size %d max %d.\n", + ERROR(rt2x00dev, "Wrong frame size %d max %d.\n", rxdesc.size, entry->queue->data_size); dev_kfree_skb(entry->skb); goto renew_skb; diff --git a/trunk/drivers/net/wireless/rt2x00/rt61pci.c b/trunk/drivers/net/wireless/rt2x00/rt61pci.c index f32259686b45..b8ec96163922 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt61pci.c +++ b/trunk/drivers/net/wireless/rt2x00/rt61pci.c @@ -2243,8 +2243,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) { - struct ieee80211_conf conf = { .flags = 0 }; - struct rt2x00lib_conf libconf = { .conf = &conf }; + struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf }; rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } @@ -2833,6 +2832,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Disable power saving. @@ -2850,6 +2850,14 @@ static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly. + */ + rt2x00pci_register_read(rt2x00dev, MAC_CSR13, &reg); + rt2x00_set_field32(&reg, MAC_CSR13_BIT13, 1); + rt2x00pci_register_write(rt2x00dev, MAC_CSR13, reg); + /* * Initialize hw specifications. */ diff --git a/trunk/drivers/net/wireless/rt2x00/rt61pci.h b/trunk/drivers/net/wireless/rt2x00/rt61pci.h index e3cd6db76b0e..8f3da5a56766 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt61pci.h +++ b/trunk/drivers/net/wireless/rt2x00/rt61pci.h @@ -372,6 +372,7 @@ struct hw_pairwise_ta_entry { #define MAC_CSR13_BIT10 FIELD32(0x00000400) #define MAC_CSR13_BIT11 FIELD32(0x00000800) #define MAC_CSR13_BIT12 FIELD32(0x00001000) +#define MAC_CSR13_BIT13 FIELD32(0x00002000) /* * MAC_CSR14: LED control register. diff --git a/trunk/drivers/net/wireless/rt2x00/rt73usb.c b/trunk/drivers/net/wireless/rt2x00/rt73usb.c index ba6e434b859d..248436c13ce0 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt73usb.c +++ b/trunk/drivers/net/wireless/rt2x00/rt73usb.c @@ -2177,6 +2177,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; + u32 reg; /* * Allocate eeprom data. @@ -2189,6 +2190,14 @@ static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) if (retval) return retval; + /* + * Enable rfkill polling by setting GPIO direction of the + * rfkill switch GPIO pin correctly.
+ */ + rt2x00usb_register_read(rt2x00dev, MAC_CSR13, &reg); + rt2x00_set_field32(&reg, MAC_CSR13_BIT15, 0); + rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg); + /* * Initialize hw specifications. */ diff --git a/trunk/drivers/net/wireless/rt2x00/rt73usb.h b/trunk/drivers/net/wireless/rt2x00/rt73usb.h index 9f6b470414d3..df1cc116b83b 100644 --- a/trunk/drivers/net/wireless/rt2x00/rt73usb.h +++ b/trunk/drivers/net/wireless/rt2x00/rt73usb.h @@ -282,6 +282,9 @@ struct hw_pairwise_ta_entry { #define MAC_CSR13_BIT10 FIELD32(0x00000400) #define MAC_CSR13_BIT11 FIELD32(0x00000800) #define MAC_CSR13_BIT12 FIELD32(0x00001000) +#define MAC_CSR13_BIT13 FIELD32(0x00002000) +#define MAC_CSR13_BIT14 FIELD32(0x00004000) +#define MAC_CSR13_BIT15 FIELD32(0x00008000) /* * MAC_CSR14: LED control register. diff --git a/trunk/drivers/net/wireless/rtl818x/rtl8187/dev.c b/trunk/drivers/net/wireless/rtl818x/rtl8187/dev.c index 71a30b026089..533024095c43 100644 --- a/trunk/drivers/net/wireless/rtl818x/rtl8187/dev.c +++ b/trunk/drivers/net/wireless/rtl818x/rtl8187/dev.c @@ -44,7 +44,7 @@ MODULE_AUTHOR("Larry Finger "); MODULE_DESCRIPTION("RTL8187/RTL8187B USB wireless driver"); MODULE_LICENSE("GPL"); -static struct usb_device_id rtl8187_table[] __devinitdata = { +static struct usb_device_id rtl8187_table[] = { /* Asus */ {USB_DEVICE(0x0b05, 0x171d), .driver_info = DEVICE_RTL8187}, /* Belkin */ diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c index 44febfde9493..8a7b864faca3 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c @@ -315,7 +315,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, u16 box_reg = 0, box_extreg = 0; u8 u1b_tmp; bool isfw_read = false; - bool bwrite_sucess = false; + bool bwrite_success = false; u8 wait_h2c_limmit = 100; u8 wait_writeh2c_limmit = 100; u8 boxcontent[4], boxextcontent[2]; @@ -354,7 +354,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, } } - while (!bwrite_sucess) { + while (!bwrite_success) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, @@ -491,7 +491,7 @@ static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw, break; } - bwrite_sucess = true; + bwrite_success = true; rtlhal->last_hmeboxnum = boxnum + 1; if (rtlhal->last_hmeboxnum == 4) diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/def.h index 04c3aef8a4f6..2925094b2d91 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/def.h +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/def.h @@ -117,6 +117,7 @@ #define CHIP_VER_B BIT(4) #define CHIP_92C_BITMASK BIT(0) +#define CHIP_UNKNOWN BIT(7) #define CHIP_92C_1T2R 0x03 #define CHIP_92C 0x01 #define CHIP_88C 0x00 diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c index bd0da7ef290b..dd4bb0950a57 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c @@ -994,8 +994,16 @@ static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw) version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C : VERSION_A_CHIP_88C; } else { - version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C : - VERSION_B_CHIP_88C; + version = (enum version_8192c) (CHIP_VER_B | + ((value32 & TYPE_ID) ? CHIP_92C_BITMASK : 0) | + ((value32 & VENDOR_ID) ?
CHIP_VENDOR_UMC : 0)); + if ((!IS_CHIP_VENDOR_UMC(version)) && (value32 & + CHIP_VER_RTL_MASK)) { + version = (enum version_8192c)(version | + ((((value32 & CHIP_VER_RTL_MASK) == BIT(12)) + ? CHIP_VENDOR_UMC_B_CUT : CHIP_UNKNOWN) | + CHIP_VENDOR_UMC)); + } } switch (version) { diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c index 3aa927f8b9b9..7d8f96405f42 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c @@ -162,10 +162,12 @@ int rtl92c_init_sw_vars(struct ieee80211_hw *hw) /* request fw */ if (IS_VENDOR_UMC_A_CUT(rtlhal->version) && - !IS_92C_SERIAL(rtlhal->version)) + !IS_92C_SERIAL(rtlhal->version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU.bin"; - else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) + } else if (IS_81xxC_VENDOR_UMC_B_CUT(rtlhal->version)) { rtlpriv->cfg->fw_name = "rtlwifi/rtl8192cfwU_B.bin"; + pr_info("****** This B_CUT device may not work with kernels 3.6 and earlier\n"); + } rtlpriv->max_fw_size = 0x4000; pr_info("Using firmware %s\n", rtlpriv->cfg->fw_name); diff --git a/trunk/drivers/net/wireless/rtlwifi/rtl8192de/fw.c b/trunk/drivers/net/wireless/rtlwifi/rtl8192de/fw.c index 895ae6c1f354..eb22dccc418b 100644 --- a/trunk/drivers/net/wireless/rtlwifi/rtl8192de/fw.c +++ b/trunk/drivers/net/wireless/rtlwifi/rtl8192de/fw.c @@ -365,7 +365,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, u8 u1b_tmp; bool isfw_read = false; u8 buf_index = 0; - bool bwrite_sucess = false; + bool bwrite_success = false; u8 wait_h2c_limmit = 100; u8 wait_writeh2c_limmit = 100; u8 boxcontent[4], boxextcontent[2]; @@ -408,7 +408,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, break; } } - while (!bwrite_sucess) { + while (!bwrite_success) { wait_writeh2c_limmit--; if (wait_writeh2c_limmit == 0) { RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, @@ -515,7 +515,7 @@ static void _rtl92d_fill_h2c_command(struct ieee80211_hw *hw, "switch case not processed\n"); break; } - bwrite_sucess = true; + bwrite_success = true; rtlhal->last_hmeboxnum = boxnum + 1; if (rtlhal->last_hmeboxnum == 4) rtlhal->last_hmeboxnum = 0; diff --git a/trunk/drivers/net/xen-netfront.c b/trunk/drivers/net/xen-netfront.c index 30899901aef5..650f79a1f2bd 100644 --- a/trunk/drivers/net/xen-netfront.c +++ b/trunk/drivers/net/xen-netfront.c @@ -57,8 +57,7 @@ static const struct ethtool_ops xennet_ethtool_ops; struct netfront_cb { - struct page *page; - unsigned offset; + int pull_to; }; #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb)) @@ -867,15 +866,9 @@ static int handle_incoming_queue(struct net_device *dev, struct sk_buff *skb; while ((skb = __skb_dequeue(rxq)) != NULL) { - struct page *page = NETFRONT_SKB_CB(skb)->page; - void *vaddr = page_address(page); - unsigned offset = NETFRONT_SKB_CB(skb)->offset; - - memcpy(skb->data, vaddr + offset, - skb_headlen(skb)); + int pull_to = NETFRONT_SKB_CB(skb)->pull_to; - if (page != skb_frag_page(&skb_shinfo(skb)->frags[0])) - __free_page(page); + __pskb_pull_tail(skb, pull_to - skb_headlen(skb)); /* Ethernet work: Delayed to here as it peeks the header. 
*/ skb->protocol = eth_type_trans(skb, dev); @@ -913,7 +906,6 @@ static int xennet_poll(struct napi_struct *napi, int budget) struct sk_buff_head errq; struct sk_buff_head tmpq; unsigned long flags; - unsigned int len; int err; spin_lock(&np->rx_lock); @@ -955,24 +947,13 @@ static int xennet_poll(struct napi_struct *napi, int budget) } } - NETFRONT_SKB_CB(skb)->page = - skb_frag_page(&skb_shinfo(skb)->frags[0]); - NETFRONT_SKB_CB(skb)->offset = rx->offset; - - len = rx->status; - if (len > RX_COPY_THRESHOLD) - len = RX_COPY_THRESHOLD; - skb_put(skb, len); + NETFRONT_SKB_CB(skb)->pull_to = rx->status; + if (NETFRONT_SKB_CB(skb)->pull_to > RX_COPY_THRESHOLD) + NETFRONT_SKB_CB(skb)->pull_to = RX_COPY_THRESHOLD; - if (rx->status > len) { - skb_shinfo(skb)->frags[0].page_offset = - rx->offset + len; - skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status - len); - skb->data_len = rx->status - len; - } else { - __skb_fill_page_desc(skb, 0, NULL, 0, 0); - skb_shinfo(skb)->nr_frags = 0; - } + skb_shinfo(skb)->frags[0].page_offset = rx->offset; + skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status); + skb->data_len = rx->status; i = xennet_fill_frags(np, skb, &tmpq); @@ -999,7 +980,7 @@ static int xennet_poll(struct napi_struct *napi, int budget) * receive throughout using the standard receive * buffer size was cut by 25%(!!!). */ - skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len); + skb->truesize += skb->data_len - RX_COPY_THRESHOLD; skb->len += skb->data_len; if (rx->flags & XEN_NETRXF_csum_blank) diff --git a/trunk/drivers/of/base.c b/trunk/drivers/of/base.c index c181b94abc36..d4a1c9a043e1 100644 --- a/trunk/drivers/of/base.c +++ b/trunk/drivers/of/base.c @@ -363,6 +363,33 @@ struct device_node *of_get_next_child(const struct device_node *node, } EXPORT_SYMBOL(of_get_next_child); +/** + * of_get_next_available_child - Find the next available child node + * @node: parent node + * @prev: previous child of the parent node, or NULL to get first + * + * This function is like of_get_next_child(), except that it + * automatically skips any disabled nodes (i.e. status = "disabled"). + */ +struct device_node *of_get_next_available_child(const struct device_node *node, + struct device_node *prev) +{ + struct device_node *next; + + read_lock(&devtree_lock); + next = prev ? 
prev->sibling : node->child; + for (; next; next = next->sibling) { + if (!of_device_is_available(next)) + continue; + if (of_node_get(next)) + break; + } + of_node_put(prev); + read_unlock(&devtree_lock); + return next; +} +EXPORT_SYMBOL(of_get_next_available_child); + /** * of_find_node_by_path - Find a node matching a full OF path * @path: The full path to match diff --git a/trunk/drivers/oprofile/cpu_buffer.c b/trunk/drivers/oprofile/cpu_buffer.c index b8ef8ddcc292..8aa73fac6ad4 100644 --- a/trunk/drivers/oprofile/cpu_buffer.c +++ b/trunk/drivers/oprofile/cpu_buffer.c @@ -451,14 +451,9 @@ static void wq_sync_buffer(struct work_struct *work) { struct oprofile_cpu_buffer *b = container_of(work, struct oprofile_cpu_buffer, work.work); - if (b->cpu != smp_processor_id()) { - printk(KERN_DEBUG "WQ on CPU%d, prefer CPU%d\n", - smp_processor_id(), b->cpu); - - if (!cpu_online(b->cpu)) { - cancel_delayed_work(&b->work); - return; - } + if (b->cpu != smp_processor_id() && !cpu_online(b->cpu)) { + cancel_delayed_work(&b->work); + return; } sync_buffer(b->cpu); diff --git a/trunk/drivers/pci/.gitignore b/trunk/drivers/pci/.gitignore deleted file mode 100644 index f297ca8d313e..000000000000 --- a/trunk/drivers/pci/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -classlist.h -devlist.h -gen-devlist - diff --git a/trunk/drivers/pci/pci-acpi.c b/trunk/drivers/pci/pci-acpi.c index fbf7b26c7c8a..c5792d622dc4 100644 --- a/trunk/drivers/pci/pci-acpi.c +++ b/trunk/drivers/pci/pci-acpi.c @@ -266,8 +266,8 @@ static int acpi_pci_set_power_state(struct pci_dev *dev, pci_power_t state) } if (!error) - dev_printk(KERN_INFO, &dev->dev, - "power state changed by ACPI to D%d\n", state); + dev_info(&dev->dev, "power state changed by ACPI to %s\n", + pci_power_name(state)); return error; } diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c index 185be3703343..d6fd6b6d9d4b 100644 --- a/trunk/drivers/pci/pci-driver.c +++ b/trunk/drivers/pci/pci-driver.c @@ -280,8 +280,12 @@ static long local_pci_probe(void *_ddi) { struct drv_dev_and_id *ddi = _ddi; struct device *dev = &ddi->dev->dev; + struct device *parent = dev->parent; int rc; + /* The parent bridge must be in active state when probing */ + if (parent) + pm_runtime_get_sync(parent); /* Unbound PCI devices are always set to disabled and suspended. * During probe, the device is set to enabled and active and the * usage count is incremented. If the driver supports runtime PM, @@ -298,6 +302,8 @@ static long local_pci_probe(void *_ddi) pm_runtime_set_suspended(dev); pm_runtime_put_noidle(dev); } + if (parent) + pm_runtime_put(parent); return rc; } @@ -959,6 +965,13 @@ static int pci_pm_poweroff_noirq(struct device *dev) if (!pci_dev->state_saved && !pci_is_bridge(pci_dev)) pci_prepare_to_sleep(pci_dev); + /* + * The reason for doing this here is the same as for the analogous code + * in pci_pm_suspend_noirq(). 
+ */ + if (pci_dev->class == PCI_CLASS_SERIAL_USB_EHCI) + pci_write_config_word(pci_dev, PCI_COMMAND, 0); + return 0; } diff --git a/trunk/drivers/pci/pci-sysfs.c b/trunk/drivers/pci/pci-sysfs.c index 6869009c7393..02d107b15281 100644 --- a/trunk/drivers/pci/pci-sysfs.c +++ b/trunk/drivers/pci/pci-sysfs.c @@ -458,6 +458,40 @@ boot_vga_show(struct device *dev, struct device_attribute *attr, char *buf) } struct device_attribute vga_attr = __ATTR_RO(boot_vga); +static void +pci_config_pm_runtime_get(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + if (parent) + pm_runtime_get_sync(parent); + pm_runtime_get_noresume(dev); + /* + * pdev->current_state is set to PCI_D3cold during suspending, + * so wait until suspending completes + */ + pm_runtime_barrier(dev); + /* + * Only need to resume devices in D3cold, because config + * registers are still accessible for devices suspended but + * not in D3cold. + */ + if (pdev->current_state == PCI_D3cold) + pm_runtime_resume(dev); +} + +static void +pci_config_pm_runtime_put(struct pci_dev *pdev) +{ + struct device *dev = &pdev->dev; + struct device *parent = dev->parent; + + pm_runtime_put(dev); + if (parent) + pm_runtime_put_sync(parent); +} + static ssize_t pci_read_config(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -484,6 +518,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, size = count; } + pci_config_pm_runtime_get(dev); + if ((off & 1) && size) { u8 val; pci_user_read_config_byte(dev, off, &val); @@ -529,6 +565,8 @@ pci_read_config(struct file *filp, struct kobject *kobj, --size; } + pci_config_pm_runtime_put(dev); + return count; } @@ -549,6 +587,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, count = size; } + pci_config_pm_runtime_get(dev); + if ((off & 1) && size) { pci_user_write_config_byte(dev, off, data[off - init_off]); off++; @@ -587,6 +627,8 @@ pci_write_config(struct file* filp, struct kobject *kobj, --size; } + pci_config_pm_runtime_put(dev); + return count; } diff --git a/trunk/drivers/pci/pci.c b/trunk/drivers/pci/pci.c index f3ea977a5b1b..ab4bf5a4c2f1 100644 --- a/trunk/drivers/pci/pci.c +++ b/trunk/drivers/pci/pci.c @@ -1941,6 +1941,7 @@ void pci_pm_init(struct pci_dev *dev) dev->pm_cap = pm; dev->d3_delay = PCI_PM_D3_WAIT; dev->d3cold_delay = PCI_PM_D3COLD_WAIT; + dev->d3cold_allowed = true; dev->d1_support = false; dev->d2_support = false; diff --git a/trunk/drivers/pci/pcie/portdrv_pci.c b/trunk/drivers/pci/pcie/portdrv_pci.c index 3a7eefcb270a..e76b44777dbf 100644 --- a/trunk/drivers/pci/pcie/portdrv_pci.c +++ b/trunk/drivers/pci/pcie/portdrv_pci.c @@ -140,9 +140,17 @@ static int pcie_port_runtime_resume(struct device *dev) { return 0; } + +static int pcie_port_runtime_idle(struct device *dev) +{ + /* Delay for a short while to prevent too frequent suspend/resume */ + pm_schedule_suspend(dev, 10); + return -EBUSY; +} #else #define pcie_port_runtime_suspend NULL #define pcie_port_runtime_resume NULL +#define pcie_port_runtime_idle NULL #endif static const struct dev_pm_ops pcie_portdrv_pm_ops = { @@ -155,6 +163,7 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = { .resume_noirq = pcie_port_resume_noirq, .runtime_suspend = pcie_port_runtime_suspend, .runtime_resume = pcie_port_runtime_resume, + .runtime_idle = pcie_port_runtime_idle, }; #define PCIE_PORTDRV_PM_OPS (&pcie_portdrv_pm_ops) @@ -200,6 +209,11 @@ static int __devinit pcie_portdrv_probe(struct pci_dev *dev, return status; pci_save_state(dev); + /* + 
* D3cold may not work properly on some PCIe port, so disable + * it by default. + */ + dev->d3cold_allowed = false; if (!pci_match_id(port_runtime_pm_black_list, dev)) pm_runtime_put_noidle(&dev->dev); diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c index 6c143b4497ca..9f8a6b79a8ec 100644 --- a/trunk/drivers/pci/probe.c +++ b/trunk/drivers/pci/probe.c @@ -144,15 +144,13 @@ static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar) case PCI_BASE_ADDRESS_MEM_TYPE_32: break; case PCI_BASE_ADDRESS_MEM_TYPE_1M: - dev_info(&dev->dev, "1M mem BAR treated as 32-bit BAR\n"); + /* 1M mem BAR treated as 32-bit BAR */ break; case PCI_BASE_ADDRESS_MEM_TYPE_64: flags |= IORESOURCE_MEM_64; break; default: - dev_warn(&dev->dev, - "mem unknown type %x treated as 32-bit BAR\n", - mem_type); + /* mem unknown type treated as 32-bit BAR */ break; } return flags; @@ -173,9 +171,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, u32 l, sz, mask; u16 orig_cmd; struct pci_bus_region region; + bool bar_too_big = false, bar_disabled = false; mask = type ? PCI_ROM_ADDRESS_MASK : ~0; + /* No printks while decoding is disabled! */ if (!dev->mmio_always_on) { pci_read_config_word(dev, PCI_COMMAND, &orig_cmd); pci_write_config_word(dev, PCI_COMMAND, @@ -240,8 +240,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, goto fail; if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) { - dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", - pos); + bar_too_big = true; goto fail; } @@ -252,12 +251,11 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, region.start = 0; region.end = sz64; pcibios_bus_to_resource(dev, res, &region); + bar_disabled = true; } else { region.start = l64; region.end = l64 + sz64; pcibios_bus_to_resource(dev, res, &region); - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", - pos, res); } } else { sz = pci_size(l, sz, mask); @@ -268,18 +266,23 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type, region.start = l; region.end = l + sz; pcibios_bus_to_resource(dev, res, &region); - - dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); } - out: + goto out; + + +fail: + res->flags = 0; +out: if (!dev->mmio_always_on) pci_write_config_word(dev, PCI_COMMAND, orig_cmd); + if (bar_too_big) + dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n", pos); + if (res->flags && !bar_disabled) + dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res); + return (res->flags & IORESOURCE_MEM_64) ?
1 : 0; - fail: - res->flags = 0; - goto out; } static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom) diff --git a/trunk/drivers/pinctrl/core.c b/trunk/drivers/pinctrl/core.c index fb7f3bebdc69..dc5c126e398a 100644 --- a/trunk/drivers/pinctrl/core.c +++ b/trunk/drivers/pinctrl/core.c @@ -657,11 +657,7 @@ static struct pinctrl *pinctrl_get_locked(struct device *dev) if (p != NULL) return ERR_PTR(-EBUSY); - p = create_pinctrl(dev); - if (IS_ERR(p)) - return p; - - return p; + return create_pinctrl(dev); } /** @@ -738,11 +734,8 @@ static struct pinctrl_state *pinctrl_lookup_state_locked(struct pinctrl *p, dev_dbg(p->dev, "using pinctrl dummy state (%s)\n", name); state = create_state(p, name); - if (IS_ERR(state)) - return state; - } else { - return ERR_PTR(-ENODEV); - } + } else + state = ERR_PTR(-ENODEV); } return state; diff --git a/trunk/drivers/pinctrl/pinctrl-imx23.c b/trunk/drivers/pinctrl/pinctrl-imx23.c index 75d3eff94296..3674d877ed7c 100644 --- a/trunk/drivers/pinctrl/pinctrl-imx23.c +++ b/trunk/drivers/pinctrl/pinctrl-imx23.c @@ -292,7 +292,7 @@ static int __init imx23_pinctrl_init(void) { return platform_driver_register(&imx23_pinctrl_driver); } -arch_initcall(imx23_pinctrl_init); +postcore_initcall(imx23_pinctrl_init); static void __exit imx23_pinctrl_exit(void) { diff --git a/trunk/drivers/pinctrl/pinctrl-imx28.c b/trunk/drivers/pinctrl/pinctrl-imx28.c index b973026811a2..0f5b2122b1ba 100644 --- a/trunk/drivers/pinctrl/pinctrl-imx28.c +++ b/trunk/drivers/pinctrl/pinctrl-imx28.c @@ -408,7 +408,7 @@ static int __init imx28_pinctrl_init(void) { return platform_driver_register(&imx28_pinctrl_driver); } -arch_initcall(imx28_pinctrl_init); +postcore_initcall(imx28_pinctrl_init); static void __exit imx28_pinctrl_exit(void) { diff --git a/trunk/drivers/pinctrl/pinctrl-imx51.c b/trunk/drivers/pinctrl/pinctrl-imx51.c index 689b3c88dd2e..9fd02162a3c2 100644 --- a/trunk/drivers/pinctrl/pinctrl-imx51.c +++ b/trunk/drivers/pinctrl/pinctrl-imx51.c @@ -974,7 +974,7 @@ static struct imx_pin_reg imx51_pin_regs[] = { IMX_PIN_REG(MX51_PAD_EIM_DA13, NO_PAD, 0x050, 0, 0x000, 0), /* MX51_PAD_EIM_DA13__EIM_DA13 */ IMX_PIN_REG(MX51_PAD_EIM_DA14, NO_PAD, 0x054, 0, 0x000, 0), /* MX51_PAD_EIM_DA14__EIM_DA14 */ IMX_PIN_REG(MX51_PAD_EIM_DA15, NO_PAD, 0x058, 0, 0x000, 0), /* MX51_PAD_EIM_DA15__EIM_DA15 */ - IMX_PIN_REG(MX51_PAD_SD2_CMD, NO_PAD, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */ + IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 2, 0x91c, 3), /* MX51_PAD_SD2_CMD__CSPI_MOSI */ IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 1, 0x9b0, 2), /* MX51_PAD_SD2_CMD__I2C1_SCL */ IMX_PIN_REG(MX51_PAD_SD2_CMD, 0x7bc, 0x3b4, 0, 0x000, 0), /* MX51_PAD_SD2_CMD__SD2_CMD */ IMX_PIN_REG(MX51_PAD_SD2_CLK, 0x7c0, 0x3b8, 2, 0x914, 3), /* MX51_PAD_SD2_CLK__CSPI_SCLK */ diff --git a/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c b/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c index 6f99769c6733..a39fb7a6fc51 100644 --- a/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c +++ b/trunk/drivers/pinctrl/pinctrl-nomadik-db8500.c @@ -505,6 +505,8 @@ static const unsigned kp_b_1_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1, DB8500_PIN_J3, DB8500_PIN_H2, DB8500_PIN_J2, DB8500_PIN_H1, DB8500_PIN_F4, DB8500_PIN_E3, DB8500_PIN_E4, DB8500_PIN_D2, DB8500_PIN_C1, DB8500_PIN_D3, DB8500_PIN_C2, DB8500_PIN_D5 }; +static const unsigned kp_b_2_pins[] = { DB8500_PIN_F3, DB8500_PIN_F1, + DB8500_PIN_G3, DB8500_PIN_G2, DB8500_PIN_F4, DB8500_PIN_E3}; static const unsigned sm_b_1_pins[] = { DB8500_PIN_C6, DB8500_PIN_B3, 
DB8500_PIN_C4, DB8500_PIN_E6, DB8500_PIN_A3, DB8500_PIN_B6, DB8500_PIN_D6, DB8500_PIN_B7, DB8500_PIN_D7, DB8500_PIN_D8, @@ -662,6 +664,7 @@ static const struct nmk_pingroup nmk_db8500_groups[] = { DB8500_PIN_GROUP(spi3_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(msp1txrx_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(kp_b_1, NMK_GPIO_ALT_B), + DB8500_PIN_GROUP(kp_b_2, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(sm_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(smcs0_b_1, NMK_GPIO_ALT_B), DB8500_PIN_GROUP(smcs1_b_1, NMK_GPIO_ALT_B), @@ -751,7 +754,7 @@ DB8500_FUNC_GROUPS(msp1, "msp1txrx_a_1", "msp1_a_1", "msp1txrx_b_1"); DB8500_FUNC_GROUPS(lcdb, "lcdb_a_1"); DB8500_FUNC_GROUPS(lcd, "lcdvsi0_a_1", "lcdvsi1_a_1", "lcd_d0_d7_a_1", "lcd_d8_d11_a_1", "lcd_d12_d23_a_1", "lcd_b_1"); -DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_c_1", "kp_oc1_1"); +DB8500_FUNC_GROUPS(kp, "kp_a_1", "kp_b_1", "kp_b_2", "kp_c_1", "kp_oc1_1"); DB8500_FUNC_GROUPS(mc2, "mc2_a_1", "mc2rstn_c_1"); DB8500_FUNC_GROUPS(ssp1, "ssp1_a_1"); DB8500_FUNC_GROUPS(ssp0, "ssp0_a_1"); @@ -766,7 +769,7 @@ DB8500_FUNC_GROUPS(ipgpio, "ipgpio0_a_1", "ipgpio1_a_1", "ipgpio7_b_1", DB8500_FUNC_GROUPS(msp2, "msp2sck_a_1", "msp2_a_1"); DB8500_FUNC_GROUPS(mc4, "mc4_a_1", "mc4rstn_c_1"); DB8500_FUNC_GROUPS(mc1, "mc1_a_1", "mc1dir_a_1"); -DB8500_FUNC_GROUPS(hsi, "hsir1_a_1", "hsit1_a_1", "hsit_a_2"); +DB8500_FUNC_GROUPS(hsi, "hsir_a_1", "hsit_a_1", "hsit_a_2"); DB8500_FUNC_GROUPS(clkout, "clkout_a_1", "clkout_a_2", "clkout_c_1"); DB8500_FUNC_GROUPS(usb, "usb_a_1"); DB8500_FUNC_GROUPS(trig, "trig_b_1"); diff --git a/trunk/drivers/pinctrl/pinctrl-nomadik.c b/trunk/drivers/pinctrl/pinctrl-nomadik.c index 53b0d49a7a1c..3dde6537adb8 100644 --- a/trunk/drivers/pinctrl/pinctrl-nomadik.c +++ b/trunk/drivers/pinctrl/pinctrl-nomadik.c @@ -1292,7 +1292,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev) NOMADIK_GPIO_TO_IRQ(pdata->first_gpio), 0, &nmk_gpio_irq_simple_ops, nmk_chip); if (!nmk_chip->domain) { - pr_err("%s: Failed to create irqdomain\n", np->full_name); + dev_err(&dev->dev, "failed to create irqdomain\n"); ret = -ENOSYS; goto out; } @@ -1731,7 +1731,6 @@ static int __devinit nmk_pinctrl_probe(struct platform_device *pdev) for (i = 0; i < npct->soc->gpio_num_ranges; i++) { if (!nmk_gpio_chips[i]) { dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i); - devm_kfree(&pdev->dev, npct); return -EPROBE_DEFER; } npct->soc->gpio_ranges[i].gc = &nmk_gpio_chips[i]->chip; diff --git a/trunk/drivers/pinctrl/pinctrl-sirf.c b/trunk/drivers/pinctrl/pinctrl-sirf.c index 2aae8a8978e9..7fca6ce5952b 100644 --- a/trunk/drivers/pinctrl/pinctrl-sirf.c +++ b/trunk/drivers/pinctrl/pinctrl-sirf.c @@ -1217,7 +1217,6 @@ static int __devinit sirfsoc_pinmux_probe(struct platform_device *pdev) iounmap(spmx->gpio_virtbase); out_no_gpio_remap: platform_set_drvdata(pdev, NULL); - devm_kfree(&pdev->dev, spmx); return ret; } diff --git a/trunk/drivers/pinctrl/pinctrl-u300.c b/trunk/drivers/pinctrl/pinctrl-u300.c index a7ad8c112d91..309f5b9a70ec 100644 --- a/trunk/drivers/pinctrl/pinctrl-u300.c +++ b/trunk/drivers/pinctrl/pinctrl-u300.c @@ -1121,10 +1121,8 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev) upmx->dev = &pdev->dev; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - ret = -ENOENT; - goto out_no_resource; - } + if (!res) + return -ENOENT; upmx->phybase = res->start; upmx->physize = resource_size(res); @@ -1165,8 +1163,6 @@ static int __devinit u300_pmx_probe(struct platform_device *pdev) platform_set_drvdata(pdev, NULL); 
out_no_memregion: release_mem_region(upmx->phybase, upmx->physize); -out_no_resource: - devm_kfree(&pdev->dev, upmx); return ret; } diff --git a/trunk/drivers/platform/x86/Kconfig b/trunk/drivers/platform/x86/Kconfig index 2a262f5c5c0c..c86bae828c28 100644 --- a/trunk/drivers/platform/x86/Kconfig +++ b/trunk/drivers/platform/x86/Kconfig @@ -289,6 +289,7 @@ config IDEAPAD_LAPTOP tristate "Lenovo IdeaPad Laptop Extras" depends on ACPI depends on RFKILL && INPUT + depends on SERIO_I8042 select INPUT_SPARSEKMAP help This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. @@ -758,8 +759,11 @@ config SAMSUNG_Q10 config APPLE_GMUX tristate "Apple Gmux Driver" + depends on ACPI depends on PNP - select BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_CLASS_DEVICE + depends on BACKLIGHT_APPLE=n || BACKLIGHT_APPLE + depends on ACPI_VIDEO=n || ACPI_VIDEO ---help--- This driver provides support for the gmux device found on many Apple laptops, which controls the display mux for the hybrid diff --git a/trunk/drivers/platform/x86/acer-wmi.c b/trunk/drivers/platform/x86/acer-wmi.c index 3782e1cd3697..934d861a3235 100644 --- a/trunk/drivers/platform/x86/acer-wmi.c +++ b/trunk/drivers/platform/x86/acer-wmi.c @@ -2196,10 +2196,8 @@ static int __init acer_wmi_init(void) interface->capability &= ~ACER_CAP_BRIGHTNESS; pr_info("Brightness must be controlled by acpi video driver\n"); } else { -#ifdef CONFIG_ACPI_VIDEO pr_info("Disabling ACPI video driver\n"); acpi_video_unregister(); -#endif } if (wmi_has_guid(WMID_GUID3)) { diff --git a/trunk/drivers/platform/x86/apple-gmux.c b/trunk/drivers/platform/x86/apple-gmux.c index 905fa01ac8df..db8f63841b42 100644 --- a/trunk/drivers/platform/x86/apple-gmux.c +++ b/trunk/drivers/platform/x86/apple-gmux.c @@ -2,6 +2,7 @@ * Gmux driver for Apple laptops * * Copyright (C) Canonical Ltd. + * Copyright (C) 2010-2012 Andreas Heider * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -18,16 +19,30 @@ #include #include #include +#include +#include +#include #include #include struct apple_gmux_data { unsigned long iostart; unsigned long iolen; + bool indexed; + struct mutex index_lock; struct backlight_device *bdev; + + /* switcheroo data */ + acpi_handle dhandle; + int gpe; + enum vga_switcheroo_client_id resume_client_id; + enum vga_switcheroo_state power_state; + struct completion powerchange_done; }; +static struct apple_gmux_data *apple_gmux_data; + /* * gmux port offsets. Many of these are not yet used, but may be in the * future, and it's useful to have them documented here anyhow. 
@@ -45,6 +60,9 @@ struct apple_gmux_data { #define GMUX_PORT_DISCRETE_POWER 0x50 #define GMUX_PORT_MAX_BRIGHTNESS 0x70 #define GMUX_PORT_BRIGHTNESS 0x74 +#define GMUX_PORT_VALUE 0xc2 +#define GMUX_PORT_READ 0xd0 +#define GMUX_PORT_WRITE 0xd4 #define GMUX_MIN_IO_LEN (GMUX_PORT_BRIGHTNESS + 4) @@ -59,22 +77,174 @@ struct apple_gmux_data { #define GMUX_BRIGHTNESS_MASK 0x00ffffff #define GMUX_MAX_BRIGHTNESS GMUX_BRIGHTNESS_MASK -static inline u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) +static u8 gmux_pio_read8(struct apple_gmux_data *gmux_data, int port) { return inb(gmux_data->iostart + port); } -static inline void gmux_write8(struct apple_gmux_data *gmux_data, int port, +static void gmux_pio_write8(struct apple_gmux_data *gmux_data, int port, u8 val) { outb(val, gmux_data->iostart + port); } -static inline u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) +static u32 gmux_pio_read32(struct apple_gmux_data *gmux_data, int port) { return inl(gmux_data->iostart + port); } +static void gmux_pio_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + int i; + u8 tmpval; + + for (i = 0; i < 4; i++) { + tmpval = (val >> (i * 8)) & 0xff; + outb(tmpval, gmux_data->iostart + port + i); + } +} + +static int gmux_index_wait_ready(struct apple_gmux_data *gmux_data) +{ + int i = 200; + u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + + while (i && (gwr & 0x01)) { + inb(gmux_data->iostart + GMUX_PORT_READ); + gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + udelay(100); + i--; + } + + return !!i; +} + +static int gmux_index_wait_complete(struct apple_gmux_data *gmux_data) +{ + int i = 200; + u8 gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + + while (i && !(gwr & 0x01)) { + gwr = inb(gmux_data->iostart + GMUX_PORT_WRITE); + udelay(100); + i--; + } + + if (gwr & 0x01) + inb(gmux_data->iostart + GMUX_PORT_READ); + + return !!i; +} + +static u8 gmux_index_read8(struct apple_gmux_data *gmux_data, int port) +{ + u8 val; + + mutex_lock(&gmux_data->index_lock); + gmux_index_wait_ready(gmux_data); + outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); + gmux_index_wait_complete(gmux_data); + val = inb(gmux_data->iostart + GMUX_PORT_VALUE); + mutex_unlock(&gmux_data->index_lock); + + return val; +} + +static void gmux_index_write8(struct apple_gmux_data *gmux_data, int port, + u8 val) +{ + mutex_lock(&gmux_data->index_lock); + outb(val, gmux_data->iostart + GMUX_PORT_VALUE); + gmux_index_wait_ready(gmux_data); + outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); + gmux_index_wait_complete(gmux_data); + mutex_unlock(&gmux_data->index_lock); +} + +static u32 gmux_index_read32(struct apple_gmux_data *gmux_data, int port) +{ + u32 val; + + mutex_lock(&gmux_data->index_lock); + gmux_index_wait_ready(gmux_data); + outb((port & 0xff), gmux_data->iostart + GMUX_PORT_READ); + gmux_index_wait_complete(gmux_data); + val = inl(gmux_data->iostart + GMUX_PORT_VALUE); + mutex_unlock(&gmux_data->index_lock); + + return val; +} + +static void gmux_index_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + int i; + u8 tmpval; + + mutex_lock(&gmux_data->index_lock); + + for (i = 0; i < 4; i++) { + tmpval = (val >> (i * 8)) & 0xff; + outb(tmpval, gmux_data->iostart + GMUX_PORT_VALUE + i); + } + + gmux_index_wait_ready(gmux_data); + outb(port & 0xff, gmux_data->iostart + GMUX_PORT_WRITE); + gmux_index_wait_complete(gmux_data); + mutex_unlock(&gmux_data->index_lock); +} + +static u8 gmux_read8(struct apple_gmux_data *gmux_data, int port) +{ + if 
(gmux_data->indexed) + return gmux_index_read8(gmux_data, port); + else + return gmux_pio_read8(gmux_data, port); +} + +static void gmux_write8(struct apple_gmux_data *gmux_data, int port, u8 val) +{ + if (gmux_data->indexed) + gmux_index_write8(gmux_data, port, val); + else + gmux_pio_write8(gmux_data, port, val); +} + +static u32 gmux_read32(struct apple_gmux_data *gmux_data, int port) +{ + if (gmux_data->indexed) + return gmux_index_read32(gmux_data, port); + else + return gmux_pio_read32(gmux_data, port); +} + +static void gmux_write32(struct apple_gmux_data *gmux_data, int port, + u32 val) +{ + if (gmux_data->indexed) + gmux_index_write32(gmux_data, port, val); + else + gmux_pio_write32(gmux_data, port, val); +} + +static bool gmux_is_indexed(struct apple_gmux_data *gmux_data) +{ + u16 val; + + outb(0xaa, gmux_data->iostart + 0xcc); + outb(0x55, gmux_data->iostart + 0xcd); + outb(0x00, gmux_data->iostart + 0xce); + + val = inb(gmux_data->iostart + 0xcc) | + (inb(gmux_data->iostart + 0xcd) << 8); + + if (val == 0x55aa) + return true; + + return false; +} + static int gmux_get_brightness(struct backlight_device *bd) { struct apple_gmux_data *gmux_data = bl_get_data(bd); @@ -90,16 +260,7 @@ static int gmux_update_status(struct backlight_device *bd) if (bd->props.state & BL_CORE_SUSPENDED) return 0; - /* - * Older gmux versions require writing out lower bytes first then - * setting the upper byte to 0 to flush the values. Newer versions - * accept a single u32 write, but the old method also works, so we - * just use the old method for all gmux versions. - */ - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 1, brightness >> 8); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 2, brightness >> 16); - gmux_write8(gmux_data, GMUX_PORT_BRIGHTNESS + 3, 0); + gmux_write32(gmux_data, GMUX_PORT_BRIGHTNESS, brightness); return 0; } @@ -110,6 +271,146 @@ static const struct backlight_ops gmux_bl_ops = { .update_status = gmux_update_status, }; +static int gmux_switchto(enum vga_switcheroo_client_id id) +{ + if (id == VGA_SWITCHEROO_IGD) { + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 1); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 2); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 2); + } else { + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DDC, 2); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_DISPLAY, 3); + gmux_write8(apple_gmux_data, GMUX_PORT_SWITCH_EXTERNAL, 3); + } + + return 0; +} + +static int gmux_set_discrete_state(struct apple_gmux_data *gmux_data, + enum vga_switcheroo_state state) +{ + INIT_COMPLETION(gmux_data->powerchange_done); + + if (state == VGA_SWITCHEROO_ON) { + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 3); + pr_debug("Discrete card powered up\n"); + } else { + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 1); + gmux_write8(gmux_data, GMUX_PORT_DISCRETE_POWER, 0); + pr_debug("Discrete card powered down\n"); + } + + gmux_data->power_state = state; + + if (gmux_data->gpe >= 0 && + !wait_for_completion_interruptible_timeout(&gmux_data->powerchange_done, + msecs_to_jiffies(200))) + pr_warn("Timeout waiting for gmux switch to complete\n"); + + return 0; +} + +static int gmux_set_power_state(enum vga_switcheroo_client_id id, + enum vga_switcheroo_state state) +{ + if (id == VGA_SWITCHEROO_IGD) + return 0; + + return gmux_set_discrete_state(apple_gmux_data, state); +} + +static int gmux_get_client_id(struct pci_dev *pdev) +{ + /* 
+ * Early Macbook Pros with switchable graphics use nvidia + * integrated graphics. Hardcode that the 9400M is integrated. + */ + if (pdev->vendor == PCI_VENDOR_ID_INTEL) + return VGA_SWITCHEROO_IGD; + else if (pdev->vendor == PCI_VENDOR_ID_NVIDIA && + pdev->device == 0x0863) + return VGA_SWITCHEROO_IGD; + else + return VGA_SWITCHEROO_DIS; +} + +static enum vga_switcheroo_client_id +gmux_active_client(struct apple_gmux_data *gmux_data) +{ + if (gmux_read8(gmux_data, GMUX_PORT_SWITCH_DISPLAY) == 2) + return VGA_SWITCHEROO_IGD; + + return VGA_SWITCHEROO_DIS; +} + +static struct vga_switcheroo_handler gmux_handler = { + .switchto = gmux_switchto, + .power_state = gmux_set_power_state, + .get_client_id = gmux_get_client_id, +}; + +static inline void gmux_disable_interrupts(struct apple_gmux_data *gmux_data) +{ + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, + GMUX_INTERRUPT_DISABLE); +} + +static inline void gmux_enable_interrupts(struct apple_gmux_data *gmux_data) +{ + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_ENABLE, + GMUX_INTERRUPT_ENABLE); +} + +static inline u8 gmux_interrupt_get_status(struct apple_gmux_data *gmux_data) +{ + return gmux_read8(gmux_data, GMUX_PORT_INTERRUPT_STATUS); +} + +static void gmux_clear_interrupts(struct apple_gmux_data *gmux_data) +{ + u8 status; + + /* to clear interrupts write back current status */ + status = gmux_interrupt_get_status(gmux_data); + gmux_write8(gmux_data, GMUX_PORT_INTERRUPT_STATUS, status); +} + +static void gmux_notify_handler(acpi_handle device, u32 value, void *context) +{ + u8 status; + struct pnp_dev *pnp = (struct pnp_dev *)context; + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + + status = gmux_interrupt_get_status(gmux_data); + gmux_disable_interrupts(gmux_data); + pr_debug("Notify handler called: status %d\n", status); + + gmux_clear_interrupts(gmux_data); + gmux_enable_interrupts(gmux_data); + + if (status & GMUX_INTERRUPT_STATUS_POWER) + complete(&gmux_data->powerchange_done); +} + +static int gmux_suspend(struct pnp_dev *pnp, pm_message_t state) +{ + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + gmux_data->resume_client_id = gmux_active_client(gmux_data); + gmux_disable_interrupts(gmux_data); + return 0; +} + +static int gmux_resume(struct pnp_dev *pnp) +{ + struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + gmux_enable_interrupts(gmux_data); + gmux_switchto(gmux_data->resume_client_id); + if (gmux_data->power_state == VGA_SWITCHEROO_OFF) + gmux_set_discrete_state(gmux_data, gmux_data->power_state); + return 0; +} + static int __devinit gmux_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { @@ -119,6 +420,11 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, struct backlight_device *bdev; u8 ver_major, ver_minor, ver_release; int ret = -ENXIO; + acpi_status status; + unsigned long long gpe; + + if (apple_gmux_data) + return -EBUSY; gmux_data = kzalloc(sizeof(*gmux_data), GFP_KERNEL); if (!gmux_data) @@ -147,21 +453,32 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, } /* - * On some machines the gmux is in ACPI even thought the machine - * doesn't really have a gmux. Check for invalid version information - * to detect this. 
+ * Invalid version information may indicate either that the gmux + * device isn't present or that it's a new one that uses indexed + * io */ + ver_major = gmux_read8(gmux_data, GMUX_PORT_VERSION_MAJOR); ver_minor = gmux_read8(gmux_data, GMUX_PORT_VERSION_MINOR); ver_release = gmux_read8(gmux_data, GMUX_PORT_VERSION_RELEASE); if (ver_major == 0xff && ver_minor == 0xff && ver_release == 0xff) { - pr_info("gmux device not present\n"); - ret = -ENODEV; - goto err_release; + if (gmux_is_indexed(gmux_data)) { + u32 version; + mutex_init(&gmux_data->index_lock); + gmux_data->indexed = true; + version = gmux_read32(gmux_data, + GMUX_PORT_VERSION_MAJOR); + ver_major = (version >> 24) & 0xff; + ver_minor = (version >> 16) & 0xff; + ver_release = (version >> 8) & 0xff; + } else { + pr_info("gmux device not present\n"); + ret = -ENODEV; + goto err_release; + } } - - pr_info("Found gmux version %d.%d.%d\n", ver_major, ver_minor, - ver_release); + pr_info("Found gmux version %d.%d.%d [%s]\n", ver_major, ver_minor, + ver_release, (gmux_data->indexed ? "indexed" : "classic")); memset(&props, 0, sizeof(props)); props.type = BACKLIGHT_PLATFORM; @@ -194,13 +511,65 @@ static int __devinit gmux_probe(struct pnp_dev *pnp, * Disable the other backlight choices. */ acpi_video_dmi_promote_vendor(); -#ifdef CONFIG_ACPI_VIDEO acpi_video_unregister(); -#endif apple_bl_unregister(); + gmux_data->power_state = VGA_SWITCHEROO_ON; + + gmux_data->dhandle = DEVICE_ACPI_HANDLE(&pnp->dev); + if (!gmux_data->dhandle) { + pr_err("Cannot find acpi handle for pnp device %s\n", + dev_name(&pnp->dev)); + ret = -ENODEV; + goto err_notify; + } + + status = acpi_evaluate_integer(gmux_data->dhandle, "GMGP", NULL, &gpe); + if (ACPI_SUCCESS(status)) { + gmux_data->gpe = (int)gpe; + + status = acpi_install_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler, pnp); + if (ACPI_FAILURE(status)) { + pr_err("Install notify handler failed: %s\n", + acpi_format_exception(status)); + ret = -ENODEV; + goto err_notify; + } + + status = acpi_enable_gpe(NULL, gmux_data->gpe); + if (ACPI_FAILURE(status)) { + pr_err("Cannot enable gpe: %s\n", + acpi_format_exception(status)); + goto err_enable_gpe; + } + } else { + pr_warn("No GPE found for gmux\n"); + gmux_data->gpe = -1; + } + + if (vga_switcheroo_register_handler(&gmux_handler)) { + ret = -ENODEV; + goto err_register_handler; + } + + init_completion(&gmux_data->powerchange_done); + apple_gmux_data = gmux_data; + gmux_enable_interrupts(gmux_data); + return 0; +err_register_handler: + if (gmux_data->gpe >= 0) + acpi_disable_gpe(NULL, gmux_data->gpe); +err_enable_gpe: + if (gmux_data->gpe >= 0) + acpi_remove_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler); +err_notify: + backlight_device_unregister(bdev); err_release: release_region(gmux_data->iostart, gmux_data->iolen); err_free: @@ -212,14 +581,23 @@ static void __devexit gmux_remove(struct pnp_dev *pnp) { struct apple_gmux_data *gmux_data = pnp_get_drvdata(pnp); + vga_switcheroo_unregister_handler(); + gmux_disable_interrupts(gmux_data); + if (gmux_data->gpe >= 0) { + acpi_disable_gpe(NULL, gmux_data->gpe); + acpi_remove_notify_handler(gmux_data->dhandle, + ACPI_DEVICE_NOTIFY, + &gmux_notify_handler); + } + backlight_device_unregister(gmux_data->bdev); + release_region(gmux_data->iostart, gmux_data->iolen); + apple_gmux_data = NULL; kfree(gmux_data); acpi_video_dmi_demote_vendor(); -#ifdef CONFIG_ACPI_VIDEO acpi_video_register(); -#endif apple_bl_register(); } @@ -233,6 +611,8 @@ static 
struct pnp_driver gmux_pnp_driver = { .probe = gmux_probe, .remove = __devexit_p(gmux_remove), .id_table = gmux_device_ids, + .suspend = gmux_suspend, + .resume = gmux_resume }; static int __init apple_gmux_init(void) diff --git a/trunk/drivers/platform/x86/asus-laptop.c b/trunk/drivers/platform/x86/asus-laptop.c index e38f91be0b10..4b568df56643 100644 --- a/trunk/drivers/platform/x86/asus-laptop.c +++ b/trunk/drivers/platform/x86/asus-laptop.c @@ -85,7 +85,7 @@ static char *wled_type = "unknown"; static char *bled_type = "unknown"; module_param(wled_type, charp, 0444); -MODULE_PARM_DESC(wlan_status, "Set the wled type on boot " +MODULE_PARM_DESC(wled_type, "Set the wled type on boot " "(unknown, led or rfkill). " "default is unknown"); @@ -863,9 +863,9 @@ static ssize_t show_infos(struct device *dev, * The significance of others is yet to be found. * If we don't find the method, we assume the device are present. */ - rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp); + rv = acpi_evaluate_integer(asus->handle, "HWRS", NULL, &temp); if (!ACPI_FAILURE(rv)) - len += sprintf(page + len, "HRWS value : %#x\n", + len += sprintf(page + len, "HWRS value : %#x\n", (uint) temp); /* * Another value for userspace: the ASYM method returns 0x02 for @@ -1751,9 +1751,9 @@ static int asus_laptop_get_info(struct asus_laptop *asus) * The significance of others is yet to be found. */ status = - acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result); + acpi_evaluate_integer(asus->handle, "HWRS", NULL, &hwrs_result); if (!ACPI_FAILURE(status)) - pr_notice(" HRWS returned %x", (int)hwrs_result); + pr_notice(" HWRS returned %x", (int)hwrs_result); if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL)) asus->have_rsts = true; diff --git a/trunk/drivers/platform/x86/asus-wmi.c b/trunk/drivers/platform/x86/asus-wmi.c index c7a36f6b0580..c0e9ff489b24 100644 --- a/trunk/drivers/platform/x86/asus-wmi.c +++ b/trunk/drivers/platform/x86/asus-wmi.c @@ -47,9 +47,7 @@ #include #include #include -#ifdef CONFIG_ACPI_VIDEO #include -#endif #include "asus-wmi.h" @@ -101,6 +99,7 @@ MODULE_LICENSE("GPL"); #define ASUS_WMI_DEVID_WIRELESS_LED 0x00010002 #define ASUS_WMI_DEVID_CWAP 0x00010003 #define ASUS_WMI_DEVID_WLAN 0x00010011 +#define ASUS_WMI_DEVID_WLAN_LED 0x00010012 #define ASUS_WMI_DEVID_BLUETOOTH 0x00010013 #define ASUS_WMI_DEVID_GPS 0x00010015 #define ASUS_WMI_DEVID_WIMAX 0x00010017 @@ -731,8 +730,21 @@ static int asus_rfkill_set(void *data, bool blocked) { struct asus_rfkill *priv = data; u32 ctrl_param = !blocked; + u32 dev_id = priv->dev_id; - return asus_wmi_set_devstate(priv->dev_id, ctrl_param, NULL); + /* + * If the user bit is set, BIOS can't set and record the wlan status, + * it will report the value read from id ASUS_WMI_DEVID_WLAN_LED + * while we query the wlan status through WMI(ASUS_WMI_DEVID_WLAN). + * So, we have to record wlan status in id ASUS_WMI_DEVID_WLAN_LED + * while setting the wlan status through WMI. + * This is also the behavior that windows app will do. 
+ */ + if ((dev_id == ASUS_WMI_DEVID_WLAN) && + priv->asus->driver->wlan_ctrl_by_user) + dev_id = ASUS_WMI_DEVID_WLAN_LED; + + return asus_wmi_set_devstate(dev_id, ctrl_param, NULL); } static void asus_rfkill_query(struct rfkill *rfkill, void *data) @@ -1653,6 +1665,7 @@ static int asus_wmi_add(struct platform_device *pdev) struct asus_wmi *asus; acpi_status status; int err; + u32 result; asus = kzalloc(sizeof(struct asus_wmi), GFP_KERNEL); if (!asus) @@ -1689,10 +1702,8 @@ static int asus_wmi_add(struct platform_device *pdev) if (asus->driver->quirks->wmi_backlight_power) acpi_video_dmi_promote_vendor(); if (!acpi_video_backlight_support()) { -#ifdef CONFIG_ACPI_VIDEO pr_info("Disabling ACPI video driver\n"); acpi_video_unregister(); -#endif err = asus_wmi_backlight_init(asus); if (err && err != -ENODEV) goto fail_backlight; @@ -1711,6 +1722,10 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_debugfs; + asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result); + if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT)) + asus->driver->wlan_ctrl_by_user = 1; + return 0; fail_debugfs: diff --git a/trunk/drivers/platform/x86/asus-wmi.h b/trunk/drivers/platform/x86/asus-wmi.h index 9c1da8b81bea..4c9bd38bb0a2 100644 --- a/trunk/drivers/platform/x86/asus-wmi.h +++ b/trunk/drivers/platform/x86/asus-wmi.h @@ -46,6 +46,7 @@ struct quirk_entry { struct asus_wmi_driver { int brightness; int panel_power; + int wlan_ctrl_by_user; const char *name; struct module *owner; diff --git a/trunk/drivers/platform/x86/classmate-laptop.c b/trunk/drivers/platform/x86/classmate-laptop.c index 2ca7dd1ab3e4..c87ff16873f9 100644 --- a/trunk/drivers/platform/x86/classmate-laptop.c +++ b/trunk/drivers/platform/x86/classmate-laptop.c @@ -350,6 +350,7 @@ static void cmpc_accel_idev_init_v4(struct input_dev *inputdev) inputdev->close = cmpc_accel_close_v4; } +#ifdef CONFIG_PM_SLEEP static int cmpc_accel_suspend_v4(struct device *dev) { struct input_dev *inputdev; @@ -384,6 +385,7 @@ static int cmpc_accel_resume_v4(struct device *dev) return 0; } +#endif static int cmpc_accel_add_v4(struct acpi_device *acpi) { @@ -723,8 +725,10 @@ static void cmpc_tablet_handler(struct acpi_device *dev, u32 event) struct input_dev *inputdev = dev_get_drvdata(&dev->dev); if (event == 0x81) { - if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(dev->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } } } @@ -737,8 +741,10 @@ static void cmpc_tablet_idev_init(struct input_dev *inputdev) set_bit(SW_TABLET_MODE, inputdev->swbit); acpi = to_acpi_device(inputdev->dev.parent); - if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(acpi->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } } static int cmpc_tablet_add(struct acpi_device *acpi) @@ -752,15 +758,19 @@ static int cmpc_tablet_remove(struct acpi_device *acpi, int type) return cmpc_remove_acpi_notify_device(acpi); } +#ifdef CONFIG_PM_SLEEP static int cmpc_tablet_resume(struct device *dev) { struct input_dev *inputdev = dev_get_drvdata(dev); unsigned long long val = 0; - if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) + if (ACPI_SUCCESS(cmpc_get_tablet(to_acpi_device(dev)->handle, &val))) { input_report_switch(inputdev, SW_TABLET_MODE, !val); + input_sync(inputdev); + } return 0; } +#endif static SIMPLE_DEV_PM_OPS(cmpc_tablet_pm, NULL, cmpc_tablet_resume); diff 
--git a/trunk/drivers/platform/x86/dell-laptop.c b/trunk/drivers/platform/x86/dell-laptop.c index 4e96e8c0b60f..927c33af67ec 100644 --- a/trunk/drivers/platform/x86/dell-laptop.c +++ b/trunk/drivers/platform/x86/dell-laptop.c @@ -211,7 +211,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5420", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5420"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5420"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -220,7 +220,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5520"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5520"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -229,7 +229,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 5720", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 5720"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5720"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -238,7 +238,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7420", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7420"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7420"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -247,7 +247,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7520"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"), }, .driver_data = &quirk_dell_vostro_v130, }, @@ -256,7 +256,7 @@ static struct dmi_system_id __devinitdata dell_quirks[] = { .ident = "Dell Inspiron 7720", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "Isnpiron 7720"), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7720"), }, .driver_data = &quirk_dell_vostro_v130, }, diff --git a/trunk/drivers/platform/x86/eeepc-laptop.c b/trunk/drivers/platform/x86/eeepc-laptop.c index dab91b48d22c..5ca264179f4e 100644 --- a/trunk/drivers/platform/x86/eeepc-laptop.c +++ b/trunk/drivers/platform/x86/eeepc-laptop.c @@ -610,12 +610,12 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) if (!bus) { pr_warn("Unable to find PCI bus 1?\n"); - goto out_unlock; + goto out_put_dev; } if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) { pr_err("Unable to read PCI config space?\n"); - goto out_unlock; + goto out_put_dev; } absent = (l == 0xffffffff); @@ -627,7 +627,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) absent ? 
"absent" : "present"); pr_warn("skipped wireless hotplug as probably " "inappropriate for this model\n"); - goto out_unlock; + goto out_put_dev; } if (!blocked) { @@ -635,7 +635,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) if (dev) { /* Device already present */ pci_dev_put(dev); - goto out_unlock; + goto out_put_dev; } dev = pci_scan_single_device(bus, 0); if (dev) { @@ -650,6 +650,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle) pci_dev_put(dev); } } +out_put_dev: + pci_dev_put(port); } out_unlock: diff --git a/trunk/drivers/platform/x86/fujitsu-tablet.c b/trunk/drivers/platform/x86/fujitsu-tablet.c index d2e41735a47b..7acae3f85f3b 100644 --- a/trunk/drivers/platform/x86/fujitsu-tablet.c +++ b/trunk/drivers/platform/x86/fujitsu-tablet.c @@ -440,11 +440,13 @@ static int __devexit acpi_fujitsu_remove(struct acpi_device *adev, int type) return 0; } +#ifdef CONFIG_PM_SLEEP static int acpi_fujitsu_resume(struct device *dev) { fujitsu_reset(); return 0; } +#endif static SIMPLE_DEV_PM_OPS(acpi_fujitsu_pm, NULL, acpi_fujitsu_resume); diff --git a/trunk/drivers/platform/x86/hdaps.c b/trunk/drivers/platform/x86/hdaps.c index d9ab6f64dcec..777c7e3dda51 100644 --- a/trunk/drivers/platform/x86/hdaps.c +++ b/trunk/drivers/platform/x86/hdaps.c @@ -305,10 +305,12 @@ static int hdaps_probe(struct platform_device *dev) return 0; } +#ifdef CONFIG_PM_SLEEP static int hdaps_resume(struct device *dev) { return hdaps_device_init(); } +#endif static SIMPLE_DEV_PM_OPS(hdaps_pm, NULL, hdaps_resume); diff --git a/trunk/drivers/platform/x86/hp_accel.c b/trunk/drivers/platform/x86/hp_accel.c index f4d91154ad67..6b9af989632b 100644 --- a/trunk/drivers/platform/x86/hp_accel.c +++ b/trunk/drivers/platform/x86/hp_accel.c @@ -352,7 +352,7 @@ static int lis3lv02d_remove(struct acpi_device *device, int type) } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int lis3lv02d_suspend(struct device *dev) { /* make sure the device is off when we suspend */ diff --git a/trunk/drivers/platform/x86/ideapad-laptop.c b/trunk/drivers/platform/x86/ideapad-laptop.c index 17f6dfd8dbfb..dae7abe1d711 100644 --- a/trunk/drivers/platform/x86/ideapad-laptop.c +++ b/trunk/drivers/platform/x86/ideapad-laptop.c @@ -36,6 +36,7 @@ #include #include #include +#include #define IDEAPAD_RFKILL_DEV_NUM (3) @@ -63,8 +64,11 @@ enum { VPCCMD_R_3G, VPCCMD_W_3G, VPCCMD_R_ODD, /* 0x21 */ - VPCCMD_R_RF = 0x23, + VPCCMD_W_FAN, + VPCCMD_R_RF, VPCCMD_W_RF, + VPCCMD_R_FAN = 0x2B, + VPCCMD_R_SPECIAL_BUTTONS = 0x31, VPCCMD_W_BL_POWER = 0x33, }; @@ -356,14 +360,46 @@ static ssize_t store_ideapad_cam(struct device *dev, return -EINVAL; ret = write_ec_cmd(ideapad_handle, VPCCMD_W_CAMERA, state); if (ret < 0) - return ret; + return -EIO; return count; } static DEVICE_ATTR(camera_power, 0644, show_ideapad_cam, store_ideapad_cam); +static ssize_t show_ideapad_fan(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + unsigned long result; + + if (read_ec_data(ideapad_handle, VPCCMD_R_FAN, &result)) + return sprintf(buf, "-1\n"); + return sprintf(buf, "%lu\n", result); +} + +static ssize_t store_ideapad_fan(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int ret, state; + + if (!count) + return 0; + if (sscanf(buf, "%i", &state) != 1) + return -EINVAL; + if (state < 0 || state > 4 || state == 3) + return -EINVAL; + ret = write_ec_cmd(ideapad_handle, VPCCMD_W_FAN, state); + if (ret < 0) + return -EIO; + return count; +} + +static 
DEVICE_ATTR(fan_mode, 0644, show_ideapad_fan, store_ideapad_fan); + static struct attribute *ideapad_attributes[] = { &dev_attr_camera_power.attr, + &dev_attr_fan_mode.attr, NULL }; @@ -377,7 +413,10 @@ static umode_t ideapad_is_visible(struct kobject *kobj, if (attr == &dev_attr_camera_power.attr) supported = test_bit(CFG_CAMERA_BIT, &(priv->cfg)); - else + else if (attr == &dev_attr_fan_mode.attr) { + unsigned long value; + supported = !read_ec_data(ideapad_handle, VPCCMD_R_FAN, &value); + } else supported = true; return supported ? attr->mode : 0; @@ -518,9 +557,15 @@ static void ideapad_platform_exit(struct ideapad_private *priv) */ static const struct key_entry ideapad_keymap[] = { { KE_KEY, 6, { KEY_SWITCHVIDEOMODE } }, + { KE_KEY, 7, { KEY_CAMERA } }, + { KE_KEY, 11, { KEY_F16 } }, { KE_KEY, 13, { KEY_WLAN } }, { KE_KEY, 16, { KEY_PROG1 } }, { KE_KEY, 17, { KEY_PROG2 } }, + { KE_KEY, 64, { KEY_PROG3 } }, + { KE_KEY, 65, { KEY_PROG4 } }, + { KE_KEY, 66, { KEY_TOUCHPAD_OFF } }, + { KE_KEY, 67, { KEY_TOUCHPAD_ON } }, { KE_END, 0 }, }; @@ -587,6 +632,28 @@ static void ideapad_input_novokey(struct ideapad_private *priv) ideapad_input_report(priv, 16); } +static void ideapad_check_special_buttons(struct ideapad_private *priv) +{ + unsigned long bit, value; + + read_ec_data(ideapad_handle, VPCCMD_R_SPECIAL_BUTTONS, &value); + + for (bit = 0; bit < 16; bit++) { + if (test_bit(bit, &value)) { + switch (bit) { + case 6: + /* Thermal Management button */ + ideapad_input_report(priv, 65); + break; + case 1: + /* OneKey Theater button */ + ideapad_input_report(priv, 64); + break; + } + } + } +} + /* * backlight */ @@ -691,6 +758,24 @@ static const struct acpi_device_id ideapad_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, ideapad_device_ids); +static void ideapad_sync_touchpad_state(struct acpi_device *adevice) +{ + struct ideapad_private *priv = dev_get_drvdata(&adevice->dev); + unsigned long value; + + /* Without reading from EC touchpad LED doesn't switch state */ + if (!read_ec_data(adevice->handle, VPCCMD_R_TOUCHPAD, &value)) { + /* Some IdeaPads don't really turn off touchpad - they only + * switch the LED state. We (de)activate KBC AUX port to turn + * touchpad off and on. We send KEY_TOUCHPAD_OFF and + * KEY_TOUCHPAD_ON to not to get out of sync with LED */ + unsigned char param; + i8042_command(¶m, value ? I8042_CMD_AUX_ENABLE : + I8042_CMD_AUX_DISABLE); + ideapad_input_report(priv, value ? 
67 : 66); + } +} + static int __devinit ideapad_acpi_add(struct acpi_device *adevice) { int ret, i; @@ -727,6 +812,7 @@ static int __devinit ideapad_acpi_add(struct acpi_device *adevice) priv->rfk[i] = NULL; } ideapad_sync_rfk_state(priv); + ideapad_sync_touchpad_state(adevice); if (!acpi_video_backlight_support()) { ret = ideapad_backlight_init(priv); @@ -785,9 +871,14 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) ideapad_sync_rfk_state(priv); break; case 13: + case 11: + case 7: case 6: ideapad_input_report(priv, vpc_bit); break; + case 5: + ideapad_sync_touchpad_state(adevice); + break; case 4: ideapad_backlight_notify_brightness(priv); break; @@ -797,6 +888,9 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) case 2: ideapad_backlight_notify_power(priv); break; + case 0: + ideapad_check_special_buttons(priv); + break; default: pr_info("Unknown event: %lu\n", vpc_bit); } @@ -804,6 +898,15 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event) } } +static int ideapad_acpi_resume(struct device *device) +{ + ideapad_sync_rfk_state(ideapad_priv); + ideapad_sync_touchpad_state(to_acpi_device(device)); + return 0; +} + +static SIMPLE_DEV_PM_OPS(ideapad_pm, NULL, ideapad_acpi_resume); + static struct acpi_driver ideapad_acpi_driver = { .name = "ideapad_acpi", .class = "IdeaPad", @@ -811,6 +914,7 @@ static struct acpi_driver ideapad_acpi_driver = { .ops.add = ideapad_acpi_add, .ops.remove = ideapad_acpi_remove, .ops.notify = ideapad_acpi_notify, + .drv.pm = &ideapad_pm, .owner = THIS_MODULE, }; diff --git a/trunk/drivers/platform/x86/msi-laptop.c b/trunk/drivers/platform/x86/msi-laptop.c index f64441844317..2111dbb7e1e3 100644 --- a/trunk/drivers/platform/x86/msi-laptop.c +++ b/trunk/drivers/platform/x86/msi-laptop.c @@ -85,7 +85,9 @@ #define MSI_STANDARD_EC_TOUCHPAD_ADDRESS 0xe4 #define MSI_STANDARD_EC_TOUCHPAD_MASK (1 << 4) +#ifdef CONFIG_PM_SLEEP static int msi_laptop_resume(struct device *device); +#endif static SIMPLE_DEV_PM_OPS(msi_laptop_pm, NULL, msi_laptop_resume); #define MSI_STANDARD_EC_DEVICES_EXISTS_ADDRESS 0x2f @@ -753,6 +755,7 @@ static int rfkill_init(struct platform_device *sdev) return retval; } +#ifdef CONFIG_PM_SLEEP static int msi_laptop_resume(struct device *device) { u8 data; @@ -773,6 +776,7 @@ static int msi_laptop_resume(struct device *device) return 0; } +#endif static int __init msi_laptop_input_setup(void) { diff --git a/trunk/drivers/platform/x86/panasonic-laptop.c b/trunk/drivers/platform/x86/panasonic-laptop.c index 24480074bcf0..8e8caa767d6a 100644 --- a/trunk/drivers/platform/x86/panasonic-laptop.c +++ b/trunk/drivers/platform/x86/panasonic-laptop.c @@ -188,7 +188,9 @@ static const struct acpi_device_id pcc_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, pcc_device_ids); +#ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(acpi_pcc_hotkey_pm, NULL, acpi_pcc_hotkey_resume); static struct acpi_driver acpi_pcc_driver = { @@ -540,6 +542,7 @@ static void acpi_pcc_destroy_input(struct pcc_acpi *pcc) /* kernel module interface */ +#ifdef CONFIG_PM_SLEEP static int acpi_pcc_hotkey_resume(struct device *dev) { struct pcc_acpi *pcc; @@ -556,6 +559,7 @@ static int acpi_pcc_hotkey_resume(struct device *dev) return acpi_pcc_write_sset(pcc, SINF_STICKY_KEY, pcc->sticky_mode); } +#endif static int acpi_pcc_hotkey_add(struct acpi_device *device) { diff --git a/trunk/drivers/platform/x86/samsung-laptop.c b/trunk/drivers/platform/x86/samsung-laptop.c 
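Several hunks in this patch (classmate-laptop, fujitsu-tablet, hdaps, msi-laptop and panasonic-laptop above, and sony-laptop, thinkpad_acpi, toshiba_acpi, toshiba_bluetooth and xo15-ebook below) apply the same fix: suspend/resume handlers passed to SIMPLE_DEV_PM_OPS() are wrapped in #ifdef CONFIG_PM_SLEEP, since the macro only references them when CONFIG_PM_SLEEP is enabled and an unguarded definition otherwise triggers a "defined but not used" warning. A schematic sketch of the pattern, using a hypothetical foo driver rather than code from this patch:

#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
static int foo_resume(struct device *dev)
{
	/* re-program whatever hardware state is lost across suspend */
	return 0;
}
#endif

/*
 * SIMPLE_DEV_PM_OPS() only wires up foo_resume when CONFIG_PM_SLEEP is
 * set, so defining the callback unconditionally leaves it unused.
 */
static SIMPLE_DEV_PM_OPS(foo_pm, NULL, foo_resume);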
index c1ca7bcebb66..dd90d15f5210 100644 --- a/trunk/drivers/platform/x86/samsung-laptop.c +++ b/trunk/drivers/platform/x86/samsung-laptop.c @@ -26,9 +26,7 @@ #include #include #include -#ifdef CONFIG_ACPI_VIDEO #include -#endif /* * This driver is needed because a number of Samsung laptops do not hook @@ -1558,9 +1556,7 @@ static int __init samsung_init(void) samsung->handle_backlight = false; } else if (samsung->quirks->broken_acpi_video) { pr_info("Disabling ACPI video driver\n"); -#ifdef CONFIG_ACPI_VIDEO acpi_video_unregister(); -#endif } #endif diff --git a/trunk/drivers/platform/x86/sony-laptop.c b/trunk/drivers/platform/x86/sony-laptop.c index 9363969ad07a..daaddec68def 100644 --- a/trunk/drivers/platform/x86/sony-laptop.c +++ b/trunk/drivers/platform/x86/sony-laptop.c @@ -140,7 +140,10 @@ MODULE_PARM_DESC(kbd_backlight_timeout, "1 for 30 seconds, 2 for 60 seconds and 3 to disable timeout " "(default: 0)"); +#ifdef CONFIG_PM_SLEEP static void sony_nc_kbd_backlight_resume(void); +static void sony_nc_thermal_resume(void); +#endif static int sony_nc_kbd_backlight_setup(struct platform_device *pd, unsigned int handle); static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd); @@ -151,7 +154,6 @@ static void sony_nc_battery_care_cleanup(struct platform_device *pd); static int sony_nc_thermal_setup(struct platform_device *pd); static void sony_nc_thermal_cleanup(struct platform_device *pd); -static void sony_nc_thermal_resume(void); static int sony_nc_lid_resume_setup(struct platform_device *pd); static void sony_nc_lid_resume_cleanup(struct platform_device *pd); @@ -1431,6 +1433,7 @@ static void sony_nc_function_cleanup(struct platform_device *pd) sony_nc_handles_cleanup(pd); } +#ifdef CONFIG_PM_SLEEP static void sony_nc_function_resume(void) { unsigned int i, result, bitmask, arg; @@ -1508,6 +1511,7 @@ static int sony_nc_resume(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(sony_nc_pm, NULL, sony_nc_resume); @@ -1872,6 +1876,7 @@ static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd) } } +#ifdef CONFIG_PM_SLEEP static void sony_nc_kbd_backlight_resume(void) { int ignore = 0; @@ -1888,6 +1893,7 @@ static void sony_nc_kbd_backlight_resume(void) (kbdbl_ctl->base + 0x200) | (kbdbl_ctl->timeout << 0x10), &ignore); } +#endif struct battery_care_control { struct device_attribute attrs[2]; @@ -2210,6 +2216,7 @@ static void sony_nc_thermal_cleanup(struct platform_device *pd) } } +#ifdef CONFIG_PM_SLEEP static void sony_nc_thermal_resume(void) { unsigned int status = sony_nc_thermal_mode_get(); @@ -2217,6 +2224,7 @@ static void sony_nc_thermal_resume(void) if (status != th_handle->mode) sony_nc_thermal_mode_set(th_handle->mode); } +#endif /* resume on LID open */ struct snc_lid_resume_control { @@ -4287,6 +4295,7 @@ static int sony_pic_add(struct acpi_device *device) return result; } +#ifdef CONFIG_PM_SLEEP static int sony_pic_suspend(struct device *dev) { if (sony_pic_disable(to_acpi_device(dev))) @@ -4300,6 +4309,7 @@ static int sony_pic_resume(struct device *dev) spic_dev.cur_ioport, spic_dev.cur_irq); return 0; } +#endif static SIMPLE_DEV_PM_OPS(sony_pic_pm, sony_pic_suspend, sony_pic_resume); diff --git a/trunk/drivers/platform/x86/thinkpad_acpi.c b/trunk/drivers/platform/x86/thinkpad_acpi.c index e7f73287636c..52daaa816e53 100644 --- a/trunk/drivers/platform/x86/thinkpad_acpi.c +++ b/trunk/drivers/platform/x86/thinkpad_acpi.c @@ -545,7 +545,7 @@ TPACPI_HANDLE(hkey, ec, "\\_SB.HKEY", /* 600e/x, 770e, 770x */ */ static int acpi_evalf(acpi_handle 
handle, - void *res, char *method, char *fmt, ...) + int *res, char *method, char *fmt, ...) { char *fmt0 = fmt; struct acpi_object_list params; @@ -606,7 +606,7 @@ static int acpi_evalf(acpi_handle handle, success = (status == AE_OK && out_obj.type == ACPI_TYPE_INTEGER); if (success && res) - *(int *)res = out_obj.integer.value; + *res = out_obj.integer.value; break; case 'v': /* void */ success = status == AE_OK; @@ -922,6 +922,7 @@ static struct input_dev *tpacpi_inputdev; static struct mutex tpacpi_inputdev_send_mutex; static LIST_HEAD(tpacpi_all_drivers); +#ifdef CONFIG_PM_SLEEP static int tpacpi_suspend_handler(struct device *dev) { struct ibm_struct *ibm, *itmp; @@ -949,6 +950,7 @@ static int tpacpi_resume_handler(struct device *dev) return 0; } +#endif static SIMPLE_DEV_PM_OPS(tpacpi_pm, tpacpi_suspend_handler, tpacpi_resume_handler); @@ -7384,17 +7386,18 @@ static int fan_get_status(u8 *status) * Add TPACPI_FAN_RD_ACPI_FANS ? */ switch (fan_status_access_mode) { - case TPACPI_FAN_RD_ACPI_GFAN: + case TPACPI_FAN_RD_ACPI_GFAN: { /* 570, 600e/x, 770e, 770x */ + int res; - if (unlikely(!acpi_evalf(gfan_handle, &s, NULL, "d"))) + if (unlikely(!acpi_evalf(gfan_handle, &res, NULL, "d"))) return -EIO; if (likely(status)) - *status = s & 0x07; + *status = res & 0x07; break; - + } case TPACPI_FAN_RD_TPEC: /* all except 570, 600e/x, 770e, 770x */ if (unlikely(!acpi_ec_read(fan_status_offset, &s))) @@ -8662,6 +8665,13 @@ static int __must_check __init get_thinkpad_model_data( tp->model_str = kstrdup(s, GFP_KERNEL); if (!tp->model_str) return -ENOMEM; + } else { + s = dmi_get_system_info(DMI_BIOS_VENDOR); + if (s && !(strnicmp(s, "Lenovo", 6))) { + tp->model_str = kstrdup(s, GFP_KERNEL); + if (!tp->model_str) + return -ENOMEM; + } } s = dmi_get_system_info(DMI_PRODUCT_NAME); diff --git a/trunk/drivers/platform/x86/toshiba_acpi.c b/trunk/drivers/platform/x86/toshiba_acpi.c index c13ba5bac93f..5f1256d5e933 100644 --- a/trunk/drivers/platform/x86/toshiba_acpi.c +++ b/trunk/drivers/platform/x86/toshiba_acpi.c @@ -1296,6 +1296,7 @@ static void toshiba_acpi_notify(struct acpi_device *acpi_dev, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int toshiba_acpi_suspend(struct device *device) { struct toshiba_acpi_dev *dev = acpi_driver_data(to_acpi_device(device)); @@ -1317,6 +1318,7 @@ static int toshiba_acpi_resume(struct device *device) return 0; } +#endif static SIMPLE_DEV_PM_OPS(toshiba_acpi_pm, toshiba_acpi_suspend, toshiba_acpi_resume); diff --git a/trunk/drivers/platform/x86/toshiba_bluetooth.c b/trunk/drivers/platform/x86/toshiba_bluetooth.c index 715a43cb5e3c..5e5d6317d690 100644 --- a/trunk/drivers/platform/x86/toshiba_bluetooth.c +++ b/trunk/drivers/platform/x86/toshiba_bluetooth.c @@ -41,7 +41,9 @@ static const struct acpi_device_id bt_device_ids[] = { }; MODULE_DEVICE_TABLE(acpi, bt_device_ids); +#ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev); +#endif static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume); static struct acpi_driver toshiba_bt_rfkill_driver = { @@ -90,10 +92,12 @@ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event) toshiba_bluetooth_enable(device->handle); } +#ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev) { return toshiba_bluetooth_enable(to_acpi_device(dev)->handle); } +#endif static int toshiba_bt_rfkill_add(struct acpi_device *device) { diff --git a/trunk/drivers/platform/x86/xo15-ebook.c b/trunk/drivers/platform/x86/xo15-ebook.c index 849c07c13bf6..38ba39d7ca7d 100644 --- 
a/trunk/drivers/platform/x86/xo15-ebook.c +++ b/trunk/drivers/platform/x86/xo15-ebook.c @@ -77,10 +77,12 @@ static void ebook_switch_notify(struct acpi_device *device, u32 event) } } +#ifdef CONFIG_PM_SLEEP static int ebook_switch_resume(struct device *dev) { return ebook_send_state(to_acpi_device(dev)); } +#endif static SIMPLE_DEV_PM_OPS(ebook_switch_pm, NULL, ebook_switch_resume); diff --git a/trunk/drivers/pwm/Kconfig b/trunk/drivers/pwm/Kconfig index 8fc3808d7a3e..90c5c7357a50 100644 --- a/trunk/drivers/pwm/Kconfig +++ b/trunk/drivers/pwm/Kconfig @@ -1,12 +1,31 @@ menuconfig PWM - bool "PWM Support" + bool "Pulse-Width Modulation (PWM) Support" depends on !MACH_JZ4740 && !PUV3_PWM help - This enables PWM support through the generic PWM framework. - You only need to enable this, if you also want to enable - one or more of the PWM drivers below. - - If unsure, say N. + Generic Pulse-Width Modulation (PWM) support. + + In Pulse-Width Modulation, a variation of the width of pulses + in a rectangular pulse signal is used as a means to alter the + average power of the signal. Applications include efficient + power delivery and voltage regulation. In computer systems, + PWMs are commonly used to control fans or the brightness of + display backlights. + + This framework provides a generic interface to PWM devices + within the Linux kernel. On the driver side it provides an API + to register and unregister a PWM chip, an abstraction of a PWM + controller, that supports one or more PWM devices. Client + drivers can request PWM devices and use the generic framework + to configure as well as enable and disable them. + + This generic framework replaces the legacy PWM framework which + allows only a single driver implementing the required API. Not + all legacy implementations have been ported to the framework + yet. The framework provides an API that is backward compatible + with the legacy framework so that existing client drivers + continue to work as expected. + + If unsure, say no. if PWM diff --git a/trunk/drivers/pwm/core.c b/trunk/drivers/pwm/core.c index ecb76909e946..c6e05078d3ad 100644 --- a/trunk/drivers/pwm/core.c +++ b/trunk/drivers/pwm/core.c @@ -129,8 +129,8 @@ static int pwm_device_request(struct pwm_device *pwm, const char *label) return 0; } -static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc, - const struct of_phandle_args *args) +static struct pwm_device * +of_pwm_simple_xlate(struct pwm_chip *pc, const struct of_phandle_args *args) { struct pwm_device *pwm; @@ -149,7 +149,7 @@ static struct pwm_device *of_pwm_simple_xlate(struct pwm_chip *pc, return pwm; } -void of_pwmchip_add(struct pwm_chip *chip) +static void of_pwmchip_add(struct pwm_chip *chip) { if (!chip->dev || !chip->dev->of_node) return; @@ -162,7 +162,7 @@ void of_pwmchip_add(struct pwm_chip *chip) of_node_get(chip->dev->of_node); } -void of_pwmchip_remove(struct pwm_chip *chip) +static void of_pwmchip_remove(struct pwm_chip *chip) { if (chip->dev && chip->dev->of_node) of_node_put(chip->dev->of_node); @@ -527,7 +527,7 @@ void __init pwm_add_table(struct pwm_lookup *table, size_t num) struct pwm_device *pwm_get(struct device *dev, const char *con_id) { struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); - const char *dev_id = dev ? dev_name(dev): NULL; + const char *dev_id = dev ? 
dev_name(dev) : NULL; struct pwm_chip *chip = NULL; unsigned int index = 0; unsigned int best = 0; @@ -609,7 +609,7 @@ void pwm_put(struct pwm_device *pwm) mutex_lock(&pwm_lock); if (!test_and_clear_bit(PWMF_REQUESTED, &pwm->flags)) { - pr_warning("PWM device already freed\n"); + pr_warn("PWM device already freed\n"); goto out; } diff --git a/trunk/drivers/pwm/pwm-samsung.c b/trunk/drivers/pwm/pwm-samsung.c index d10386528c9c..e5187c0ade9f 100644 --- a/trunk/drivers/pwm/pwm-samsung.c +++ b/trunk/drivers/pwm/pwm-samsung.c @@ -225,6 +225,7 @@ static int s3c_pwm_probe(struct platform_device *pdev) /* calculate base of control bits in TCON */ s3c->tcon_base = id == 0 ? 0 : (id * 4) + 4; + s3c->chip.dev = &pdev->dev; s3c->chip.ops = &s3c_pwm_ops; s3c->chip.base = -1; s3c->chip.npwm = 1; diff --git a/trunk/drivers/pwm/pwm-tegra.c b/trunk/drivers/pwm/pwm-tegra.c index 02ce18d5e49a..057465e0553c 100644 --- a/trunk/drivers/pwm/pwm-tegra.c +++ b/trunk/drivers/pwm/pwm-tegra.c @@ -187,10 +187,8 @@ static int tegra_pwm_probe(struct platform_device *pdev) } pwm->mmio_base = devm_request_and_ioremap(&pdev->dev, r); - if (!pwm->mmio_base) { - dev_err(&pdev->dev, "failed to ioremap() region\n"); + if (!pwm->mmio_base) return -EADDRNOTAVAIL; - } platform_set_drvdata(pdev, pwm); diff --git a/trunk/drivers/pwm/pwm-tiecap.c b/trunk/drivers/pwm/pwm-tiecap.c index 3c2ad284ee3e..4b6688909fee 100644 --- a/trunk/drivers/pwm/pwm-tiecap.c +++ b/trunk/drivers/pwm/pwm-tiecap.c @@ -100,6 +100,13 @@ static int ecap_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, writel(period_cycles, pc->mmio_base + CAP3); } + if (!test_bit(PWMF_ENABLED, &pwm->flags)) { + reg_val = readw(pc->mmio_base + ECCTL2); + /* Disable APWM mode to put APWM output Low */ + reg_val &= ~ECCTL2_APWM_MODE; + writew(reg_val, pc->mmio_base + ECCTL2); + } + pm_runtime_put_sync(pc->chip.dev); return 0; } @@ -192,10 +199,8 @@ static int __devinit ecap_pwm_probe(struct platform_device *pdev) } pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); - if (!pc->mmio_base) { - dev_err(&pdev->dev, "failed to ioremap() registers\n"); + if (!pc->mmio_base) return -EADDRNOTAVAIL; - } ret = pwmchip_add(&pc->chip); if (ret < 0) { diff --git a/trunk/drivers/pwm/pwm-tiehrpwm.c b/trunk/drivers/pwm/pwm-tiehrpwm.c index 010d232cb0c8..b1996bcd5b78 100644 --- a/trunk/drivers/pwm/pwm-tiehrpwm.c +++ b/trunk/drivers/pwm/pwm-tiehrpwm.c @@ -104,6 +104,7 @@ struct ehrpwm_pwm_chip { struct pwm_chip chip; unsigned int clk_rate; void __iomem *mmio_base; + unsigned long period_cycles[NUM_PWM_CHANNEL]; }; static inline struct ehrpwm_pwm_chip *to_ehrpwm_pwm_chip(struct pwm_chip *chip) @@ -210,6 +211,7 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long long c; unsigned long period_cycles, duty_cycles; unsigned short ps_divval, tb_divval; + int i; if (period_ns < 0 || duty_ns < 0 || period_ns > NSEC_PER_SEC) return -ERANGE; @@ -229,6 +231,28 @@ static int ehrpwm_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, duty_cycles = (unsigned long)c; } + /* + * Period values should be same for multiple PWM channels as IP uses + * same period register for multiple channels. + */ + for (i = 0; i < NUM_PWM_CHANNEL; i++) { + if (pc->period_cycles[i] && + (pc->period_cycles[i] != period_cycles)) { + /* + * Allow channel to reconfigure period if no other + * channels being configured. 
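The comment above (its hunk continues just below) captures the tiehrpwm constraint: all channels of an EHRPWM instance share one period register, so a configuration request whose period differs from an already-configured sibling channel must be rejected, while a channel may still change its own period when no sibling holds one. A minimal standalone model of that check, assuming the instance has two channels (the value of NUM_PWM_CHANNEL is not shown in this hunk):

#include <stdio.h>

#define NUM_PWM_CHANNEL 2

/* period_cycles[i] == 0 means channel i is not configured. */
static int check_period(unsigned long period_cycles[NUM_PWM_CHANNEL],
			int hwpwm, unsigned long new_period)
{
	int i;

	for (i = 0; i < NUM_PWM_CHANNEL; i++) {
		if (period_cycles[i] && period_cycles[i] != new_period) {
			if (i == hwpwm)
				continue; /* only this channel holds a period */
			return -1; /* conflicts with channel i */
		}
	}
	period_cycles[hwpwm] = new_period;
	return 0;
}

int main(void)
{
	unsigned long periods[NUM_PWM_CHANNEL] = { 0, 0 };

	printf("%d\n", check_period(periods, 0, 1000)); /* 0: first user */
	printf("%d\n", check_period(periods, 1, 2000)); /* -1: conflict */
	printf("%d\n", check_period(periods, 1, 1000)); /* 0: same period */
	return 0;
}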
+ */ + if (i == pwm->hwpwm) + continue; + + dev_err(chip->dev, "Period value conflicts with channel %d\n", + i); + return -EINVAL; + } + } + + pc->period_cycles[pwm->hwpwm] = period_cycles; + /* Configure clock prescaler to support Low frequency PWM wave */ if (set_prescale_div(period_cycles/PERIOD_MAX, &ps_divval, &tb_divval)) { @@ -320,10 +344,15 @@ static void ehrpwm_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) static void ehrpwm_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { + struct ehrpwm_pwm_chip *pc = to_ehrpwm_pwm_chip(chip); + if (test_bit(PWMF_ENABLED, &pwm->flags)) { dev_warn(chip->dev, "Removing PWM device without disabling\n"); pm_runtime_put_sync(chip->dev); } + + /* set period value to zero on free */ + pc->period_cycles[pwm->hwpwm] = 0; } static const struct pwm_ops ehrpwm_pwm_ops = { @@ -371,10 +400,8 @@ static int __devinit ehrpwm_pwm_probe(struct platform_device *pdev) } pc->mmio_base = devm_request_and_ioremap(&pdev->dev, r); - if (!pc->mmio_base) { - dev_err(&pdev->dev, "failed to ioremap() registers\n"); + if (!pc->mmio_base) return -EADDRNOTAVAIL; - } ret = pwmchip_add(&pc->chip); if (ret < 0) { diff --git a/trunk/drivers/pwm/pwm-vt8500.c b/trunk/drivers/pwm/pwm-vt8500.c index 548021439f0c..ad14389b7144 100644 --- a/trunk/drivers/pwm/pwm-vt8500.c +++ b/trunk/drivers/pwm/pwm-vt8500.c @@ -41,7 +41,7 @@ static inline void pwm_busy_wait(void __iomem *reg, u8 bitmask) cpu_relax(); if (unlikely(!loops)) - pr_warning("Waiting for status bits 0x%x to clear timed out\n", + pr_warn("Waiting for status bits 0x%x to clear timed out\n", bitmask); } diff --git a/trunk/drivers/rapidio/devices/tsi721.c b/trunk/drivers/rapidio/devices/tsi721.c index 722246cf20ab..5d44252b7342 100644 --- a/trunk/drivers/rapidio/devices/tsi721.c +++ b/trunk/drivers/rapidio/devices/tsi721.c @@ -435,6 +435,9 @@ static void tsi721_db_dpc(struct work_struct *work) " info %4.4x\n", DBELL_SID(idb.bytes), DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); } + + wr_ptr = ioread32(priv->regs + + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; } iowrite32(rd_ptr & (IDB_QSIZE - 1), @@ -445,6 +448,10 @@ static void tsi721_db_dpc(struct work_struct *work) regval |= TSI721_SR_CHINT_IDBQRCV; iowrite32(regval, priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + if (wr_ptr != rd_ptr) + schedule_work(&priv->idb_work); } /** @@ -2212,7 +2219,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tsi721_device *priv; - int i, cap; + int cap; int err; u32 regval; @@ -2232,12 +2239,15 @@ static int __devinit tsi721_probe(struct pci_dev *pdev, priv->pdev = pdev; #ifdef DEBUG + { + int i; for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", i, (unsigned long long)pci_resource_start(pdev, i), (unsigned long)pci_resource_len(pdev, i), pci_resource_flags(pdev, i)); } + } #endif /* * Verify BAR configuration diff --git a/trunk/drivers/regulator/Kconfig b/trunk/drivers/regulator/Kconfig index 4e932cc695e9..e98a5e7827df 100644 --- a/trunk/drivers/regulator/Kconfig +++ b/trunk/drivers/regulator/Kconfig @@ -33,9 +33,8 @@ config REGULATOR_DUMMY help If this option is enabled then when a regulator lookup fails and the board has not specified that it has provided full - constraints then the regulator core will provide an always - enabled dummy regulator will be provided, allowing consumer - drivers to continue. 
+ constraints the regulator core will provide an always + enabled dummy regulator, allowing consumer drivers to continue. A warning will be generated when this substitution is done. @@ -50,11 +49,11 @@ config REGULATOR_VIRTUAL_CONSUMER tristate "Virtual regulator consumer support" help This driver provides a virtual consumer for the voltage and - current regulator API which provides sysfs controls for - configuring the supplies requested. This is mainly useful - for test purposes. + current regulator API which provides sysfs controls for + configuring the supplies requested. This is mainly useful + for test purposes. - If unsure, say no. + If unsure, say no. config REGULATOR_USERSPACE_CONSUMER tristate "Userspace regulator consumer support" @@ -63,7 +62,7 @@ config REGULATOR_USERSPACE_CONSUMER from user space. Userspace consumer driver provides ability to control power supplies for such devices. - If unsure, say no. + If unsure, say no. config REGULATOR_GPIO tristate "GPIO regulator support" @@ -110,6 +109,17 @@ config REGULATOR_DA9052 This driver supports the voltage regulators of DA9052-BC and DA9053-AA/Bx PMIC. +config REGULATOR_FAN53555 + tristate "Fairchild FAN53555 Regulator" + depends on I2C + select REGMAP_I2C + help + This driver supports Fairchild FAN53555 Digitally Programmable + TinyBuck Regulator. The FAN53555 is a step-down switching voltage + regulator that delivers a digitally programmable output from an + input voltage supply of 2.5V to 5.5V. The output voltage is + programmed through an I2C interface. + config REGULATOR_ANATOP tristate "Freescale i.MX on-chip ANATOP LDO regulators" depends on MFD_ANATOP @@ -172,6 +182,14 @@ config REGULATOR_MAX8660 This driver controls a Maxim 8660/8661 voltage output regulator via I2C bus. +config REGULATOR_MAX8907 + tristate "Maxim 8907 voltage regulator" + depends on MFD_MAX8907 + help + This driver controls a Maxim 8907 voltage output regulator + via I2C bus. The provided regulator is suitable for Tegra + chip to control Step-Down DC-DC and LDOs. + config REGULATOR_MAX8925 tristate "Maxim MAX8925 Power Management IC" depends on MFD_MAX8925 @@ -247,7 +265,7 @@ config REGULATOR_LP8788 config REGULATOR_PCF50633 tristate "NXP PCF50633 regulator driver" - depends on MFD_PCF50633 + depends on MFD_PCF50633 help Say Y here to support the voltage regulators and convertors on PCF50633 @@ -416,7 +434,7 @@ config REGULATOR_WM8350 depends on MFD_WM8350 help This driver provides support for the voltage and current regulators - of the WM8350 AudioPlus PMIC. + of the WM8350 AudioPlus PMIC. 
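The REGULATOR_FAN53555 entry above describes a buck converter whose output voltage is programmed over I2C; the driver added later in this patch treats it as a linear range (for the 00/01/03/05 options, VOUT = 0.60 V + NSELx * 10 mV over 64 selector codes). A small standalone sketch of that selector-to-voltage mapping, mirroring what the generic regulator_list_voltage_linear() helper derives from min_uV and uV_step:

#include <stdio.h>

#define FAN53555_NVOLTAGES 64

static int fan53555_sel_to_uV(unsigned int sel,
			      unsigned int vsel_min, unsigned int vsel_step)
{
	if (sel >= FAN53555_NVOLTAGES)
		return -1;
	return vsel_min + sel * vsel_step;
}

int main(void)
{
	/* 00/01/03/05 options: 600000 uV base, 10000 uV per step */
	printf("%d uV\n", fan53555_sel_to_uV(0, 600000, 10000));  /* 600000 */
	printf("%d uV\n", fan53555_sel_to_uV(63, 600000, 10000)); /* 1230000 */
	return 0;
}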
config REGULATOR_WM8400 tristate "Wolfson Microelectronics WM8400 AudioPlus PMIC" diff --git a/trunk/drivers/regulator/Makefile b/trunk/drivers/regulator/Makefile index 3342615cf25e..e431eed8a878 100644 --- a/trunk/drivers/regulator/Makefile +++ b/trunk/drivers/regulator/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_REGULATOR_DA903X) += da903x.o obj-$(CONFIG_REGULATOR_DA9052) += da9052-regulator.o obj-$(CONFIG_REGULATOR_DBX500_PRCMU) += dbx500-prcmu.o obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o +obj-$(CONFIG_REGULATOR_FAN53555) += fan53555.o obj-$(CONFIG_REGULATOR_GPIO) += gpio-regulator.o obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o @@ -30,6 +31,7 @@ obj-$(CONFIG_REGULATOR_LP8788) += lp8788-ldo.o obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o +obj-$(CONFIG_REGULATOR_MAX8907) += max8907-regulator.o obj-$(CONFIG_REGULATOR_MAX8925) += max8925-regulator.o obj-$(CONFIG_REGULATOR_MAX8952) += max8952.o obj-$(CONFIG_REGULATOR_MAX8997) += max8997.o diff --git a/trunk/drivers/regulator/aat2870-regulator.c b/trunk/drivers/regulator/aat2870-regulator.c index 6f45bfd22e83..167c93f21981 100644 --- a/trunk/drivers/regulator/aat2870-regulator.c +++ b/trunk/drivers/regulator/aat2870-regulator.c @@ -162,7 +162,7 @@ static struct aat2870_regulator *aat2870_get_regulator(int id) static int aat2870_regulator_probe(struct platform_device *pdev) { struct aat2870_regulator *ri; - struct regulator_config config = { 0 }; + struct regulator_config config = { }; struct regulator_dev *rdev; ri = aat2870_get_regulator(pdev->id); diff --git a/trunk/drivers/regulator/ab3100.c b/trunk/drivers/regulator/ab3100.c index 182b553059c9..65ad2b36ce36 100644 --- a/trunk/drivers/regulator/ab3100.c +++ b/trunk/drivers/regulator/ab3100.c @@ -347,17 +347,11 @@ static int ab3100_get_voltage_regulator_external(struct regulator_dev *reg) return abreg->plfdata->external_voltage; } -static int ab3100_get_fixed_voltage_regulator(struct regulator_dev *reg) -{ - return reg->desc->min_uV; -} - static struct regulator_ops regulator_ops_fixed = { .list_voltage = regulator_list_voltage_linear, .enable = ab3100_enable_regulator, .disable = ab3100_disable_regulator, .is_enabled = ab3100_is_enabled_regulator, - .get_voltage = ab3100_get_fixed_voltage_regulator, }; static struct regulator_ops regulator_ops_variable = { @@ -486,6 +480,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { .id = AB3100_BUCK, .ops = ®ulator_ops_variable_sleepable, .n_voltages = ARRAY_SIZE(ldo_e_buck_typ_voltages), + .volt_table = ldo_e_buck_typ_voltages, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_time = 1000, diff --git a/trunk/drivers/regulator/ab8500.c b/trunk/drivers/regulator/ab8500.c index 10f2f4d4d190..e3d1d063025a 100644 --- a/trunk/drivers/regulator/ab8500.c +++ b/trunk/drivers/regulator/ab8500.c @@ -37,6 +37,7 @@ * @voltage_bank: bank to control regulator voltage * @voltage_reg: register to control regulator voltage * @voltage_mask: mask to control regulator voltage + * @voltage_shift: shift to control regulator voltage * @delay: startup/set voltage delay in us */ struct ab8500_regulator_info { @@ -50,6 +51,7 @@ struct ab8500_regulator_info { u8 voltage_bank; u8 voltage_reg; u8 voltage_mask; + u8 voltage_shift; unsigned int delay; }; @@ -195,17 +197,14 @@ static int ab8500_regulator_get_voltage_sel(struct regulator_dev *rdev) } dev_vdbg(rdev_get_dev(rdev), - "%s-get_voltage (bank, reg, mask, value): 
0x%x, 0x%x, 0x%x," - " 0x%x\n", - info->desc.name, info->voltage_bank, info->voltage_reg, - info->voltage_mask, regval); + "%s-get_voltage (bank, reg, mask, shift, value): " + "0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n", + info->desc.name, info->voltage_bank, + info->voltage_reg, info->voltage_mask, + info->voltage_shift, regval); - /* vintcore has a different layout */ val = regval & info->voltage_mask; - if (info->desc.id == AB8500_LDO_INTCORE) - return val >> 0x3; - else - return val; + return val >> info->voltage_shift; } static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev, @@ -221,7 +220,7 @@ static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev, } /* set the registers for the request */ - regval = (u8)selector; + regval = (u8)selector << info->voltage_shift; ret = abx500_mask_and_set_register_interruptible(info->dev, info->voltage_bank, info->voltage_reg, info->voltage_mask, regval); @@ -238,13 +237,6 @@ static int ab8500_regulator_set_voltage_sel(struct regulator_dev *rdev, return ret; } -static int ab8500_regulator_enable_time(struct regulator_dev *rdev) -{ - struct ab8500_regulator_info *info = rdev_get_drvdata(rdev); - - return info->delay; -} - static int ab8500_regulator_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int old_sel, unsigned int new_sel) @@ -261,22 +253,14 @@ static struct regulator_ops ab8500_regulator_ops = { .get_voltage_sel = ab8500_regulator_get_voltage_sel, .set_voltage_sel = ab8500_regulator_set_voltage_sel, .list_voltage = regulator_list_voltage_table, - .enable_time = ab8500_regulator_enable_time, .set_voltage_time_sel = ab8500_regulator_set_voltage_time_sel, }; -static int ab8500_fixed_get_voltage(struct regulator_dev *rdev) -{ - return rdev->desc->min_uV; -} - static struct regulator_ops ab8500_regulator_fixed_ops = { .enable = ab8500_regulator_enable, .disable = ab8500_regulator_disable, .is_enabled = ab8500_regulator_is_enabled, - .get_voltage = ab8500_fixed_get_voltage, .list_voltage = regulator_list_voltage_linear, - .enable_time = ab8500_regulator_enable_time, }; static struct ab8500_regulator_info @@ -358,6 +342,7 @@ static struct ab8500_regulator_info .voltage_bank = 0x03, .voltage_reg = 0x80, .voltage_mask = 0x38, + .voltage_shift = 3, }, /* @@ -374,6 +359,7 @@ static struct ab8500_regulator_info .owner = THIS_MODULE, .n_voltages = 1, .min_uV = 2000000, + .enable_time = 10000, }, .delay = 10000, .update_bank = 0x03, diff --git a/trunk/drivers/regulator/anatop-regulator.c b/trunk/drivers/regulator/anatop-regulator.c index e9c2085f9dfb..ce0fe72a428e 100644 --- a/trunk/drivers/regulator/anatop-regulator.c +++ b/trunk/drivers/regulator/anatop-regulator.c @@ -64,14 +64,15 @@ static int anatop_set_voltage_sel(struct regulator_dev *reg, unsigned selector) static int anatop_get_voltage_sel(struct regulator_dev *reg) { struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); - u32 val; + u32 val, mask; if (!anatop_reg->control_reg) return -ENOTSUPP; val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg); - val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >> + mask = ((1 << anatop_reg->vol_bit_width) - 1) << anatop_reg->vol_bit_shift; + val = (val & mask) >> anatop_reg->vol_bit_shift; return val - anatop_reg->min_bit_val; } diff --git a/trunk/drivers/regulator/arizona-ldo1.c b/trunk/drivers/regulator/arizona-ldo1.c index c8f95c07adb6..d184aa35abcb 100644 --- a/trunk/drivers/regulator/arizona-ldo1.c +++ b/trunk/drivers/regulator/arizona-ldo1.c @@ -39,6 +39,8 @@ static struct regulator_ops 
arizona_ldo1_ops = { .map_voltage = regulator_map_voltage_linear, .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_bypass = regulator_get_bypass_regmap, + .set_bypass = regulator_set_bypass_regmap, }; static const struct regulator_desc arizona_ldo1 = { @@ -49,9 +51,11 @@ static const struct regulator_desc arizona_ldo1 = { .vsel_reg = ARIZONA_LDO1_CONTROL_1, .vsel_mask = ARIZONA_LDO1_VSEL_MASK, + .bypass_reg = ARIZONA_LDO1_CONTROL_1, + .bypass_mask = ARIZONA_LDO1_BYPASS, .min_uV = 900000, .uV_step = 50000, - .n_voltages = 7, + .n_voltages = 6, .owner = THIS_MODULE, }; diff --git a/trunk/drivers/regulator/arizona-micsupp.c b/trunk/drivers/regulator/arizona-micsupp.c index 450a069aa9b6..d9b1f82cc5bd 100644 --- a/trunk/drivers/regulator/arizona-micsupp.c +++ b/trunk/drivers/regulator/arizona-micsupp.c @@ -82,6 +82,9 @@ static struct regulator_ops arizona_micsupp_ops = { .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, + + .get_bypass = regulator_get_bypass_regmap, + .set_bypass = regulator_set_bypass_regmap, }; static const struct regulator_desc arizona_micsupp = { @@ -95,6 +98,8 @@ static const struct regulator_desc arizona_micsupp = { .vsel_mask = ARIZONA_LDO2_VSEL_MASK, .enable_reg = ARIZONA_MIC_CHARGE_PUMP_1, .enable_mask = ARIZONA_CPMIC_ENA, + .bypass_reg = ARIZONA_MIC_CHARGE_PUMP_1, + .bypass_mask = ARIZONA_CPMIC_BYPASS, .owner = THIS_MODULE, }; diff --git a/trunk/drivers/regulator/core.c b/trunk/drivers/regulator/core.c index f092588a078c..2e0352dc26bd 100644 --- a/trunk/drivers/regulator/core.c +++ b/trunk/drivers/regulator/core.c @@ -77,6 +77,7 @@ struct regulator { struct device *dev; struct list_head list; unsigned int always_on:1; + unsigned int bypass:1; int uA_load; int min_uV; int max_uV; @@ -394,6 +395,9 @@ static ssize_t regulator_status_show(struct device *dev, case REGULATOR_STATUS_STANDBY: label = "standby"; break; + case REGULATOR_STATUS_BYPASS: + label = "bypass"; + break; case REGULATOR_STATUS_UNDEFINED: label = "undefined"; break; @@ -585,6 +589,27 @@ static ssize_t regulator_suspend_standby_state_show(struct device *dev, static DEVICE_ATTR(suspend_standby_state, 0444, regulator_suspend_standby_state_show, NULL); +static ssize_t regulator_bypass_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct regulator_dev *rdev = dev_get_drvdata(dev); + const char *report; + bool bypass; + int ret; + + ret = rdev->desc->ops->get_bypass(rdev, &bypass); + + if (ret != 0) + report = "unknown"; + else if (bypass) + report = "enabled"; + else + report = "disabled"; + + return sprintf(buf, "%s\n", report); +} +static DEVICE_ATTR(bypass, 0444, + regulator_bypass_show, NULL); /* * These are the only attributes are present for all regulators. 
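The ab8500 hunk above replaces the special-cased ">> 0x3" for the intcore LDO with a generic voltage_shift, and the anatop hunk fixes get_voltage_sel() to mask before shifting; both come down to the same register field-access idiom. A small standalone example of reading and writing a selector field at a given mask and shift inside a register byte (the 0x38/3 pair is taken from the AB8500 VINTCORE descriptor above; the programmed value is arbitrary):

#include <stdint.h>
#include <stdio.h>

/* e.g. AB8500 VINTCORE: 3-bit selector in bits 5:3 -> mask 0x38, shift 3 */
static unsigned int get_sel(uint8_t reg, uint8_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

static uint8_t set_sel(uint8_t reg, uint8_t mask, unsigned int shift,
		       unsigned int sel)
{
	return (reg & ~mask) | ((sel << shift) & mask);
}

int main(void)
{
	uint8_t reg = 0x00;

	reg = set_sel(reg, 0x38, 3, 5);  /* program selector 5 */
	printf("reg=0x%02x sel=%u\n", reg, get_sel(reg, 0x38, 3)); /* 0x28, 5 */
	return 0;
}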
@@ -778,6 +803,9 @@ static void print_constraints(struct regulator_dev *rdev) if (constraints->valid_modes_mask & REGULATOR_MODE_STANDBY) count += sprintf(buf + count, "standby"); + if (!count) + sprintf(buf, "no parameters"); + rdev_info(rdev, "%s\n", buf); if ((constraints->min_uV != constraints->max_uV) && @@ -974,6 +1002,7 @@ static int set_supply(struct regulator_dev *rdev, err = -ENOMEM; return err; } + supply_rdev->open_count++; return 0; } @@ -1720,6 +1749,9 @@ int regulator_disable_deferred(struct regulator *regulator, int ms) if (regulator->always_on) return 0; + if (!ms) + return regulator_disable(regulator); + mutex_lock(&rdev->mutex); rdev->deferred_disables++; mutex_unlock(&rdev->mutex); @@ -2178,9 +2210,12 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, } } - if (ret == 0 && best_val >= 0) + if (ret == 0 && best_val >= 0) { + unsigned long data = best_val; + _notifier_call_chain(rdev, REGULATOR_EVENT_VOLTAGE_CHANGE, - (void *)best_val); + (void *)data); + } trace_regulator_set_voltage_complete(rdev_get_name(rdev), best_val); @@ -2291,8 +2326,8 @@ int regulator_set_voltage_time(struct regulator *regulator, EXPORT_SYMBOL_GPL(regulator_set_voltage_time); /** - *regulator_set_voltage_time_sel - get raise/fall time - * @regulator: regulator source + * regulator_set_voltage_time_sel - get raise/fall time + * @rdev: regulator source device * @old_selector: selector for starting voltage * @new_selector: selector for target voltage * @@ -2388,6 +2423,8 @@ static int _regulator_get_voltage(struct regulator_dev *rdev) ret = rdev->desc->ops->list_voltage(rdev, sel); } else if (rdev->desc->ops->get_voltage) { ret = rdev->desc->ops->get_voltage(rdev); + } else if (rdev->desc->ops->list_voltage) { + ret = rdev->desc->ops->list_voltage(rdev, 0); } else { return -EINVAL; } @@ -2673,6 +2710,100 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) } EXPORT_SYMBOL_GPL(regulator_set_optimum_mode); +/** + * regulator_set_bypass_regmap - Default set_bypass() using regmap + * + * @rdev: device to operate on. + * @enable: state to set. + */ +int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable) +{ + unsigned int val; + + if (enable) + val = rdev->desc->bypass_mask; + else + val = 0; + + return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg, + rdev->desc->bypass_mask, val); +} +EXPORT_SYMBOL_GPL(regulator_set_bypass_regmap); + +/** + * regulator_get_bypass_regmap - Default get_bypass() using regmap + * + * @rdev: device to operate on. + * @enable: current state. + */ +int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable) +{ + unsigned int val; + int ret; + + ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val); + if (ret != 0) + return ret; + + *enable = val & rdev->desc->bypass_mask; + + return 0; +} +EXPORT_SYMBOL_GPL(regulator_get_bypass_regmap); + +/** + * regulator_allow_bypass - allow the regulator to go into bypass mode + * + * @regulator: Regulator to configure + * @allow: enable or disable bypass mode + * + * Allow the regulator to go into bypass mode if all other consumers + * for the regulator also enable bypass mode and the machine + * constraints allow this. Bypass mode means that the regulator is + * simply passing the input directly to the output with no regulation. 
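The kerneldoc above (closed just below) describes the policy regulator_allow_bypass() implements: a consumer's bypass request only reaches the hardware once every open consumer of the regulator has allowed bypass (bypass_count == open_count), and hardware bypass is dropped again as soon as any consumer revokes it. A minimal standalone model of that counting rule, ignoring locking and the REGULATOR_CHANGE_BYPASS constraint check:

#include <stdbool.h>
#include <stdio.h>

struct rdev_model {
	int open_count;    /* consumers holding the regulator */
	int bypass_count;  /* consumers that allowed bypass */
	bool hw_bypass;    /* what would be programmed into the chip */
};

static void allow_bypass(struct rdev_model *r, bool *consumer_bypass,
			 bool enable)
{
	if (enable && !*consumer_bypass) {
		r->bypass_count++;
		if (r->bypass_count == r->open_count)
			r->hw_bypass = true;
	} else if (!enable && *consumer_bypass) {
		r->bypass_count--;
		if (r->bypass_count != r->open_count)
			r->hw_bypass = false;
	}
	*consumer_bypass = enable;
}

int main(void)
{
	struct rdev_model r = { .open_count = 2 };
	bool a = false, b = false;

	allow_bypass(&r, &a, true);
	printf("%d\n", r.hw_bypass); /* 0: only one of two consumers agreed */
	allow_bypass(&r, &b, true);
	printf("%d\n", r.hw_bypass); /* 1: all consumers agreed */
	allow_bypass(&r, &a, false);
	printf("%d\n", r.hw_bypass); /* 0: dropped again */
	return 0;
}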
+ */ +int regulator_allow_bypass(struct regulator *regulator, bool enable) +{ + struct regulator_dev *rdev = regulator->rdev; + int ret = 0; + + if (!rdev->desc->ops->set_bypass) + return 0; + + if (rdev->constraints && + !(rdev->constraints->valid_ops_mask & REGULATOR_CHANGE_BYPASS)) + return 0; + + mutex_lock(&rdev->mutex); + + if (enable && !regulator->bypass) { + rdev->bypass_count++; + + if (rdev->bypass_count == rdev->open_count) { + ret = rdev->desc->ops->set_bypass(rdev, enable); + if (ret != 0) + rdev->bypass_count--; + } + + } else if (!enable && regulator->bypass) { + rdev->bypass_count--; + + if (rdev->bypass_count != rdev->open_count) { + ret = rdev->desc->ops->set_bypass(rdev, enable); + if (ret != 0) + rdev->bypass_count++; + } + } + + if (ret == 0) + regulator->bypass = enable; + + mutex_unlock(&rdev->mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(regulator_allow_bypass); + /** * regulator_register_notifier - register regulator event notifier * @regulator: regulator source @@ -3011,7 +3142,8 @@ static int add_regulator_attributes(struct regulator_dev *rdev) /* some attributes need specific methods to be displayed */ if ((ops->get_voltage && ops->get_voltage(rdev) >= 0) || - (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0)) { + (ops->get_voltage_sel && ops->get_voltage_sel(rdev) >= 0) || + (ops->list_voltage && ops->list_voltage(rdev, 0) >= 0)) { status = device_create_file(dev, &dev_attr_microvolts); if (status < 0) return status; @@ -3036,6 +3168,11 @@ static int add_regulator_attributes(struct regulator_dev *rdev) if (status < 0) return status; } + if (ops->get_bypass) { + status = device_create_file(dev, &dev_attr_bypass); + if (status < 0) + return status; + } /* some attributes are type-specific */ if (rdev->desc->type == REGULATOR_CURRENT) { @@ -3124,6 +3261,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev) &rdev->use_count); debugfs_create_u32("open_count", 0444, rdev->debugfs, &rdev->open_count); + debugfs_create_u32("bypass_count", 0444, rdev->debugfs, + &rdev->bypass_count); } /** @@ -3189,8 +3328,10 @@ regulator_register(const struct regulator_desc *regulator_desc, rdev->desc = regulator_desc; if (config->regmap) rdev->regmap = config->regmap; - else + else if (dev_get_regmap(dev, NULL)) rdev->regmap = dev_get_regmap(dev, NULL); + else if (dev->parent) + rdev->regmap = dev_get_regmap(dev->parent, NULL); INIT_LIST_HEAD(&rdev->consumer_list); INIT_LIST_HEAD(&rdev->list); BLOCKING_INIT_NOTIFIER_HEAD(&rdev->notifier); @@ -3217,7 +3358,7 @@ regulator_register(const struct regulator_desc *regulator_desc, dev_set_drvdata(&rdev->dev, rdev); - if (config->ena_gpio) { + if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) { ret = gpio_request_one(config->ena_gpio, GPIOF_DIR_OUT | config->ena_gpio_flags, rdev_get_name(rdev)); diff --git a/trunk/drivers/regulator/da9052-regulator.c b/trunk/drivers/regulator/da9052-regulator.c index 903299cf15cf..27355b1199e5 100644 --- a/trunk/drivers/regulator/da9052-regulator.c +++ b/trunk/drivers/regulator/da9052-regulator.c @@ -133,8 +133,8 @@ static int da9052_dcdc_set_current_limit(struct regulator_dev *rdev, int min_uA, max_uA < da9052_current_limits[row][DA9052_MIN_UA]) return -EINVAL; - for (i = 0; i < DA9052_CURRENT_RANGE; i++) { - if (min_uA <= da9052_current_limits[row][i]) { + for (i = DA9052_CURRENT_RANGE - 1; i >= 0; i--) { + if (da9052_current_limits[row][i] <= max_uA) { reg_val = i; break; } diff --git a/trunk/drivers/regulator/dummy.c b/trunk/drivers/regulator/dummy.c index 
86f655c7f7a1..03a1d7c11ef2 100644 --- a/trunk/drivers/regulator/dummy.c +++ b/trunk/drivers/regulator/dummy.c @@ -30,7 +30,7 @@ static struct regulator_init_data dummy_initdata; static struct regulator_ops dummy_ops; static struct regulator_desc dummy_desc = { - .name = "dummy", + .name = "regulator-dummy", .id = -1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, diff --git a/trunk/drivers/regulator/fan53555.c b/trunk/drivers/regulator/fan53555.c new file mode 100644 index 000000000000..339f4d732e97 --- /dev/null +++ b/trunk/drivers/regulator/fan53555.c @@ -0,0 +1,322 @@ +/* + * FAN53555 Fairchild Digitally Programmable TinyBuck Regulator Driver. + * + * Supported Part Numbers: + * FAN53555UC00X/01X/03X/04X/05X + * + * Copyright (c) 2012 Marvell Technology Ltd. + * Yunfan Zhang + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* Voltage setting */ +#define FAN53555_VSEL0 0x00 +#define FAN53555_VSEL1 0x01 +/* Control register */ +#define FAN53555_CONTROL 0x02 +/* IC Type */ +#define FAN53555_ID1 0x03 +/* IC mask version */ +#define FAN53555_ID2 0x04 +/* Monitor register */ +#define FAN53555_MONITOR 0x05 + +/* VSEL bit definitions */ +#define VSEL_BUCK_EN (1 << 7) +#define VSEL_MODE (1 << 6) +#define VSEL_NSEL_MASK 0x3F +/* Chip ID and Verison */ +#define DIE_ID 0x0F /* ID1 */ +#define DIE_REV 0x0F /* ID2 */ +/* Control bit definitions */ +#define CTL_OUTPUT_DISCHG (1 << 7) +#define CTL_SLEW_MASK (0x7 << 4) +#define CTL_SLEW_SHIFT 4 +#define CTL_RESET (1 << 2) + +#define FAN53555_NVOLTAGES 64 /* Numbers of voltages */ + +/* IC Type */ +enum { + FAN53555_CHIP_ID_00 = 0, + FAN53555_CHIP_ID_01, + FAN53555_CHIP_ID_02, + FAN53555_CHIP_ID_03, + FAN53555_CHIP_ID_04, + FAN53555_CHIP_ID_05, +}; + +struct fan53555_device_info { + struct regmap *regmap; + struct device *dev; + struct regulator_desc desc; + struct regulator_dev *rdev; + struct regulator_init_data *regulator; + /* IC Type and Rev */ + int chip_id; + int chip_rev; + /* Voltage setting register */ + unsigned int vol_reg; + unsigned int sleep_reg; + /* Voltage range and step(linear) */ + unsigned int vsel_min; + unsigned int vsel_step; + /* Voltage slew rate limiting */ + unsigned int slew_rate; + /* Sleep voltage cache */ + unsigned int sleep_vol_cache; +}; + +static int fan53555_set_suspend_voltage(struct regulator_dev *rdev, int uV) +{ + struct fan53555_device_info *di = rdev_get_drvdata(rdev); + int ret; + + if (di->sleep_vol_cache == uV) + return 0; + ret = regulator_map_voltage_linear(rdev, uV, uV); + if (ret < 0) + return -EINVAL; + ret = regmap_update_bits(di->regmap, di->sleep_reg, + VSEL_NSEL_MASK, ret); + if (ret < 0) + return -EINVAL; + /* Cache the sleep voltage setting. 
+ * Might not be the real voltage which is rounded */ + di->sleep_vol_cache = uV; + + return 0; +} + +static int fan53555_set_mode(struct regulator_dev *rdev, unsigned int mode) +{ + struct fan53555_device_info *di = rdev_get_drvdata(rdev); + + switch (mode) { + case REGULATOR_MODE_FAST: + regmap_update_bits(di->regmap, di->vol_reg, + VSEL_MODE, VSEL_MODE); + break; + case REGULATOR_MODE_NORMAL: + regmap_update_bits(di->regmap, di->vol_reg, VSEL_MODE, 0); + break; + default: + return -EINVAL; + } + return 0; +} + +static unsigned int fan53555_get_mode(struct regulator_dev *rdev) +{ + struct fan53555_device_info *di = rdev_get_drvdata(rdev); + unsigned int val; + int ret = 0; + + ret = regmap_read(di->regmap, di->vol_reg, &val); + if (ret < 0) + return ret; + if (val & VSEL_MODE) + return REGULATOR_MODE_FAST; + else + return REGULATOR_MODE_NORMAL; +} + +static struct regulator_ops fan53555_regulator_ops = { + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .map_voltage = regulator_map_voltage_linear, + .list_voltage = regulator_list_voltage_linear, + .set_suspend_voltage = fan53555_set_suspend_voltage, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .set_mode = fan53555_set_mode, + .get_mode = fan53555_get_mode, +}; + +/* For 00,01,03,05 options: + * VOUT = 0.60V + NSELx * 10mV, from 0.60 to 1.23V. + * For 04 option: + * VOUT = 0.603V + NSELx * 12.826mV, from 0.603 to 1.411V. + * */ +static int fan53555_device_setup(struct fan53555_device_info *di, + struct fan53555_platform_data *pdata) +{ + unsigned int reg, data, mask; + + /* Setup voltage control register */ + switch (pdata->sleep_vsel_id) { + case FAN53555_VSEL_ID_0: + di->sleep_reg = FAN53555_VSEL0; + di->vol_reg = FAN53555_VSEL1; + break; + case FAN53555_VSEL_ID_1: + di->sleep_reg = FAN53555_VSEL1; + di->vol_reg = FAN53555_VSEL0; + break; + default: + dev_err(di->dev, "Invalid VSEL ID!\n"); + return -EINVAL; + } + /* Init voltage range and step */ + switch (di->chip_id) { + case FAN53555_CHIP_ID_00: + case FAN53555_CHIP_ID_01: + case FAN53555_CHIP_ID_03: + case FAN53555_CHIP_ID_05: + di->vsel_min = 600000; + di->vsel_step = 10000; + break; + case FAN53555_CHIP_ID_04: + di->vsel_min = 603000; + di->vsel_step = 12826; + break; + default: + dev_err(di->dev, + "Chip ID[%d]\n not supported!\n", di->chip_id); + return -EINVAL; + } + /* Init slew rate */ + if (pdata->slew_rate & 0x7) + di->slew_rate = pdata->slew_rate; + else + di->slew_rate = FAN53555_SLEW_RATE_64MV; + reg = FAN53555_CONTROL; + data = di->slew_rate << CTL_SLEW_SHIFT; + mask = CTL_SLEW_MASK; + return regmap_update_bits(di->regmap, reg, mask, data); +} + +static int fan53555_regulator_register(struct fan53555_device_info *di, + struct regulator_config *config) +{ + struct regulator_desc *rdesc = &di->desc; + + rdesc->name = "fan53555-reg"; + rdesc->ops = &fan53555_regulator_ops; + rdesc->type = REGULATOR_VOLTAGE; + rdesc->n_voltages = FAN53555_NVOLTAGES; + rdesc->enable_reg = di->vol_reg; + rdesc->enable_mask = VSEL_BUCK_EN; + rdesc->min_uV = di->vsel_min; + rdesc->uV_step = di->vsel_step; + rdesc->vsel_reg = di->vol_reg; + rdesc->vsel_mask = VSEL_NSEL_MASK; + rdesc->owner = THIS_MODULE; + + di->rdev = regulator_register(&di->desc, config); + if (IS_ERR(di->rdev)) + return PTR_ERR(di->rdev); + return 0; + +} + +static struct regmap_config fan53555_regmap_config = { + .reg_bits = 8, + .val_bits = 8, +}; + +static int __devinit 
fan53555_regulator_probe(struct i2c_client *client,
+				const struct i2c_device_id *id)
+{
+	struct fan53555_device_info *di;
+	struct fan53555_platform_data *pdata;
+	struct regulator_config config = { };
+	unsigned int val;
+	int ret;
+
+	pdata = client->dev.platform_data;
+	if (!pdata || !pdata->regulator) {
+		dev_err(&client->dev, "Platform data not found!\n");
+		return -ENODEV;
+	}
+
+	di = devm_kzalloc(&client->dev, sizeof(struct fan53555_device_info),
+					GFP_KERNEL);
+	if (!di) {
+		dev_err(&client->dev, "Failed to allocate device info data!\n");
+		return -ENOMEM;
+	}
+	di->regmap = devm_regmap_init_i2c(client, &fan53555_regmap_config);
+	if (IS_ERR(di->regmap)) {
+		dev_err(&client->dev, "Failed to allocate regmap!\n");
+		return PTR_ERR(di->regmap);
+	}
+	di->dev = &client->dev;
+	di->regulator = pdata->regulator;
+	i2c_set_clientdata(client, di);
+	/* Get chip ID */
+	ret = regmap_read(di->regmap, FAN53555_ID1, &val);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to get chip ID!\n");
+		return -ENODEV;
+	}
+	di->chip_id = val & DIE_ID;
+	/* Get chip revision */
+	ret = regmap_read(di->regmap, FAN53555_ID2, &val);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to get chip Rev!\n");
+		return -ENODEV;
+	}
+	di->chip_rev = val & DIE_REV;
+	dev_info(&client->dev, "FAN53555 Option[%d] Rev[%d] Detected!\n",
+				di->chip_id, di->chip_rev);
+	/* Device init */
+	ret = fan53555_device_setup(di, pdata);
+	if (ret < 0) {
+		dev_err(&client->dev, "Failed to setup device!\n");
+		return ret;
+	}
+	/* Register regulator */
+	config.dev = di->dev;
+	config.init_data = di->regulator;
+	config.regmap = di->regmap;
+	config.driver_data = di;
+	ret = fan53555_regulator_register(di, &config);
+	if (ret < 0)
+		dev_err(&client->dev, "Failed to register regulator!\n");
+	return ret;
+
+}
+
+static int __devexit fan53555_regulator_remove(struct i2c_client *client)
+{
+	struct fan53555_device_info *di = i2c_get_clientdata(client);
+
+	regulator_unregister(di->rdev);
+	return 0;
+}
+
+static const struct i2c_device_id fan53555_id[] = {
+	{"fan53555", -1},
+	{ },
+};
+
+static struct i2c_driver fan53555_regulator_driver = {
+	.driver = {
+		.name = "fan53555-regulator",
+	},
+	.probe = fan53555_regulator_probe,
+	.remove = __devexit_p(fan53555_regulator_remove),
+	.id_table = fan53555_id,
+};
+
+module_i2c_driver(fan53555_regulator_driver);
+
+MODULE_AUTHOR("Yunfan Zhang ");
+MODULE_DESCRIPTION("FAN53555 regulator driver");
+MODULE_LICENSE("GPL v2");
diff --git a/trunk/drivers/regulator/gpio-regulator.c b/trunk/drivers/regulator/gpio-regulator.c
index 34b67bee9323..8b5944f2d7d1 100644
--- a/trunk/drivers/regulator/gpio-regulator.c
+++ b/trunk/drivers/regulator/gpio-regulator.c
@@ -57,16 +57,17 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
 	return -EINVAL;
 }
 
-static int gpio_regulator_set_value(struct regulator_dev *dev,
-					int min, int max, unsigned *selector)
+static int gpio_regulator_set_voltage(struct regulator_dev *dev,
+					int min_uV, int max_uV,
+					unsigned *selector)
 {
 	struct gpio_regulator_data *data = rdev_get_drvdata(dev);
 	int ptr, target = 0, state, best_val = INT_MAX;
 
 	for (ptr = 0; ptr < data->nr_states; ptr++)
 		if (data->states[ptr].value < best_val &&
-		    data->states[ptr].value >= min &&
-		    data->states[ptr].value <= max) {
+		    data->states[ptr].value >= min_uV &&
+		    data->states[ptr].value <= max_uV) {
 			target = data->states[ptr].gpios;
 			best_val = data->states[ptr].value;
 			if (selector)
@@ -85,13 +86,6 @@ static int gpio_regulator_set_value(struct regulator_dev *dev,
 	return 0;
 }
 
-static int
gpio_regulator_set_voltage(struct regulator_dev *dev, - int min_uV, int max_uV, - unsigned *selector) -{ - return gpio_regulator_set_value(dev, min_uV, max_uV, selector); -} - static int gpio_regulator_list_voltage(struct regulator_dev *dev, unsigned selector) { @@ -106,7 +100,27 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev, static int gpio_regulator_set_current_limit(struct regulator_dev *dev, int min_uA, int max_uA) { - return gpio_regulator_set_value(dev, min_uA, max_uA, NULL); + struct gpio_regulator_data *data = rdev_get_drvdata(dev); + int ptr, target = 0, state, best_val = 0; + + for (ptr = 0; ptr < data->nr_states; ptr++) + if (data->states[ptr].value > best_val && + data->states[ptr].value >= min_uA && + data->states[ptr].value <= max_uA) { + target = data->states[ptr].gpios; + best_val = data->states[ptr].value; + } + + if (best_val == 0) + return -EINVAL; + + for (ptr = 0; ptr < data->nr_gpios; ptr++) { + state = (target & (1 << ptr)) >> ptr; + gpio_set_value(data->gpios[ptr].gpio, state); + } + data->state = target; + + return 0; } static struct regulator_ops gpio_regulator_voltage_ops = { diff --git a/trunk/drivers/regulator/isl6271a-regulator.c b/trunk/drivers/regulator/isl6271a-regulator.c index 1d145a07ada9..d8ecf49a5777 100644 --- a/trunk/drivers/regulator/isl6271a-regulator.c +++ b/trunk/drivers/regulator/isl6271a-regulator.c @@ -73,13 +73,7 @@ static struct regulator_ops isl_core_ops = { .map_voltage = regulator_map_voltage_linear, }; -static int isl6271a_get_fixed_voltage(struct regulator_dev *dev) -{ - return dev->desc->min_uV; -} - static struct regulator_ops isl_fixed_ops = { - .get_voltage = isl6271a_get_fixed_voltage, .list_voltage = regulator_list_voltage_linear, }; diff --git a/trunk/drivers/regulator/lp872x.c b/trunk/drivers/regulator/lp872x.c index 212c38eaba70..708f4b6a17dc 100644 --- a/trunk/drivers/regulator/lp872x.c +++ b/trunk/drivers/regulator/lp872x.c @@ -86,6 +86,10 @@ #define EXTERN_DVS_USED 0 #define MAX_DELAY 6 +/* Default DVS Mode */ +#define LP8720_DEFAULT_DVS 0 +#define LP8725_DEFAULT_DVS BIT(2) + /* dump registers in regmap-debugfs */ #define MAX_REGISTERS 0x0F @@ -269,9 +273,9 @@ static int lp872x_regulator_enable_time(struct regulator_dev *rdev) return val > MAX_DELAY ? 0 : val * time_step_us; } -static void lp872x_set_dvs(struct lp872x *lp, int gpio) +static void lp872x_set_dvs(struct lp872x *lp, enum lp872x_dvs_sel dvs_sel, + int gpio) { - enum lp872x_dvs_sel dvs_sel = lp->pdata->dvs->vsel; enum lp872x_dvs_state state; state = dvs_sel == SEL_V1 ? DVS_HIGH : DVS_LOW; @@ -339,10 +343,10 @@ static int lp872x_buck_set_voltage_sel(struct regulator_dev *rdev, struct lp872x *lp = rdev_get_drvdata(rdev); enum lp872x_regulator_id buck = rdev_get_id(rdev); u8 addr, mask = LP872X_VOUT_M; - struct lp872x_dvs *dvs = lp->pdata->dvs; + struct lp872x_dvs *dvs = lp->pdata ? 
lp->pdata->dvs : NULL; if (dvs && gpio_is_valid(dvs->gpio)) - lp872x_set_dvs(lp, dvs->gpio); + lp872x_set_dvs(lp, dvs->vsel, dvs->gpio); addr = lp872x_select_buck_vout_addr(lp, buck); if (!lp872x_is_valid_buck_addr(addr)) @@ -374,8 +378,8 @@ static int lp8725_buck_set_current_limit(struct regulator_dev *rdev, { struct lp872x *lp = rdev_get_drvdata(rdev); enum lp872x_regulator_id buck = rdev_get_id(rdev); - int i, max = ARRAY_SIZE(lp8725_buck_uA); - u8 addr, val; + int i; + u8 addr; switch (buck) { case LP8725_ID_BUCK1: @@ -388,17 +392,15 @@ static int lp8725_buck_set_current_limit(struct regulator_dev *rdev, return -EINVAL; } - for (i = 0 ; i < max ; i++) + for (i = ARRAY_SIZE(lp8725_buck_uA) - 1 ; i >= 0; i--) { if (lp8725_buck_uA[i] >= min_uA && lp8725_buck_uA[i] <= max_uA) - break; - - if (i == max) - return -EINVAL; - - val = i << LP8725_BUCK_CL_S; + return lp872x_update_bits(lp, addr, + LP8725_BUCK_CL_M, + i << LP8725_BUCK_CL_S); + } - return lp872x_update_bits(lp, addr, LP8725_BUCK_CL_M, val); + return -EINVAL; } static int lp8725_buck_get_current_limit(struct regulator_dev *rdev) @@ -727,39 +729,16 @@ static struct regulator_desc lp8725_regulator_desc[] = { }, }; -static int lp872x_check_dvs_validity(struct lp872x *lp) -{ - struct lp872x_dvs *dvs = lp->pdata->dvs; - u8 val = 0; - int ret; - - ret = lp872x_read_byte(lp, LP872X_GENERAL_CFG, &val); - if (ret) - return ret; - - ret = 0; - if (lp->chipid == LP8720) { - if (val & LP8720_EXT_DVS_M) - ret = dvs ? 0 : -EINVAL; - } else { - if ((val & LP8725_DVS1_M) == EXTERN_DVS_USED) - ret = dvs ? 0 : -EINVAL; - } - - return ret; -} - static int lp872x_init_dvs(struct lp872x *lp) { int ret, gpio; - struct lp872x_dvs *dvs = lp->pdata->dvs; + struct lp872x_dvs *dvs = lp->pdata ? lp->pdata->dvs : NULL; enum lp872x_dvs_state pinstate; + u8 mask[] = { LP8720_EXT_DVS_M, LP8725_DVS1_M | LP8725_DVS2_M }; + u8 default_dvs_mode[] = { LP8720_DEFAULT_DVS, LP8725_DEFAULT_DVS }; - ret = lp872x_check_dvs_validity(lp); - if (ret) { - dev_warn(lp->dev, "invalid dvs data: %d\n", ret); - return ret; - } + if (!dvs) + goto set_default_dvs_mode; gpio = dvs->gpio; if (!gpio_is_valid(gpio)) { @@ -778,6 +757,10 @@ static int lp872x_init_dvs(struct lp872x *lp) lp->dvs_gpio = gpio; return 0; + +set_default_dvs_mode: + return lp872x_update_bits(lp, LP872X_GENERAL_CFG, mask[lp->chipid], + default_dvs_mode[lp->chipid]); } static int lp872x_config(struct lp872x *lp) @@ -785,24 +768,29 @@ static int lp872x_config(struct lp872x *lp) struct lp872x_platform_data *pdata = lp->pdata; int ret; - if (!pdata->update_config) - return 0; + if (!pdata || !pdata->update_config) + goto init_dvs; ret = lp872x_write_byte(lp, LP872X_GENERAL_CFG, pdata->general_config); if (ret) return ret; +init_dvs: return lp872x_init_dvs(lp); } static struct regulator_init_data *lp872x_find_regulator_init_data(int id, struct lp872x *lp) { + struct lp872x_platform_data *pdata = lp->pdata; int i; + if (!pdata) + return NULL; + for (i = 0; i < lp->num_regulators; i++) { - if (lp->pdata->regulator_data[i].id == id) - return lp->pdata->regulator_data[i].init_data; + if (pdata->regulator_data[i].id == id) + return pdata->regulator_data[i].init_data; } return NULL; @@ -863,18 +851,12 @@ static const struct regmap_config lp872x_regmap_config = { static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) { struct lp872x *lp; - struct lp872x_platform_data *pdata = cl->dev.platform_data; int ret, size, num_regulators; const int lp872x_num_regulators[] = { [LP8720] = LP8720_NUM_REGULATORS, 
[LP8725] = LP8725_NUM_REGULATORS, }; - if (!pdata) { - dev_err(&cl->dev, "no platform data\n"); - return -EINVAL; - } - lp = devm_kzalloc(&cl->dev, sizeof(struct lp872x), GFP_KERNEL); if (!lp) goto err_mem; @@ -894,7 +876,7 @@ static int lp872x_probe(struct i2c_client *cl, const struct i2c_device_id *id) } lp->dev = &cl->dev; - lp->pdata = pdata; + lp->pdata = cl->dev.platform_data; lp->chipid = id->driver_data; lp->num_regulators = num_regulators; i2c_set_clientdata(cl, lp); diff --git a/trunk/drivers/regulator/lp8788-buck.c b/trunk/drivers/regulator/lp8788-buck.c index 6356e821400f..ba3e0aa402de 100644 --- a/trunk/drivers/regulator/lp8788-buck.c +++ b/trunk/drivers/regulator/lp8788-buck.c @@ -69,6 +69,9 @@ #define PIN_HIGH 1 #define ENABLE_TIME_USEC 32 +#define BUCK_FPWM_MASK(x) (1 << (x)) +#define BUCK_FPWM_SHIFT(x) (x) + enum lp8788_dvs_state { DVS_LOW = GPIOF_OUT_INIT_LOW, DVS_HIGH = GPIOF_OUT_INIT_HIGH, @@ -86,15 +89,9 @@ enum lp8788_buck_id { BUCK4, }; -struct lp8788_pwm_map { - u8 mask; - u8 shift; -}; - struct lp8788_buck { struct lp8788 *lp; struct regulator_dev *regulator; - struct lp8788_pwm_map *pmap; void *dvs; }; @@ -106,29 +103,6 @@ static const int lp8788_buck_vtbl[] = { 1950000, 2000000, }; -/* buck pwm mode selection : used for set/get_mode in regulator ops - * @forced pwm : fast mode - * @auto pwm : normal mode - */ -static struct lp8788_pwm_map buck_pmap[] = { - [BUCK1] = { - .mask = LP8788_FPWM_BUCK1_M, - .shift = LP8788_FPWM_BUCK1_S, - }, - [BUCK2] = { - .mask = LP8788_FPWM_BUCK2_M, - .shift = LP8788_FPWM_BUCK2_S, - }, - [BUCK3] = { - .mask = LP8788_FPWM_BUCK3_M, - .shift = LP8788_FPWM_BUCK3_S, - }, - [BUCK4] = { - .mask = LP8788_FPWM_BUCK4_M, - .shift = LP8788_FPWM_BUCK4_S, - }, -}; - static const u8 buck1_vout_addr[] = { LP8788_BUCK1_VOUT0, LP8788_BUCK1_VOUT1, LP8788_BUCK1_VOUT2, LP8788_BUCK1_VOUT3, @@ -347,41 +321,37 @@ static int lp8788_buck_enable_time(struct regulator_dev *rdev) static int lp8788_buck_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct lp8788_buck *buck = rdev_get_drvdata(rdev); - struct lp8788_pwm_map *pmap = buck->pmap; - u8 val; - - if (!pmap) - return -EINVAL; + enum lp8788_buck_id id = rdev_get_id(rdev); + u8 mask, val; + mask = BUCK_FPWM_MASK(id); switch (mode) { case REGULATOR_MODE_FAST: - val = LP8788_FORCE_PWM << pmap->shift; + val = LP8788_FORCE_PWM << BUCK_FPWM_SHIFT(id); break; case REGULATOR_MODE_NORMAL: - val = LP8788_AUTO_PWM << pmap->shift; + val = LP8788_AUTO_PWM << BUCK_FPWM_SHIFT(id); break; default: return -EINVAL; } - return lp8788_update_bits(buck->lp, LP8788_BUCK_PWM, pmap->mask, val); + return lp8788_update_bits(buck->lp, LP8788_BUCK_PWM, mask, val); } static unsigned int lp8788_buck_get_mode(struct regulator_dev *rdev) { struct lp8788_buck *buck = rdev_get_drvdata(rdev); - struct lp8788_pwm_map *pmap = buck->pmap; + enum lp8788_buck_id id = rdev_get_id(rdev); u8 val; int ret; - if (!pmap) - return -EINVAL; - ret = lp8788_read_byte(buck->lp, LP8788_BUCK_PWM, &val); if (ret) return ret; - return val & pmap->mask ? REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL; + return val & BUCK_FPWM_MASK(id) ? 
+ REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL; } static struct regulator_ops lp8788_buck12_ops = { @@ -459,27 +429,6 @@ static struct regulator_desc lp8788_buck_desc[] = { }, }; -static int lp8788_set_default_dvs_ctrl_mode(struct lp8788 *lp, - enum lp8788_buck_id id) -{ - u8 mask, val; - - switch (id) { - case BUCK1: - mask = LP8788_BUCK1_DVS_SEL_M; - val = LP8788_BUCK1_DVS_I2C; - break; - case BUCK2: - mask = LP8788_BUCK2_DVS_SEL_M; - val = LP8788_BUCK2_DVS_I2C; - break; - default: - return 0; - } - - return lp8788_update_bits(lp, LP8788_BUCK_DVS_SEL, mask, val); -} - static int _gpio_request(struct lp8788_buck *buck, int gpio, char *name) { struct device *dev = buck->lp->dev; @@ -530,6 +479,7 @@ static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id) struct lp8788_platform_data *pdata = buck->lp->pdata; u8 mask[] = { LP8788_BUCK1_DVS_SEL_M, LP8788_BUCK2_DVS_SEL_M }; u8 val[] = { LP8788_BUCK1_DVS_PIN, LP8788_BUCK2_DVS_PIN }; + u8 default_dvs_mode[] = { LP8788_BUCK1_DVS_I2C, LP8788_BUCK2_DVS_I2C }; /* no dvs for buck3, 4 */ if (id == BUCK3 || id == BUCK4) @@ -550,7 +500,8 @@ static int lp8788_init_dvs(struct lp8788_buck *buck, enum lp8788_buck_id id) val[id]); set_default_dvs_mode: - return lp8788_set_default_dvs_ctrl_mode(buck->lp, id); + return lp8788_update_bits(buck->lp, LP8788_BUCK_DVS_SEL, mask[id], + default_dvs_mode[id]); } static __devinit int lp8788_buck_probe(struct platform_device *pdev) @@ -567,7 +518,6 @@ static __devinit int lp8788_buck_probe(struct platform_device *pdev) return -ENOMEM; buck->lp = lp; - buck->pmap = &buck_pmap[id]; ret = lp8788_init_dvs(buck, id); if (ret) diff --git a/trunk/drivers/regulator/lp8788-ldo.c b/trunk/drivers/regulator/lp8788-ldo.c index d2122e41a96d..6796eeb47dc6 100644 --- a/trunk/drivers/regulator/lp8788-ldo.c +++ b/trunk/drivers/regulator/lp8788-ldo.c @@ -496,6 +496,7 @@ static struct regulator_desc lp8788_dldo_desc[] = { .name = "dldo12", .id = DLDO12, .ops = &lp8788_ldo_voltage_fixed_ops, + .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_LDO_B, @@ -521,6 +522,7 @@ static struct regulator_desc lp8788_aldo_desc[] = { .name = "aldo2", .id = ALDO2, .ops = &lp8788_ldo_voltage_fixed_ops, + .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_LDO_B, @@ -530,6 +532,7 @@ static struct regulator_desc lp8788_aldo_desc[] = { .name = "aldo3", .id = ALDO3, .ops = &lp8788_ldo_voltage_fixed_ops, + .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_LDO_B, @@ -539,6 +542,7 @@ static struct regulator_desc lp8788_aldo_desc[] = { .name = "aldo4", .id = ALDO4, .ops = &lp8788_ldo_voltage_fixed_ops, + .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_LDO_B, @@ -548,6 +552,7 @@ static struct regulator_desc lp8788_aldo_desc[] = { .name = "aldo5", .id = ALDO5, .ops = &lp8788_ldo_voltage_fixed_ops, + .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_LDO_C, @@ -583,6 +588,7 @@ static struct regulator_desc lp8788_aldo_desc[] = { .name = "aldo8", .id = ALDO8, .ops = &lp8788_ldo_voltage_fixed_ops, + .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_LDO_C, @@ -592,6 +598,7 @@ static struct regulator_desc lp8788_aldo_desc[] = { .name = "aldo9", .id = ALDO9, .ops = &lp8788_ldo_voltage_fixed_ops, + .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_LDO_C, @@ -601,6 +608,7 @@ static 
struct regulator_desc lp8788_aldo_desc[] = { .name = "aldo10", .id = ALDO10, .ops = &lp8788_ldo_voltage_fixed_ops, + .n_voltages = 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, .enable_reg = LP8788_EN_LDO_C, diff --git a/trunk/drivers/regulator/max77686.c b/trunk/drivers/regulator/max77686.c index c564af6f05a3..2a67d08658ad 100644 --- a/trunk/drivers/regulator/max77686.c +++ b/trunk/drivers/regulator/max77686.c @@ -66,7 +66,7 @@ enum max77686_ramp_rate { }; struct max77686_data { - struct regulator_dev **rdev; + struct regulator_dev *rdev[MAX77686_REGULATORS]; }; static int max77686_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) @@ -265,6 +265,7 @@ static int max77686_pmic_dt_parse_pdata(struct max77686_dev *iodev, rmatch.of_node = NULL; of_regulator_match(iodev->dev, regulators_np, &rmatch, 1); rdata[i].initdata = rmatch.init_data; + rdata[i].of_node = rmatch.of_node; } pdata->regulators = rdata; @@ -283,10 +284,8 @@ static __devinit int max77686_pmic_probe(struct platform_device *pdev) { struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct max77686_platform_data *pdata = dev_get_platdata(iodev->dev); - struct regulator_dev **rdev; struct max77686_data *max77686; - int i, size; - int ret = 0; + int i, ret = 0; struct regulator_config config = { }; dev_dbg(&pdev->dev, "%s\n", __func__); @@ -313,45 +312,38 @@ static __devinit int max77686_pmic_probe(struct platform_device *pdev) if (!max77686) return -ENOMEM; - size = sizeof(struct regulator_dev *) * MAX77686_REGULATORS; - max77686->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!max77686->rdev) - return -ENOMEM; - - rdev = max77686->rdev; config.dev = &pdev->dev; config.regmap = iodev->regmap; platform_set_drvdata(pdev, max77686); for (i = 0; i < MAX77686_REGULATORS; i++) { config.init_data = pdata->regulators[i].initdata; + config.of_node = pdata->regulators[i].of_node; - rdev[i] = regulator_register(®ulators[i], &config); - if (IS_ERR(rdev[i])) { - ret = PTR_ERR(rdev[i]); + max77686->rdev[i] = regulator_register(®ulators[i], &config); + if (IS_ERR(max77686->rdev[i])) { + ret = PTR_ERR(max77686->rdev[i]); dev_err(&pdev->dev, "regulator init failed for %d\n", i); - rdev[i] = NULL; - goto err; + max77686->rdev[i] = NULL; + goto err; } } return 0; err: while (--i >= 0) - regulator_unregister(rdev[i]); + regulator_unregister(max77686->rdev[i]); return ret; } static int __devexit max77686_pmic_remove(struct platform_device *pdev) { struct max77686_data *max77686 = platform_get_drvdata(pdev); - struct regulator_dev **rdev = max77686->rdev; int i; for (i = 0; i < MAX77686_REGULATORS; i++) - if (rdev[i]) - regulator_unregister(rdev[i]); + regulator_unregister(max77686->rdev[i]); return 0; } diff --git a/trunk/drivers/regulator/max8907-regulator.c b/trunk/drivers/regulator/max8907-regulator.c new file mode 100644 index 000000000000..af7607515ab9 --- /dev/null +++ b/trunk/drivers/regulator/max8907-regulator.c @@ -0,0 +1,408 @@ +/* + * max8907-regulator.c -- support regulators in max8907 + * + * Copyright (C) 2010 Gyungoh Yoo + * Copyright (C) 2010-2012, NVIDIA CORPORATION. All rights reserved. + * + * Portions based on drivers/regulator/tps65910-regulator.c, + * Copyright 2010 Texas Instruments Inc. + * Author: Graeme Gregory + * Author: Jorge Eduardo Candelaria + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX8907_II2RR_VERSION_MASK 0xF0 +#define MAX8907_II2RR_VERSION_REV_A 0x00 +#define MAX8907_II2RR_VERSION_REV_B 0x10 +#define MAX8907_II2RR_VERSION_REV_C 0x30 + +struct max8907_regulator { + struct regulator_desc desc[MAX8907_NUM_REGULATORS]; + struct regulator_dev *rdev[MAX8907_NUM_REGULATORS]; +}; + +#define REG_MBATT() \ + [MAX8907_MBATT] = { \ + .name = "MBATT", \ + .supply_name = "mbatt", \ + .id = MAX8907_MBATT, \ + .ops = &max8907_mbatt_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + } + +#define REG_LDO(ids, supply, base, min, max, step) \ + [MAX8907_##ids] = { \ + .name = #ids, \ + .supply_name = supply, \ + .id = MAX8907_##ids, \ + .n_voltages = ((max) - (min)) / (step) + 1, \ + .ops = &max8907_ldo_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = (min), \ + .uV_step = (step), \ + .vsel_reg = (base) + MAX8907_VOUT, \ + .vsel_mask = 0x3f, \ + .enable_reg = (base) + MAX8907_CTL, \ + .enable_mask = MAX8907_MASK_LDO_EN, \ + } + +#define REG_FIXED(ids, supply, voltage) \ + [MAX8907_##ids] = { \ + .name = #ids, \ + .supply_name = supply, \ + .id = MAX8907_##ids, \ + .n_voltages = 1, \ + .ops = &max8907_fixed_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = (voltage), \ + } + +#define REG_OUT5V(ids, supply, base, voltage) \ + [MAX8907_##ids] = { \ + .name = #ids, \ + .supply_name = supply, \ + .id = MAX8907_##ids, \ + .n_voltages = 1, \ + .ops = &max8907_out5v_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = (voltage), \ + .enable_reg = (base), \ + .enable_mask = MAX8907_MASK_OUT5V_EN, \ + } + +#define REG_BBAT(ids, supply, base, min, max, step) \ + [MAX8907_##ids] = { \ + .name = #ids, \ + .supply_name = supply, \ + .id = MAX8907_##ids, \ + .n_voltages = ((max) - (min)) / (step) + 1, \ + .ops = &max8907_bbat_ops, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + .min_uV = (min), \ + .uV_step = (step), \ + .vsel_reg = (base), \ + .vsel_mask = MAX8907_MASK_VBBATTCV, \ + } + +#define LDO_750_50(id, supply, base) REG_LDO(id, supply, (base), \ + 750000, 3900000, 50000) +#define LDO_650_25(id, supply, base) REG_LDO(id, supply, (base), \ + 650000, 2225000, 25000) + +static struct regulator_ops max8907_mbatt_ops = { +}; + +static struct regulator_ops max8907_ldo_ops = { + .list_voltage = regulator_list_voltage_linear, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static struct regulator_ops max8907_ldo_hwctl_ops = { + .list_voltage = regulator_list_voltage_linear, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +static struct regulator_ops max8907_fixed_ops = { + .list_voltage = regulator_list_voltage_linear, +}; + +static struct regulator_ops max8907_out5v_ops = { + .list_voltage = regulator_list_voltage_linear, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static struct regulator_ops max8907_out5v_hwctl_ops = { + .list_voltage = regulator_list_voltage_linear, +}; + +static struct regulator_ops max8907_bbat_ops = { + .list_voltage = regulator_list_voltage_linear, + .set_voltage_sel = 
regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +static struct regulator_desc max8907_regulators[] = { + REG_MBATT(), + REG_LDO(SD1, "in-v1", MAX8907_REG_SDCTL1, 650000, 2225000, 25000), + REG_LDO(SD2, "in-v2", MAX8907_REG_SDCTL2, 637500, 1425000, 12500), + REG_LDO(SD3, "in-v3", MAX8907_REG_SDCTL3, 750000, 3900000, 50000), + LDO_750_50(LDO1, "in1", MAX8907_REG_LDOCTL1), + LDO_650_25(LDO2, "in2", MAX8907_REG_LDOCTL2), + LDO_650_25(LDO3, "in3", MAX8907_REG_LDOCTL3), + LDO_750_50(LDO4, "in4", MAX8907_REG_LDOCTL4), + LDO_750_50(LDO5, "in5", MAX8907_REG_LDOCTL5), + LDO_750_50(LDO6, "in6", MAX8907_REG_LDOCTL6), + LDO_750_50(LDO7, "in7", MAX8907_REG_LDOCTL7), + LDO_750_50(LDO8, "in8", MAX8907_REG_LDOCTL8), + LDO_750_50(LDO9, "in9", MAX8907_REG_LDOCTL9), + LDO_750_50(LDO10, "in10", MAX8907_REG_LDOCTL10), + LDO_750_50(LDO11, "in11", MAX8907_REG_LDOCTL11), + LDO_750_50(LDO12, "in12", MAX8907_REG_LDOCTL12), + LDO_750_50(LDO13, "in13", MAX8907_REG_LDOCTL13), + LDO_750_50(LDO14, "in14", MAX8907_REG_LDOCTL14), + LDO_750_50(LDO15, "in15", MAX8907_REG_LDOCTL15), + LDO_750_50(LDO16, "in16", MAX8907_REG_LDOCTL16), + LDO_650_25(LDO17, "in17", MAX8907_REG_LDOCTL17), + LDO_650_25(LDO18, "in18", MAX8907_REG_LDOCTL18), + LDO_750_50(LDO19, "in19", MAX8907_REG_LDOCTL19), + LDO_750_50(LDO20, "in20", MAX8907_REG_LDOCTL20), + REG_OUT5V(OUT5V, "mbatt", MAX8907_REG_OUT5VEN, 5000000), + REG_OUT5V(OUT33V, "mbatt", MAX8907_REG_OUT33VEN, 3300000), + REG_BBAT(BBAT, "MBATT", MAX8907_REG_BBAT_CNFG, + 2400000, 3000000, 200000), + REG_FIXED(SDBY, "MBATT", 1200000), + REG_FIXED(VRTC, "MBATT", 3300000), +}; + +#ifdef CONFIG_OF + +#define MATCH(_name, _id) \ + [MAX8907_##_id] = { \ + .name = #_name, \ + .driver_data = (void *)&max8907_regulators[MAX8907_##_id], \ + } + +static struct of_regulator_match max8907_matches[] = { + MATCH(mbatt, MBATT), + MATCH(sd1, SD1), + MATCH(sd2, SD2), + MATCH(sd3, SD3), + MATCH(ldo1, LDO1), + MATCH(ldo2, LDO2), + MATCH(ldo3, LDO3), + MATCH(ldo4, LDO4), + MATCH(ldo5, LDO5), + MATCH(ldo6, LDO6), + MATCH(ldo7, LDO7), + MATCH(ldo8, LDO8), + MATCH(ldo9, LDO9), + MATCH(ldo10, LDO10), + MATCH(ldo11, LDO11), + MATCH(ldo12, LDO12), + MATCH(ldo13, LDO13), + MATCH(ldo14, LDO14), + MATCH(ldo15, LDO15), + MATCH(ldo16, LDO16), + MATCH(ldo17, LDO17), + MATCH(ldo18, LDO18), + MATCH(ldo19, LDO19), + MATCH(ldo20, LDO20), + MATCH(out5v, OUT5V), + MATCH(out33v, OUT33V), + MATCH(bbat, BBAT), + MATCH(sdby, SDBY), + MATCH(vrtc, VRTC), +}; + +static int max8907_regulator_parse_dt(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.parent->of_node; + struct device_node *regulators; + int ret; + + if (!pdev->dev.parent->of_node) + return 0; + + regulators = of_find_node_by_name(np, "regulators"); + if (!regulators) { + dev_err(&pdev->dev, "regulators node not found\n"); + return -EINVAL; + } + + ret = of_regulator_match(pdev->dev.parent, regulators, + max8907_matches, + ARRAY_SIZE(max8907_matches)); + if (ret < 0) { + dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", + ret); + return ret; + } + + return 0; +} + +static inline struct regulator_init_data *match_init_data(int index) +{ + return max8907_matches[index].init_data; +} + +static inline struct device_node *match_of_node(int index) +{ + return max8907_matches[index].of_node; +} +#else +static int max8907_regulator_parse_dt(struct platform_device *pdev) +{ + return 0; +} + +static inline struct regulator_init_data *match_init_data(int index) +{ + return NULL; +} + +static inline struct 
device_node *match_of_node(int index) +{ + return NULL; +} +#endif + +static __devinit int max8907_regulator_probe(struct platform_device *pdev) +{ + struct max8907 *max8907 = dev_get_drvdata(pdev->dev.parent); + struct max8907_platform_data *pdata = dev_get_platdata(max8907->dev); + int ret; + struct max8907_regulator *pmic; + unsigned int val; + int i; + struct regulator_config config = {}; + struct regulator_init_data *idata; + const char *mbatt_rail_name = NULL; + + ret = max8907_regulator_parse_dt(pdev); + if (ret) + return ret; + + pmic = devm_kzalloc(&pdev->dev, sizeof(*pmic), GFP_KERNEL); + if (!pmic) { + dev_err(&pdev->dev, "Failed to alloc pmic\n"); + return -ENOMEM; + } + platform_set_drvdata(pdev, pmic); + + memcpy(pmic->desc, max8907_regulators, sizeof(pmic->desc)); + + /* Backwards compatibility with MAX8907B; SD1 uses different voltages */ + regmap_read(max8907->regmap_gen, MAX8907_REG_II2RR, &val); + if ((val & MAX8907_II2RR_VERSION_MASK) == + MAX8907_II2RR_VERSION_REV_B) { + pmic->desc[MAX8907_SD1].min_uV = 637500; + pmic->desc[MAX8907_SD1].uV_step = 12500; + pmic->desc[MAX8907_SD1].n_voltages = + (1425000 - 637500) / 12500 + 1; + } + + for (i = 0; i < MAX8907_NUM_REGULATORS; i++) { + config.dev = pdev->dev.parent; + if (pdata) + idata = pdata->init_data[i]; + else + idata = match_init_data(i); + config.init_data = idata; + config.driver_data = pmic; + config.regmap = max8907->regmap_gen; + config.of_node = match_of_node(i); + + switch (pmic->desc[i].id) { + case MAX8907_MBATT: + if (idata && idata->constraints.name) + mbatt_rail_name = idata->constraints.name; + else + mbatt_rail_name = pmic->desc[i].name; + break; + case MAX8907_BBAT: + case MAX8907_SDBY: + case MAX8907_VRTC: + idata->supply_regulator = mbatt_rail_name; + break; + } + + if (pmic->desc[i].ops == &max8907_ldo_ops) { + regmap_read(config.regmap, pmic->desc[i].enable_reg, + &val); + if ((val & MAX8907_MASK_LDO_SEQ) != + MAX8907_MASK_LDO_SEQ) + pmic->desc[i].ops = &max8907_ldo_hwctl_ops; + } else if (pmic->desc[i].ops == &max8907_out5v_ops) { + regmap_read(config.regmap, pmic->desc[i].enable_reg, + &val); + if ((val & (MAX8907_MASK_OUT5V_VINEN | + MAX8907_MASK_OUT5V_ENSRC)) != + MAX8907_MASK_OUT5V_ENSRC) + pmic->desc[i].ops = &max8907_out5v_hwctl_ops; + } + + pmic->rdev[i] = regulator_register(&pmic->desc[i], &config); + if (IS_ERR(pmic->rdev[i])) { + dev_err(&pdev->dev, + "failed to register %s regulator\n", + pmic->desc[i].name); + ret = PTR_ERR(pmic->rdev[i]); + goto err_unregister_regulator; + } + } + + return 0; + +err_unregister_regulator: + while (--i >= 0) + regulator_unregister(pmic->rdev[i]); + return ret; +} + +static __devexit int max8907_regulator_remove(struct platform_device *pdev) +{ + struct max8907_regulator *pmic = platform_get_drvdata(pdev); + int i; + + for (i = 0; i < MAX8907_NUM_REGULATORS; i++) + regulator_unregister(pmic->rdev[i]); + + return 0; +} + +static struct platform_driver max8907_regulator_driver = { + .driver = { + .name = "max8907-regulator", + .owner = THIS_MODULE, + }, + .probe = max8907_regulator_probe, + .remove = __devexit_p(max8907_regulator_remove), +}; + +static int __init max8907_regulator_init(void) +{ + return platform_driver_register(&max8907_regulator_driver); +} + +subsys_initcall(max8907_regulator_init); + +static void __exit max8907_reg_exit(void) +{ + platform_driver_unregister(&max8907_regulator_driver); +} + +module_exit(max8907_reg_exit); + +MODULE_DESCRIPTION("MAX8907 regulator driver"); +MODULE_AUTHOR("Gyungoh Yoo "); +MODULE_LICENSE("GPL v2"); 
+MODULE_ALIAS("platform:max8907-regulator"); diff --git a/trunk/drivers/regulator/mc13783-regulator.c b/trunk/drivers/regulator/mc13783-regulator.c index 4932e3449fe1..0801a6d0c122 100644 --- a/trunk/drivers/regulator/mc13783-regulator.c +++ b/trunk/drivers/regulator/mc13783-regulator.c @@ -21,6 +21,30 @@ #include #include "mc13xxx.h" +#define MC13783_REG_SWITCHERS0 24 +/* Enable does not exist for SW1A */ +#define MC13783_REG_SWITCHERS0_SW1AEN 0 +#define MC13783_REG_SWITCHERS0_SW1AVSEL 0 +#define MC13783_REG_SWITCHERS0_SW1AVSEL_M (63 << 0) + +#define MC13783_REG_SWITCHERS1 25 +/* Enable does not exist for SW1B */ +#define MC13783_REG_SWITCHERS1_SW1BEN 0 +#define MC13783_REG_SWITCHERS1_SW1BVSEL 0 +#define MC13783_REG_SWITCHERS1_SW1BVSEL_M (63 << 0) + +#define MC13783_REG_SWITCHERS2 26 +/* Enable does not exist for SW2A */ +#define MC13783_REG_SWITCHERS2_SW2AEN 0 +#define MC13783_REG_SWITCHERS2_SW2AVSEL 0 +#define MC13783_REG_SWITCHERS2_SW2AVSEL_M (63 << 0) + +#define MC13783_REG_SWITCHERS3 27 +/* Enable does not exist for SW2B */ +#define MC13783_REG_SWITCHERS3_SW2BEN 0 +#define MC13783_REG_SWITCHERS3_SW2BVSEL 0 +#define MC13783_REG_SWITCHERS3_SW2BVSEL_M (63 << 0) + #define MC13783_REG_SWITCHERS5 29 #define MC13783_REG_SWITCHERS5_SW3EN (1 << 20) #define MC13783_REG_SWITCHERS5_SW3VSEL 18 @@ -93,6 +117,44 @@ /* Voltage Values */ +static const int mc13783_sw1x_val[] = { + 900000, 925000, 950000, 975000, + 1000000, 1025000, 1050000, 1075000, + 1100000, 1125000, 1150000, 1175000, + 1200000, 1225000, 1250000, 1275000, + 1300000, 1325000, 1350000, 1375000, + 1400000, 1425000, 1450000, 1475000, + 1500000, 1525000, 1550000, 1575000, + 1600000, 1625000, 1650000, 1675000, + 1700000, 1700000, 1700000, 1700000, + 1800000, 1800000, 1800000, 1800000, + 1850000, 1850000, 1850000, 1850000, + 2000000, 2000000, 2000000, 2000000, + 2100000, 2100000, 2100000, 2100000, + 2200000, 2200000, 2200000, 2200000, + 2200000, 2200000, 2200000, 2200000, + 2200000, 2200000, 2200000, 2200000, +}; + +static const int mc13783_sw2x_val[] = { + 900000, 925000, 950000, 975000, + 1000000, 1025000, 1050000, 1075000, + 1100000, 1125000, 1150000, 1175000, + 1200000, 1225000, 1250000, 1275000, + 1300000, 1325000, 1350000, 1375000, + 1400000, 1425000, 1450000, 1475000, + 1500000, 1525000, 1550000, 1575000, + 1600000, 1625000, 1650000, 1675000, + 1700000, 1700000, 1700000, 1700000, + 1800000, 1800000, 1800000, 1800000, + 1900000, 1900000, 1900000, 1900000, + 2000000, 2000000, 2000000, 2000000, + 2100000, 2100000, 2100000, 2100000, + 2200000, 2200000, 2200000, 2200000, + 2200000, 2200000, 2200000, 2200000, + 2200000, 2200000, 2200000, 2200000, +}; + static const unsigned int mc13783_sw3_val[] = { 5000000, 5000000, 5000000, 5500000, }; @@ -188,6 +250,10 @@ static struct regulator_ops mc13783_gpo_regulator_ops; MC13783_DEFINE(REG, _name, _reg, _vsel_reg, _voltages) static struct mc13xxx_regulator mc13783_regulators[] = { + MC13783_DEFINE_SW(SW1A, SWITCHERS0, SWITCHERS0, mc13783_sw1x_val), + MC13783_DEFINE_SW(SW1B, SWITCHERS1, SWITCHERS1, mc13783_sw1x_val), + MC13783_DEFINE_SW(SW2A, SWITCHERS2, SWITCHERS2, mc13783_sw2x_val), + MC13783_DEFINE_SW(SW2B, SWITCHERS3, SWITCHERS3, mc13783_sw2x_val), MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val), MC13783_FIXED_DEFINE(REG, VAUDIO, REGULATORMODE0, mc13783_vaudio_val), @@ -238,9 +304,10 @@ static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, BUG_ON(val & ~mask); + mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(mc13783, MC13783_REG_POWERMISC, 
&valread); if (ret) - return ret; + goto out; /* Update the stored state for Power Gates. */ priv->powermisc_pwgt_state = @@ -253,7 +320,10 @@ static int mc13783_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) | priv->powermisc_pwgt_state; - return mc13xxx_reg_write(mc13783, MC13783_REG_POWERMISC, valread); + ret = mc13xxx_reg_write(mc13783, MC13783_REG_POWERMISC, valread); +out: + mc13xxx_unlock(priv->mc13xxx); + return ret; } static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev) @@ -261,7 +331,6 @@ static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev) struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; int id = rdev_get_id(rdev); - int ret; u32 en_val = mc13xxx_regulators[id].enable_bit; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); @@ -271,12 +340,8 @@ static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev) id == MC13783_REG_PWGT2SPI) en_val = 0; - mc13xxx_lock(priv->mc13xxx); - ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit, + return mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit, en_val); - mc13xxx_unlock(priv->mc13xxx); - - return ret; } static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev) @@ -284,7 +349,6 @@ static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev) struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); struct mc13xxx_regulator *mc13xxx_regulators = priv->mc13xxx_regulators; int id = rdev_get_id(rdev); - int ret; u32 dis_val = 0; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); @@ -294,12 +358,8 @@ static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev) id == MC13783_REG_PWGT2SPI) dis_val = mc13xxx_regulators[id].enable_bit; - mc13xxx_lock(priv->mc13xxx); - ret = mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit, + return mc13783_powermisc_rmw(priv, mc13xxx_regulators[id].enable_bit, dis_val); - mc13xxx_unlock(priv->mc13xxx); - - return ret; } static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev) @@ -330,7 +390,6 @@ static struct regulator_ops mc13783_gpo_regulator_ops = { .is_enabled = mc13783_gpo_regulator_is_enabled, .list_voltage = regulator_list_voltage_table, .set_voltage = mc13xxx_fixed_regulator_set_voltage, - .get_voltage = mc13xxx_fixed_regulator_get_voltage, }; static int __devinit mc13783_regulator_probe(struct platform_device *pdev) diff --git a/trunk/drivers/regulator/mc13892-regulator.c b/trunk/drivers/regulator/mc13892-regulator.c index b388b746452e..1fa63812f7ac 100644 --- a/trunk/drivers/regulator/mc13892-regulator.c +++ b/trunk/drivers/regulator/mc13892-regulator.c @@ -305,9 +305,10 @@ static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, BUG_ON(val & ~mask); + mc13xxx_lock(priv->mc13xxx); ret = mc13xxx_reg_read(mc13892, MC13892_POWERMISC, &valread); if (ret) - return ret; + goto out; /* Update the stored state for Power Gates. 
*/ priv->powermisc_pwgt_state = @@ -320,14 +321,16 @@ static int mc13892_powermisc_rmw(struct mc13xxx_regulator_priv *priv, u32 mask, valread = (valread & ~MC13892_POWERMISC_PWGTSPI_M) | priv->powermisc_pwgt_state; - return mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread); + ret = mc13xxx_reg_write(mc13892, MC13892_POWERMISC, valread); +out: + mc13xxx_unlock(priv->mc13xxx); + return ret; } static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); - int ret; u32 en_val = mc13892_regulators[id].enable_bit; u32 mask = mc13892_regulators[id].enable_bit; @@ -340,18 +343,13 @@ static int mc13892_gpo_regulator_enable(struct regulator_dev *rdev) if (id == MC13892_GPO4) mask |= MC13892_POWERMISC_GPO4ADINEN; - mc13xxx_lock(priv->mc13xxx); - ret = mc13892_powermisc_rmw(priv, mask, en_val); - mc13xxx_unlock(priv->mc13xxx); - - return ret; + return mc13892_powermisc_rmw(priv, mask, en_val); } static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int id = rdev_get_id(rdev); - int ret; u32 dis_val = 0; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); @@ -360,12 +358,8 @@ static int mc13892_gpo_regulator_disable(struct regulator_dev *rdev) if (id == MC13892_PWGT1SPI || id == MC13892_PWGT2SPI) dis_val = mc13892_regulators[id].enable_bit; - mc13xxx_lock(priv->mc13xxx); - ret = mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit, + return mc13892_powermisc_rmw(priv, mc13892_regulators[id].enable_bit, dis_val); - mc13xxx_unlock(priv->mc13xxx); - - return ret; } static int mc13892_gpo_regulator_is_enabled(struct regulator_dev *rdev) @@ -396,14 +390,13 @@ static struct regulator_ops mc13892_gpo_regulator_ops = { .is_enabled = mc13892_gpo_regulator_is_enabled, .list_voltage = regulator_list_voltage_table, .set_voltage = mc13xxx_fixed_regulator_set_voltage, - .get_voltage = mc13xxx_fixed_regulator_get_voltage, }; -static int mc13892_sw_regulator_get_voltage(struct regulator_dev *rdev) +static int mc13892_sw_regulator_get_voltage_sel(struct regulator_dev *rdev) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); int ret, id = rdev_get_id(rdev); - unsigned int val, hi; + unsigned int val; dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); @@ -414,17 +407,11 @@ static int mc13892_sw_regulator_get_voltage(struct regulator_dev *rdev) if (ret) return ret; - hi = val & MC13892_SWITCHERS0_SWxHI; val = (val & mc13892_regulators[id].vsel_mask) >> mc13892_regulators[id].vsel_shift; dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); - if (hi) - val = (25000 * val) + 1100000; - else - val = (25000 * val) + 600000; - return val; } @@ -432,37 +419,25 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); - int hi, value, mask, id = rdev_get_id(rdev); - u32 valread; + int volt, mask, id = rdev_get_id(rdev); + u32 reg_value; int ret; - value = rdev->desc->volt_table[selector]; + volt = rdev->desc->volt_table[selector]; + mask = mc13892_regulators[id].vsel_mask; + reg_value = selector << mc13892_regulators[id].vsel_shift; + + if (volt > 1375000) { + mask |= MC13892_SWITCHERS0_SWxHI; + reg_value |= MC13892_SWITCHERS0_SWxHI; + } else if (volt < 1100000) { + mask |= MC13892_SWITCHERS0_SWxHI; + reg_value &= ~MC13892_SWITCHERS0_SWxHI; + } mc13xxx_lock(priv->mc13xxx); - ret = 
mc13xxx_reg_read(priv->mc13xxx, - mc13892_regulators[id].vsel_reg, &valread); - if (ret) - goto err; - - if (value > 1375000) - hi = 1; - else if (value < 1100000) - hi = 0; - else - hi = valread & MC13892_SWITCHERS0_SWxHI; - - if (hi) { - value = (value - 1100000) / 25000; - value |= MC13892_SWITCHERS0_SWxHI; - } else - value = (value - 600000) / 25000; - - mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI; - valread = (valread & ~mask) | - (value << mc13892_regulators[id].vsel_shift); - ret = mc13xxx_reg_write(priv->mc13xxx, mc13892_regulators[id].vsel_reg, - valread); -err: + ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].reg, mask, + reg_value); mc13xxx_unlock(priv->mc13xxx); return ret; @@ -471,7 +446,7 @@ static int mc13892_sw_regulator_set_voltage_sel(struct regulator_dev *rdev, static struct regulator_ops mc13892_sw_regulator_ops = { .list_voltage = regulator_list_voltage_table, .set_voltage_sel = mc13892_sw_regulator_set_voltage_sel, - .get_voltage = mc13892_sw_regulator_get_voltage, + .get_voltage_sel = mc13892_sw_regulator_get_voltage_sel, }; static int mc13892_vcam_set_mode(struct regulator_dev *rdev, unsigned int mode) diff --git a/trunk/drivers/regulator/mc13xxx-regulator-core.c b/trunk/drivers/regulator/mc13xxx-regulator-core.c index d6eda28ca5d0..88cbb832d555 100644 --- a/trunk/drivers/regulator/mc13xxx-regulator-core.c +++ b/trunk/drivers/regulator/mc13xxx-regulator-core.c @@ -143,30 +143,21 @@ int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, __func__, id, min_uV, max_uV); if (min_uV <= rdev->desc->volt_table[0] && - rdev->desc->volt_table[0] <= max_uV) + rdev->desc->volt_table[0] <= max_uV) { + *selector = 0; return 0; - else + } else { return -EINVAL; + } } EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_set_voltage); -int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev) -{ - int id = rdev_get_id(rdev); - - dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id); - - return rdev->desc->volt_table[0]; -} -EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_get_voltage); - struct regulator_ops mc13xxx_fixed_regulator_ops = { .enable = mc13xxx_regulator_enable, .disable = mc13xxx_regulator_disable, .is_enabled = mc13xxx_regulator_is_enabled, .list_voltage = regulator_list_voltage_table, .set_voltage = mc13xxx_fixed_regulator_set_voltage, - .get_voltage = mc13xxx_fixed_regulator_get_voltage, }; EXPORT_SYMBOL_GPL(mc13xxx_fixed_regulator_ops); diff --git a/trunk/drivers/regulator/mc13xxx.h b/trunk/drivers/regulator/mc13xxx.h index eaff5510b6df..06c8903f182a 100644 --- a/trunk/drivers/regulator/mc13xxx.h +++ b/trunk/drivers/regulator/mc13xxx.h @@ -34,7 +34,6 @@ struct mc13xxx_regulator_priv { extern int mc13xxx_fixed_regulator_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector); -extern int mc13xxx_fixed_regulator_get_voltage(struct regulator_dev *rdev); #ifdef CONFIG_OF extern int mc13xxx_get_num_regulators_dt(struct platform_device *pdev); diff --git a/trunk/drivers/regulator/of_regulator.c b/trunk/drivers/regulator/of_regulator.c index 3e4106f2bda9..6f684916fd79 100644 --- a/trunk/drivers/regulator/of_regulator.c +++ b/trunk/drivers/regulator/of_regulator.c @@ -92,16 +92,18 @@ struct regulator_init_data *of_get_regulator_init_data(struct device *dev, EXPORT_SYMBOL_GPL(of_get_regulator_init_data); /** - * of_regulator_match - extract regulator init data when node - * property "regulator-compatible" matches with the regulator name. 
+ * of_regulator_match - extract multiple regulator init data from device tree. * @dev: device requesting the data * @node: parent device node of the regulators * @matches: match table for the regulators * @num_matches: number of entries in match table * - * This function uses a match table specified by the regulator driver and - * looks up the corresponding init data in the device tree if - * regulator-compatible matches. Note that the match table is modified + * This function uses a match table specified by the regulator driver to + * parse regulator init data from the device tree. @node is expected to + * contain a set of child nodes, each providing the init data for one + * regulator. The data parsed from a child node will be matched to a regulator + * based on either the deprecated property regulator-compatible if present, + * or otherwise the child node's name. Note that the match table is modified * in place. * * Returns the number of matches found or a negative error code on failure. @@ -112,26 +114,23 @@ int of_regulator_match(struct device *dev, struct device_node *node, { unsigned int count = 0; unsigned int i; - const char *regulator_comp; + const char *name; struct device_node *child; if (!dev || !node) return -EINVAL; for_each_child_of_node(node, child) { - regulator_comp = of_get_property(child, + name = of_get_property(child, "regulator-compatible", NULL); - if (!regulator_comp) { - dev_err(dev, "regulator-compatible is missing for node %s\n", - child->name); - continue; - } + if (!name) + name = child->name; for (i = 0; i < num_matches; i++) { struct of_regulator_match *match = &matches[i]; if (match->of_node) continue; - if (strcmp(match->name, regulator_comp)) + if (strcmp(match->name, name)) continue; match->init_data = diff --git a/trunk/drivers/regulator/palmas-regulator.c b/trunk/drivers/regulator/palmas-regulator.c index 17d19fbbc490..2ba7502fa3b2 100644 --- a/trunk/drivers/regulator/palmas-regulator.c +++ b/trunk/drivers/regulator/palmas-regulator.c @@ -443,52 +443,17 @@ static int palmas_list_voltage_ldo(struct regulator_dev *dev, return 850000 + (selector * 50000); } -static int palmas_get_voltage_ldo_sel(struct regulator_dev *dev) -{ - struct palmas_pmic *pmic = rdev_get_drvdata(dev); - int id = rdev_get_id(dev); - int selector; - unsigned int reg; - unsigned int addr; - - addr = palmas_regs_info[id].vsel_addr; - - palmas_ldo_read(pmic->palmas, addr, ®); - - selector = reg & PALMAS_LDO1_VOLTAGE_VSEL_MASK; - - /* Adjust selector to match list_voltage ranges */ - if (selector > 49) - selector = 49; - - return selector; -} - -static int palmas_set_voltage_ldo_sel(struct regulator_dev *dev, - unsigned selector) -{ - struct palmas_pmic *pmic = rdev_get_drvdata(dev); - int id = rdev_get_id(dev); - unsigned int reg = 0; - unsigned int addr; - - addr = palmas_regs_info[id].vsel_addr; - - reg = selector; - - palmas_ldo_write(pmic->palmas, addr, reg); - - return 0; -} - static int palmas_map_voltage_ldo(struct regulator_dev *rdev, int min_uV, int max_uV) { int ret, voltage; - ret = ((min_uV - 900000) / 50000) + 1; - if (ret < 0) - return ret; + if (min_uV == 0) + return 0; + + if (min_uV < 900000) + min_uV = 900000; + ret = DIV_ROUND_UP(min_uV - 900000, 50000) + 1; /* Map back into a voltage to verify we're still in bounds */ voltage = palmas_list_voltage_ldo(rdev, ret); @@ -502,8 +467,8 @@ static struct regulator_ops palmas_ops_ldo = { .is_enabled = palmas_is_enabled_ldo, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, - .get_voltage_sel = 
palmas_get_voltage_ldo_sel, - .set_voltage_sel = palmas_set_voltage_ldo_sel, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, .list_voltage = palmas_list_voltage_ldo, .map_voltage = palmas_map_voltage_ldo, }; @@ -586,7 +551,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id, addr = palmas_regs_info[id].ctrl_addr; - ret = palmas_smps_read(palmas, addr, ®); + ret = palmas_ldo_read(palmas, addr, ®); if (ret) return ret; @@ -596,7 +561,7 @@ static int palmas_ldo_init(struct palmas *palmas, int id, if (reg_init->mode_sleep) reg |= PALMAS_LDO1_CTRL_MODE_SLEEP; - ret = palmas_smps_write(palmas, addr, reg); + ret = palmas_ldo_write(palmas, addr, reg); if (ret) return ret; @@ -630,7 +595,7 @@ static __devinit int palmas_probe(struct platform_device *pdev) ret = palmas_smps_read(palmas, PALMAS_SMPS_CTRL, ®); if (ret) - goto err_unregister_regulator; + return ret; if (reg & PALMAS_SMPS_CTRL_SMPS12_SMPS123_EN) pmic->smps123 = 1; @@ -676,7 +641,9 @@ static __devinit int palmas_probe(struct platform_device *pdev) case PALMAS_REG_SMPS10: pmic->desc[id].n_voltages = PALMAS_SMPS10_NUM_VOLTAGES; pmic->desc[id].ops = &palmas_ops_smps10; - pmic->desc[id].vsel_reg = PALMAS_SMPS10_CTRL; + pmic->desc[id].vsel_reg = + PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, + PALMAS_SMPS10_CTRL); pmic->desc[id].vsel_mask = SMPS10_VSEL; pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, @@ -752,6 +719,9 @@ static __devinit int palmas_probe(struct platform_device *pdev) pmic->desc[id].type = REGULATOR_VOLTAGE; pmic->desc[id].owner = THIS_MODULE; + pmic->desc[id].vsel_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, + palmas_regs_info[id].vsel_addr); + pmic->desc[id].vsel_mask = PALMAS_LDO1_VOLTAGE_VSEL_MASK; pmic->desc[id].enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, palmas_regs_info[id].ctrl_addr); pmic->desc[id].enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE; @@ -778,8 +748,10 @@ static __devinit int palmas_probe(struct platform_device *pdev) reg_init = pdata->reg_init[id]; if (reg_init) { ret = palmas_ldo_init(palmas, id, reg_init); - if (ret) + if (ret) { + regulator_unregister(pmic->rdev[id]); goto err_unregister_regulator; + } } } } diff --git a/trunk/drivers/regulator/s2mps11.c b/trunk/drivers/regulator/s2mps11.c index 4669dc9ac74a..926f9c8f2fac 100644 --- a/trunk/drivers/regulator/s2mps11.c +++ b/trunk/drivers/regulator/s2mps11.c @@ -24,7 +24,7 @@ #include struct s2mps11_info { - struct regulator_dev **rdev; + struct regulator_dev *rdev[S2MPS11_REGULATOR_MAX]; int ramp_delay2; int ramp_delay34; @@ -236,9 +236,8 @@ static __devinit int s2mps11_pmic_probe(struct platform_device *pdev) struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); struct sec_platform_data *pdata = dev_get_platdata(iodev->dev); struct regulator_config config = { }; - struct regulator_dev **rdev; struct s2mps11_info *s2mps11; - int i, ret, size; + int i, ret; unsigned char ramp_enable, ramp_reg = 0; if (!pdata) { @@ -251,13 +250,6 @@ static __devinit int s2mps11_pmic_probe(struct platform_device *pdev) if (!s2mps11) return -ENOMEM; - size = sizeof(struct regulator_dev *) * S2MPS11_REGULATOR_MAX; - s2mps11->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); - if (!s2mps11->rdev) { - return -ENOMEM; - } - - rdev = s2mps11->rdev; platform_set_drvdata(pdev, s2mps11); s2mps11->ramp_delay2 = pdata->buck2_ramp_delay; @@ -297,12 +289,12 @@ static __devinit int s2mps11_pmic_probe(struct platform_device *pdev) config.init_data = pdata->regulators[i].initdata; config.driver_data = 
s2mps11; - rdev[i] = regulator_register(®ulators[i], &config); - if (IS_ERR(rdev[i])) { - ret = PTR_ERR(rdev[i]); + s2mps11->rdev[i] = regulator_register(®ulators[i], &config); + if (IS_ERR(s2mps11->rdev[i])) { + ret = PTR_ERR(s2mps11->rdev[i]); dev_err(&pdev->dev, "regulator init failed for %d\n", i); - rdev[i] = NULL; + s2mps11->rdev[i] = NULL; goto err; } } @@ -310,8 +302,7 @@ static __devinit int s2mps11_pmic_probe(struct platform_device *pdev) return 0; err: for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) - if (rdev[i]) - regulator_unregister(rdev[i]); + regulator_unregister(s2mps11->rdev[i]); return ret; } @@ -319,12 +310,10 @@ static __devinit int s2mps11_pmic_probe(struct platform_device *pdev) static int __devexit s2mps11_pmic_remove(struct platform_device *pdev) { struct s2mps11_info *s2mps11 = platform_get_drvdata(pdev); - struct regulator_dev **rdev = s2mps11->rdev; int i; for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) - if (rdev[i]) - regulator_unregister(rdev[i]); + regulator_unregister(s2mps11->rdev[i]); return 0; } diff --git a/trunk/drivers/regulator/tps65217-regulator.c b/trunk/drivers/regulator/tps65217-regulator.c index 6caa222af77a..ab00cab905b7 100644 --- a/trunk/drivers/regulator/tps65217-regulator.c +++ b/trunk/drivers/regulator/tps65217-regulator.c @@ -22,6 +22,7 @@ #include #include +#include #include #include #include @@ -281,37 +282,130 @@ static const struct regulator_desc regulators[] = { NULL), }; +#ifdef CONFIG_OF +static struct of_regulator_match reg_matches[] = { + { .name = "dcdc1", .driver_data = (void *)TPS65217_DCDC_1 }, + { .name = "dcdc2", .driver_data = (void *)TPS65217_DCDC_2 }, + { .name = "dcdc3", .driver_data = (void *)TPS65217_DCDC_3 }, + { .name = "ldo1", .driver_data = (void *)TPS65217_LDO_1 }, + { .name = "ldo2", .driver_data = (void *)TPS65217_LDO_2 }, + { .name = "ldo3", .driver_data = (void *)TPS65217_LDO_3 }, + { .name = "ldo4", .driver_data = (void *)TPS65217_LDO_4 }, +}; + +static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev) +{ + struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); + struct device_node *node = tps->dev->of_node; + struct tps65217_board *pdata; + struct device_node *regs; + int i, count; + + regs = of_find_node_by_name(node, "regulators"); + if (!regs) + return NULL; + + count = of_regulator_match(pdev->dev.parent, regs, + reg_matches, TPS65217_NUM_REGULATOR); + of_node_put(regs); + if ((count < 0) || (count > TPS65217_NUM_REGULATOR)) + return NULL; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + for (i = 0; i < count; i++) { + if (!reg_matches[i].init_data || !reg_matches[i].of_node) + continue; + + pdata->tps65217_init_data[i] = reg_matches[i].init_data; + pdata->of_node[i] = reg_matches[i].of_node; + } + + return pdata; +} +#else +static struct tps65217_board *tps65217_parse_dt(struct platform_device *pdev) +{ + return NULL; +} +#endif + static int __devinit tps65217_regulator_probe(struct platform_device *pdev) { + struct tps65217 *tps = dev_get_drvdata(pdev->dev.parent); + struct tps65217_board *pdata = dev_get_platdata(tps->dev); + struct regulator_init_data *reg_data; struct regulator_dev *rdev; - struct tps65217 *tps; - struct tps_info *info = &tps65217_pmic_regs[pdev->id]; struct regulator_config config = { }; + int i, ret; - /* Already set by core driver */ - tps = dev_to_tps65217(pdev->dev.parent); - tps->info[pdev->id] = info; + if (tps->dev->of_node) + pdata = tps65217_parse_dt(pdev); - config.dev = &pdev->dev; - config.of_node = 
pdev->dev.of_node; - config.init_data = pdev->dev.platform_data; - config.driver_data = tps; + if (!pdata) { + dev_err(&pdev->dev, "Platform data not found\n"); + return -EINVAL; + } - rdev = regulator_register(®ulators[pdev->id], &config); - if (IS_ERR(rdev)) - return PTR_ERR(rdev); + if (tps65217_chip_id(tps) != TPS65217) { + dev_err(&pdev->dev, "Invalid tps chip version\n"); + return -ENODEV; + } - platform_set_drvdata(pdev, rdev); + platform_set_drvdata(pdev, tps); + for (i = 0; i < TPS65217_NUM_REGULATOR; i++) { + + reg_data = pdata->tps65217_init_data[i]; + + /* + * Regulator API handles empty constraints but not NULL + * constraints + */ + if (!reg_data) + continue; + + /* Register the regulators */ + tps->info[i] = &tps65217_pmic_regs[i]; + + config.dev = tps->dev; + config.init_data = reg_data; + config.driver_data = tps; + config.regmap = tps->regmap; + if (tps->dev->of_node) + config.of_node = pdata->of_node[i]; + + rdev = regulator_register(®ulators[i], &config); + if (IS_ERR(rdev)) { + dev_err(tps->dev, "failed to register %s regulator\n", + pdev->name); + ret = PTR_ERR(rdev); + goto err_unregister_regulator; + } + + /* Save regulator for cleanup */ + tps->rdev[i] = rdev; + } return 0; + +err_unregister_regulator: + while (--i >= 0) + regulator_unregister(tps->rdev[i]); + + return ret; } static int __devexit tps65217_regulator_remove(struct platform_device *pdev) { - struct regulator_dev *rdev = platform_get_drvdata(pdev); + struct tps65217 *tps = platform_get_drvdata(pdev); + unsigned int i; + + for (i = 0; i < TPS65217_NUM_REGULATOR; i++) + regulator_unregister(tps->rdev[i]); platform_set_drvdata(pdev, NULL); - regulator_unregister(rdev); return 0; } diff --git a/trunk/drivers/regulator/tps6524x-regulator.c b/trunk/drivers/regulator/tps6524x-regulator.c index 947ece933d90..058d2f2675e9 100644 --- a/trunk/drivers/regulator/tps6524x-regulator.c +++ b/trunk/drivers/regulator/tps6524x-regulator.c @@ -502,15 +502,13 @@ static int set_current_limit(struct regulator_dev *rdev, int min_uA, if (info->n_ilimsels == 1) return -EINVAL; - for (i = 0; i < info->n_ilimsels; i++) + for (i = info->n_ilimsels - 1; i >= 0; i--) { if (min_uA <= info->ilimsels[i] && max_uA >= info->ilimsels[i]) - break; - - if (i >= info->n_ilimsels) - return -EINVAL; + return write_field(hw, &info->ilimsel, i); + } - return write_field(hw, &info->ilimsel, i); + return -EINVAL; } static int get_current_limit(struct regulator_dev *rdev) diff --git a/trunk/drivers/regulator/tps6586x-regulator.c b/trunk/drivers/regulator/tps6586x-regulator.c index e6da90ab5153..ce1e7cb8d513 100644 --- a/trunk/drivers/regulator/tps6586x-regulator.c +++ b/trunk/drivers/regulator/tps6586x-regulator.c @@ -57,9 +57,6 @@ struct tps6586x_regulator { struct regulator_desc desc; - int volt_reg; - int volt_shift; - int volt_nbits; int enable_bit[2]; int enable_reg[2]; @@ -81,10 +78,10 @@ static int tps6586x_set_voltage_sel(struct regulator_dev *rdev, int ret, val, rid = rdev_get_id(rdev); uint8_t mask; - val = selector << ri->volt_shift; - mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; + val = selector << (ffs(rdev->desc->vsel_mask) - 1); + mask = rdev->desc->vsel_mask; - ret = tps6586x_update(parent, ri->volt_reg, val, mask); + ret = tps6586x_update(parent, rdev->desc->vsel_reg, val, mask); if (ret) return ret; @@ -100,66 +97,17 @@ static int tps6586x_set_voltage_sel(struct regulator_dev *rdev, return ret; } -static int tps6586x_get_voltage_sel(struct regulator_dev *rdev) -{ - struct tps6586x_regulator *ri = rdev_get_drvdata(rdev); - 
struct device *parent = to_tps6586x_dev(rdev); - uint8_t val, mask; - int ret; - - ret = tps6586x_read(parent, ri->volt_reg, &val); - if (ret) - return ret; - - mask = ((1 << ri->volt_nbits) - 1) << ri->volt_shift; - val = (val & mask) >> ri->volt_shift; - - if (val >= ri->desc.n_voltages) - BUG(); - - return val; -} - -static int tps6586x_regulator_enable(struct regulator_dev *rdev) -{ - struct tps6586x_regulator *ri = rdev_get_drvdata(rdev); - struct device *parent = to_tps6586x_dev(rdev); - - return tps6586x_set_bits(parent, ri->enable_reg[0], - 1 << ri->enable_bit[0]); -} - -static int tps6586x_regulator_disable(struct regulator_dev *rdev) -{ - struct tps6586x_regulator *ri = rdev_get_drvdata(rdev); - struct device *parent = to_tps6586x_dev(rdev); - - return tps6586x_clr_bits(parent, ri->enable_reg[0], - 1 << ri->enable_bit[0]); -} - -static int tps6586x_regulator_is_enabled(struct regulator_dev *rdev) -{ - struct tps6586x_regulator *ri = rdev_get_drvdata(rdev); - struct device *parent = to_tps6586x_dev(rdev); - uint8_t reg_val; - int ret; - - ret = tps6586x_read(parent, ri->enable_reg[0], ®_val); - if (ret) - return ret; - - return !!(reg_val & (1 << ri->enable_bit[0])); -} - static struct regulator_ops tps6586x_regulator_ops = { .list_voltage = regulator_list_voltage_table, - .get_voltage_sel = tps6586x_get_voltage_sel, + .get_voltage_sel = regulator_get_voltage_sel_regmap, .set_voltage_sel = tps6586x_set_voltage_sel, - .is_enabled = tps6586x_regulator_is_enabled, - .enable = tps6586x_regulator_enable, - .disable = tps6586x_regulator_disable, + .is_enabled = regulator_is_enabled_regmap, + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, +}; + +static struct regulator_ops tps6586x_sys_regulator_ops = { }; static const unsigned int tps6586x_ldo0_voltages[] = { @@ -202,10 +150,11 @@ static const unsigned int tps6586x_dvm_voltages[] = { .n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages), \ .volt_table = tps6586x_##vdata##_voltages, \ .owner = THIS_MODULE, \ + .enable_reg = TPS6586X_SUPPLY##ereg0, \ + .enable_mask = 1 << (ebit0), \ + .vsel_reg = TPS6586X_##vreg, \ + .vsel_mask = ((1 << (nbits)) - 1) << (shift), \ }, \ - .volt_reg = TPS6586X_##vreg, \ - .volt_shift = (shift), \ - .volt_nbits = (nbits), \ .enable_reg[0] = TPS6586X_SUPPLY##ereg0, \ .enable_bit[0] = (ebit0), \ .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \ @@ -230,24 +179,39 @@ static const unsigned int tps6586x_dvm_voltages[] = { TPS6586X_REGULATOR_DVM_GOREG(goreg, gobit) \ } +#define TPS6586X_SYS_REGULATOR() \ +{ \ + .desc = { \ + .supply_name = "sys", \ + .name = "REG-SYS", \ + .ops = &tps6586x_sys_regulator_ops, \ + .type = REGULATOR_VOLTAGE, \ + .id = TPS6586X_ID_SYS, \ + .owner = THIS_MODULE, \ + }, \ +} + static struct tps6586x_regulator tps6586x_regulator[] = { + TPS6586X_SYS_REGULATOR(), TPS6586X_LDO(LDO_0, "vinldo01", ldo0, SUPPLYV1, 5, 3, ENC, 0, END, 0), TPS6586X_LDO(LDO_3, "vinldo23", ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2), - TPS6586X_LDO(LDO_5, NULL, ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6), + TPS6586X_LDO(LDO_5, "REG-SYS", ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6), TPS6586X_LDO(LDO_6, "vinldo678", ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4), TPS6586X_LDO(LDO_7, "vinldo678", ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5), TPS6586X_LDO(LDO_8, "vinldo678", ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6), TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), - TPS6586X_LDO(LDO_RTC, NULL, ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), + TPS6586X_LDO(LDO_RTC, "REG-SYS", ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), TPS6586X_LDO(LDO_1, 
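The tps6586x conversion above drops the hand-rolled enable/disable/is_enabled/get_voltage_sel callbacks and lets the generic regmap helpers do the register pokes, which only works because the enable and voltage-select registers and masks are now described in the regulator_desc. A hedged sketch of such a descriptor; the register offsets and masks here are invented, not the TPS6586x ones:

#include <linux/module.h>
#include <linux/regulator/driver.h>

static struct regulator_ops example_ops = {
        .is_enabled      = regulator_is_enabled_regmap,
        .enable          = regulator_enable_regmap,
        .disable         = regulator_disable_regmap,
        .get_voltage_sel = regulator_get_voltage_sel_regmap,
};

/* Register offsets and masks below are for illustration only. */
static const struct regulator_desc example_desc = {
        .name        = "example-ldo",
        .ops         = &example_ops,
        .type        = REGULATOR_VOLTAGE,
        .owner       = THIS_MODULE,
        .enable_reg  = 0x10,            /* supply-enable register     */
        .enable_mask = 0x08,            /* enable bit for this supply */
        .vsel_reg    = 0x20,            /* voltage-select register    */
        .vsel_mask   = 0x07,            /* 3-bit selector field       */
};

For the helpers to function, the probe also has to hand the chip's regmap to the core through regulator_config.regmap when registering.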
"vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), - TPS6586X_LDO(SM_2, "sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), + TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, VCC2, 6), TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, VCC1, 6), - TPS6586X_DVM(SM_0, "sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, VCC1, 2), - TPS6586X_DVM(SM_1, "sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, VCC1, 0), + TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1, + ENB, 1, VCC1, 2), + TPS6586X_DVM(SM_1, "vin-sm1", dvm, SM1V1, 0, 5, ENA, 0, + ENB, 0, VCC1, 0), }; /* diff --git a/trunk/drivers/regulator/twl-regulator.c b/trunk/drivers/regulator/twl-regulator.c index 242fe90dc565..7eb986a40746 100644 --- a/trunk/drivers/regulator/twl-regulator.c +++ b/trunk/drivers/regulator/twl-regulator.c @@ -10,6 +10,8 @@ */ #include +#include +#include #include #include #include @@ -624,18 +626,9 @@ static int twlfixed_list_voltage(struct regulator_dev *rdev, unsigned index) return info->min_mV * 1000; } -static int twlfixed_get_voltage(struct regulator_dev *rdev) -{ - struct twlreg_info *info = rdev_get_drvdata(rdev); - - return info->min_mV * 1000; -} - static struct regulator_ops twl4030fixed_ops = { .list_voltage = twlfixed_list_voltage, - .get_voltage = twlfixed_get_voltage, - .enable = twl4030reg_enable, .disable = twl4030reg_disable, .is_enabled = twl4030reg_is_enabled, @@ -648,8 +641,6 @@ static struct regulator_ops twl4030fixed_ops = { static struct regulator_ops twl6030fixed_ops = { .list_voltage = twlfixed_list_voltage, - .get_voltage = twlfixed_get_voltage, - .enable = twl6030reg_enable, .disable = twl6030reg_disable, .is_enabled = twl6030reg_is_enabled, @@ -659,13 +650,6 @@ static struct regulator_ops twl6030fixed_ops = { .get_status = twl6030reg_get_status, }; -static struct regulator_ops twl6030_fixed_resource = { - .enable = twl6030reg_enable, - .disable = twl6030reg_disable, - .is_enabled = twl6030reg_is_enabled, - .get_status = twl6030reg_get_status, -}; - /* * SMPS status and control */ @@ -757,37 +741,32 @@ static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index) return voltage; } -static int -twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, - unsigned int *selector) +static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV, + int max_uV) { - struct twlreg_info *info = rdev_get_drvdata(rdev); - int vsel = 0; + struct twlreg_info *info = rdev_get_drvdata(rdev); + int vsel = 0; switch (info->flags) { case 0: if (min_uV == 0) vsel = 0; else if ((min_uV >= 600000) && (min_uV <= 1300000)) { - int calc_uV; vsel = DIV_ROUND_UP(min_uV - 600000, 12500); vsel++; - calc_uV = twl6030smps_list_voltage(rdev, vsel); - if (calc_uV > max_uV) - return -EINVAL; } /* Values 1..57 for vsel are linear and can be calculated * values 58..62 are non linear. 
*/ - else if ((min_uV > 1900000) && (max_uV >= 2100000)) + else if ((min_uV > 1900000) && (min_uV <= 2100000)) vsel = 62; - else if ((min_uV > 1800000) && (max_uV >= 1900000)) + else if ((min_uV > 1800000) && (min_uV <= 1900000)) vsel = 61; - else if ((min_uV > 1500000) && (max_uV >= 1800000)) + else if ((min_uV > 1500000) && (min_uV <= 1800000)) vsel = 60; - else if ((min_uV > 1350000) && (max_uV >= 1500000)) + else if ((min_uV > 1350000) && (min_uV <= 1500000)) vsel = 59; - else if ((min_uV > 1300000) && (max_uV >= 1350000)) + else if ((min_uV > 1300000) && (min_uV <= 1350000)) vsel = 58; else return -EINVAL; @@ -796,25 +775,21 @@ twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, if (min_uV == 0) vsel = 0; else if ((min_uV >= 700000) && (min_uV <= 1420000)) { - int calc_uV; vsel = DIV_ROUND_UP(min_uV - 700000, 12500); vsel++; - calc_uV = twl6030smps_list_voltage(rdev, vsel); - if (calc_uV > max_uV) - return -EINVAL; } /* Values 1..57 for vsel are linear and can be calculated * values 58..62 are non linear. */ - else if ((min_uV > 1900000) && (max_uV >= 2100000)) + else if ((min_uV > 1900000) && (min_uV <= 2100000)) vsel = 62; - else if ((min_uV > 1800000) && (max_uV >= 1900000)) + else if ((min_uV > 1800000) && (min_uV <= 1900000)) vsel = 61; - else if ((min_uV > 1350000) && (max_uV >= 1800000)) + else if ((min_uV > 1350000) && (min_uV <= 1800000)) vsel = 60; - else if ((min_uV > 1350000) && (max_uV >= 1500000)) + else if ((min_uV > 1350000) && (min_uV <= 1500000)) vsel = 59; - else if ((min_uV > 1300000) && (max_uV >= 1350000)) + else if ((min_uV > 1300000) && (min_uV <= 1350000)) vsel = 58; else return -EINVAL; @@ -830,17 +805,23 @@ twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, case SMPS_OFFSET_EN|SMPS_EXTENDED_EN: if (min_uV == 0) { vsel = 0; - } else if ((min_uV >= 2161000) && (max_uV <= 4321000)) { + } else if ((min_uV >= 2161000) && (min_uV <= 4321000)) { vsel = DIV_ROUND_UP(min_uV - 2161000, 38600); vsel++; } break; } - *selector = vsel; + return vsel; +} + +static int twl6030smps_set_voltage_sel(struct regulator_dev *rdev, + unsigned int selector) +{ + struct twlreg_info *info = rdev_get_drvdata(rdev); return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS, - vsel); + selector); } static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev) @@ -852,8 +833,9 @@ static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev) static struct regulator_ops twlsmps_ops = { .list_voltage = twl6030smps_list_voltage, + .map_voltage = twl6030smps_map_voltage, - .set_voltage = twl6030smps_set_voltage, + .set_voltage_sel = twl6030smps_set_voltage_sel, .get_voltage_sel = twl6030smps_get_voltage_sel, .enable = twl6030reg_enable, @@ -876,7 +858,7 @@ static struct regulator_ops twlsmps_ops = { 0x0, TWL6030, twl6030fixed_ops) #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \ -static struct twlreg_info TWL4030_INFO_##label = { \ +static const struct twlreg_info TWL4030_INFO_##label = { \ .base = offset, \ .id = num, \ .table_len = ARRAY_SIZE(label##_VSEL_table), \ @@ -894,7 +876,7 @@ static struct twlreg_info TWL4030_INFO_##label = { \ } #define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \ -static struct twlreg_info TWL4030_INFO_##label = { \ +static const struct twlreg_info TWL4030_INFO_##label = { \ .base = offset, \ .id = num, \ .remap = remap_conf, \ @@ -909,7 +891,7 @@ static struct twlreg_info TWL4030_INFO_##label = { \ } #define TWL6030_ADJUSTABLE_SMPS(label) 
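The twl6030 SMPS code above is reworked from one do-everything set_voltage() callback into the map_voltage()/set_voltage_sel() pair: map_voltage() turns a requested window into a selector, the core cross-checks the selector against list_voltage(), and set_voltage_sel() only performs the register write. A rough sketch of that split for a purely hypothetical linear range (600 mV base, 12.5 mV steps); example_write_vsel() stands in for the chip-specific write:

#include <linux/errno.h>
#include <linux/kernel.h>               /* DIV_ROUND_UP() */
#include <linux/regulator/driver.h>

int example_write_vsel(struct regulator_dev *rdev, unsigned int sel);   /* hypothetical */

/* Hypothetical linear range: 600 mV base, 12.5 mV per step, selectors 1..57. */
static int example_map_voltage(struct regulator_dev *rdev,
                               int min_uV, int max_uV)
{
        if (min_uV < 600000 || min_uV > 1300000)
                return -EINVAL;

        /* smallest selector at or above min_uV; the core re-checks the
         * result against the requested window via list_voltage() */
        return DIV_ROUND_UP(min_uV - 600000, 12500) + 1;
}

static int example_set_voltage_sel(struct regulator_dev *rdev,
                                   unsigned int sel)
{
        return example_write_vsel(rdev, sel);   /* just the register write */
}

Tracing the same arithmetic as the flags == 0 branch above, a 1,012,500 uV request gives DIV_ROUND_UP(412500, 12500) + 1 = 34, while selectors 58..62 stay special-cased because they are not on the linear grid.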
\ -static struct twlreg_info TWL6030_INFO_##label = { \ +static const struct twlreg_info TWL6030_INFO_##label = { \ .desc = { \ .name = #label, \ .id = TWL6030_REG_##label, \ @@ -920,7 +902,7 @@ static struct twlreg_info TWL6030_INFO_##label = { \ } #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) \ -static struct twlreg_info TWL6030_INFO_##label = { \ +static const struct twlreg_info TWL6030_INFO_##label = { \ .base = offset, \ .min_mV = min_mVolts, \ .max_mV = max_mVolts, \ @@ -935,7 +917,7 @@ static struct twlreg_info TWL6030_INFO_##label = { \ } #define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) \ -static struct twlreg_info TWL6025_INFO_##label = { \ +static const struct twlreg_info TWL6025_INFO_##label = { \ .base = offset, \ .min_mV = min_mVolts, \ .max_mV = max_mVolts, \ @@ -951,7 +933,7 @@ static struct twlreg_info TWL6025_INFO_##label = { \ #define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \ family, operations) \ -static struct twlreg_info TWLFIXED_INFO_##label = { \ +static const struct twlreg_info TWLFIXED_INFO_##label = { \ .base = offset, \ .id = num, \ .min_mV = mVolts, \ @@ -981,7 +963,7 @@ static struct twlreg_info TWLRES_INFO_##label = { \ } #define TWL6025_ADJUSTABLE_SMPS(label, offset) \ -static struct twlreg_info TWLSMPS_INFO_##label = { \ +static const struct twlreg_info TWLSMPS_INFO_##label = { \ .base = offset, \ .min_mV = 600, \ .max_mV = 2100, \ @@ -1037,7 +1019,7 @@ TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300); TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300); -TWL4030_FIXED_LDO(VINTANA2, 0x3f, 1500, 11, 100, 0x08); +TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08); TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08); @@ -1048,7 +1030,6 @@ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0); TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0); TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0); TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0); -TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 0); TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34); TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10); TWL6025_ADJUSTABLE_SMPS(VIO, 0x16); @@ -1117,7 +1098,7 @@ static const struct of_device_id twl_of_match[] __devinitconst = { TWL6025_OF_MATCH("ti,twl6025-ldo6", LDO6), TWL6025_OF_MATCH("ti,twl6025-ldoln", LDOLN), TWL6025_OF_MATCH("ti,twl6025-ldousb", LDOUSB), - TWLFIXED_OF_MATCH("ti,twl4030-vintana2", VINTANA2), + TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1), TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8), @@ -1139,6 +1120,7 @@ static int __devinit twlreg_probe(struct platform_device *pdev) { int i, id; struct twlreg_info *info; + const struct twlreg_info *template; struct regulator_init_data *initdata; struct regulation_constraints *c; struct regulator_dev *rdev; @@ -1148,17 +1130,17 @@ static int __devinit twlreg_probe(struct platform_device *pdev) match = of_match_device(twl_of_match, &pdev->dev); if (match) { - info = match->data; - id = info->desc.id; + template = match->data; + id = template->desc.id; initdata = of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node); drvdata = NULL; } else { id = pdev->id; initdata = pdev->dev.platform_data; - for (i = 0, info = NULL; i < ARRAY_SIZE(twl_of_match); i++) { - info = twl_of_match[i].data; - if (info && info->desc.id 
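With the twlreg_info tables above now const and shared, the probe locates the right template either through of_match_device() on the DT path or by scanning the match table for the legacy platform-device id. A minimal sketch of the DT side; the compatible string and my_info contents are made up:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_info {                        /* hypothetical per-variant data */
        int id;
};

static const struct my_info my_variant_a = { .id = 1 };

static const struct of_device_id my_of_match[] = {
        { .compatible = "vendor,variant-a", .data = &my_variant_a },
        { /* sentinel */ },
};

static int my_probe(struct platform_device *pdev)
{
        const struct of_device_id *match;
        const struct my_info *template;

        match = of_match_device(my_of_match, &pdev->dev);
        if (!match)
                return -ENODEV;

        /* keep it read-only; kmemdup() it first if fields must change */
        template = match->data;
        return template->id ? 0 : -EINVAL;
}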
== id) + for (i = 0, template = NULL; i < ARRAY_SIZE(twl_of_match); i++) { + template = twl_of_match[i].data; + if (template && template->desc.id == id) break; } if (i == ARRAY_SIZE(twl_of_match)) @@ -1169,12 +1151,16 @@ static int __devinit twlreg_probe(struct platform_device *pdev) return -EINVAL; } - if (!info) + if (!template) return -ENODEV; if (!initdata) return -EINVAL; + info = kmemdup(template, sizeof (*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + if (drvdata) { /* copy the driver data into regulator data */ info->features = drvdata->features; @@ -1235,6 +1221,7 @@ static int __devinit twlreg_probe(struct platform_device *pdev) if (IS_ERR(rdev)) { dev_err(&pdev->dev, "can't register %s, %ld\n", info->desc.name, PTR_ERR(rdev)); + kfree(info); return PTR_ERR(rdev); } platform_set_drvdata(pdev, rdev); @@ -1256,7 +1243,11 @@ static int __devinit twlreg_probe(struct platform_device *pdev) static int __devexit twlreg_remove(struct platform_device *pdev) { - regulator_unregister(platform_get_drvdata(pdev)); + struct regulator_dev *rdev = platform_get_drvdata(pdev); + struct twlreg_info *info = rdev->reg_data; + + regulator_unregister(rdev); + kfree(info); return 0; } diff --git a/trunk/drivers/regulator/wm831x-dcdc.c b/trunk/drivers/regulator/wm831x-dcdc.c index 7413885be01b..90cbcc683704 100644 --- a/trunk/drivers/regulator/wm831x-dcdc.c +++ b/trunk/drivers/regulator/wm831x-dcdc.c @@ -339,16 +339,15 @@ static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev, u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; int i; - for (i = 0; i < ARRAY_SIZE(wm831x_dcdc_ilim); i++) { + for (i = ARRAY_SIZE(wm831x_dcdc_ilim) - 1; i >= 0; i--) { if ((min_uA <= wm831x_dcdc_ilim[i]) && (wm831x_dcdc_ilim[i] <= max_uA)) - break; + return wm831x_set_bits(wm831x, reg, + WM831X_DC1_HC_THR_MASK, + i << WM831X_DC1_HC_THR_SHIFT); } - if (i == ARRAY_SIZE(wm831x_dcdc_ilim)) - return -EINVAL; - return wm831x_set_bits(wm831x, reg, WM831X_DC1_HC_THR_MASK, - i << WM831X_DC1_HC_THR_SHIFT); + return -EINVAL; } static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev) diff --git a/trunk/drivers/regulator/wm831x-ldo.c b/trunk/drivers/regulator/wm831x-ldo.c index 5cb70ca1e98d..9af512672be1 100644 --- a/trunk/drivers/regulator/wm831x-ldo.c +++ b/trunk/drivers/regulator/wm831x-ldo.c @@ -205,6 +205,8 @@ static int wm831x_gp_ldo_get_status(struct regulator_dev *rdev) /* Is it reporting under voltage? */ ret = wm831x_reg_read(wm831x, WM831X_LDO_UV_STATUS); + if (ret < 0) + return ret; if (ret & mask) return REGULATOR_STATUS_ERROR; @@ -237,6 +239,8 @@ static struct regulator_ops wm831x_gp_ldo_ops = { .set_mode = wm831x_gp_ldo_set_mode, .get_status = wm831x_gp_ldo_get_status, .get_optimum_mode = wm831x_gp_ldo_get_optimum_mode, + .get_bypass = regulator_get_bypass_regmap, + .set_bypass = regulator_set_bypass_regmap, .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, @@ -293,6 +297,8 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev) ldo->desc.vsel_mask = WM831X_LDO1_ON_VSEL_MASK; ldo->desc.enable_reg = WM831X_LDO_ENABLE; ldo->desc.enable_mask = 1 << id; + ldo->desc.bypass_reg = ldo->base; + ldo->desc.bypass_mask = WM831X_LDO1_SWI; config.dev = pdev->dev.parent; if (pdata) @@ -469,6 +475,8 @@ static int wm831x_aldo_get_status(struct regulator_dev *rdev) /* Is it reporting under voltage? 
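Because the matched template is shared, read-only data, the probe above takes a private copy with kmemdup() before touching fields like features, and that copy is kfree()d both when regulator_register() fails and in remove(). A sketch of the ownership rule (struct layout invented):

#include <linux/slab.h>
#include <linux/string.h>

struct my_info {                        /* invented layout */
        int id;
        int features;
};

/*
 * Shared, read-only template in; private writable copy out.  The caller
 * owns the copy and must kfree() it on every exit path, including a
 * failed registration and the remove() callback.
 */
static struct my_info *my_instantiate(const struct my_info *template,
                                      int features)
{
        struct my_info *info;

        info = kmemdup(template, sizeof(*info), GFP_KERNEL);
        if (!info)
                return NULL;

        info->features = features;      /* safe: this is our own copy */
        return info;
}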
*/ ret = wm831x_reg_read(wm831x, WM831X_LDO_UV_STATUS); + if (ret < 0) + return ret; if (ret & mask) return REGULATOR_STATUS_ERROR; @@ -488,6 +496,8 @@ static struct regulator_ops wm831x_aldo_ops = { .get_mode = wm831x_aldo_get_mode, .set_mode = wm831x_aldo_set_mode, .get_status = wm831x_aldo_get_status, + .set_bypass = regulator_set_bypass_regmap, + .get_bypass = regulator_get_bypass_regmap, .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, @@ -544,6 +554,8 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev) ldo->desc.vsel_mask = WM831X_LDO7_ON_VSEL_MASK; ldo->desc.enable_reg = WM831X_LDO_ENABLE; ldo->desc.enable_mask = 1 << id; + ldo->desc.bypass_reg = ldo->base; + ldo->desc.bypass_mask = WM831X_LDO7_SWI; config.dev = pdev->dev.parent; if (pdata) diff --git a/trunk/drivers/regulator/wm8400-regulator.c b/trunk/drivers/regulator/wm8400-regulator.c index 9035dd053611..27c746ef0636 100644 --- a/trunk/drivers/regulator/wm8400-regulator.c +++ b/trunk/drivers/regulator/wm8400-regulator.c @@ -120,13 +120,8 @@ static int wm8400_dcdc_set_mode(struct regulator_dev *dev, unsigned int mode) case REGULATOR_MODE_IDLE: /* Datasheet: standby */ - ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, - WM8400_DC1_ACTIVE, 0); - if (ret != 0) - return ret; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, - WM8400_DC1_SLEEP, 0); - + WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP, 0); default: return -EINVAL; } diff --git a/trunk/drivers/rpmsg/virtio_rpmsg_bus.c b/trunk/drivers/rpmsg/virtio_rpmsg_bus.c index 590cfafc7c17..1859f71372e2 100644 --- a/trunk/drivers/rpmsg/virtio_rpmsg_bus.c +++ b/trunk/drivers/rpmsg/virtio_rpmsg_bus.c @@ -1008,8 +1008,8 @@ static int rpmsg_probe(struct virtio_device *vdev) return 0; free_coherent: - dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE, bufs_va, - vrp->bufs_dma); + dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE, + bufs_va, vrp->bufs_dma); vqs_del: vdev->config->del_vqs(vrp->vdev); free_vrp: @@ -1043,7 +1043,7 @@ static void __devexit rpmsg_remove(struct virtio_device *vdev) vdev->config->del_vqs(vrp->vdev); - dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE, + dma_free_coherent(vdev->dev.parent->parent, RPMSG_TOTAL_BUF_SPACE, vrp->rbufs, vrp->bufs_dma); kfree(vrp); diff --git a/trunk/drivers/rtc/interface.c b/trunk/drivers/rtc/interface.c index eb415bd76494..9592b936b71b 100644 --- a/trunk/drivers/rtc/interface.c +++ b/trunk/drivers/rtc/interface.c @@ -582,6 +582,7 @@ enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer) void rtc_update_irq(struct rtc_device *rtc, unsigned long num, unsigned long events) { + pm_stay_awake(rtc->dev.parent); schedule_work(&rtc->irqwork); } EXPORT_SYMBOL_GPL(rtc_update_irq); @@ -844,6 +845,7 @@ void rtc_timer_do_work(struct work_struct *work) mutex_lock(&rtc->ops_lock); again: + pm_relax(rtc->dev.parent); __rtc_read_time(rtc, &tm); now = rtc_tm_to_ktime(tm); while ((next = timerqueue_getnext(&rtc->timerqueue))) { diff --git a/trunk/drivers/rtc/rtc-at91sam9.c b/trunk/drivers/rtc/rtc-at91sam9.c index 831868904e02..1dd61f402b04 100644 --- a/trunk/drivers/rtc/rtc-at91sam9.c +++ b/trunk/drivers/rtc/rtc-at91sam9.c @@ -58,6 +58,7 @@ struct sam9_rtc { struct rtc_device *rtcdev; u32 imr; void __iomem *gpbr; + int irq; }; #define rtt_readl(rtc, field) \ @@ -292,7 +293,7 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) { struct resource *r, *r_gpbr; struct sam9_rtc *rtc; - int ret; + int ret, irq; u32 mr; r = 
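The rtc/interface.c change above grabs a wakeup-source reference on the RTC's parent before queuing the interrupt work and releases it from the work function, so the system cannot suspend between the alarm interrupt and its bottom half. A rough sketch of the pairing, assuming the parent device was registered wakeup-capable and the work was set up with INIT_WORK(); here the reference is dropped at the end of the handler, whereas the hunk above relaxes at the top of its retry loop:

#include <linux/device.h>
#include <linux/pm_wakeup.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_rtc {                         /* hypothetical driver state */
        struct device      *parent;
        struct work_struct irqwork;     /* initialised with INIT_WORK() */
};

static void my_rtc_interrupt(struct my_rtc *rtc)
{
        pm_stay_awake(rtc->parent);     /* block suspend ...           */
        schedule_work(&rtc->irqwork);   /* ... until the work has run  */
}

static void my_rtc_irqwork(struct work_struct *work)
{
        struct my_rtc *rtc = container_of(work, struct my_rtc, irqwork);

        /* process the alarm/periodic event here */

        pm_relax(rtc->parent);          /* balances pm_stay_awake()    */
}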
platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -302,10 +303,18 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) return -ENODEV; } + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "failed to get interrupt resource\n"); + return irq; + } + rtc = kzalloc(sizeof *rtc, GFP_KERNEL); if (!rtc) return -ENOMEM; + rtc->irq = irq; + /* platform setup code should have handled this; sigh */ if (!device_can_wakeup(&pdev->dev)) device_init_wakeup(&pdev->dev, 1); @@ -345,11 +354,10 @@ static int __devinit at91_rtc_probe(struct platform_device *pdev) } /* register irq handler after we know what name we'll use */ - ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt, - IRQF_SHARED, + ret = request_irq(rtc->irq, at91_rtc_interrupt, IRQF_SHARED, dev_name(&rtc->rtcdev->dev), rtc); if (ret) { - dev_dbg(&pdev->dev, "can't share IRQ %d?\n", AT91_ID_SYS); + dev_dbg(&pdev->dev, "can't share IRQ %d?\n", rtc->irq); rtc_device_unregister(rtc->rtcdev); goto fail_register; } @@ -386,7 +394,7 @@ static int __devexit at91_rtc_remove(struct platform_device *pdev) /* disable all interrupts */ rtt_writel(rtc, MR, mr & ~(AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN)); - free_irq(AT91_ID_SYS, rtc); + free_irq(rtc->irq, rtc); rtc_device_unregister(rtc->rtcdev); @@ -423,7 +431,7 @@ static int at91_rtc_suspend(struct platform_device *pdev, rtc->imr = mr & (AT91_RTT_ALMIEN | AT91_RTT_RTTINCIEN); if (rtc->imr) { if (device_may_wakeup(&pdev->dev) && (mr & AT91_RTT_ALMIEN)) { - enable_irq_wake(AT91_ID_SYS); + enable_irq_wake(rtc->irq); /* don't let RTTINC cause wakeups */ if (mr & AT91_RTT_RTTINCIEN) rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); @@ -441,7 +449,7 @@ static int at91_rtc_resume(struct platform_device *pdev) if (rtc->imr) { if (device_may_wakeup(&pdev->dev)) - disable_irq_wake(AT91_ID_SYS); + disable_irq_wake(rtc->irq); mr = rtt_readl(rtc, MR); rtt_writel(rtc, MR, mr | rtc->imr); } diff --git a/trunk/drivers/rtc/rtc-cmos.c b/trunk/drivers/rtc/rtc-cmos.c index 132333d75408..4267789ca995 100644 --- a/trunk/drivers/rtc/rtc-cmos.c +++ b/trunk/drivers/rtc/rtc-cmos.c @@ -568,7 +568,6 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); - pm_wakeup_event(cmos_rtc.dev, 0); } spin_unlock(&rtc_lock); diff --git a/trunk/drivers/rtc/rtc-pcf2123.c b/trunk/drivers/rtc/rtc-pcf2123.c index 836118795c0b..13e4df63974f 100644 --- a/trunk/drivers/rtc/rtc-pcf2123.c +++ b/trunk/drivers/rtc/rtc-pcf2123.c @@ -43,6 +43,7 @@ #include #include #include +#include #define DRV_VERSION "0.6" @@ -292,6 +293,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi) pdata->rtc = rtc; for (i = 0; i < 16; i++) { + sysfs_attr_init(&pdata->regs[i].attr.attr); sprintf(pdata->regs[i].name, "%1x", i); pdata->regs[i].attr.attr.mode = S_IRUGO | S_IWUSR; pdata->regs[i].attr.attr.name = pdata->regs[i].name; diff --git a/trunk/drivers/rtc/rtc-rs5c348.c b/trunk/drivers/rtc/rtc-rs5c348.c index 77074ccd2850..fd5c7af04ae5 100644 --- a/trunk/drivers/rtc/rtc-rs5c348.c +++ b/trunk/drivers/rtc/rtc-rs5c348.c @@ -122,9 +122,12 @@ rs5c348_rtc_read_time(struct device *dev, struct rtc_time *tm) tm->tm_min = bcd2bin(rxbuf[RS5C348_REG_MINS] & RS5C348_MINS_MASK); tm->tm_hour = bcd2bin(rxbuf[RS5C348_REG_HOURS] & RS5C348_HOURS_MASK); if (!pdata->rtc_24h) { - tm->tm_hour %= 12; - if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) + if (rxbuf[RS5C348_REG_HOURS] & RS5C348_BIT_PM) { + tm->tm_hour -= 20; + tm->tm_hour %= 12; tm->tm_hour += 12; + } else + tm->tm_hour %= 12; } tm->tm_wday 
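The at91sam9 RTC probe above stops hard-coding AT91_ID_SYS and instead asks the platform device for its interrupt, keeping the number around for request_irq(), free_irq() and the wake enable/disable calls. A minimal sketch of that lookup with a hypothetical handler:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

irqreturn_t my_irq_handler(int irq, void *dev_id);      /* hypothetical */

struct my_priv {
        int irq;
};

static int my_probe(struct platform_device *pdev)
{
        struct my_priv *priv;
        int ret;

        priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->irq = platform_get_irq(pdev, 0);  /* first IRQ resource */
        if (priv->irq < 0)
                return priv->irq;               /* no hard-coded IRQ numbers */

        ret = request_irq(priv->irq, my_irq_handler, IRQF_SHARED,
                          dev_name(&pdev->dev), priv);
        if (ret)
                return ret;

        platform_set_drvdata(pdev, priv);       /* remove() frees the IRQ */
        return 0;
}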
= bcd2bin(rxbuf[RS5C348_REG_WDAY] & RS5C348_WDAY_MASK); tm->tm_mday = bcd2bin(rxbuf[RS5C348_REG_DAY] & RS5C348_DAY_MASK); diff --git a/trunk/drivers/rtc/rtc-twl.c b/trunk/drivers/rtc/rtc-twl.c index c5d06fe83bba..9277d945bf48 100644 --- a/trunk/drivers/rtc/rtc-twl.c +++ b/trunk/drivers/rtc/rtc-twl.c @@ -495,6 +495,11 @@ static int __devinit twl_rtc_probe(struct platform_device *pdev) if (ret < 0) goto out1; + /* ensure interrupts are disabled, bootloaders can be strange */ + ret = twl_rtc_write_u8(0, REG_RTC_INTERRUPTS_REG); + if (ret < 0) + dev_warn(&pdev->dev, "unable to disable interrupt\n"); + /* init cached IRQ enable bits */ ret = twl_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG); if (ret < 0) diff --git a/trunk/drivers/s390/block/dasd.c b/trunk/drivers/s390/block/dasd.c index 15370a2c5ff0..0595c763dafd 100644 --- a/trunk/drivers/s390/block/dasd.c +++ b/trunk/drivers/s390/block/dasd.c @@ -534,11 +534,11 @@ static void dasd_change_state(struct dasd_device *device) if (rc) device->target = device->state; - if (device->state == device->target) - wake_up(&dasd_init_waitq); - /* let user-space know that the device status changed */ kobject_uevent(&device->cdev->dev.kobj, KOBJ_CHANGE); + + if (device->state == device->target) + wake_up(&dasd_init_waitq); } /* @@ -2157,6 +2157,7 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && (!dasd_eer_enabled(device))) { cqr->status = DASD_CQR_FAILED; + cqr->intrc = -EAGAIN; continue; } /* Don't try to start requests if device is stopped */ @@ -3270,6 +3271,16 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) dasd_schedule_device_bh(device); } if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) { + if (!(device->path_data.opm & eventlpm) && + !(device->path_data.tbvpm & eventlpm)) { + /* + * we can not establish a pathgroup on an + * unavailable path, so trigger a path + * verification first + */ + device->path_data.tbvpm |= eventlpm; + dasd_schedule_device_bh(device); + } DBF_DEV_EVENT(DBF_WARNING, device, "%s", "Pathgroup re-established\n"); if (device->discipline->kick_validate) diff --git a/trunk/drivers/s390/block/dasd_alias.c b/trunk/drivers/s390/block/dasd_alias.c index 157defe5e069..6b556995bb33 100644 --- a/trunk/drivers/s390/block/dasd_alias.c +++ b/trunk/drivers/s390/block/dasd_alias.c @@ -384,6 +384,29 @@ static void _remove_device_from_lcu(struct alias_lcu *lcu, group->next = NULL; }; +static int +suborder_not_supported(struct dasd_ccw_req *cqr) +{ + char *sense; + char reason; + char msg_format; + char msg_no; + + sense = dasd_get_sense(&cqr->irb); + if (!sense) + return 0; + + reason = sense[0]; + msg_format = (sense[7] & 0xF0); + msg_no = (sense[7] & 0x0F); + + /* command reject, Format 0 MSG 4 - invalid parameter */ + if ((reason == 0x80) && (msg_format == 0x00) && (msg_no == 0x04)) + return 1; + + return 0; +} + static int read_unit_address_configuration(struct dasd_device *device, struct alias_lcu *lcu) { @@ -435,6 +458,8 @@ static int read_unit_address_configuration(struct dasd_device *device, do { rc = dasd_sleep_on(cqr); + if (rc && suborder_not_supported(cqr)) + return -EOPNOTSUPP; } while (rc && (cqr->retries > 0)); if (rc) { spin_lock_irqsave(&lcu->lock, flags); @@ -521,7 +546,7 @@ static void lcu_update_work(struct work_struct *work) * processing the data */ spin_lock_irqsave(&lcu->lock, flags); - if (rc || (lcu->flags & NEED_UAC_UPDATE)) { + if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) { 
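The rtc-twl probe above writes zero to the interrupt-enable register before caching the enable bits, on the theory that a bootloader may have left alarm interrupts armed. A tiny sketch of the same quiesce-before-use idea; REG_IRQ_ENABLE and my_write_reg() are hypothetical:

#include <linux/device.h>
#include <linux/types.h>

#define REG_IRQ_ENABLE  0x0f            /* hypothetical register offset */

int my_write_reg(struct device *dev, unsigned int reg, u8 val);         /* hypothetical */

static void my_quiesce_irqs(struct device *dev)
{
        /* a stale alarm must not fire into a half-initialised driver */
        if (my_write_reg(dev, REG_IRQ_ENABLE, 0) < 0)
                dev_warn(dev, "unable to disable stale interrupts\n");
}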
DBF_DEV_EVENT(DBF_WARNING, device, "could not update" " alias data in lcu (rc = %d), retry later", rc); schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ); diff --git a/trunk/drivers/s390/block/dasd_eckd.c b/trunk/drivers/s390/block/dasd_eckd.c index 40a826a7295f..c48c72abbefc 100644 --- a/trunk/drivers/s390/block/dasd_eckd.c +++ b/trunk/drivers/s390/block/dasd_eckd.c @@ -1507,7 +1507,8 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device, * call might change behaviour of DASD devices. */ static int -dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav) +dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav, + unsigned long flags) { struct dasd_ccw_req *cqr; int rc; @@ -1516,10 +1517,19 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav) if (IS_ERR(cqr)) return PTR_ERR(cqr); + /* + * set flags e.g. turn on failfast, to prevent blocking + * the calling function should handle failed requests + */ + cqr->flags |= flags; + rc = dasd_sleep_on(cqr); if (!rc) /* trigger CIO to reprobe devices */ css_schedule_reprobe(); + else if (cqr->intrc == -EAGAIN) + rc = -EAGAIN; + dasd_sfree_request(cqr, cqr->memdev); return rc; } @@ -1527,7 +1537,8 @@ dasd_eckd_psf_ssc(struct dasd_device *device, int enable_pav) /* * Valide storage server of current device. */ -static void dasd_eckd_validate_server(struct dasd_device *device) +static int dasd_eckd_validate_server(struct dasd_device *device, + unsigned long flags) { int rc; struct dasd_eckd_private *private; @@ -1536,17 +1547,18 @@ static void dasd_eckd_validate_server(struct dasd_device *device) private = (struct dasd_eckd_private *) device->private; if (private->uid.type == UA_BASE_PAV_ALIAS || private->uid.type == UA_HYPER_PAV_ALIAS) - return; + return 0; if (dasd_nopav || MACHINE_IS_VM) enable_pav = 0; else enable_pav = 1; - rc = dasd_eckd_psf_ssc(device, enable_pav); + rc = dasd_eckd_psf_ssc(device, enable_pav, flags); /* may be requested feature is not available on server, * therefore just report error and go ahead */ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x " "returned rc=%d", private->uid.ssid, rc); + return rc; } /* @@ -1556,7 +1568,13 @@ static void dasd_eckd_do_validate_server(struct work_struct *work) { struct dasd_device *device = container_of(work, struct dasd_device, kick_validate); - dasd_eckd_validate_server(device); + if (dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST) + == -EAGAIN) { + /* schedule worker again if failed */ + schedule_work(&device->kick_validate); + return; + } + dasd_put_device(device); } @@ -1685,7 +1703,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device) if (rc) goto out_err2; - dasd_eckd_validate_server(device); + dasd_eckd_validate_server(device, 0); /* device may report different configuration data after LCU setup */ rc = dasd_eckd_read_conf(device); @@ -3804,7 +3822,7 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp) case BIODASDSYMMIO: return dasd_symm_io(device, argp); default: - return -ENOIOCTLCMD; + return -ENOTTY; } } @@ -4153,7 +4171,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device) rc = dasd_alias_make_device_known_to_lcu(device); if (rc) return rc; - dasd_eckd_validate_server(device); + dasd_eckd_validate_server(device, DASD_CQR_FLAGS_FAILFAST); /* RE-Read Configuration Data */ rc = dasd_eckd_read_conf(device); diff --git a/trunk/drivers/s390/block/dasd_ioctl.c b/trunk/drivers/s390/block/dasd_ioctl.c index cceae70279f6..654c6921a6d4 100644 --- 
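dasd_eckd_do_validate_server() above now runs the validation with failfast set and, if the result is -EAGAIN, simply queues itself again rather than blocking the shared workqueue. A stripped-down sketch of that self-rescheduling worker (the device reference counting done by the real code is omitted):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_dev {                         /* hypothetical driver state */
        struct work_struct validate_work;
};

int my_try_validate(struct my_dev *dev);        /* hypothetical, may return -EAGAIN */

static void my_validate_worker(struct work_struct *work)
{
        struct my_dev *dev = container_of(work, struct my_dev, validate_work);

        if (my_try_validate(dev) == -EAGAIN) {
                /* transient failure: run again later instead of blocking here */
                schedule_work(&dev->validate_work);
                return;
        }
        /* success or a permanent error: nothing further to do */
}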
a/trunk/drivers/s390/block/dasd_ioctl.c +++ b/trunk/drivers/s390/block/dasd_ioctl.c @@ -498,12 +498,9 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode, break; default: /* if the discipline has an ioctl method try it. */ - if (base->discipline->ioctl) { + rc = -ENOTTY; + if (base->discipline->ioctl) rc = base->discipline->ioctl(block, cmd, argp); - if (rc == -ENOIOCTLCMD) - rc = -EINVAL; - } else - rc = -EINVAL; } dasd_put_device(base); return rc; diff --git a/trunk/drivers/s390/char/sclp_sdias.c b/trunk/drivers/s390/char/sclp_sdias.c index 6a6f76bf6e3d..b1032931a1c4 100644 --- a/trunk/drivers/s390/char/sclp_sdias.c +++ b/trunk/drivers/s390/char/sclp_sdias.c @@ -242,11 +242,13 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) switch (sdias_evbuf.event_status) { case EVSTATE_ALL_STORED: TRACE("all stored\n"); + break; case EVSTATE_PART_STORED: TRACE("part stored: %i\n", sdias_evbuf.blk_cnt); break; case EVSTATE_NO_DATA: TRACE("no data\n"); + /* fall through */ default: pr_err("Error from SCLP while copying hsa. " "Event status = %x\n", diff --git a/trunk/drivers/s390/cio/device.c b/trunk/drivers/s390/cio/device.c index ed25c8740a9c..fc916f5d7314 100644 --- a/trunk/drivers/s390/cio/device.c +++ b/trunk/drivers/s390/cio/device.c @@ -1426,6 +1426,8 @@ static enum io_sch_action sch_get_action(struct subchannel *sch) return IO_SCH_REPROBE; if (cdev->online) return IO_SCH_VERIFY; + if (cdev->private->state == DEV_STATE_NOT_OPER) + return IO_SCH_UNREG_ATTACH; return IO_SCH_NOP; } @@ -1519,11 +1521,14 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process) goto out; break; case IO_SCH_UNREG_ATTACH: + spin_lock_irqsave(sch->lock, flags); if (cdev->private->flags.resuming) { /* Device will be handled later. */ rc = 0; - goto out; + goto out_unlock; } + sch_set_cdev(sch, NULL); + spin_unlock_irqrestore(sch->lock, flags); /* Unregister ccw device. */ ccw_device_unregister(cdev); break; diff --git a/trunk/drivers/scsi/aic7xxx/aic79xx_core.c b/trunk/drivers/scsi/aic7xxx/aic79xx_core.c index 25417d0e7acb..0bcacf71aef8 100644 --- a/trunk/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/trunk/drivers/scsi/aic7xxx/aic79xx_core.c @@ -2888,7 +2888,7 @@ ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1) ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } else { - printk("Reseting Channel for LQI Phase error\n"); + printk("Resetting Channel for LQI Phase error\n"); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); } diff --git a/trunk/drivers/scsi/bfa/bfa_ioc.c b/trunk/drivers/scsi/bfa/bfa_ioc.c index 8cdb79c2fcdf..21ad2902e5ce 100644 --- a/trunk/drivers/scsi/bfa/bfa_ioc.c +++ b/trunk/drivers/scsi/bfa/bfa_ioc.c @@ -5587,7 +5587,7 @@ static bfa_status_t bfa_dconf_flash_write(struct bfa_dconf_mod_s *dconf); static void bfa_dconf_init_cb(void *arg, bfa_status_t status); /* - * Begining state of dconf module. Waiting for an event to start. + * Beginning state of dconf module. Waiting for an event to start. 
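The sclp_sdias hunk above is a textbook switch fix: EVSTATE_ALL_STORED was falling through into the partial-store case because a break was missing, and the intentional fall-through from EVSTATE_NO_DATA into the error path now carries a comment. In miniature, with invented event names:

enum my_event { EV_ALL_STORED, EV_PART_STORED, EV_NO_DATA };

void handle_all(void);                  /* hypothetical helpers */
void handle_part(void);
void report_error(int status);

static void my_dispatch(int status)
{
        switch (status) {
        case EV_ALL_STORED:
                handle_all();
                break;          /* without this, control falls into the next case */
        case EV_PART_STORED:
                handle_part();
                break;
        case EV_NO_DATA:
                /* fall through: "no data" is reported like any other error */
        default:
                report_error(status);
                break;
        }
}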
*/ static void bfa_dconf_sm_uninit(struct bfa_dconf_mod_s *dconf, enum bfa_dconf_event event) diff --git a/trunk/drivers/scsi/bfa/bfa_ioc.h b/trunk/drivers/scsi/bfa/bfa_ioc.h index 1a99d4b5b50f..7b916e04ca56 100644 --- a/trunk/drivers/scsi/bfa/bfa_ioc.h +++ b/trunk/drivers/scsi/bfa/bfa_ioc.h @@ -530,7 +530,7 @@ struct bfa_diag_results_fwping { struct bfa_diag_qtest_result_s { u32 status; - u16 count; /* sucessful queue test count */ + u16 count; /* successful queue test count */ u8 queue; u8 rsvd; /* 64-bit align */ }; diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index ae1cb7639d99..e0558656c646 100644 --- a/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -908,7 +908,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, return; default: - printk(KERN_ERR PFX "Unkonwn netevent %ld", event); + printk(KERN_ERR PFX "Unknown netevent %ld", event); return; } @@ -1738,7 +1738,7 @@ static int bnx2fc_ulp_get_stats(void *handle) /** * bnx2fc_ulp_start - cnic callback to initialize & start adapter instance * - * @handle: transport handle pointing to adapter struture + * @handle: transport handle pointing to adapter structure * * This function maps adapter structure to pcidev structure and initiates * firmware handshake to enable/initialize on-chip FCoE components. diff --git a/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c b/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c index 33d6630529de..91eec60252ee 100644 --- a/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/trunk/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -1264,6 +1264,9 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba) int rc = 0; u64 mask64; + memset(&iscsi_init, 0x00, sizeof(struct iscsi_kwqe_init1)); + memset(&iscsi_init2, 0x00, sizeof(struct iscsi_kwqe_init2)); + bnx2i_adjust_qp_size(hba); iscsi_init.flags = diff --git a/trunk/drivers/scsi/gdth.h b/trunk/drivers/scsi/gdth.h index d3e4d7c6f577..fbf6f0f4b0dd 100644 --- a/trunk/drivers/scsi/gdth.h +++ b/trunk/drivers/scsi/gdth.h @@ -49,15 +49,6 @@ /* GDT_ISA */ #define GDT2_ID 0x0120941c /* GDT2000/2020 */ -/* vendor ID, device IDs (PCI) */ -/* these defines should already exist in */ -#ifndef PCI_VENDOR_ID_VORTEX -#define PCI_VENDOR_ID_VORTEX 0x1119 /* PCI controller vendor ID */ -#endif -#ifndef PCI_VENDOR_ID_INTEL -#define PCI_VENDOR_ID_INTEL 0x8086 -#endif - #ifndef PCI_DEVICE_ID_VORTEX_GDT60x0 /* GDT_PCI */ #define PCI_DEVICE_ID_VORTEX_GDT60x0 0 /* GDT6000/6020/6050 */ diff --git a/trunk/drivers/scsi/hpsa.c b/trunk/drivers/scsi/hpsa.c index 796482badf13..2b4261cb7742 100644 --- a/trunk/drivers/scsi/hpsa.c +++ b/trunk/drivers/scsi/hpsa.c @@ -1315,8 +1315,9 @@ static void complete_scsi_command(struct CommandList *cp) } break; case CMD_PROTOCOL_ERR: + cmd->result = DID_ERROR << 16; dev_warn(&h->pdev->dev, "cp %p has " - "protocol error \n", cp); + "protocol error\n", cp); break; case CMD_HARDWARE_ERR: cmd->result = DID_ERROR << 16; diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c index 467dc38246f9..1059c99690e6 100644 --- a/trunk/drivers/scsi/ipr.c +++ b/trunk/drivers/scsi/ipr.c @@ -192,7 +192,7 @@ static const struct ipr_chip_t ipr_chip[] = { { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] } }; -static int ipr_max_bus_speeds [] = { +static int ipr_max_bus_speeds[] = { IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE }; @@ -562,7 +562,7 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd, trace_entry->u.add_data = 
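bnx2i above zeroes both on-stack kwqe structures before filling them in, so every field the function does not set explicitly reaches the firmware as zero instead of stack garbage. The same idea in a sketch, with an invented message layout:

#include <linux/string.h>
#include <linux/types.h>

struct fw_init_request {                /* invented firmware message layout */
        u32 flags;
        u32 queue_size;
        u32 reserved[4];                /* assumed to require zeroes */
};

static void my_build_init_request(struct fw_init_request *req, u32 qsize)
{
        memset(req, 0, sizeof(*req));   /* no stack garbage in unset fields */
        req->flags = 0x1;
        req->queue_size = qsize;
}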
add_data; } #else -#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0) +#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0) #endif /** @@ -1002,7 +1002,7 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type, **/ static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto) { - switch(proto) { + switch (proto) { case IPR_PROTO_SATA: case IPR_PROTO_SAS_STP: res->ata_class = ATA_DEV_ATA; @@ -3043,7 +3043,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) } #else -#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0) +#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0) #endif /** @@ -3055,7 +3055,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) **/ static void ipr_release_dump(struct kref *kref) { - struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref); + struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref); struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg; unsigned long lock_flags = 0; int i; @@ -3142,7 +3142,7 @@ static void ipr_worker_thread(struct work_struct *work) break; } } - } while(did_work); + } while (did_work); list_for_each_entry(res, &ioa_cfg->used_res_q, queue) { if (res->add_to_ml) { @@ -3268,7 +3268,7 @@ static ssize_t ipr_show_log_level(struct device *dev, * number of bytes printed to buffer **/ static ssize_t ipr_store_log_level(struct device *dev, - struct device_attribute *attr, + struct device_attribute *attr, const char *buf, size_t count) { struct Scsi_Host *shost = class_to_shost(dev); @@ -3315,7 +3315,7 @@ static ssize_t ipr_store_diagnostics(struct device *dev, return -EACCES; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - while(ioa_cfg->in_reset_reload) { + while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); @@ -3682,7 +3682,7 @@ static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg, unsigned long lock_flags; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - while(ioa_cfg->in_reset_reload) { + while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); @@ -3746,7 +3746,7 @@ static ssize_t ipr_store_update_fw(struct device *dev, len = snprintf(fname, 99, "%s", buf); fname[len-1] = '\0'; - if(request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { + if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) { dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname); return -EIO; } @@ -4612,7 +4612,7 @@ static int ipr_slave_alloc(struct scsi_device *sdev) * Return value: * SUCCESS / FAILED **/ -static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd) +static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd) { struct ipr_ioa_cfg *ioa_cfg; int rc; @@ -4634,7 +4634,7 @@ static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd) return rc; } -static int ipr_eh_host_reset(struct scsi_cmnd * cmd) +static int ipr_eh_host_reset(struct scsi_cmnd *cmd) { int rc; @@ -4701,7 +4701,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, } LEAVE; - return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0); + return IPR_IOASC_SENSE_KEY(ioasc) ? 
-EIO : 0; } /** @@ -4725,7 +4725,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - while(ioa_cfg->in_reset_reload) { + while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); @@ -4753,7 +4753,7 @@ static int ipr_sata_reset(struct ata_link *link, unsigned int *classes, * Return value: * SUCCESS / FAILED **/ -static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) +static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd) { struct ipr_cmnd *ipr_cmd; struct ipr_ioa_cfg *ioa_cfg; @@ -4811,10 +4811,10 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd) res->resetting_device = 0; LEAVE; - return (rc ? FAILED : SUCCESS); + return rc ? FAILED : SUCCESS; } -static int ipr_eh_dev_reset(struct scsi_cmnd * cmd) +static int ipr_eh_dev_reset(struct scsi_cmnd *cmd) { int rc; @@ -4910,7 +4910,7 @@ static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) * Return value: * SUCCESS / FAILED **/ -static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) +static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd) { struct ipr_cmnd *ipr_cmd; struct ipr_ioa_cfg *ioa_cfg; @@ -4979,7 +4979,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) res->needs_sync_complete = 1; LEAVE; - return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS); + return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS; } /** @@ -4989,7 +4989,7 @@ static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd) * Return value: * SUCCESS / FAILED **/ -static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd) +static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd) { unsigned long flags; int rc; @@ -5907,7 +5907,7 @@ static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) * Return value: * pointer to buffer with description string **/ -static const char * ipr_ioa_info(struct Scsi_Host *host) +static const char *ipr_ioa_info(struct Scsi_Host *host) { static char buffer[512]; struct ipr_ioa_cfg *ioa_cfg; @@ -5965,7 +5965,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap) ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, flags); - while(ioa_cfg->in_reset_reload) { + while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, flags); @@ -6005,7 +6005,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc) unsigned long flags; spin_lock_irqsave(ioa_cfg->host->host_lock, flags); - while(ioa_cfg->in_reset_reload) { + while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, flags); @@ -6330,7 +6330,7 @@ static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg) int i; if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) { - for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){ + for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) { if (__is_processor(ipr_blocked_processors[i])) return 1; } @@ -6608,7 +6608,7 @@ static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg) * none **/ static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg, - struct ipr_mode_pages *mode_pages) + struct ipr_mode_pages *mode_pages) { int i, entry_length; struct ipr_dev_bus_entry *bus; @@ 
-8022,7 +8022,7 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd) ipr_reinit_ipr_cmnd(ipr_cmd); ipr_cmd->job_step_failed = ipr_reset_cmd_failed; rc = ipr_cmd->job_step(ipr_cmd); - } while(rc == IPR_RC_JOB_CONTINUE); + } while (rc == IPR_RC_JOB_CONTINUE); } /** @@ -8283,7 +8283,7 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) } if (ioa_cfg->ipr_cmd_pool) - pci_pool_destroy (ioa_cfg->ipr_cmd_pool); + pci_pool_destroy(ioa_cfg->ipr_cmd_pool); kfree(ioa_cfg->ipr_cmnd_list); kfree(ioa_cfg->ipr_cmnd_list_dma); @@ -8363,8 +8363,8 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) dma_addr_t dma_addr; int i; - ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev, - sizeof(struct ipr_cmnd), 512, 0); + ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev, + sizeof(struct ipr_cmnd), 512, 0); if (!ioa_cfg->ipr_cmd_pool) return -ENOMEM; @@ -8378,7 +8378,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg) } for (i = 0; i < IPR_NUM_CMD_BLKS; i++) { - ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); + ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr); if (!ipr_cmd) { ipr_free_cmd_blks(ioa_cfg); @@ -8964,7 +8964,7 @@ static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg) int target, lun; for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++) - for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ ) + for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++) scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun); } @@ -9010,7 +9010,7 @@ static void __ipr_remove(struct pci_dev *pdev) ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); - while(ioa_cfg->in_reset_reload) { + while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags); @@ -9139,7 +9139,7 @@ static void ipr_shutdown(struct pci_dev *pdev) unsigned long lock_flags = 0; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - while(ioa_cfg->in_reset_reload) { + while (ioa_cfg->in_reset_reload) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); diff --git a/trunk/drivers/scsi/isci/host.c b/trunk/drivers/scsi/isci/host.c index 45385f531649..b334fdc1726a 100644 --- a/trunk/drivers/scsi/isci/host.c +++ b/trunk/drivers/scsi/isci/host.c @@ -492,7 +492,7 @@ static void sci_controller_process_completions(struct isci_host *ihost) u32 event_cycle; dev_dbg(&ihost->pdev->dev, - "%s: completion queue begining get:0x%08x\n", + "%s: completion queue beginning get:0x%08x\n", __func__, ihost->completion_queue_get); diff --git a/trunk/drivers/scsi/isci/init.c b/trunk/drivers/scsi/isci/init.c index 92c1d86d1fc6..9be45a2b2232 100644 --- a/trunk/drivers/scsi/isci/init.c +++ b/trunk/drivers/scsi/isci/init.c @@ -222,7 +222,7 @@ static struct sas_domain_function_template isci_transport_ops = { * @isci_host: This parameter specifies the lldd specific wrapper for the * libsas sas_ha struct. * - * This method returns an error code indicating sucess or failure. The user + * This method returns an error code indicating success or failure. The user * should check for possible memory allocation error return otherwise, a zero * indicates success. 
*/ diff --git a/trunk/drivers/scsi/isci/port.c b/trunk/drivers/scsi/isci/port.c index 2fb85bf75449..13098b09a824 100644 --- a/trunk/drivers/scsi/isci/port.c +++ b/trunk/drivers/scsi/isci/port.c @@ -212,7 +212,7 @@ static void isci_port_link_up(struct isci_host *isci_host, memcpy(iphy->sas_phy.attached_sas_addr, iphy->frame_rcvd.iaf.sas_addr, SAS_ADDR_SIZE); } else { - dev_err(&isci_host->pdev->dev, "%s: unkown target\n", __func__); + dev_err(&isci_host->pdev->dev, "%s: unknown target\n", __func__); success = false; } diff --git a/trunk/drivers/scsi/isci/request.c b/trunk/drivers/scsi/isci/request.c index 7a0431c73493..c1bafc3f3fb1 100644 --- a/trunk/drivers/scsi/isci/request.c +++ b/trunk/drivers/scsi/isci/request.c @@ -2240,7 +2240,7 @@ static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ire status = ireq->sci_status; sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR); } else { - /* If receiving any non-sucess TC status, no UF + /* If receiving any non-success TC status, no UF * received yet, then an UF for the status fis * is coming after (XXX: suspect this is * actually a protocol error or a bug like the diff --git a/trunk/drivers/scsi/isci/task.c b/trunk/drivers/scsi/isci/task.c index 6bc74eb012c9..b6f19a1db780 100644 --- a/trunk/drivers/scsi/isci/task.c +++ b/trunk/drivers/scsi/isci/task.c @@ -532,7 +532,7 @@ int isci_task_abort_task(struct sas_task *task) /* The request has already completed and there * is nothing to do here other than to set the task * done bit, and indicate that the task abort function - * was sucessful. + * was successful. */ spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags |= SAS_TASK_STATE_DONE; diff --git a/trunk/drivers/scsi/lpfc/lpfc_init.c b/trunk/drivers/scsi/lpfc/lpfc_init.c index 45c15208be9f..29937b606c84 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_init.c +++ b/trunk/drivers/scsi/lpfc/lpfc_init.c @@ -6607,7 +6607,7 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) * we just use some constant number as place holder. * * Return codes - * 0 - sucessful + * 0 - successful * -ENOMEM - No availble memory * -EIO - The mailbox failed to complete successfully. **/ diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.c b/trunk/drivers/scsi/lpfc/lpfc_sli.c index 9cbd20b1328b..0e7e144507b2 100644 --- a/trunk/drivers/scsi/lpfc/lpfc_sli.c +++ b/trunk/drivers/scsi/lpfc/lpfc_sli.c @@ -4739,7 +4739,7 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, * is attached to. * * Return codes - * 0 - sucessful + * 0 - successful * otherwise - failed to retrieve physical port name **/ static int @@ -15209,7 +15209,7 @@ lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba) /* * if next_fcf_pri was not set above and the list is not empty then * we have failed flogis on all of them. So reset flogi failed - * and start at the begining. + * and start at the beginning. */ if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) { list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { diff --git a/trunk/drivers/scsi/megaraid.c b/trunk/drivers/scsi/megaraid.c index 97825f116954..76ad72d32c3f 100644 --- a/trunk/drivers/scsi/megaraid.c +++ b/trunk/drivers/scsi/megaraid.c @@ -305,12 +305,11 @@ mega_query_adapter(adapter_t *adapter) adapter->host->sg_tablesize = adapter->sglen; - /* use HP firmware and bios version encoding Note: fw_version[0|1] and bios_version[0|1] were originally shifted right 8 bits making them zero. This 0 value was hardcoded to fix sparse warnings. 
*/ - if (adapter->product_info.subsysvid == HP_SUBSYS_VID) { + if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) { sprintf (adapter->fw_version, "%c%d%d.%d%d", adapter->product_info.fw_version[2], 0, @@ -4716,7 +4715,7 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) * support, since this firmware cannot handle 64 bit * addressing */ - if ((subsysvid == HP_SUBSYS_VID) && + if ((subsysvid == PCI_VENDOR_ID_HP) && ((subsysid == 0x60E7) || (subsysid == 0x60E8))) { /* * which firmware diff --git a/trunk/drivers/scsi/megaraid.h b/trunk/drivers/scsi/megaraid.h index 9a7897f8ca43..4fb2adf6b80d 100644 --- a/trunk/drivers/scsi/megaraid.h +++ b/trunk/drivers/scsi/megaraid.h @@ -45,45 +45,10 @@ #define MAX_DEV_TYPE 32 -#ifndef PCI_VENDOR_ID_LSI_LOGIC -#define PCI_VENDOR_ID_LSI_LOGIC 0x1000 -#endif - -#ifndef PCI_VENDOR_ID_AMI -#define PCI_VENDOR_ID_AMI 0x101E -#endif - -#ifndef PCI_VENDOR_ID_DELL -#define PCI_VENDOR_ID_DELL 0x1028 -#endif - -#ifndef PCI_VENDOR_ID_INTEL -#define PCI_VENDOR_ID_INTEL 0x8086 -#endif - -#ifndef PCI_DEVICE_ID_AMI_MEGARAID -#define PCI_DEVICE_ID_AMI_MEGARAID 0x9010 -#endif - -#ifndef PCI_DEVICE_ID_AMI_MEGARAID2 -#define PCI_DEVICE_ID_AMI_MEGARAID2 0x9060 -#endif - -#ifndef PCI_DEVICE_ID_AMI_MEGARAID3 -#define PCI_DEVICE_ID_AMI_MEGARAID3 0x1960 -#endif - #define PCI_DEVICE_ID_DISCOVERY 0x000E #define PCI_DEVICE_ID_PERC4_DI 0x000F #define PCI_DEVICE_ID_PERC4_QC_VERDE 0x0407 -/* Sub-System Vendor IDs */ -#define AMI_SUBSYS_VID 0x101E -#define DELL_SUBSYS_VID 0x1028 -#define HP_SUBSYS_VID 0x103C -#define LSI_SUBSYS_VID 0x1000 -#define INTEL_SUBSYS_VID 0x8086 - #define HBA_SIGNATURE 0x3344 #define HBA_SIGNATURE_471 0xCCCC #define HBA_SIGNATURE_64BIT 0x0299 diff --git a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c index dc27598785e5..ed38454228c6 100644 --- a/trunk/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/trunk/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4066,7 +4066,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&instance->cmd_pool_lock); spin_lock_init(&instance->hba_lock); spin_lock_init(&instance->completion_lock); - spin_lock_init(&poll_aen_lock); mutex_init(&instance->aen_mutex); mutex_init(&instance->reset_mutex); @@ -5392,6 +5391,8 @@ static int __init megasas_init(void) printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION, MEGASAS_EXT_VERSION); + spin_lock_init(&poll_aen_lock); + support_poll_for_event = 2; support_device_change = 1; diff --git a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c index 9d46fcbe7755..9d5a56c4b332 100644 --- a/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c +++ b/trunk/drivers/scsi/mpt2sas/mpt2sas_base.c @@ -1209,6 +1209,13 @@ _base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc) u16 message_control; + /* Check whether controller SAS2008 B0 controller, + if it is SAS2008 B0 controller use IO-APIC instead of MSIX */ + if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 && + ioc->pdev->revision == 0x01) { + return -EINVAL; + } + base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX); if (!base) { dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not " @@ -2424,10 +2431,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) } /* command line tunables for max controller queue depth */ - if (max_queue_depth != -1) - max_request_credit = (max_queue_depth < facts->RequestCredit) - ? 
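megaraid_sas above moves spin_lock_init(&poll_aen_lock) out of the per-adapter probe and into module init: the lock is module-global, and re-initialising it when a second adapter probes could clobber it while the first adapter is using it. A minimal sketch of initialising shared state exactly once:

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>

static spinlock_t my_poll_lock;         /* shared by every adapter instance */

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        /* per-adapter setup only; my_poll_lock is not re-initialised here */
        return 0;
}

static struct pci_driver my_pci_driver = {
        .name  = "my_example",
        .probe = my_probe,
};

static int __init my_init(void)
{
        spin_lock_init(&my_poll_lock);  /* exactly once, before any probe */
        return pci_register_driver(&my_pci_driver);
}
module_init(my_init);

Declaring the lock with DEFINE_SPINLOCK() at file scope would achieve the same thing without the explicit runtime call.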
max_queue_depth : facts->RequestCredit; - else + if (max_queue_depth != -1 && max_queue_depth != 0) { + max_request_credit = min_t(u16, max_queue_depth + + ioc->hi_priority_depth + ioc->internal_depth, + facts->RequestCredit); + if (max_request_credit > MAX_HBA_QUEUE_DEPTH) + max_request_credit = MAX_HBA_QUEUE_DEPTH; + } else max_request_credit = min_t(u16, facts->RequestCredit, MAX_HBA_QUEUE_DEPTH); @@ -2502,7 +2512,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) /* set the scsi host can_queue depth * with some internal commands that could be outstanding */ - ioc->shost->can_queue = ioc->scsiio_depth - (2); + ioc->shost->can_queue = ioc->scsiio_depth; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: " "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue)); diff --git a/trunk/drivers/scsi/mvumi.c b/trunk/drivers/scsi/mvumi.c index 88cf1db21a79..783edc7c6b98 100644 --- a/trunk/drivers/scsi/mvumi.c +++ b/trunk/drivers/scsi/mvumi.c @@ -122,7 +122,7 @@ static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, if (!res) { dev_err(&mhba->pdev->dev, - "Failed to allocate memory for resouce manager.\n"); + "Failed to allocate memory for resource manager.\n"); return NULL; } @@ -1007,13 +1007,13 @@ static int mvumi_handshake(struct mvumi_hba *mhba) tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR; iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); iowrite32(mhba->list_num_io, mhba->ib_shadow); - /* Set InBound List Avaliable count shadow */ + /* Set InBound List Available count shadow */ iowrite32(lower_32_bits(mhba->ib_shadow_phys), regs + CLA_INB_AVAL_COUNT_BASEL); iowrite32(upper_32_bits(mhba->ib_shadow_phys), regs + CLA_INB_AVAL_COUNT_BASEH); - /* Set OutBound List Avaliable count shadow */ + /* Set OutBound List Available count shadow */ iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE, mhba->ob_shadow); iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0); diff --git a/trunk/drivers/scsi/qla4xxx/ql4_os.c b/trunk/drivers/scsi/qla4xxx/ql4_os.c index 9da426628b97..487e3c8411c9 100644 --- a/trunk/drivers/scsi/qla4xxx/ql4_os.c +++ b/trunk/drivers/scsi/qla4xxx/ql4_os.c @@ -803,7 +803,7 @@ static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, iscsi_stats_dma); if (ret != QLA_SUCCESS) { ql4_printk(KERN_ERR, ha, - "Unable to retreive iscsi stats\n"); + "Unable to retrieve iscsi stats\n"); goto free_stats; } @@ -4338,7 +4338,7 @@ static int qla4xxx_compare_tuple_ddb(struct scsi_qla_host *ha, return QLA_ERROR; /* For multi sessions, driver generates the ISID, so do not compare - * ISID in reset path since it would be a comparision between the + * ISID in reset path since it would be a comparison between the * driver generated ISID and firmware generated ISID. This could * lead to adding duplicated DDBs in the list as driver generated * ISID would not match firmware generated ISID. @@ -5326,7 +5326,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha) } } /** - * qla4xxx_remove_adapter - calback function to remove adapter. + * qla4xxx_remove_adapter - callback function to remove adapter. 
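The mpt2sas hunk above stops taking the max_queue_depth module parameter at face value: the requested depth plus the driver's high-priority and internal slots is bounded by the firmware's RequestCredit and then by MAX_HBA_QUEUE_DEPTH, and a value of 0 now falls back to the default path. A hedged sketch of that clamping with made-up limits:

#include <linux/kernel.h>               /* min_t() */
#include <linux/types.h>

#define MY_MAX_HBA_QUEUE_DEPTH  1024    /* hypothetical driver ceiling */

static u16 my_request_credit(int user_depth, u16 internal, u16 fw_credit)
{
        u16 credit;

        if (user_depth > 0)
                /* honour the request, but never exceed what firmware offers */
                credit = min_t(u16, user_depth + internal, fw_credit);
        else
                credit = fw_credit;     /* default: take the firmware value */

        return min_t(u16, credit, MY_MAX_HBA_QUEUE_DEPTH);
}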
* @pci_dev: PCI device pointer **/ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev) diff --git a/trunk/drivers/scsi/scsi_error.c b/trunk/drivers/scsi/scsi_error.c index 4a6381c87253..de2337f255a7 100644 --- a/trunk/drivers/scsi/scsi_error.c +++ b/trunk/drivers/scsi/scsi_error.c @@ -42,6 +42,8 @@ #include +static void scsi_eh_done(struct scsi_cmnd *scmd); + #define SENSE_TIMEOUT (10*HZ) /* @@ -241,6 +243,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) if (! scsi_command_normalize_sense(scmd, &sshdr)) return FAILED; /* no valid sense data */ + if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done) + /* + * nasty: for mid-layer issued TURs, we need to return the + * actual sense data without any recovery attempt. For eh + * issued ones, we need to try to recover and interpret + */ + return SUCCESS; + if (scsi_sense_is_deferred(&sshdr)) return NEEDS_RETRY; diff --git a/trunk/drivers/scsi/scsi_lib.c b/trunk/drivers/scsi/scsi_lib.c index ffd77739ae3e..faa790fba134 100644 --- a/trunk/drivers/scsi/scsi_lib.c +++ b/trunk/drivers/scsi/scsi_lib.c @@ -776,7 +776,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) } if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ - req->errors = result; if (result) { if (sense_valid && req->sense) { /* @@ -792,6 +791,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) if (!sense_deferred) error = __scsi_error_from_host_byte(cmd, result); } + /* + * __scsi_error_from_host_byte may have reset the host_byte + */ + req->errors = cmd->result; req->resid_len = scsi_get_resid(cmd); diff --git a/trunk/drivers/scsi/scsi_scan.c b/trunk/drivers/scsi/scsi_scan.c index 56a93794c470..d947ffc20ceb 100644 --- a/trunk/drivers/scsi/scsi_scan.c +++ b/trunk/drivers/scsi/scsi_scan.c @@ -764,6 +764,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, sdev->model = (char *) (sdev->inquiry + 16); sdev->rev = (char *) (sdev->inquiry + 32); + if (strncmp(sdev->vendor, "ATA ", 8) == 0) { + /* + * sata emulation layer device. This is a hack to work around + * the SATL power management specifications which state that + * when the SATL detects the device has gone into standby + * mode, it shall respond with NOT READY. 
+ */ + sdev->allow_restart = 1; + } + if (*bflags & BLIST_ISROM) { sdev->type = TYPE_ROM; sdev->removable = 1; diff --git a/trunk/drivers/scsi/virtio_scsi.c b/trunk/drivers/scsi/virtio_scsi.c index c7030fbee79c..3e79a2f00042 100644 --- a/trunk/drivers/scsi/virtio_scsi.c +++ b/trunk/drivers/scsi/virtio_scsi.c @@ -331,7 +331,7 @@ static void virtscsi_map_sgl(struct scatterlist *sg, unsigned int *p_idx, int i; for_each_sg(table->sgl, sg_elem, table->nents, i) - sg_set_buf(&sg[idx++], sg_virt(sg_elem), sg_elem->length); + sg[idx++] = *sg_elem; *p_idx = idx; } diff --git a/trunk/drivers/scsi/vmw_pvscsi.c b/trunk/drivers/scsi/vmw_pvscsi.c index 4411d4224401..20b3a483c2cc 100644 --- a/trunk/drivers/scsi/vmw_pvscsi.c +++ b/trunk/drivers/scsi/vmw_pvscsi.c @@ -295,7 +295,7 @@ static void ll_adapter_reset(const struct pvscsi_adapter *adapter) static void ll_bus_reset(const struct pvscsi_adapter *adapter) { - dev_dbg(pvscsi_dev(adapter), "Reseting bus on %p\n", adapter); + dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter); pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0); } @@ -304,7 +304,7 @@ static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target) { struct PVSCSICmdDescResetDevice cmd = { 0 }; - dev_dbg(pvscsi_dev(adapter), "Reseting device: target=%u\n", target); + dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target); cmd.target = target; diff --git a/trunk/drivers/sh/intc/core.c b/trunk/drivers/sh/intc/core.c index 2374468615ed..8f32a1323a79 100644 --- a/trunk/drivers/sh/intc/core.c +++ b/trunk/drivers/sh/intc/core.c @@ -324,8 +324,16 @@ int __init register_intc_controller(struct intc_desc *desc) res = irq_create_identity_mapping(d->domain, irq); if (unlikely(res)) { - pr_err("can't get irq_desc for %d\n", irq); - continue; + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, irq, irq); + if (unlikely(res)) { + pr_err("domain association failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", irq); + continue; + } } intc_irq_xlate_set(irq, vect->enum_id, d); @@ -345,8 +353,19 @@ int __init register_intc_controller(struct intc_desc *desc) */ res = irq_create_identity_mapping(d->domain, irq2); if (unlikely(res)) { - pr_err("can't get irq_desc for %d\n", irq2); - continue; + if (res == -EEXIST) { + res = irq_domain_associate(d->domain, + irq2, irq2); + if (unlikely(res)) { + pr_err("domain association " + "failure\n"); + continue; + } + } else { + pr_err("can't identity map IRQ %d\n", + irq); + continue; + } } vect2->enum_id = 0; diff --git a/trunk/drivers/sh/pfc/pinctrl.c b/trunk/drivers/sh/pfc/pinctrl.c index 2804eaae804e..0646bf6e7889 100644 --- a/trunk/drivers/sh/pfc/pinctrl.c +++ b/trunk/drivers/sh/pfc/pinctrl.c @@ -208,10 +208,13 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev, break; case PINMUX_TYPE_GPIO: + case PINMUX_TYPE_INPUT: + case PINMUX_TYPE_OUTPUT: break; default: pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type); - return -ENOTSUPP; + ret = -ENOTSUPP; + goto err; } ret = 0; diff --git a/trunk/drivers/spi/spi-au1550.c b/trunk/drivers/spi/spi-au1550.c index 5784c8799616..4de66d1cfe51 100644 --- a/trunk/drivers/spi/spi-au1550.c +++ b/trunk/drivers/spi/spi-au1550.c @@ -475,7 +475,7 @@ static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw) /* * due to an spi error we consider transfer as done, * so mask all events until before next transfer start - * and stop the possibly running dma immediatelly + * and stop the possibly running dma 
immediately */ au1550_spi_mask_ack_all(hw); au1xxx_dbdma_stop(hw->dma_rx_ch); diff --git a/trunk/drivers/spi/spi-bcm63xx.c b/trunk/drivers/spi/spi-bcm63xx.c index 6e25ef1bce91..a9f4049c6769 100644 --- a/trunk/drivers/spi/spi-bcm63xx.c +++ b/trunk/drivers/spi/spi-bcm63xx.c @@ -47,6 +47,8 @@ struct bcm63xx_spi { /* Platform data */ u32 speed_hz; unsigned fifo_size; + unsigned int msg_type_shift; + unsigned int msg_ctl_width; /* Data buffers */ const unsigned char *tx_ptr; @@ -221,13 +223,20 @@ static unsigned int bcm63xx_txrx_bufs(struct spi_device *spi, msg_ctl = (t->len << SPI_BYTE_CNT_SHIFT); if (t->rx_buf && t->tx_buf) - msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT); + msg_ctl |= (SPI_FD_RW << bs->msg_type_shift); else if (t->rx_buf) - msg_ctl |= (SPI_HD_R << SPI_MSG_TYPE_SHIFT); + msg_ctl |= (SPI_HD_R << bs->msg_type_shift); else if (t->tx_buf) - msg_ctl |= (SPI_HD_W << SPI_MSG_TYPE_SHIFT); - - bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL); + msg_ctl |= (SPI_HD_W << bs->msg_type_shift); + + switch (bs->msg_ctl_width) { + case 8: + bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL); + break; + case 16: + bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL); + break; + } /* Issue the transfer */ cmd = SPI_CMD_START_IMMEDIATE; @@ -406,9 +415,21 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev) master->transfer_one_message = bcm63xx_spi_transfer_one; master->mode_bits = MODEBITS; bs->speed_hz = pdata->speed_hz; + bs->msg_type_shift = pdata->msg_type_shift; + bs->msg_ctl_width = pdata->msg_ctl_width; bs->tx_io = (u8 *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA)); bs->rx_io = (const u8 *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA)); + switch (bs->msg_ctl_width) { + case 8: + case 16: + break; + default: + dev_err(dev, "unsupported MSG_CTL width: %d\n", + bs->msg_ctl_width); + goto out_clk_disable; + } + /* Initialize hardware */ clk_enable(bs->clk); bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS); @@ -438,7 +459,7 @@ static int __devinit bcm63xx_spi_probe(struct platform_device *pdev) static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct bcm63xx_spi *bs = spi_master_get_devdata(master); spi_unregister_master(master); @@ -452,6 +473,8 @@ static int __devexit bcm63xx_spi_remove(struct platform_device *pdev) platform_set_drvdata(pdev, 0); + spi_master_put(master); + return 0; } diff --git a/trunk/drivers/spi/spi-bfin-sport.c b/trunk/drivers/spi/spi-bfin-sport.c index 1fe51198a622..6555ecd07302 100644 --- a/trunk/drivers/spi/spi-bfin-sport.c +++ b/trunk/drivers/spi/spi-bfin-sport.c @@ -467,7 +467,7 @@ bfin_sport_spi_pump_transfers(unsigned long data) dev_dbg(drv_data->dev, "IO write error!\n"); drv_data->state = ERROR_STATE; } else { - /* Update total byte transfered */ + /* Update total byte transferred */ message->actual_length += transfer->len; /* Move to next transfer of this msg */ drv_data->state = bfin_sport_spi_next_transfer(drv_data); diff --git a/trunk/drivers/spi/spi-coldfire-qspi.c b/trunk/drivers/spi/spi-coldfire-qspi.c index b2d4b9e4e010..764bfee75920 100644 --- a/trunk/drivers/spi/spi-coldfire-qspi.c +++ b/trunk/drivers/spi/spi-coldfire-qspi.c @@ -533,7 +533,6 @@ static int __devexit mcfqspi_remove(struct platform_device *pdev) iounmap(mcfqspi->iobase); release_mem_region(res->start, resource_size(res)); spi_unregister_master(master); - spi_master_put(master); return 0; } @@ -541,7 +540,7 @@ static int __devexit mcfqspi_remove(struct 
platform_device *pdev) #ifdef CONFIG_PM_SLEEP static int mcfqspi_suspend(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + struct spi_master *master = dev_get_drvdata(dev); struct mcfqspi *mcfqspi = spi_master_get_devdata(master); spi_master_suspend(master); @@ -553,7 +552,7 @@ static int mcfqspi_suspend(struct device *dev) static int mcfqspi_resume(struct device *dev) { - struct spi_master *master = spi_master_get(dev_get_drvdata(dev)); + struct spi_master *master = dev_get_drvdata(dev); struct mcfqspi *mcfqspi = spi_master_get_devdata(master); spi_master_resume(master); diff --git a/trunk/drivers/spi/spi-oc-tiny.c b/trunk/drivers/spi/spi-oc-tiny.c index 698018fd992b..9d9071b730be 100644 --- a/trunk/drivers/spi/spi-oc-tiny.c +++ b/trunk/drivers/spi/spi-oc-tiny.c @@ -129,7 +129,7 @@ static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) unsigned int i; if (hw->irq >= 0) { - /* use intrrupt driven data transfer */ + /* use interrupt driven data transfer */ hw->len = t->len; hw->txp = t->tx_buf; hw->rxp = t->rx_buf; diff --git a/trunk/drivers/spi/spi-omap2-mcspi.c b/trunk/drivers/spi/spi-omap2-mcspi.c index bc4778175e34..b2fb141da375 100644 --- a/trunk/drivers/spi/spi-omap2-mcspi.c +++ b/trunk/drivers/spi/spi-omap2-mcspi.c @@ -1228,18 +1228,16 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) status = spi_register_master(master); if (status < 0) - goto err_spi_register; + goto disable_pm; return status; -err_spi_register: - spi_master_put(master); disable_pm: pm_runtime_disable(&pdev->dev); dma_chnl_free: kfree(mcspi->dma_channels); free_master: - kfree(master); + spi_master_put(master); platform_set_drvdata(pdev, NULL); return status; } diff --git a/trunk/drivers/spi/spi-pl022.c b/trunk/drivers/spi/spi-pl022.c index aab518ec2bbc..6abbe23c39b4 100644 --- a/trunk/drivers/spi/spi-pl022.c +++ b/trunk/drivers/spi/spi-pl022.c @@ -2053,7 +2053,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", adev->res.start, pl022->virtbase); - pm_runtime_enable(dev); pm_runtime_resume(dev); pl022->clk = clk_get(&adev->dev, NULL); diff --git a/trunk/drivers/spi/spi-ppc4xx.c b/trunk/drivers/spi/spi-ppc4xx.c index 75ac9d48ef46..7a85f22b6474 100644 --- a/trunk/drivers/spi/spi-ppc4xx.c +++ b/trunk/drivers/spi/spi-ppc4xx.c @@ -101,7 +101,7 @@ struct spi_ppc4xx_regs { u8 dummy; /* * Clock divisor modulus register - * This uses the follwing formula: + * This uses the following formula: * SCPClkOut = OPBCLK/(4(CDM + 1)) * or * CDM = (OPBCLK/4*SCPClkOut) - 1 @@ -201,7 +201,7 @@ static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t) return -EINVAL; } - /* Write new configration */ + /* Write new configuration */ out_8(&hw->regs->mode, cs->mode); /* Set the clock */ diff --git a/trunk/drivers/spi/spi-s3c64xx.c b/trunk/drivers/spi/spi-s3c64xx.c index 646a7657fe62..d1c8441f638c 100644 --- a/trunk/drivers/spi/spi-s3c64xx.c +++ b/trunk/drivers/spi/spi-s3c64xx.c @@ -826,7 +826,7 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata( struct spi_device *spi) { struct s3c64xx_spi_csinfo *cs; - struct device_node *slave_np, *data_np; + struct device_node *slave_np, *data_np = NULL; u32 fb_delay = 0; slave_np = spi->dev.of_node; @@ -1479,40 +1479,40 @@ static const struct dev_pm_ops s3c64xx_spi_pm = { s3c64xx_spi_runtime_resume, NULL) }; -struct s3c64xx_spi_port_config s3c2443_spi_port_config = { +static struct 
s3c64xx_spi_port_config s3c2443_spi_port_config = { .fifo_lvl_mask = { 0x7f }, .rx_lvl_offset = 13, .tx_st_done = 21, .high_speed = true, }; -struct s3c64xx_spi_port_config s3c6410_spi_port_config = { +static struct s3c64xx_spi_port_config s3c6410_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7F }, .rx_lvl_offset = 13, .tx_st_done = 21, }; -struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { +static struct s3c64xx_spi_port_config s5p64x0_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, }; -struct s3c64xx_spi_port_config s5pc100_spi_port_config = { +static struct s3c64xx_spi_port_config s5pc100_spi_port_config = { .fifo_lvl_mask = { 0x7f, 0x7F }, .rx_lvl_offset = 13, .tx_st_done = 21, .high_speed = true, }; -struct s3c64xx_spi_port_config s5pv210_spi_port_config = { +static struct s3c64xx_spi_port_config s5pv210_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, .high_speed = true, }; -struct s3c64xx_spi_port_config exynos4_spi_port_config = { +static struct s3c64xx_spi_port_config exynos4_spi_port_config = { .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F }, .rx_lvl_offset = 15, .tx_st_done = 25, diff --git a/trunk/drivers/spi/spi-topcliff-pch.c b/trunk/drivers/spi/spi-topcliff-pch.c index cd56dcf46320..1284c9b74653 100644 --- a/trunk/drivers/spi/spi-topcliff-pch.c +++ b/trunk/drivers/spi/spi-topcliff-pch.c @@ -505,7 +505,7 @@ static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg) } if (unlikely(pspi->max_speed_hz == 0)) { - dev_err(&pspi->dev, "%s pch_spi_tranfer maxspeed=%d\n", + dev_err(&pspi->dev, "%s pch_spi_transfer maxspeed=%d\n", __func__, pspi->max_speed_hz); retval = -EINVAL; goto err_out; diff --git a/trunk/drivers/staging/android/android_alarm.h b/trunk/drivers/staging/android/android_alarm.h index d0cafd637199..f2ffd963f1c3 100644 --- a/trunk/drivers/staging/android/android_alarm.h +++ b/trunk/drivers/staging/android/android_alarm.h @@ -51,10 +51,12 @@ enum android_alarm_return_flags { #define ANDROID_ALARM_WAIT _IO('a', 1) #define ALARM_IOW(c, type, size) _IOW('a', (c) | ((type) << 4), size) +#define ALARM_IOR(c, type, size) _IOR('a', (c) | ((type) << 4), size) + /* Set alarm */ #define ANDROID_ALARM_SET(type) ALARM_IOW(2, type, struct timespec) #define ANDROID_ALARM_SET_AND_WAIT(type) ALARM_IOW(3, type, struct timespec) -#define ANDROID_ALARM_GET_TIME(type) ALARM_IOW(4, type, struct timespec) +#define ANDROID_ALARM_GET_TIME(type) ALARM_IOR(4, type, struct timespec) #define ANDROID_ALARM_SET_RTC _IOW('a', 5, struct timespec) #define ANDROID_ALARM_BASE_CMD(cmd) (cmd & ~(_IOC(0, 0, 0xf0, 0))) #define ANDROID_ALARM_IOCTL_TO_TYPE(cmd) (_IOC_NR(cmd) >> 4) diff --git a/trunk/drivers/staging/comedi/drivers.c b/trunk/drivers/staging/comedi/drivers.c index c0fdb00783ed..2359151af7e1 100644 --- a/trunk/drivers/staging/comedi/drivers.c +++ b/trunk/drivers/staging/comedi/drivers.c @@ -168,7 +168,7 @@ int comedi_device_attach(struct comedi_device *dev, struct comedi_devconfig *it) dev->board_ptr = comedi_recognize(driv, it->board_name); if (dev->board_ptr) break; - } else if (strcmp(driv->driver_name, it->board_name)) + } else if (strcmp(driv->driver_name, it->board_name) == 0) break; module_put(driv->module); } diff --git a/trunk/drivers/staging/comedi/drivers/adv_pci1710.c b/trunk/drivers/staging/comedi/drivers/adv_pci1710.c index 31986608eaf1..6b4d0d68e637 100644 --- a/trunk/drivers/staging/comedi/drivers/adv_pci1710.c +++ b/trunk/drivers/staging/comedi/drivers/adv_pci1710.c @@ 
-1349,9 +1349,6 @@ static struct pci_dev *pci1710_find_pci_dev(struct comedi_device *dev, } if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH) continue; - if (pci_is_enabled(pcidev)) - continue; - if (strcmp(this_board->name, DRV_NAME) == 0) { for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) { if (pcidev->device == boardtypes[i].device_id) { diff --git a/trunk/drivers/staging/comedi/drivers/adv_pci1723.c b/trunk/drivers/staging/comedi/drivers/adv_pci1723.c index da5ee69d2c9d..dfde0f6328dd 100644 --- a/trunk/drivers/staging/comedi/drivers/adv_pci1723.c +++ b/trunk/drivers/staging/comedi/drivers/adv_pci1723.c @@ -301,8 +301,6 @@ static struct pci_dev *pci1723_find_pci_dev(struct comedi_device *dev, } if (pcidev->vendor != PCI_VENDOR_ID_ADVANTECH) continue; - if (pci_is_enabled(pcidev)) - continue; return pcidev; } dev_err(dev->class_dev, diff --git a/trunk/drivers/staging/comedi/drivers/adv_pci_dio.c b/trunk/drivers/staging/comedi/drivers/adv_pci_dio.c index 97f06dc8e48d..2d4cb7f638b2 100644 --- a/trunk/drivers/staging/comedi/drivers/adv_pci_dio.c +++ b/trunk/drivers/staging/comedi/drivers/adv_pci_dio.c @@ -1064,8 +1064,6 @@ static struct pci_dev *pci_dio_find_pci_dev(struct comedi_device *dev, slot != PCI_SLOT(pcidev->devfn)) continue; } - if (pci_is_enabled(pcidev)) - continue; for (i = 0; i < ARRAY_SIZE(boardtypes); ++i) { if (boardtypes[i].vendor_id != pcidev->vendor) continue; diff --git a/trunk/drivers/staging/comedi/drivers/amplc_dio200.c b/trunk/drivers/staging/comedi/drivers/amplc_dio200.c index 6c81e377262c..cc8931fde839 100644 --- a/trunk/drivers/staging/comedi/drivers/amplc_dio200.c +++ b/trunk/drivers/staging/comedi/drivers/amplc_dio200.c @@ -1412,6 +1412,13 @@ static int __devinit dio200_attach_pci(struct comedi_device *dev, dev_err(dev->class_dev, "BUG! cannot determine board type!\n"); return -EINVAL; } + /* + * Need to 'get' the PCI device to match the 'put' in dio200_detach(). + * TODO: Remove the pci_dev_get() and matching pci_dev_put() once + * support for manual attachment of PCI devices via dio200_attach() + * has been removed. + */ + pci_dev_get(pci_dev); return dio200_pci_common_attach(dev, pci_dev); } diff --git a/trunk/drivers/staging/comedi/drivers/amplc_pc236.c b/trunk/drivers/staging/comedi/drivers/amplc_pc236.c index aabba9886b7d..f50287903038 100644 --- a/trunk/drivers/staging/comedi/drivers/amplc_pc236.c +++ b/trunk/drivers/staging/comedi/drivers/amplc_pc236.c @@ -565,6 +565,13 @@ static int __devinit pc236_attach_pci(struct comedi_device *dev, dev_err(dev->class_dev, "BUG! cannot determine board type!\n"); return -EINVAL; } + /* + * Need to 'get' the PCI device to match the 'put' in pc236_detach(). + * TODO: Remove the pci_dev_get() and matching pci_dev_put() once + * support for manual attachment of PCI devices via pc236_attach() + * has been removed. + */ + pci_dev_get(pci_dev); return pc236_pci_common_attach(dev, pci_dev); } diff --git a/trunk/drivers/staging/comedi/drivers/amplc_pc263.c b/trunk/drivers/staging/comedi/drivers/amplc_pc263.c index 40ec1ffebba6..8191c4e28e0a 100644 --- a/trunk/drivers/staging/comedi/drivers/amplc_pc263.c +++ b/trunk/drivers/staging/comedi/drivers/amplc_pc263.c @@ -298,6 +298,13 @@ static int __devinit pc263_attach_pci(struct comedi_device *dev, dev_err(dev->class_dev, "BUG! cannot determine board type!\n"); return -EINVAL; } + /* + * Need to 'get' the PCI device to match the 'put' in pc263_detach(). 
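Editorial sketch, separate from the patch itself: the comedi amplc/das08 hunks around this point all add the same pci_dev_get() in the PCI auto-attach path so that the unconditional pci_dev_put() in the matching detach routine does not drop a reference the driver never took. The short userspace program below only illustrates that get/put pairing; the structure and function names are made up and are not the kernel's PCI API.

/*
 * Userspace illustration (not kernel code) of why attach must take a
 * reference when detach unconditionally drops one.
 */
#include <assert.h>
#include <stdio.h>

struct fake_pci_dev {
	int refcount;
};

static void fake_get(struct fake_pci_dev *d) { d->refcount++; }
static void fake_put(struct fake_pci_dev *d) { d->refcount--; }

/* auto-attach path: take a reference of our own before common attach work */
static void attach_pci(struct fake_pci_dev *d)
{
	fake_get(d);
	/* ... common attach work would go here ... */
}

/* detach path: always drops the driver's reference */
static void detach(struct fake_pci_dev *d)
{
	fake_put(d);
}

int main(void)
{
	struct fake_pci_dev dev = { .refcount = 1 };	/* reference held by the caller/bus */

	attach_pci(&dev);
	detach(&dev);

	/* without the get in attach_pci(), detach() would have eaten the caller's reference */
	assert(dev.refcount == 1);
	printf("refcount balanced: %d\n", dev.refcount);
	return 0;
}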
+ * TODO: Remove the pci_dev_get() and matching pci_dev_put() once + * support for manual attachment of PCI devices via pc263_attach() + * has been removed. + */ + pci_dev_get(pci_dev); return pc263_pci_common_attach(dev, pci_dev); } diff --git a/trunk/drivers/staging/comedi/drivers/amplc_pci224.c b/trunk/drivers/staging/comedi/drivers/amplc_pci224.c index 4e17f13e57f6..8bf109e7bb05 100644 --- a/trunk/drivers/staging/comedi/drivers/amplc_pci224.c +++ b/trunk/drivers/staging/comedi/drivers/amplc_pci224.c @@ -1503,6 +1503,13 @@ pci224_attach_pci(struct comedi_device *dev, struct pci_dev *pci_dev) DRIVER_NAME ": BUG! cannot determine board type!\n"); return -EINVAL; } + /* + * Need to 'get' the PCI device to match the 'put' in pci224_detach(). + * TODO: Remove the pci_dev_get() and matching pci_dev_put() once + * support for manual attachment of PCI devices via pci224_attach() + * has been removed. + */ + pci_dev_get(pci_dev); return pci224_attach_common(dev, pci_dev, NULL); } diff --git a/trunk/drivers/staging/comedi/drivers/amplc_pci230.c b/trunk/drivers/staging/comedi/drivers/amplc_pci230.c index 1b67d0c61fa7..66e74bd12267 100644 --- a/trunk/drivers/staging/comedi/drivers/amplc_pci230.c +++ b/trunk/drivers/staging/comedi/drivers/amplc_pci230.c @@ -2925,6 +2925,13 @@ static int __devinit pci230_attach_pci(struct comedi_device *dev, "amplc_pci230: BUG! cannot determine board type!\n"); return -EINVAL; } + /* + * Need to 'get' the PCI device to match the 'put' in pci230_detach(). + * TODO: Remove the pci_dev_get() and matching pci_dev_put() once + * support for manual attachment of PCI devices via pci230_attach() + * has been removed. + */ + pci_dev_get(pci_dev); return pci230_attach_common(dev, pci_dev); } diff --git a/trunk/drivers/staging/comedi/drivers/daqboard2000.c b/trunk/drivers/staging/comedi/drivers/daqboard2000.c index ef28385c1482..cad559a1a730 100644 --- a/trunk/drivers/staging/comedi/drivers/daqboard2000.c +++ b/trunk/drivers/staging/comedi/drivers/daqboard2000.c @@ -718,7 +718,8 @@ static struct pci_dev *daqboard2000_find_pci_dev(struct comedi_device *dev, continue; } if (pcidev->vendor != PCI_VENDOR_ID_IOTECH || - pcidev->device != 0x0409) + pcidev->device != 0x0409 || + pcidev->subsystem_device != PCI_VENDOR_ID_IOTECH) continue; for (i = 0; i < ARRAY_SIZE(boardtypes); i++) { @@ -739,6 +740,7 @@ static int daqboard2000_attach(struct comedi_device *dev, { struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; void *aux_data; unsigned int aux_len; int result; @@ -758,11 +760,12 @@ static int daqboard2000_attach(struct comedi_device *dev, "failed to enable PCI device and request regions\n"); return -EIO; } - dev->iobase = pci_resource_start(pcidev, 2); + dev->iobase = 1; /* the "detach" needs this */ - devpriv->plx = - ioremap(pci_resource_start(pcidev, 0), DAQBOARD2000_PLX_SIZE); - devpriv->daq = ioremap(dev->iobase, DAQBOARD2000_DAQ_SIZE); + pci_base = pci_resource_start(pcidev, 0); + devpriv->plx = ioremap(pci_base, DAQBOARD2000_PLX_SIZE); + pci_base = pci_resource_start(pcidev, 2); + devpriv->daq = ioremap(pci_base, DAQBOARD2000_DAQ_SIZE); if (!devpriv->plx || !devpriv->daq) return -ENOMEM; @@ -799,8 +802,6 @@ static int daqboard2000_attach(struct comedi_device *dev, printk("Interrupt after is: %x\n", interrupt); */ - dev->iobase = (unsigned long)devpriv->daq; - dev->board_name = this_board->name; s = dev->subdevices + 0; @@ -824,7 +825,7 @@ static int daqboard2000_attach(struct comedi_device *dev, s = dev->subdevices + 2; result = 
subdev_8255_init(dev, s, daqboard2000_8255_cb, - (unsigned long)(dev->iobase + 0x40)); + (unsigned long)(devpriv->daq + 0x40)); out: return result; diff --git a/trunk/drivers/staging/comedi/drivers/das08.c b/trunk/drivers/staging/comedi/drivers/das08.c index 874e02e47668..67a914a10b55 100644 --- a/trunk/drivers/staging/comedi/drivers/das08.c +++ b/trunk/drivers/staging/comedi/drivers/das08.c @@ -378,7 +378,7 @@ das08jr_ao_winsn(struct comedi_device *dev, struct comedi_subdevice *s, int chan; lsb = data[0] & 0xff; - msb = (data[0] >> 8) & 0xf; + msb = (data[0] >> 8) & 0xff; chan = CR_CHAN(insn->chanspec); @@ -623,7 +623,7 @@ static const struct das08_board_struct das08_boards[] = { .ai = das08_ai_rinsn, .ai_nbits = 16, .ai_pg = das08_pg_none, - .ai_encoding = das08_encode12, + .ai_encoding = das08_encode16, .ao = das08jr_ao_winsn, .ao_nbits = 16, .di = das08jr_di_rbits, @@ -922,6 +922,13 @@ das08_attach_pci(struct comedi_device *dev, struct pci_dev *pdev) dev_err(dev->class_dev, "BUG! cannot determine board type!\n"); return -EINVAL; } + /* + * Need to 'get' the PCI device to match the 'put' in das08_detach(). + * TODO: Remove the pci_dev_get() and matching pci_dev_put() once + * support for manual attachment of PCI devices via das08_attach() + * has been removed. + */ + pci_dev_get(pdev); return das08_pci_attach_common(dev, pdev); } diff --git a/trunk/drivers/staging/comedi/drivers/dt3000.c b/trunk/drivers/staging/comedi/drivers/dt3000.c index a6fe6c9be87e..3476cda0fff0 100644 --- a/trunk/drivers/staging/comedi/drivers/dt3000.c +++ b/trunk/drivers/staging/comedi/drivers/dt3000.c @@ -804,6 +804,7 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; int ret = 0; dev_dbg(dev->class_dev, "dt3000:\n"); @@ -820,9 +821,10 @@ static int dt3000_attach(struct comedi_device *dev, struct comedi_devconfig *it) ret = comedi_pci_enable(pcidev, "dt3000"); if (ret < 0) return ret; + dev->iobase = 1; /* the "detach" needs this */ - dev->iobase = pci_resource_start(pcidev, 0); - devpriv->io_addr = ioremap(dev->iobase, DT3000_SIZE); + pci_base = pci_resource_start(pcidev, 0); + devpriv->io_addr = ioremap(pci_base, DT3000_SIZE); if (!devpriv->io_addr) return -ENOMEM; diff --git a/trunk/drivers/staging/comedi/drivers/rtd520.c b/trunk/drivers/staging/comedi/drivers/rtd520.c index 112fdc3e9c69..5aa8be1e7b92 100644 --- a/trunk/drivers/staging/comedi/drivers/rtd520.c +++ b/trunk/drivers/staging/comedi/drivers/rtd520.c @@ -1619,9 +1619,8 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it) struct rtdPrivate *devpriv; struct pci_dev *pcidev; struct comedi_subdevice *s; + resource_size_t pci_base; int ret; - resource_size_t physLas1; /* data area */ - resource_size_t physLcfg; /* PLX9080 */ #ifdef USE_DMA int index; #endif @@ -1655,20 +1654,15 @@ static int rtd_attach(struct comedi_device *dev, struct comedi_devconfig *it) printk(KERN_INFO "Failed to enable PCI device and request regions.\n"); return ret; } - - /* - * Initialize base addresses - */ - /* Get the physical address from PCI config */ - dev->iobase = pci_resource_start(pcidev, LAS0_PCIINDEX); - physLas1 = pci_resource_start(pcidev, LAS1_PCIINDEX); - physLcfg = pci_resource_start(pcidev, LCFG_PCIINDEX); - /* Now have the kernel map this into memory */ - /* ASSUME page aligned */ - devpriv->las0 = ioremap_nocache(dev->iobase, LAS0_PCISIZE); - devpriv->las1 = ioremap_nocache(physLas1, LAS1_PCISIZE); - devpriv->lcfg = 
ioremap_nocache(physLcfg, LCFG_PCISIZE); - + dev->iobase = 1; /* the "detach" needs this */ + + /* Initialize the base addresses */ + pci_base = pci_resource_start(pcidev, LAS0_PCIINDEX); + devpriv->las0 = ioremap_nocache(pci_base, LAS0_PCISIZE); + pci_base = pci_resource_start(pcidev, LAS1_PCIINDEX); + devpriv->las1 = ioremap_nocache(pci_base, LAS1_PCISIZE); + pci_base = pci_resource_start(pcidev, LCFG_PCIINDEX); + devpriv->lcfg = ioremap_nocache(pci_base, LCFG_PCISIZE); if (!devpriv->las0 || !devpriv->las1 || !devpriv->lcfg) return -ENOMEM; diff --git a/trunk/drivers/staging/comedi/drivers/usbdux.c b/trunk/drivers/staging/comedi/drivers/usbdux.c index 848c7ec06976..11ee83681da7 100644 --- a/trunk/drivers/staging/comedi/drivers/usbdux.c +++ b/trunk/drivers/staging/comedi/drivers/usbdux.c @@ -102,6 +102,7 @@ sampling rate. If you sample two channels you get 4kHz and so on. #define BULK_TIMEOUT 1000 /* constants for "firmware" upload and download */ +#define FIRMWARE "usbdux_firmware.bin" #define USBDUXSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -2791,7 +2792,7 @@ static int usbdux_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbdux_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxsub + index, @@ -2850,3 +2851,4 @@ module_comedi_usb_driver(usbdux_driver, usbdux_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("Stirling/ITL USB-DUX -- Bernd.Porr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/trunk/drivers/staging/comedi/drivers/usbduxfast.c b/trunk/drivers/staging/comedi/drivers/usbduxfast.c index d9911588c10a..8eb41257c6ce 100644 --- a/trunk/drivers/staging/comedi/drivers/usbduxfast.c +++ b/trunk/drivers/staging/comedi/drivers/usbduxfast.c @@ -57,6 +57,7 @@ /* * constants for "firmware" upload and download */ +#define FIRMWARE "usbduxfast_firmware.bin" #define USBDUXFASTSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -1706,7 +1707,7 @@ static int usbduxfast_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbduxfast_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxfastsub + index, @@ -1774,3 +1775,4 @@ module_comedi_usb_driver(usbduxfast_driver, usbduxfast_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("USB-DUXfast, BerndPorr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git a/trunk/drivers/staging/comedi/drivers/usbduxsigma.c b/trunk/drivers/staging/comedi/drivers/usbduxsigma.c index 543e604791e2..f54ab8c2fcfd 100644 --- a/trunk/drivers/staging/comedi/drivers/usbduxsigma.c +++ b/trunk/drivers/staging/comedi/drivers/usbduxsigma.c @@ -63,6 +63,7 @@ Status: testing #define BULK_TIMEOUT 1000 /* constants for "firmware" upload and download */ +#define FIRMWARE "usbduxsigma_firmware.bin" #define USBDUXSUB_FIRMWARE 0xA0 #define VENDOR_DIR_IN 0xC0 #define VENDOR_DIR_OUT 0x40 @@ -2780,7 +2781,7 @@ static int usbduxsigma_usb_probe(struct usb_interface *uinterf, ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, - "usbduxsigma_firmware.bin", + FIRMWARE, &udev->dev, GFP_KERNEL, usbduxsub + index, @@ -2845,3 +2846,4 @@ module_comedi_usb_driver(usbduxsigma_driver, usbduxsigma_usb_driver); MODULE_AUTHOR("Bernd Porr, BerndPorr@f2s.com"); MODULE_DESCRIPTION("Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"); MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(FIRMWARE); diff --git 
a/trunk/drivers/staging/csr/Kconfig b/trunk/drivers/staging/csr/Kconfig index cee8d48d2af9..ad2a1096e920 100644 --- a/trunk/drivers/staging/csr/Kconfig +++ b/trunk/drivers/staging/csr/Kconfig @@ -1,6 +1,6 @@ config CSR_WIFI tristate "CSR wireless driver" - depends on MMC && CFG80211_WEXT + depends on MMC && CFG80211_WEXT && INET select WIRELESS_EXT select WEXT_PRIV help diff --git a/trunk/drivers/staging/iio/accel/lis3l02dq_ring.c b/trunk/drivers/staging/iio/accel/lis3l02dq_ring.c index 18d108fd967a..f3da59063ed2 100644 --- a/trunk/drivers/staging/iio/accel/lis3l02dq_ring.c +++ b/trunk/drivers/staging/iio/accel/lis3l02dq_ring.c @@ -121,8 +121,10 @@ static int lis3l02dq_get_buffer_element(struct iio_dev *indio_dev, if (rx_array == NULL) return -ENOMEM; ret = lis3l02dq_read_all(indio_dev, rx_array); - if (ret < 0) + if (ret < 0) { + kfree(rx_array); return ret; + } for (i = 0; i < scan_count; i++) data[i] = combine_8_to_16(rx_array[i*4+1], rx_array[i*4+3]); diff --git a/trunk/drivers/staging/iio/adc/ad7192.c b/trunk/drivers/staging/iio/adc/ad7192.c index 22c3923d55eb..19a064d649e3 100644 --- a/trunk/drivers/staging/iio/adc/ad7192.c +++ b/trunk/drivers/staging/iio/adc/ad7192.c @@ -647,6 +647,8 @@ static ssize_t ad7192_write_frequency(struct device *dev, ret = strict_strtoul(buf, 10, &lval); if (ret) return ret; + if (lval == 0) + return -EINVAL; mutex_lock(&indio_dev->mlock); if (iio_buffer_enabled(indio_dev)) { @@ -754,7 +756,7 @@ static ssize_t ad7192_set(struct device *dev, else st->mode &= ~AD7192_MODE_ACX; - ad7192_write_reg(st, AD7192_REG_GPOCON, 3, st->mode); + ad7192_write_reg(st, AD7192_REG_MODE, 3, st->mode); break; default: ret = -EINVAL; @@ -798,6 +800,11 @@ static const struct attribute_group ad7195_attribute_group = { .attrs = ad7195_attributes, }; +static unsigned int ad7192_get_temp_scale(bool unipolar) +{ + return unipolar ? 
2815 * 2 : 2815; +} + static int ad7192_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, @@ -824,19 +831,6 @@ static int ad7192_read_raw(struct iio_dev *indio_dev, *val = (smpl >> chan->scan_type.shift) & ((1 << (chan->scan_type.realbits)) - 1); - switch (chan->type) { - case IIO_VOLTAGE: - if (!unipolar) - *val -= (1 << (chan->scan_type.realbits - 1)); - break; - case IIO_TEMP: - *val -= 0x800000; - *val /= 2815; /* temp Kelvin */ - *val -= 273; /* temp Celsius */ - break; - default: - return -EINVAL; - } return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -848,11 +842,21 @@ static int ad7192_read_raw(struct iio_dev *indio_dev, mutex_unlock(&indio_dev->mlock); return IIO_VAL_INT_PLUS_NANO; case IIO_TEMP: - *val = 1000; - return IIO_VAL_INT; + *val = 0; + *val2 = 1000000000 / ad7192_get_temp_scale(unipolar); + return IIO_VAL_INT_PLUS_NANO; default: return -EINVAL; } + case IIO_CHAN_INFO_OFFSET: + if (!unipolar) + *val = -(1 << (chan->scan_type.realbits - 1)); + else + *val = 0; + /* Kelvin to Celsius */ + if (chan->type == IIO_TEMP) + *val -= 273 * ad7192_get_temp_scale(unipolar); + return IIO_VAL_INT; } return -EINVAL; @@ -890,7 +894,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev, } ret = 0; } - + break; default: ret = -EINVAL; } @@ -942,20 +946,22 @@ static const struct iio_info ad7195_info = { .channel = _chan, \ .channel2 = _chan2, \ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \ - IIO_CHAN_INFO_SCALE_SHARED_BIT, \ + IIO_CHAN_INFO_SCALE_SHARED_BIT | \ + IIO_CHAN_INFO_OFFSET_SHARED_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} #define AD7192_CHAN(_chan, _address, _si) \ { .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = _chan, \ .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \ - IIO_CHAN_INFO_SCALE_SHARED_BIT, \ + IIO_CHAN_INFO_SCALE_SHARED_BIT | \ + IIO_CHAN_INFO_OFFSET_SHARED_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} #define AD7192_CHAN_TEMP(_chan, _address, _si) \ { .type = IIO_TEMP, \ @@ -965,7 +971,7 @@ static const struct iio_info ad7195_info = { IIO_CHAN_INFO_SCALE_SEPARATE_BIT, \ .address = _address, \ .scan_index = _si, \ - .scan_type = IIO_ST('s', 24, 32, 0)} + .scan_type = IIO_ST('u', 24, 32, 0)} static struct iio_chan_spec ad7192_channels[] = { AD7192_CHAN_DIFF(1, 2, NULL, AD7192_CH_AIN1P_AIN2M, 0), diff --git a/trunk/drivers/staging/iio/adc/ad7298_ring.c b/trunk/drivers/staging/iio/adc/ad7298_ring.c index fd1d855ff57a..506016f01593 100644 --- a/trunk/drivers/staging/iio/adc/ad7298_ring.c +++ b/trunk/drivers/staging/iio/adc/ad7298_ring.c @@ -76,7 +76,7 @@ static irqreturn_t ad7298_trigger_handler(int irq, void *p) struct iio_dev *indio_dev = pf->indio_dev; struct ad7298_state *st = iio_priv(indio_dev); struct iio_buffer *ring = indio_dev->buffer; - s64 time_ns; + s64 time_ns = 0; __u16 buf[16]; int b_sent, i; diff --git a/trunk/drivers/staging/iio/adc/ad7780.c b/trunk/drivers/staging/iio/adc/ad7780.c index 1ece2ac8de56..19ee49c95de4 100644 --- a/trunk/drivers/staging/iio/adc/ad7780.c +++ b/trunk/drivers/staging/iio/adc/ad7780.c @@ -131,9 +131,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = { .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_type = { - .sign = 's', + .sign = 'u', .realbits = 24, .storagebits = 32, 
.shift = 8, @@ -146,9 +147,10 @@ static const struct ad7780_chip_info ad7780_chip_info_tbl[] = { .indexed = 1, .channel = 0, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_type = { - .sign = 's', + .sign = 'u', .realbits = 20, .storagebits = 32, .shift = 12, diff --git a/trunk/drivers/staging/iio/adc/ad7793.c b/trunk/drivers/staging/iio/adc/ad7793.c index 76fdd7145fc5..112e2b7b5bc4 100644 --- a/trunk/drivers/staging/iio/adc/ad7793.c +++ b/trunk/drivers/staging/iio/adc/ad7793.c @@ -563,8 +563,9 @@ static ssize_t ad7793_show_scale_available(struct device *dev, return len; } -static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, in-in_scale_available, - S_IRUGO, ad7793_show_scale_available, NULL, 0); +static IIO_DEVICE_ATTR_NAMED(in_m_in_scale_available, + in_voltage-voltage_scale_available, S_IRUGO, + ad7793_show_scale_available, NULL, 0); static struct attribute *ad7793_attributes[] = { &iio_dev_attr_sampling_frequency.dev_attr.attr, @@ -604,9 +605,6 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, *val = (smpl >> chan->scan_type.shift) & ((1 << (chan->scan_type.realbits)) - 1); - if (!unipolar) - *val -= (1 << (chan->scan_type.realbits - 1)); - return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: @@ -620,25 +618,38 @@ static int ad7793_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT_PLUS_NANO; } else { /* 1170mV / 2^23 * 6 */ - scale_uv = (1170ULL * 100000000ULL * 6ULL) - >> (chan->scan_type.realbits - - (unipolar ? 0 : 1)); + scale_uv = (1170ULL * 100000000ULL * 6ULL); } break; case IIO_TEMP: - /* Always uses unity gain and internal ref */ - scale_uv = (2500ULL * 100000000ULL) - >> (chan->scan_type.realbits - - (unipolar ? 0 : 1)); + /* 1170mV / 0.81 mV/C / 2^23 */ + scale_uv = 1444444444444ULL; break; default: return -EINVAL; } - *val2 = do_div(scale_uv, 100000000) * 10; - *val = scale_uv; - + scale_uv >>= (chan->scan_type.realbits - (unipolar ? 0 : 1)); + *val = 0; + *val2 = scale_uv; return IIO_VAL_INT_PLUS_NANO; + case IIO_CHAN_INFO_OFFSET: + if (!unipolar) + *val = -(1 << (chan->scan_type.realbits - 1)); + else + *val = 0; + + /* Kelvin to Celsius */ + if (chan->type == IIO_TEMP) { + unsigned long long offset; + unsigned int shift; + + shift = chan->scan_type.realbits - (unipolar ? 
0 : 1); + offset = 273ULL << shift; + do_div(offset, 1444); + *val -= offset; + } + return IIO_VAL_INT; } return -EINVAL; } @@ -676,7 +687,7 @@ static int ad7793_write_raw(struct iio_dev *indio_dev, } ret = 0; } - + break; default: ret = -EINVAL; } @@ -720,9 +731,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 0, .address = AD7793_CH_AIN1P_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 0, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[1] = { .type = IIO_VOLTAGE, @@ -732,9 +744,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 1, .address = AD7793_CH_AIN2P_AIN2M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 1, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[2] = { .type = IIO_VOLTAGE, @@ -744,9 +757,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN3P_AIN3M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 2, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[3] = { .type = IIO_VOLTAGE, @@ -757,9 +771,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN1M_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 3, - .scan_type = IIO_ST('s', 24, 32, 0) + .scan_type = IIO_ST('u', 24, 32, 0) }, .channel[4] = { .type = IIO_TEMP, @@ -769,7 +784,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SEPARATE_BIT, .scan_index = 4, - .scan_type = IIO_ST('s', 24, 32, 0), + .scan_type = IIO_ST('u', 24, 32, 0), }, .channel[5] = { .type = IIO_VOLTAGE, @@ -778,9 +793,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel = 4, .address = AD7793_CH_AVDD_MONITOR, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SEPARATE_BIT, + IIO_CHAN_INFO_SCALE_SEPARATE_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 5, - .scan_type = IIO_ST('s', 24, 32, 0), + .scan_type = IIO_ST('u', 24, 32, 0), }, .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6), }, @@ -793,9 +809,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 0, .address = AD7793_CH_AIN1P_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 0, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[1] = { .type = IIO_VOLTAGE, @@ -805,9 +822,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 1, .address = AD7793_CH_AIN2P_AIN2M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 1, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[2] = { .type = IIO_VOLTAGE, @@ -817,9 +835,10 @@ static const struct ad7793_chip_info 
ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN3P_AIN3M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 2, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[3] = { .type = IIO_VOLTAGE, @@ -830,9 +849,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel2 = 2, .address = AD7793_CH_AIN1M_AIN1M, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SHARED_BIT, + IIO_CHAN_INFO_SCALE_SHARED_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 3, - .scan_type = IIO_ST('s', 16, 32, 0) + .scan_type = IIO_ST('u', 16, 32, 0) }, .channel[4] = { .type = IIO_TEMP, @@ -842,7 +862,7 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | IIO_CHAN_INFO_SCALE_SEPARATE_BIT, .scan_index = 4, - .scan_type = IIO_ST('s', 16, 32, 0), + .scan_type = IIO_ST('u', 16, 32, 0), }, .channel[5] = { .type = IIO_VOLTAGE, @@ -851,9 +871,10 @@ static const struct ad7793_chip_info ad7793_chip_info_tbl[] = { .channel = 4, .address = AD7793_CH_AVDD_MONITOR, .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | - IIO_CHAN_INFO_SCALE_SEPARATE_BIT, + IIO_CHAN_INFO_SCALE_SEPARATE_BIT | + IIO_CHAN_INFO_OFFSET_SHARED_BIT, .scan_index = 5, - .scan_type = IIO_ST('s', 16, 32, 0), + .scan_type = IIO_ST('u', 16, 32, 0), }, .channel[6] = IIO_CHAN_SOFT_TIMESTAMP(6), }, @@ -901,7 +922,7 @@ static int __devinit ad7793_probe(struct spi_device *spi) else if (voltage_uv) st->int_vref_mv = voltage_uv / 1000; else - st->int_vref_mv = 2500; /* Build-in ref */ + st->int_vref_mv = 1170; /* Build-in ref */ spi_set_drvdata(spi, indio_dev); st->spi = spi; diff --git a/trunk/drivers/staging/iio/gyro/adis16260_core.c b/trunk/drivers/staging/iio/gyro/adis16260_core.c index 93aa431287ac..eb8e9d69efd3 100644 --- a/trunk/drivers/staging/iio/gyro/adis16260_core.c +++ b/trunk/drivers/staging/iio/gyro/adis16260_core.c @@ -195,6 +195,8 @@ static ssize_t adis16260_write_frequency(struct device *dev, ret = strict_strtol(buf, 10, &val); if (ret) return ret; + if (val == 0) + return -EINVAL; mutex_lock(&indio_dev->mlock); if (spi_get_device_id(st->us)) { diff --git a/trunk/drivers/staging/iio/imu/adis16400_core.c b/trunk/drivers/staging/iio/imu/adis16400_core.c index 1f4c17779b5a..a618327e06ed 100644 --- a/trunk/drivers/staging/iio/imu/adis16400_core.c +++ b/trunk/drivers/staging/iio/imu/adis16400_core.c @@ -234,6 +234,8 @@ static ssize_t adis16400_write_frequency(struct device *dev, ret = strict_strtol(buf, 10, &val); if (ret) return ret; + if (val == 0) + return -EINVAL; mutex_lock(&indio_dev->mlock); diff --git a/trunk/drivers/staging/iio/meter/ade7753.c b/trunk/drivers/staging/iio/meter/ade7753.c index f04ece7fbc2f..3ccff189f258 100644 --- a/trunk/drivers/staging/iio/meter/ade7753.c +++ b/trunk/drivers/staging/iio/meter/ade7753.c @@ -425,6 +425,8 @@ static ssize_t ade7753_write_frequency(struct device *dev, ret = strict_strtol(buf, 10, &val); if (ret) return ret; + if (val == 0) + return -EINVAL; mutex_lock(&indio_dev->mlock); diff --git a/trunk/drivers/staging/iio/meter/ade7754.c b/trunk/drivers/staging/iio/meter/ade7754.c index 6cee28a5e877..abb1e9c8d094 100644 --- a/trunk/drivers/staging/iio/meter/ade7754.c +++ b/trunk/drivers/staging/iio/meter/ade7754.c @@ -445,6 +445,8 @@ static ssize_t ade7754_write_frequency(struct device *dev, ret = strict_strtol(buf, 10, &val); if (ret) return ret; + if (val == 
0) + return -EINVAL; mutex_lock(&indio_dev->mlock); diff --git a/trunk/drivers/staging/iio/meter/ade7759.c b/trunk/drivers/staging/iio/meter/ade7759.c index b3f7e0fa9612..eb0a2a98f388 100644 --- a/trunk/drivers/staging/iio/meter/ade7759.c +++ b/trunk/drivers/staging/iio/meter/ade7759.c @@ -385,6 +385,8 @@ static ssize_t ade7759_write_frequency(struct device *dev, ret = strict_strtol(buf, 10, &val); if (ret) return ret; + if (val == 0) + return -EINVAL; mutex_lock(&indio_dev->mlock); diff --git a/trunk/drivers/staging/nvec/nvec.c b/trunk/drivers/staging/nvec/nvec.c index 695ea35f75b0..d0a7e408efe9 100644 --- a/trunk/drivers/staging/nvec/nvec.c +++ b/trunk/drivers/staging/nvec/nvec.c @@ -837,7 +837,7 @@ static int __devinit tegra_nvec_probe(struct platform_device *pdev) } ret = mfd_add_devices(nvec->dev, -1, nvec_devices, - ARRAY_SIZE(nvec_devices), base, 0); + ARRAY_SIZE(nvec_devices), base, 0, NULL); if (ret) dev_err(nvec->dev, "error adding subdevices\n"); diff --git a/trunk/drivers/staging/omapdrm/omap_connector.c b/trunk/drivers/staging/omapdrm/omap_connector.c index 5e2856c0e0bb..55e9c8655850 100644 --- a/trunk/drivers/staging/omapdrm/omap_connector.c +++ b/trunk/drivers/staging/omapdrm/omap_connector.c @@ -48,13 +48,20 @@ static inline void copy_timings_omap_to_drm(struct drm_display_mode *mode, mode->vsync_end = mode->vsync_start + timings->vsw; mode->vtotal = mode->vsync_end + timings->vbp; - /* note: whether or not it is interlaced, +/- h/vsync, etc, - * which should be set in the mode flags, is not exposed in - * the omap_video_timings struct.. but hdmi driver tracks - * those separately so all we have to have to set the mode - * is the way to recover these timings values, and the - * omap_dss_driver would do the rest. - */ + mode->flags = 0; + + if (timings->interlace) + mode->flags |= DRM_MODE_FLAG_INTERLACE; + + if (timings->hsync_level == OMAPDSS_SIG_ACTIVE_HIGH) + mode->flags |= DRM_MODE_FLAG_PHSYNC; + else + mode->flags |= DRM_MODE_FLAG_NHSYNC; + + if (timings->vsync_level == OMAPDSS_SIG_ACTIVE_HIGH) + mode->flags |= DRM_MODE_FLAG_PVSYNC; + else + mode->flags |= DRM_MODE_FLAG_NVSYNC; } static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings, @@ -71,6 +78,22 @@ static inline void copy_timings_drm_to_omap(struct omap_video_timings *timings, timings->vfp = mode->vsync_start - mode->vdisplay; timings->vsw = mode->vsync_end - mode->vsync_start; timings->vbp = mode->vtotal - mode->vsync_end; + + timings->interlace = !!(mode->flags & DRM_MODE_FLAG_INTERLACE); + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + timings->hsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + else + timings->hsync_level = OMAPDSS_SIG_ACTIVE_LOW; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + timings->vsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + else + timings->vsync_level = OMAPDSS_SIG_ACTIVE_LOW; + + timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; } static void omap_connector_dpms(struct drm_connector *connector, int mode) @@ -187,7 +210,7 @@ static int omap_connector_get_modes(struct drm_connector *connector) } } else { struct drm_display_mode *mode = drm_mode_create(dev); - struct omap_video_timings timings; + struct omap_video_timings timings = {0}; dssdrv->get_timings(dssdev, &timings); @@ -291,7 +314,7 @@ void omap_connector_mode_set(struct drm_connector *connector, struct omap_connector *omap_connector = to_omap_connector(connector); struct omap_dss_device *dssdev = 
omap_connector->dssdev; struct omap_dss_driver *dssdrv = dssdev->driver; - struct omap_video_timings timings; + struct omap_video_timings timings = {0}; copy_timings_drm_to_omap(&timings, mode); diff --git a/trunk/drivers/staging/ozwpan/ozcdev.c b/trunk/drivers/staging/ozwpan/ozcdev.c index d98321945802..758ce0a8d82e 100644 --- a/trunk/drivers/staging/ozwpan/ozcdev.c +++ b/trunk/drivers/staging/ozwpan/ozcdev.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include "ozconfig.h" @@ -213,7 +214,7 @@ static int oz_set_active_pd(u8 *addr) if (old_pd) oz_pd_put(old_pd); } else { - if (!memcmp(addr, "\0\0\0\0\0\0", sizeof(addr))) { + if (is_zero_ether_addr(addr)) { spin_lock_bh(&g_cdev.lock); pd = g_cdev.active_pd; g_cdev.active_pd = 0; diff --git a/trunk/drivers/staging/rtl8712/recv_linux.c b/trunk/drivers/staging/rtl8712/recv_linux.c index 0e26d5f6cf2d..495ee1205e02 100644 --- a/trunk/drivers/staging/rtl8712/recv_linux.c +++ b/trunk/drivers/staging/rtl8712/recv_linux.c @@ -117,13 +117,8 @@ void r8712_recv_indicatepkt(struct _adapter *padapter, if (skb == NULL) goto _recv_indicatepkt_drop; skb->data = precv_frame->u.hdr.rx_data; -#ifdef NET_SKBUFF_DATA_USES_OFFSET - skb->tail = (sk_buff_data_t)(precv_frame->u.hdr.rx_tail - - precv_frame->u.hdr.rx_head); -#else - skb->tail = (sk_buff_data_t)precv_frame->u.hdr.rx_tail; -#endif skb->len = precv_frame->u.hdr.len; + skb_set_tail_pointer(skb, skb->len); if ((pattrib->tcpchk_valid == 1) && (pattrib->tcp_chkrpt == 1)) skb->ip_summed = CHECKSUM_UNNECESSARY; else diff --git a/trunk/drivers/staging/vt6656/dpc.c b/trunk/drivers/staging/vt6656/dpc.c index e4bdf2a2b582..3aa895ec6507 100644 --- a/trunk/drivers/staging/vt6656/dpc.c +++ b/trunk/drivers/staging/vt6656/dpc.c @@ -200,7 +200,7 @@ s_vProcessRxMACHeader ( } else if (!compare_ether_addr(pbyRxBuffer, &pDevice->abySNAP_RFC1042[0])) { cbHeaderSize += 6; pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize); - if ((*pwType == cpu_to_le16(ETH_P_IPX)) || + if ((*pwType == cpu_to_be16(ETH_P_IPX)) || (*pwType == cpu_to_le16(0xF380))) { cbHeaderSize -= 8; pwType = (PWORD) (pbyRxBufferAddr + cbHeaderSize); diff --git a/trunk/drivers/staging/vt6656/main_usb.c b/trunk/drivers/staging/vt6656/main_usb.c index b06fd5b723fa..d536756549e6 100644 --- a/trunk/drivers/staging/vt6656/main_usb.c +++ b/trunk/drivers/staging/vt6656/main_usb.c @@ -189,7 +189,7 @@ DEVICE_PARAM(b80211hEnable, "802.11h mode"); // Static vars definitions // -static struct usb_device_id vt6656_table[] __devinitdata = { +static struct usb_device_id vt6656_table[] = { {USB_DEVICE(VNT_USB_VENDOR_ID, VNT_USB_PRODUCT_ID)}, {} }; diff --git a/trunk/drivers/staging/vt6656/rxtx.c b/trunk/drivers/staging/vt6656/rxtx.c index bb464527fc1b..b6e04e7b629b 100644 --- a/trunk/drivers/staging/vt6656/rxtx.c +++ b/trunk/drivers/staging/vt6656/rxtx.c @@ -1699,7 +1699,7 @@ s_bPacketToWirelessUsb( // 802.1H if (ntohs(psEthHeader->wType) > ETH_DATA_LEN) { if (pDevice->dwDiagRefCount == 0) { - if ((psEthHeader->wType == cpu_to_le16(ETH_P_IPX)) || + if ((psEthHeader->wType == cpu_to_be16(ETH_P_IPX)) || (psEthHeader->wType == cpu_to_le16(0xF380))) { memcpy((PBYTE) (pbyPayloadHead), abySNAP_Bridgetunnel, 6); @@ -2838,10 +2838,10 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) Packet_Type = skb->data[ETH_HLEN+1]; Descriptor_type = skb->data[ETH_HLEN+1+1+2]; Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]); - if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) { - /* 802.1x OR eapol-key 
challenge frame transfer */ - if (((Protocol_Version == 1) || (Protocol_Version == 2)) && - (Packet_Type == 3)) { + if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) { + /* 802.1x OR eapol-key challenge frame transfer */ + if (((Protocol_Version == 1) || (Protocol_Version == 2)) && + (Packet_Type == 3)) { bTxeapol_key = TRUE; if(!(Key_info & BIT3) && //WPA or RSN group-key challenge (Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key @@ -2987,19 +2987,19 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) } } - if (pDevice->sTxEthHeader.wType == cpu_to_le16(ETH_P_PAE)) { - if (pDevice->byBBType != BB_TYPE_11A) { - pDevice->wCurrentRate = RATE_1M; - pDevice->byACKRate = RATE_1M; - pDevice->byTopCCKBasicRate = RATE_1M; - pDevice->byTopOFDMBasicRate = RATE_6M; - } else { - pDevice->wCurrentRate = RATE_6M; - pDevice->byACKRate = RATE_6M; - pDevice->byTopCCKBasicRate = RATE_1M; - pDevice->byTopOFDMBasicRate = RATE_6M; - } - } + if (pDevice->sTxEthHeader.wType == cpu_to_be16(ETH_P_PAE)) { + if (pDevice->byBBType != BB_TYPE_11A) { + pDevice->wCurrentRate = RATE_1M; + pDevice->byACKRate = RATE_1M; + pDevice->byTopCCKBasicRate = RATE_1M; + pDevice->byTopOFDMBasicRate = RATE_6M; + } else { + pDevice->wCurrentRate = RATE_6M; + pDevice->byACKRate = RATE_6M; + pDevice->byTopCCKBasicRate = RATE_1M; + pDevice->byTopOFDMBasicRate = RATE_6M; + } + } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dma_tx: pDevice->wCurrentRate = %d\n", @@ -3015,7 +3015,7 @@ int nsDMA_tx_packet(PSDevice pDevice, unsigned int uDMAIdx, struct sk_buff *skb) if (bNeedEncryption == TRUE) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType)); - if ((pDevice->sTxEthHeader.wType) == cpu_to_le16(ETH_P_PAE)) { + if ((pDevice->sTxEthHeader.wType) == cpu_to_be16(ETH_P_PAE)) { bNeedEncryption = FALSE; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType)); if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_ASSOC)) { diff --git a/trunk/drivers/staging/winbond/wbusb.c b/trunk/drivers/staging/winbond/wbusb.c index ef360547ecec..0ca857ac473e 100644 --- a/trunk/drivers/staging/winbond/wbusb.c +++ b/trunk/drivers/staging/winbond/wbusb.c @@ -25,7 +25,7 @@ MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); -static const struct usb_device_id wb35_table[] __devinitconst = { +static const struct usb_device_id wb35_table[] = { { USB_DEVICE(0x0416, 0x0035) }, { USB_DEVICE(0x18E8, 0x6201) }, { USB_DEVICE(0x18E8, 0x6206) }, diff --git a/trunk/drivers/staging/wlan-ng/cfg80211.c b/trunk/drivers/staging/wlan-ng/cfg80211.c index fabff4d650ef..0970127344e6 100644 --- a/trunk/drivers/staging/wlan-ng/cfg80211.c +++ b/trunk/drivers/staging/wlan-ng/cfg80211.c @@ -327,9 +327,9 @@ int prism2_get_station(struct wiphy *wiphy, struct net_device *dev, return result; } -int prism2_scan(struct wiphy *wiphy, struct net_device *dev, - struct cfg80211_scan_request *request) +int prism2_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { + struct net_device *dev = request->wdev->netdev; struct prism2_wiphy_private *priv = wiphy_priv(wiphy); wlandevice_t *wlandev = dev->ml_priv; struct p80211msg_dot11req_scan msg1; diff --git a/trunk/drivers/staging/zcache/zcache-main.c b/trunk/drivers/staging/zcache/zcache-main.c index c214977b4ab4..52b43b7b83d7 100644 --- a/trunk/drivers/staging/zcache/zcache-main.c +++ b/trunk/drivers/staging/zcache/zcache-main.c @@ -1251,13 +1251,12 @@ 
static int zcache_pampd_get_data_and_free(char *data, size_t *bufsize, bool raw, void *pampd, struct tmem_pool *pool, struct tmem_oid *oid, uint32_t index) { - int ret = 0; - BUG_ON(!is_ephemeral(pool)); - zbud_decompress((struct page *)(data), pampd); + if (zbud_decompress((struct page *)(data), pampd) < 0) + return -EINVAL; zbud_free_and_delist((struct zbud_hdr *)pampd); atomic_dec(&zcache_curr_eph_pampd_count); - return ret; + return 0; } /* diff --git a/trunk/drivers/target/iscsi/iscsi_target_login.c b/trunk/drivers/target/iscsi/iscsi_target_login.c index 0694d9b1bce6..6aba4395e8d8 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_login.c +++ b/trunk/drivers/target/iscsi/iscsi_target_login.c @@ -221,6 +221,7 @@ static int iscsi_login_zero_tsih_s1( { struct iscsi_session *sess = NULL; struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; + int ret; sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL); if (!sess) { @@ -257,9 +258,17 @@ static int iscsi_login_zero_tsih_s1( return -ENOMEM; } spin_lock(&sess_idr_lock); - idr_get_new(&sess_idr, NULL, &sess->session_index); + ret = idr_get_new(&sess_idr, NULL, &sess->session_index); spin_unlock(&sess_idr_lock); + if (ret < 0) { + pr_err("idr_get_new() for sess_idr failed\n"); + iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, + ISCSI_LOGIN_STATUS_NO_RESOURCES); + kfree(sess); + return -ENOMEM; + } + sess->creation_time = get_jiffies_64(); spin_lock_init(&sess->session_stats_lock); /* diff --git a/trunk/drivers/target/iscsi/iscsi_target_parameters.c b/trunk/drivers/target/iscsi/iscsi_target_parameters.c index 0c4760fabfc0..240f7aa76ed1 100644 --- a/trunk/drivers/target/iscsi/iscsi_target_parameters.c +++ b/trunk/drivers/target/iscsi/iscsi_target_parameters.c @@ -662,7 +662,7 @@ int iscsi_extract_key_value(char *textbuf, char **key, char **value) { *value = strchr(textbuf, '='); if (!*value) { - pr_err("Unable to locate \"=\" seperator for key," + pr_err("Unable to locate \"=\" separator for key," " ignoring request.\n"); return -1; } @@ -1269,7 +1269,7 @@ static int iscsi_check_value(struct iscsi_param *param, char *value) comma_ptr = strchr(value, ','); if (comma_ptr && !IS_TYPE_VALUE_LIST(param)) { - pr_err("Detected value seperator \",\", but" + pr_err("Detected value separator \",\", but" " key \"%s\" does not allow a value list," " protocol error.\n", param->name); return -1; diff --git a/trunk/drivers/target/target_core_alua.c b/trunk/drivers/target/target_core_alua.c index 91799973081a..41641ba54828 100644 --- a/trunk/drivers/target/target_core_alua.c +++ b/trunk/drivers/target/target_core_alua.c @@ -218,6 +218,13 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd) cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return -EINVAL; } + if (cmd->data_length < 4) { + pr_warn("SET TARGET PORT GROUPS parameter list length %u too" + " small\n", cmd->data_length); + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; + } + buf = transport_kmap_data_sg(cmd); /* diff --git a/trunk/drivers/target/target_core_device.c b/trunk/drivers/target/target_core_device.c index cf2c66f3c116..9fc9a6006ca0 100644 --- a/trunk/drivers/target/target_core_device.c +++ b/trunk/drivers/target/target_core_device.c @@ -669,6 +669,13 @@ int target_report_luns(struct se_cmd *se_cmd) unsigned char *buf; u32 lun_count = 0, offset = 8, i; + if (se_cmd->data_length < 16) { + pr_warn("REPORT LUNS allocation length %u too small\n", + se_cmd->data_length); + se_cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD; + 
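Editorial sketch, separate from the patch itself: the target-core hunks in this area (SET TARGET PORT GROUPS, REPORT LUNS, UNMAP, PERSISTENT RESERVE OUT) all add a check that the transport-supplied data length can hold at least the fixed header before the payload is parsed. The small program below only sketches that validation pattern; the minimum lengths, error value, and function name are illustrative assumptions, not the target-core code.

/*
 * Sketch of a parameter-list length check performed before parsing,
 * mirroring the pattern the hunks above introduce.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define EXAMPLE_EINVAL 22	/* stand-in error magnitude for the illustration */

/* Reject a buffer that cannot even hold the fixed header of the list. */
static int check_parameter_list(const uint8_t *buf, size_t len, size_t min_len)
{
	if (len < min_len) {
		fprintf(stderr,
			"parameter list length %zu too small (need %zu)\n",
			len, min_len);
		return -EXAMPLE_EINVAL;
	}
	(void)buf;	/* descriptor parsing would start here */
	return 0;
}

int main(void)
{
	uint8_t short_buf[2] = { 0 };
	uint8_t ok_buf[16] = { 0 };

	printf("%d\n", check_parameter_list(short_buf, sizeof(short_buf), 8));	/* -22 */
	printf("%d\n", check_parameter_list(ok_buf, sizeof(ok_buf), 8));	/* 0 */
	return 0;
}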
return -EINVAL; + } + buf = transport_kmap_data_sg(se_cmd); if (!buf) return -ENOMEM; diff --git a/trunk/drivers/target/target_core_iblock.c b/trunk/drivers/target/target_core_iblock.c index 76db75e836ed..9ba495477fd2 100644 --- a/trunk/drivers/target/target_core_iblock.c +++ b/trunk/drivers/target/target_core_iblock.c @@ -325,17 +325,30 @@ static int iblock_execute_unmap(struct se_cmd *cmd) struct iblock_dev *ibd = dev->dev_ptr; unsigned char *buf, *ptr = NULL; sector_t lba; - int size = cmd->data_length; + int size; u32 range; int ret = 0; int dl, bd_dl; + if (cmd->data_length < 8) { + pr_warn("UNMAP parameter list length %u too small\n", + cmd->data_length); + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + return -EINVAL; + } + buf = transport_kmap_data_sg(cmd); dl = get_unaligned_be16(&buf[0]); bd_dl = get_unaligned_be16(&buf[2]); - size = min(size - 8, bd_dl); + size = cmd->data_length - 8; + if (bd_dl > size) + pr_warn("UNMAP parameter list length %u too small, ignoring bd_dl %u\n", + cmd->data_length, bd_dl); + else + size = bd_dl; + if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) { cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; ret = -EINVAL; diff --git a/trunk/drivers/target/target_core_pr.c b/trunk/drivers/target/target_core_pr.c index 1e946502c378..956c84c6b666 100644 --- a/trunk/drivers/target/target_core_pr.c +++ b/trunk/drivers/target/target_core_pr.c @@ -1540,6 +1540,14 @@ static int core_scsi3_decode_spec_i_port( tidh_new->dest_local_nexus = 1; list_add_tail(&tidh_new->dest_list, &tid_dest_list); + if (cmd->data_length < 28) { + pr_warn("SPC-PR: Received PR OUT parameter list" + " length too small: %u\n", cmd->data_length); + cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST; + ret = -EINVAL; + goto out; + } + buf = transport_kmap_data_sg(cmd); /* * For a PERSISTENT RESERVE OUT specify initiator ports payload, diff --git a/trunk/drivers/target/target_core_pscsi.c b/trunk/drivers/target/target_core_pscsi.c index 6e32ff6f2fa0..9d7ce3daa262 100644 --- a/trunk/drivers/target/target_core_pscsi.c +++ b/trunk/drivers/target/target_core_pscsi.c @@ -667,24 +667,32 @@ static void pscsi_free_device(void *p) kfree(pdv); } -static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg) +static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, + unsigned char *sense_buffer) { struct pscsi_dev_virt *pdv = cmd->se_dev->dev_ptr; struct scsi_device *sd = pdv->pdv_sd; int result; struct pscsi_plugin_task *pt = cmd->priv; - unsigned char *cdb = &pt->pscsi_cdb[0]; + unsigned char *cdb; + /* + * Special case for REPORT_LUNs handling where pscsi_plugin_task has + * not been allocated because TCM is handling the emulation directly. + */ + if (!pt) + return; + cdb = &pt->pscsi_cdb[0]; result = pt->pscsi_result; /* * Hack to make sure that Write-Protect modepage is set if R/O mode is * forced. 
*/ + if (!cmd->se_deve || !cmd->data_length) + goto after_mode_sense; + if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && (status_byte(result) << 1) == SAM_STAT_GOOD) { - if (!cmd->se_deve) - goto after_mode_sense; - if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) { unsigned char *buf = transport_kmap_data_sg(cmd); @@ -701,7 +709,7 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg) } after_mode_sense: - if (sd->type != TYPE_TAPE) + if (sd->type != TYPE_TAPE || !cmd->data_length) goto after_mode_select; /* @@ -743,10 +751,10 @@ static int pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg) } after_mode_select: - if (status_byte(result) & CHECK_CONDITION) - return 1; - - return 0; + if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) { + memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER); + cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; + } } enum { @@ -1177,13 +1185,6 @@ static int pscsi_execute_cmd(struct se_cmd *cmd) return -ENOMEM; } -static unsigned char *pscsi_get_sense_buffer(struct se_cmd *cmd) -{ - struct pscsi_plugin_task *pt = cmd->priv; - - return pt->pscsi_sense; -} - /* pscsi_get_device_rev(): * * @@ -1266,7 +1267,6 @@ static struct se_subsystem_api pscsi_template = { .check_configfs_dev_params = pscsi_check_configfs_dev_params, .set_configfs_dev_params = pscsi_set_configfs_dev_params, .show_configfs_dev_params = pscsi_show_configfs_dev_params, - .get_sense_buffer = pscsi_get_sense_buffer, .get_device_rev = pscsi_get_device_rev, .get_device_type = pscsi_get_device_type, .get_blocks = pscsi_get_blocks, diff --git a/trunk/drivers/target/target_core_spc.c b/trunk/drivers/target/target_core_spc.c index 4c861de538c9..388a922c8f6d 100644 --- a/trunk/drivers/target/target_core_spc.c +++ b/trunk/drivers/target/target_core_spc.c @@ -877,9 +877,11 @@ static int spc_emulate_modesense(struct se_cmd *cmd) static int spc_emulate_request_sense(struct se_cmd *cmd) { unsigned char *cdb = cmd->t_task_cdb; - unsigned char *buf; + unsigned char *rbuf; u8 ua_asc = 0, ua_ascq = 0; - int err = 0; + unsigned char buf[SE_SENSE_BUF]; + + memset(buf, 0, SE_SENSE_BUF); if (cdb[1] & 0x01) { pr_err("REQUEST_SENSE description emulation not" @@ -888,20 +890,21 @@ static int spc_emulate_request_sense(struct se_cmd *cmd) return -ENOSYS; } - buf = transport_kmap_data_sg(cmd); - - if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { + rbuf = transport_kmap_data_sg(cmd); + if (cmd->scsi_sense_reason != 0) { + /* + * Out of memory. We will fail with CHECK CONDITION, so + * we must not clear the unit attention condition. 
+ */ + target_complete_cmd(cmd, CHECK_CONDITION); + return 0; + } else if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { /* * CURRENT ERROR, UNIT ATTENTION */ buf[0] = 0x70; buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION; - if (cmd->data_length < 18) { - buf[7] = 0x00; - err = -EINVAL; - goto end; - } /* * The Additional Sense Code (ASC) from the UNIT ATTENTION */ @@ -915,11 +918,6 @@ static int spc_emulate_request_sense(struct se_cmd *cmd) buf[0] = 0x70; buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE; - if (cmd->data_length < 18) { - buf[7] = 0x00; - err = -EINVAL; - goto end; - } /* * NO ADDITIONAL SENSE INFORMATION */ @@ -927,8 +925,11 @@ static int spc_emulate_request_sense(struct se_cmd *cmd) buf[7] = 0x0A; } -end: - transport_kunmap_data_sg(cmd); + if (rbuf) { + memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length)); + transport_kunmap_data_sg(cmd); + } + target_complete_cmd(cmd, GOOD); return 0; } diff --git a/trunk/drivers/target/target_core_transport.c b/trunk/drivers/target/target_core_transport.c index 0eaae23d12b5..269f54488397 100644 --- a/trunk/drivers/target/target_core_transport.c +++ b/trunk/drivers/target/target_core_transport.c @@ -567,6 +567,34 @@ static void target_complete_failure_work(struct work_struct *work) transport_generic_request_failure(cmd); } +/* + * Used when asking transport to copy Sense Data from the underlying + * Linux/SCSI struct scsi_cmnd + */ +static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) +{ + unsigned char *buffer = cmd->sense_buffer; + struct se_device *dev = cmd->se_dev; + u32 offset = 0; + + WARN_ON(!cmd->se_lun); + + if (!dev) + return NULL; + + if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) + return NULL; + + offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); + + /* Automatically padded */ + cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; + + pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", + dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); + return &buffer[offset]; +} + void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) { struct se_device *dev = cmd->se_dev; @@ -580,11 +608,11 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) cmd->transport_state &= ~CMD_T_BUSY; if (dev && dev->transport->transport_complete) { - if (dev->transport->transport_complete(cmd, - cmd->t_data_sg) != 0) { - cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; + dev->transport->transport_complete(cmd, + cmd->t_data_sg, + transport_get_sense_buffer(cmd)); + if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) success = 1; - } } /* @@ -1165,8 +1193,6 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) " 0x%02x\n", cmd->se_tfo->get_fabric_name(), cmd->data_length, size, cmd->t_task_cdb[0]); - cmd->cmd_spdtl = size; - if (cmd->data_direction == DMA_TO_DEVICE) { pr_err("Rejecting underflow/overflow" " WRITE data\n"); @@ -1183,15 +1209,20 @@ int target_cmd_size_check(struct se_cmd *cmd, unsigned int size) /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */ goto out_invalid_cdb_field; } - + /* + * For the overflow case keep the existing fabric provided + * ->data_length. Otherwise for the underflow case, reset + * ->data_length to the smaller SCSI expected data transfer + * length. 
+ */ if (size > cmd->data_length) { cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; cmd->residual_count = (size - cmd->data_length); } else { cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; cmd->residual_count = (cmd->data_length - size); + cmd->data_length = size; } - cmd->data_length = size; } return 0; @@ -1817,61 +1848,6 @@ void target_execute_cmd(struct se_cmd *cmd) } EXPORT_SYMBOL(target_execute_cmd); -/* - * Used to obtain Sense Data from underlying Linux/SCSI struct scsi_cmnd - */ -static int transport_get_sense_data(struct se_cmd *cmd) -{ - unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; - struct se_device *dev = cmd->se_dev; - unsigned long flags; - u32 offset = 0; - - WARN_ON(!cmd->se_lun); - - if (!dev) - return 0; - - spin_lock_irqsave(&cmd->t_state_lock, flags); - if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return 0; - } - - if (!(cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)) - goto out; - - if (!dev->transport->get_sense_buffer) { - pr_err("dev->transport->get_sense_buffer is NULL\n"); - goto out; - } - - sense_buffer = dev->transport->get_sense_buffer(cmd); - if (!sense_buffer) { - pr_err("ITT 0x%08x cmd %p: Unable to locate" - " sense buffer for task with sense\n", - cmd->se_tfo->get_task_tag(cmd), cmd); - goto out; - } - - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - - offset = cmd->se_tfo->set_fabric_sense_len(cmd, TRANSPORT_SENSE_BUFFER); - - memcpy(&buffer[offset], sense_buffer, TRANSPORT_SENSE_BUFFER); - - /* Automatically padded */ - cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER + offset; - - pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x and sense\n", - dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); - return 0; - -out: - spin_unlock_irqrestore(&cmd->t_state_lock, flags); - return -1; -} - /* * Process all commands up to the last received ORDERED task attribute which * requires another blocking boundary @@ -1987,7 +1963,7 @@ static void transport_handle_queue_full( static void target_complete_ok_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); - int reason = 0, ret; + int ret; /* * Check if we need to move delayed/dormant tasks from cmds on the @@ -2004,23 +1980,19 @@ static void target_complete_ok_work(struct work_struct *work) schedule_work(&cmd->se_dev->qf_work_queue); /* - * Check if we need to retrieve a sense buffer from + * Check if we need to send a sense buffer from * the struct se_cmd in question. 
*/ if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { - if (transport_get_sense_data(cmd) < 0) - reason = TCM_NON_EXISTENT_LUN; - - if (cmd->scsi_status) { - ret = transport_send_check_condition_and_sense( - cmd, reason, 1); - if (ret == -EAGAIN || ret == -ENOMEM) - goto queue_full; + WARN_ON(!cmd->scsi_status); + ret = transport_send_check_condition_and_sense( + cmd, 0, 1); + if (ret == -EAGAIN || ret == -ENOMEM) + goto queue_full; - transport_lun_remove_cmd(cmd); - transport_cmd_check_stop_to_fabric(cmd); - return; - } + transport_lun_remove_cmd(cmd); + transport_cmd_check_stop_to_fabric(cmd); + return; } /* * Check for a callback, used by amongst other things @@ -2218,7 +2190,6 @@ void *transport_kmap_data_sg(struct se_cmd *cmd) struct page **pages; int i; - BUG_ON(!sg); /* * We need to take into account a possible offset here for fabrics like * tcm_loop who may be using a contig buffer from the SCSI midlayer for @@ -2226,13 +2197,17 @@ void *transport_kmap_data_sg(struct se_cmd *cmd) */ if (!cmd->t_data_nents) return NULL; - else if (cmd->t_data_nents == 1) + + BUG_ON(!sg); + if (cmd->t_data_nents == 1) return kmap(sg_page(sg)) + sg->offset; /* >1 page. use vmap */ pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); - if (!pages) + if (!pages) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return NULL; + } /* convert sg[] to pages[] */ for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { @@ -2241,8 +2216,10 @@ void *transport_kmap_data_sg(struct se_cmd *cmd) cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); kfree(pages); - if (!cmd->t_data_vmap) + if (!cmd->t_data_vmap) { + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; return NULL; + } return cmd->t_data_vmap + cmd->t_data_sg[0].offset; } @@ -2294,9 +2271,9 @@ transport_generic_get_mem(struct se_cmd *cmd) return 0; out: - while (i >= 0) { - __free_page(sg_page(&cmd->t_data_sg[i])); + while (i > 0) { i--; + __free_page(sg_page(&cmd->t_data_sg[i])); } kfree(cmd->t_data_sg); cmd->t_data_sg = NULL; @@ -2323,21 +2300,19 @@ int transport_generic_new_cmd(struct se_cmd *cmd) if (ret < 0) goto out_fail; } - - /* Workaround for handling zero-length control CDBs */ - if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->data_length) { + /* + * If this command doesn't have any payload and we don't have to call + * into the fabric for data transfers, go ahead and complete it right + * away. + */ + if (!cmd->data_length && + cmd->t_task_cdb[0] != REQUEST_SENSE && + cmd->se_dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_COMPLETE; cmd->transport_state |= CMD_T_ACTIVE; spin_unlock_irq(&cmd->t_state_lock); - if (cmd->t_task_cdb[0] == REQUEST_SENSE) { - u8 ua_asc = 0, ua_ascq = 0; - - core_scsi3_ua_clear_for_request_sense(cmd, - &ua_asc, &ua_ascq); - } - INIT_WORK(&cmd->work, target_complete_ok_work); queue_work(target_completion_wq, &cmd->work); return 0; diff --git a/trunk/drivers/target/tcm_fc/tcm_fc.h b/trunk/drivers/target/tcm_fc/tcm_fc.h index c5eb3c33c3db..eea69358ced3 100644 --- a/trunk/drivers/target/tcm_fc/tcm_fc.h +++ b/trunk/drivers/target/tcm_fc/tcm_fc.h @@ -131,6 +131,7 @@ extern struct list_head ft_lport_list; extern struct mutex ft_lport_lock; extern struct fc4_prov ft_prov; extern struct target_fabric_configfs *ft_configfs; +extern unsigned int ft_debug_logging; /* * Fabric methods. 
diff --git a/trunk/drivers/target/tcm_fc/tfc_cmd.c b/trunk/drivers/target/tcm_fc/tfc_cmd.c index b9cb5006177e..823e6922249d 100644 --- a/trunk/drivers/target/tcm_fc/tfc_cmd.c +++ b/trunk/drivers/target/tcm_fc/tfc_cmd.c @@ -48,7 +48,7 @@ /* * Dump cmd state for debugging. */ -void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +static void _ft_dump_cmd(struct ft_cmd *cmd, const char *caller) { struct fc_exch *ep; struct fc_seq *sp; @@ -80,6 +80,12 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) } } +void ft_dump_cmd(struct ft_cmd *cmd, const char *caller) +{ + if (unlikely(ft_debug_logging)) + _ft_dump_cmd(cmd, caller); +} + static void ft_free_cmd(struct ft_cmd *cmd) { struct fc_frame *fp; diff --git a/trunk/drivers/target/tcm_fc/tfc_sess.c b/trunk/drivers/target/tcm_fc/tfc_sess.c index 87901fa74dd7..3c9e5b57caab 100644 --- a/trunk/drivers/target/tcm_fc/tfc_sess.c +++ b/trunk/drivers/target/tcm_fc/tfc_sess.c @@ -456,7 +456,9 @@ static void ft_prlo(struct fc_rport_priv *rdata) struct ft_tport *tport; mutex_lock(&ft_lport_lock); - tport = rcu_dereference(rdata->local_port->prov[FC_TYPE_FCP]); + tport = rcu_dereference_protected(rdata->local_port->prov[FC_TYPE_FCP], + lockdep_is_held(&ft_lport_lock)); + if (!tport) { mutex_unlock(&ft_lport_lock); return; diff --git a/trunk/drivers/tty/serial/Kconfig b/trunk/drivers/tty/serial/Kconfig index 070b442c1f81..4720b4ba096a 100644 --- a/trunk/drivers/tty/serial/Kconfig +++ b/trunk/drivers/tty/serial/Kconfig @@ -160,10 +160,12 @@ config SERIAL_KS8695_CONSOLE config SERIAL_CLPS711X tristate "CLPS711X serial port support" - depends on ARM && ARCH_CLPS711X + depends on ARCH_CLPS711X select SERIAL_CORE + default y help - ::: To be written ::: + This enables the driver for the on-chip UARTs of the Cirrus + Logic EP711x/EP721x/EP731x processors. config SERIAL_CLPS711X_CONSOLE bool "Support for console on CLPS711X serial port" @@ -173,9 +175,7 @@ config SERIAL_CLPS711X_CONSOLE Even if you say Y here, the currently visible virtual console (/dev/tty0) will still be used as the system console by default, but you can alter that using a kernel command line option such as - "console=ttyCL1". (Try "man bootparam" or see the documentation of - your boot loader (lilo or loadlin) about how to pass options to the - kernel at boot time.) + "console=ttyCL1". config SERIAL_SAMSUNG tristate "Samsung SoC serial support" diff --git a/trunk/drivers/tty/serial/ifx6x60.c b/trunk/drivers/tty/serial/ifx6x60.c index 144cd3987d4c..3ad079ffd049 100644 --- a/trunk/drivers/tty/serial/ifx6x60.c +++ b/trunk/drivers/tty/serial/ifx6x60.c @@ -1331,7 +1331,7 @@ static const struct spi_device_id ifx_id_table[] = { MODULE_DEVICE_TABLE(spi, ifx_id_table); /* spi operations */ -static const struct spi_driver ifx_spi_driver = { +static struct spi_driver ifx_spi_driver = { .driver = { .name = DRVNAME, .pm = &ifx_spi_pm, diff --git a/trunk/drivers/tty/serial/imx.c b/trunk/drivers/tty/serial/imx.c index d5c689d6217e..e309e8b0aaba 100644 --- a/trunk/drivers/tty/serial/imx.c +++ b/trunk/drivers/tty/serial/imx.c @@ -132,6 +132,7 @@ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */ +#define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 
6 - (x) : 6) << 7) #define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */ @@ -667,22 +668,11 @@ static void imx_break_ctl(struct uart_port *port, int break_state) static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode) { unsigned int val; - unsigned int ufcr_rfdiv; - - /* set receiver / transmitter trigger level. - * RFDIV is set such way to satisfy requested uartclk value - */ - val = TXTL << 10 | RXTL; - ufcr_rfdiv = (clk_get_rate(sport->clk_per) + sport->port.uartclk / 2) - / sport->port.uartclk; - - if(!ufcr_rfdiv) - ufcr_rfdiv = 1; - - val |= UFCR_RFDIV_REG(ufcr_rfdiv); + /* set receiver / transmitter trigger level */ + val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE); + val |= TXTL << UFCR_TXTL_SHF | RXTL; writel(val, sport->port.membase + UFCR); - return 0; } @@ -754,6 +744,7 @@ static int imx_startup(struct uart_port *port) } } + spin_lock_irqsave(&sport->port.lock, flags); /* * Finally, clear and enable interrupts */ @@ -807,7 +798,6 @@ static int imx_startup(struct uart_port *port) /* * Enable modem status interrupts */ - spin_lock_irqsave(&sport->port.lock,flags); imx_enable_ms(&sport->port); spin_unlock_irqrestore(&sport->port.lock,flags); @@ -837,10 +827,13 @@ static void imx_shutdown(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; + unsigned long flags; + spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR2); temp &= ~(UCR2_TXEN); writel(temp, sport->port.membase + UCR2); + spin_unlock_irqrestore(&sport->port.lock, flags); if (USE_IRDA(sport)) { struct imxuart_platform_data *pdata; @@ -869,12 +862,14 @@ static void imx_shutdown(struct uart_port *port) * Disable all interrupts, port and break condition. */ + spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR1); temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN); if (USE_IRDA(sport)) temp &= ~(UCR1_IREN); writel(temp, sport->port.membase + UCR1); + spin_unlock_irqrestore(&sport->port.lock, flags); } static void @@ -1217,6 +1212,9 @@ imx_console_write(struct console *co, const char *s, unsigned int count) struct imx_port *sport = imx_ports[co->index]; struct imx_port_ucrs old_ucr; unsigned int ucr1; + unsigned long flags; + + spin_lock_irqsave(&sport->port.lock, flags); /* * First, save UCR1/2/3 and then disable interrupts @@ -1242,6 +1240,8 @@ imx_console_write(struct console *co, const char *s, unsigned int count) while (!(readl(sport->port.membase + USR2) & USR2_TXDC)); imx_port_ucrs_restore(&sport->port, &old_ucr); + + spin_unlock_irqrestore(&sport->port.lock, flags); } /* diff --git a/trunk/drivers/tty/serial/mxs-auart.c b/trunk/drivers/tty/serial/mxs-auart.c index 2e341b81ff89..3a667eed63d6 100644 --- a/trunk/drivers/tty/serial/mxs-auart.c +++ b/trunk/drivers/tty/serial/mxs-auart.c @@ -73,6 +73,7 @@ #define AUART_CTRL0_CLKGATE (1 << 30) #define AUART_CTRL2_CTSEN (1 << 15) +#define AUART_CTRL2_RTSEN (1 << 14) #define AUART_CTRL2_RTS (1 << 11) #define AUART_CTRL2_RXE (1 << 9) #define AUART_CTRL2_TXE (1 << 8) @@ -259,9 +260,12 @@ static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl) u32 ctrl = readl(u->membase + AUART_CTRL2); - ctrl &= ~AUART_CTRL2_RTS; - if (mctrl & TIOCM_RTS) - ctrl |= AUART_CTRL2_RTS; + ctrl &= ~AUART_CTRL2_RTSEN; + if (mctrl & TIOCM_RTS) { + if (u->state->port.flags & ASYNC_CTS_FLOW) + ctrl |= AUART_CTRL2_RTSEN; + } + s->ctrl = mctrl; writel(ctrl, u->membase + AUART_CTRL2); } @@ -359,9 +363,9 @@ static void 
mxs_auart_settermios(struct uart_port *u, /* figure out the hardware flow control settings */ if (cflag & CRTSCTS) - ctrl2 |= AUART_CTRL2_CTSEN; + ctrl2 |= AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN; else - ctrl2 &= ~AUART_CTRL2_CTSEN; + ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN); /* set baud rate */ baud = uart_get_baud_rate(u, termios, old, 0, u->uartclk); diff --git a/trunk/drivers/tty/serial/pmac_zilog.c b/trunk/drivers/tty/serial/pmac_zilog.c index 654755a990df..333c8d012b0e 100644 --- a/trunk/drivers/tty/serial/pmac_zilog.c +++ b/trunk/drivers/tty/serial/pmac_zilog.c @@ -1348,10 +1348,16 @@ static int pmz_verify_port(struct uart_port *port, struct serial_struct *ser) static int pmz_poll_get_char(struct uart_port *port) { struct uart_pmac_port *uap = (struct uart_pmac_port *)port; + int tries = 2; - while ((read_zsreg(uap, R0) & Rx_CH_AV) == 0) - udelay(5); - return read_zsdata(uap); + while (tries) { + if ((read_zsreg(uap, R0) & Rx_CH_AV) != 0) + return read_zsdata(uap); + if (tries--) + udelay(5); + } + + return NO_POLL_CHAR; } static void pmz_poll_put_char(struct uart_port *port, unsigned char c) diff --git a/trunk/drivers/usb/Kconfig b/trunk/drivers/usb/Kconfig index a7773a3e02b1..7065df6036ca 100644 --- a/trunk/drivers/usb/Kconfig +++ b/trunk/drivers/usb/Kconfig @@ -13,7 +13,7 @@ config USB_ARCH_HAS_OHCI default y if PXA3xx default y if ARCH_EP93XX default y if ARCH_AT91 - default y if ARCH_PNX4008 && I2C + default y if ARCH_PNX4008 default y if MFD_TC6393XB default y if ARCH_W90X900 default y if ARCH_DAVINCI_DA8XX diff --git a/trunk/drivers/usb/chipidea/Kconfig b/trunk/drivers/usb/chipidea/Kconfig index 8337fb5d988d..47e499c9c0b6 100644 --- a/trunk/drivers/usb/chipidea/Kconfig +++ b/trunk/drivers/usb/chipidea/Kconfig @@ -1,9 +1,9 @@ config USB_CHIPIDEA tristate "ChipIdea Highspeed Dual Role Controller" - depends on USB + depends on USB || USB_GADGET help - Say Y here if your system has a dual role high speed USB - controller based on ChipIdea silicon IP. Currently, only the + Say Y here if your system has a dual role high speed USB + controller based on ChipIdea silicon IP. Currently, only the peripheral mode is supported. When compiled dynamically, the module will be called ci-hdrc.ko. 
@@ -12,7 +12,7 @@ if USB_CHIPIDEA config USB_CHIPIDEA_UDC bool "ChipIdea device controller" - depends on USB_GADGET + depends on USB_GADGET=y || USB_GADGET=USB_CHIPIDEA select USB_GADGET_DUALSPEED help Say Y here to enable device controller functionality of the @@ -20,6 +20,7 @@ config USB_CHIPIDEA_UDC config USB_CHIPIDEA_HOST bool "ChipIdea host controller" + depends on USB=y || USB=USB_CHIPIDEA select USB_EHCI_ROOT_HUB_TT help Say Y here to enable host controller functionality of the diff --git a/trunk/drivers/usb/chipidea/udc.c b/trunk/drivers/usb/chipidea/udc.c index c7a032a4f0c5..d214448b677e 100644 --- a/trunk/drivers/usb/chipidea/udc.c +++ b/trunk/drivers/usb/chipidea/udc.c @@ -78,8 +78,7 @@ static inline int ep_to_bit(struct ci13xxx *ci, int n) } /** - * hw_device_state: enables/disables interrupts & starts/stops device (execute - * without interruption) + * hw_device_state: enables/disables interrupts (execute without interruption) * @dma: 0 => disable, !0 => enable and set dma engine * * This function returns an error code @@ -91,9 +90,7 @@ static int hw_device_state(struct ci13xxx *ci, u32 dma) /* interrupt, error, port change, reset, sleep/suspend */ hw_write(ci, OP_USBINTR, ~0, USBi_UI|USBi_UEI|USBi_PCI|USBi_URI|USBi_SLI); - hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS); } else { - hw_write(ci, OP_USBCMD, USBCMD_RS, 0); hw_write(ci, OP_USBINTR, ~0, 0); } return 0; @@ -774,10 +771,7 @@ __acquires(mEp->lock) { struct ci13xxx_req *mReq, *mReqTemp; struct ci13xxx_ep *mEpTemp = mEp; - int uninitialized_var(retval); - - if (list_empty(&mEp->qh.queue)) - return -EINVAL; + int retval = 0; list_for_each_entry_safe(mReq, mReqTemp, &mEp->qh.queue, queue) { @@ -1420,6 +1414,21 @@ static int ci13xxx_vbus_draw(struct usb_gadget *_gadget, unsigned mA) return -ENOTSUPP; } +/* Change Data+ pullup status + * this func is used by usb_gadget_connect/disconnet + */ +static int ci13xxx_pullup(struct usb_gadget *_gadget, int is_on) +{ + struct ci13xxx *ci = container_of(_gadget, struct ci13xxx, gadget); + + if (is_on) + hw_write(ci, OP_USBCMD, USBCMD_RS, USBCMD_RS); + else + hw_write(ci, OP_USBCMD, USBCMD_RS, 0); + + return 0; +} + static int ci13xxx_start(struct usb_gadget *gadget, struct usb_gadget_driver *driver); static int ci13xxx_stop(struct usb_gadget *gadget, @@ -1432,6 +1441,7 @@ static int ci13xxx_stop(struct usb_gadget *gadget, static const struct usb_gadget_ops usb_gadget_ops = { .vbus_session = ci13xxx_vbus_session, .wakeup = ci13xxx_wakeup, + .pullup = ci13xxx_pullup, .vbus_draw = ci13xxx_vbus_draw, .udc_start = ci13xxx_start, .udc_stop = ci13xxx_stop, @@ -1455,7 +1465,12 @@ static int init_eps(struct ci13xxx *ci) mEp->ep.name = mEp->name; mEp->ep.ops = &usb_ep_ops; - mEp->ep.maxpacket = CTRL_PAYLOAD_MAX; + /* + * for ep0: maxP defined in desc, for other + * eps, maxP is set by epautoconfig() called + * by gadget layer + */ + mEp->ep.maxpacket = (unsigned short)~0; INIT_LIST_HEAD(&mEp->qh.queue); mEp->qh.ptr = dma_pool_alloc(ci->qh_pool, GFP_KERNEL, @@ -1475,6 +1490,7 @@ static int init_eps(struct ci13xxx *ci) else ci->ep0in = mEp; + mEp->ep.maxpacket = CTRL_PAYLOAD_MAX; continue; } @@ -1484,6 +1500,17 @@ static int init_eps(struct ci13xxx *ci) return retval; } +static void destroy_eps(struct ci13xxx *ci) +{ + int i; + + for (i = 0; i < ci->hw_ep_max; i++) { + struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i]; + + dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma); + } +} + /** * ci13xxx_start: register a gadget driver * @gadget: our gadget @@ -1691,7 +1718,7 @@ static int udc_start(struct 
ci13xxx *ci) if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) { if (ci->transceiver == NULL) { retval = -ENODEV; - goto free_pools; + goto destroy_eps; } } @@ -1729,7 +1756,7 @@ static int udc_start(struct ci13xxx *ci) remove_trans: if (!IS_ERR_OR_NULL(ci->transceiver)) { - otg_set_peripheral(ci->transceiver->otg, &ci->gadget); + otg_set_peripheral(ci->transceiver->otg, NULL); if (ci->global_phy) usb_put_phy(ci->transceiver); } @@ -1742,6 +1769,8 @@ static int udc_start(struct ci13xxx *ci) put_transceiver: if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy) usb_put_phy(ci->transceiver); +destroy_eps: + destroy_eps(ci); free_pools: dma_pool_destroy(ci->td_pool); free_qh_pool: @@ -1756,18 +1785,12 @@ static int udc_start(struct ci13xxx *ci) */ static void udc_stop(struct ci13xxx *ci) { - int i; - if (ci == NULL) return; usb_del_gadget_udc(&ci->gadget); - for (i = 0; i < ci->hw_ep_max; i++) { - struct ci13xxx_ep *mEp = &ci->ci13xxx_ep[i]; - - dma_pool_free(ci->qh_pool, mEp->qh.ptr, mEp->qh.dma); - } + destroy_eps(ci); dma_pool_destroy(ci->td_pool); dma_pool_destroy(ci->qh_pool); diff --git a/trunk/drivers/usb/class/cdc-acm.c b/trunk/drivers/usb/class/cdc-acm.c index 56d6bf668488..f763ed7ba91e 100644 --- a/trunk/drivers/usb/class/cdc-acm.c +++ b/trunk/drivers/usb/class/cdc-acm.c @@ -1104,7 +1104,8 @@ static int acm_probe(struct usb_interface *intf, } - if (data_interface->cur_altsetting->desc.bNumEndpoints < 2) + if (data_interface->cur_altsetting->desc.bNumEndpoints < 2 || + control_interface->cur_altsetting->desc.bNumEndpoints == 0) return -EINVAL; epctrl = &control_interface->cur_altsetting->endpoint[0].desc; diff --git a/trunk/drivers/usb/class/cdc-wdm.c b/trunk/drivers/usb/class/cdc-wdm.c index 65a55abb791f..5f0cb417b736 100644 --- a/trunk/drivers/usb/class/cdc-wdm.c +++ b/trunk/drivers/usb/class/cdc-wdm.c @@ -109,12 +109,14 @@ static struct usb_driver wdm_driver; /* return intfdata if we own the interface, else look up intf in the list */ static struct wdm_device *wdm_find_device(struct usb_interface *intf) { - struct wdm_device *desc = NULL; + struct wdm_device *desc; spin_lock(&wdm_device_list_lock); list_for_each_entry(desc, &wdm_device_list, device_list) if (desc->intf == intf) - break; + goto found; + desc = NULL; +found: spin_unlock(&wdm_device_list_lock); return desc; @@ -122,12 +124,14 @@ static struct wdm_device *wdm_find_device(struct usb_interface *intf) static struct wdm_device *wdm_find_device_by_minor(int minor) { - struct wdm_device *desc = NULL; + struct wdm_device *desc; spin_lock(&wdm_device_list_lock); list_for_each_entry(desc, &wdm_device_list, device_list) if (desc->intf->minor == minor) - break; + goto found; + desc = NULL; +found: spin_unlock(&wdm_device_list_lock); return desc; diff --git a/trunk/drivers/usb/core/devices.c b/trunk/drivers/usb/core/devices.c index d95696584762..3440812b4a84 100644 --- a/trunk/drivers/usb/core/devices.c +++ b/trunk/drivers/usb/core/devices.c @@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf, /* print devices for all busses */ list_for_each_entry(bus, &usb_bus_list, bus_list) { /* recurse through all children of the root hub */ - if (!bus->root_hub) + if (!bus_to_hcd(bus)->rh_registered) continue; usb_lock_device(bus->root_hub); ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos, diff --git a/trunk/drivers/usb/core/hcd.c b/trunk/drivers/usb/core/hcd.c index bc84106ac057..75ba2091f9b4 100644 --- a/trunk/drivers/usb/core/hcd.c +++ b/trunk/drivers/usb/core/hcd.c @@ -1011,10 +1011,7 @@ 
static int register_root_hub(struct usb_hcd *hcd) if (retval) { dev_err (parent_dev, "can't register root hub for %s, %d\n", dev_name(&usb_dev->dev), retval); - } - mutex_unlock(&usb_bus_list_lock); - - if (retval == 0) { + } else { spin_lock_irq (&hcd_root_hub_lock); hcd->rh_registered = 1; spin_unlock_irq (&hcd_root_hub_lock); @@ -1023,6 +1020,7 @@ static int register_root_hub(struct usb_hcd *hcd) if (HCD_DEAD(hcd)) usb_hc_died (hcd); /* This time clean up */ } + mutex_unlock(&usb_bus_list_lock); return retval; } diff --git a/trunk/drivers/usb/core/quirks.c b/trunk/drivers/usb/core/quirks.c index f15501f4c585..e77a8e8eaa23 100644 --- a/trunk/drivers/usb/core/quirks.c +++ b/trunk/drivers/usb/core/quirks.c @@ -71,6 +71,10 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04b4, 0x0526), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Microchip Joss Optical infrared touchboard device */ + { USB_DEVICE(0x04d8, 0x000c), .driver_info = + USB_QUIRK_CONFIG_INTF_STRINGS }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ { USB_DEVICE(0x04e8, 0x6601), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, diff --git a/trunk/drivers/usb/dwc3/core.c b/trunk/drivers/usb/dwc3/core.c index c34452a7304f..a68ff53124dc 100644 --- a/trunk/drivers/usb/dwc3/core.c +++ b/trunk/drivers/usb/dwc3/core.c @@ -436,16 +436,21 @@ static int __devinit dwc3_probe(struct platform_device *pdev) dev_err(dev, "missing IRQ\n"); return -ENODEV; } - dwc->xhci_resources[1] = *res; + dwc->xhci_resources[1].start = res->start; + dwc->xhci_resources[1].end = res->end; + dwc->xhci_resources[1].flags = res->flags; + dwc->xhci_resources[1].name = res->name; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "missing memory resource\n"); return -ENODEV; } - dwc->xhci_resources[0] = *res; + dwc->xhci_resources[0].start = res->start; dwc->xhci_resources[0].end = dwc->xhci_resources[0].start + DWC3_XHCI_REGS_END; + dwc->xhci_resources[0].flags = res->flags; + dwc->xhci_resources[0].name = res->name; /* * Request memory region but exclude xHCI regs, diff --git a/trunk/drivers/usb/dwc3/ep0.c b/trunk/drivers/usb/dwc3/ep0.c index 9b94886b66e5..e4d5ca86b9da 100644 --- a/trunk/drivers/usb/dwc3/ep0.c +++ b/trunk/drivers/usb/dwc3/ep0.c @@ -720,7 +720,6 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc, transferred = min_t(u32, ur->length, transfer_size - length); memcpy(ur->buf, dwc->ep0_bounce, transferred); - dwc->ep0_bounced = false; } else { transferred = ur->length - length; } diff --git a/trunk/drivers/usb/dwc3/gadget.c b/trunk/drivers/usb/dwc3/gadget.c index 58fdfad96b4d..c2813c2b005a 100644 --- a/trunk/drivers/usb/dwc3/gadget.c +++ b/trunk/drivers/usb/dwc3/gadget.c @@ -263,8 +263,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, if (req->request.status == -EINPROGRESS) req->request.status = status; - usb_gadget_unmap_request(&dwc->gadget, &req->request, - req->direction); + if (dwc->ep0_bounced && dep->number == 0) + dwc->ep0_bounced = false; + else + usb_gadget_unmap_request(&dwc->gadget, &req->request, + req->direction); dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n", req, dep->name, req->request.actual, @@ -1026,6 +1029,7 @@ static void __dwc3_gadget_start_isoc(struct dwc3 *dwc, if (list_empty(&dep->request_list)) { dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n", dep->name); + dep->flags |= DWC3_EP_PENDING_REQUEST; return; } @@ -1089,6 +1093,17 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct 
dwc3_request *req) if (dep->flags & DWC3_EP_PENDING_REQUEST) { int ret; + /* + * If xfernotready is already elapsed and it is a case + * of isoc transfer, then issue END TRANSFER, so that + * you can receive xfernotready again and can have + * notion of current microframe. + */ + if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) { + dwc3_stop_active_transfer(dwc, dep->number); + return 0; + } + ret = __dwc3_gadget_kick_transfer(dep, 0, true); if (ret && ret != -EBUSY) { struct dwc3 *dwc = dep->dwc; diff --git a/trunk/drivers/usb/early/ehci-dbgp.c b/trunk/drivers/usb/early/ehci-dbgp.c index ee0ebacf8227..89dcf155d57e 100644 --- a/trunk/drivers/usb/early/ehci-dbgp.c +++ b/trunk/drivers/usb/early/ehci-dbgp.c @@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void) writel(FLAG_CF, &ehci_regs->configured_flag); /* Wait until the controller is no longer halted */ - loop = 10; + loop = 1000; do { status = readl(&ehci_regs->status); if (!(status & STS_HALT)) diff --git a/trunk/drivers/usb/gadget/at91_udc.c b/trunk/drivers/usb/gadget/at91_udc.c index c9e66dfb02e6..1e35963bd4ed 100644 --- a/trunk/drivers/usb/gadget/at91_udc.c +++ b/trunk/drivers/usb/gadget/at91_udc.c @@ -475,8 +475,7 @@ static int at91_ep_enable(struct usb_ep *_ep, unsigned long flags; if (!_ep || !ep - || !desc || ep->ep.desc - || _ep->name == ep0name + || !desc || _ep->name == ep0name || desc->bDescriptorType != USB_DT_ENDPOINT || (maxpacket = usb_endpoint_maxp(desc)) == 0 || maxpacket > ep->maxpacket) { @@ -530,7 +529,6 @@ static int at91_ep_enable(struct usb_ep *_ep, tmp |= AT91_UDP_EPEDS; __raw_writel(tmp, ep->creg); - ep->ep.desc = desc; ep->ep.maxpacket = maxpacket; /* @@ -1635,7 +1633,6 @@ static int at91_start(struct usb_gadget *gadget, udc->driver = driver; udc->gadget.dev.driver = &driver->driver; udc->gadget.dev.of_node = udc->pdev->dev.of_node; - dev_set_drvdata(&udc->gadget.dev, &driver->driver); udc->enabled = 1; udc->selfpowered = 1; @@ -1656,7 +1653,6 @@ static int at91_stop(struct usb_gadget *gadget, spin_unlock_irqrestore(&udc->lock, flags); udc->gadget.dev.driver = NULL; - dev_set_drvdata(&udc->gadget.dev, NULL); udc->driver = NULL; DBG("unbound from %s\n", driver->driver.name); diff --git a/trunk/drivers/usb/gadget/dummy_hcd.c b/trunk/drivers/usb/gadget/dummy_hcd.c index b799106027ad..afdbb1cbf5d9 100644 --- a/trunk/drivers/usb/gadget/dummy_hcd.c +++ b/trunk/drivers/usb/gadget/dummy_hcd.c @@ -1916,6 +1916,27 @@ static int dummy_hub_status(struct usb_hcd *hcd, char *buf) return retval; } +/* usb 3.0 root hub device descriptor */ +struct { + struct usb_bos_descriptor bos; + struct usb_ss_cap_descriptor ss_cap; +} __packed usb3_bos_desc = { + + .bos = { + .bLength = USB_DT_BOS_SIZE, + .bDescriptorType = USB_DT_BOS, + .wTotalLength = cpu_to_le16(sizeof(usb3_bos_desc)), + .bNumDeviceCaps = 1, + }, + .ss_cap = { + .bLength = USB_DT_USB_SS_CAP_SIZE, + .bDescriptorType = USB_DT_DEVICE_CAPABILITY, + .bDevCapabilityType = USB_SS_CAP_TYPE, + .wSpeedSupported = cpu_to_le16(USB_5GBPS_OPERATION), + .bFunctionalitySupport = ilog2(USB_5GBPS_OPERATION), + }, +}; + static inline void ss_hub_descriptor(struct usb_hub_descriptor *desc) { @@ -2006,6 +2027,18 @@ static int dummy_hub_control( else hub_descriptor((struct usb_hub_descriptor *) buf); break; + + case DeviceRequest | USB_REQ_GET_DESCRIPTOR: + if (hcd->speed != HCD_USB3) + goto error; + + if ((wValue >> 8) != USB_DT_BOS) + goto error; + + memcpy(buf, &usb3_bos_desc, sizeof(usb3_bos_desc)); + retval = sizeof(usb3_bos_desc); + break; + case GetHubStatus: *(__le32 *) buf = 
cpu_to_le32(0); break; @@ -2503,10 +2536,8 @@ static int dummy_hcd_probe(struct platform_device *pdev) hs_hcd->has_tt = 1; retval = usb_add_hcd(hs_hcd, 0, 0); - if (retval != 0) { - usb_put_hcd(hs_hcd); - return retval; - } + if (retval) + goto put_usb2_hcd; if (mod_data.is_super_speed) { ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev, @@ -2525,6 +2556,8 @@ static int dummy_hcd_probe(struct platform_device *pdev) put_usb3_hcd: usb_put_hcd(ss_hcd); dealloc_usb2_hcd: + usb_remove_hcd(hs_hcd); +put_usb2_hcd: usb_put_hcd(hs_hcd); the_controller.hs_hcd = the_controller.ss_hcd = NULL; return retval; diff --git a/trunk/drivers/usb/gadget/f_fs.c b/trunk/drivers/usb/gadget/f_fs.c index 8adc79d1b402..829aba75a6df 100644 --- a/trunk/drivers/usb/gadget/f_fs.c +++ b/trunk/drivers/usb/gadget/f_fs.c @@ -34,11 +34,15 @@ /* Debugging ****************************************************************/ #ifdef VERBOSE_DEBUG +#ifndef pr_vdebug # define pr_vdebug pr_debug +#endif /* pr_vdebug */ # define ffs_dump_mem(prefix, ptr, len) \ print_hex_dump_bytes(pr_fmt(prefix ": "), DUMP_PREFIX_NONE, ptr, len) #else +#ifndef pr_vdebug # define pr_vdebug(...) do { } while (0) +#endif /* pr_vdebug */ # define ffs_dump_mem(prefix, ptr, len) do { } while (0) #endif /* VERBOSE_DEBUG */ diff --git a/trunk/drivers/usb/gadget/s3c-hsotg.c b/trunk/drivers/usb/gadget/s3c-hsotg.c index b13e0bb5f5b8..0bb617e1dda2 100644 --- a/trunk/drivers/usb/gadget/s3c-hsotg.c +++ b/trunk/drivers/usb/gadget/s3c-hsotg.c @@ -3599,6 +3599,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev) if (hsotg->num_of_eps == 0) { dev_err(dev, "wrong number of EPs (zero)\n"); + ret = -EINVAL; goto err_supplies; } @@ -3606,6 +3607,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev) GFP_KERNEL); if (!eps) { dev_err(dev, "cannot get memory\n"); + ret = -ENOMEM; goto err_supplies; } @@ -3622,6 +3624,7 @@ static int __devinit s3c_hsotg_probe(struct platform_device *pdev) GFP_KERNEL); if (!hsotg->ctrl_req) { dev_err(dev, "failed to allocate ctrl req\n"); + ret = -ENOMEM; goto err_ep_mem; } diff --git a/trunk/drivers/usb/gadget/u_ether.c b/trunk/drivers/usb/gadget/u_ether.c index 90e82e288eb9..0e5230926154 100644 --- a/trunk/drivers/usb/gadget/u_ether.c +++ b/trunk/drivers/usb/gadget/u_ether.c @@ -669,6 +669,8 @@ static int eth_stop(struct net_device *net) spin_lock_irqsave(&dev->lock, flags); if (dev->port_usb) { struct gether *link = dev->port_usb; + const struct usb_endpoint_descriptor *in; + const struct usb_endpoint_descriptor *out; if (link->close) link->close(link); @@ -682,10 +684,14 @@ static int eth_stop(struct net_device *net) * their own pace; the network stack can handle old packets. * For the moment we leave this here, since it works. */ + in = link->in_ep->desc; + out = link->out_ep->desc; usb_ep_disable(link->in_ep); usb_ep_disable(link->out_ep); if (netif_carrier_ok(net)) { DBG(dev, "host still using in/out endpoints\n"); + link->in_ep->desc = in; + link->out_ep->desc = out; usb_ep_enable(link->in_ep); usb_ep_enable(link->out_ep); } diff --git a/trunk/drivers/usb/gadget/u_serial.c b/trunk/drivers/usb/gadget/u_serial.c index 5b3f5fffea92..da6d479ff9a6 100644 --- a/trunk/drivers/usb/gadget/u_serial.c +++ b/trunk/drivers/usb/gadget/u_serial.c @@ -132,11 +132,15 @@ static unsigned n_ports; #ifdef VERBOSE_DEBUG +#ifndef pr_vdebug #define pr_vdebug(fmt, arg...) \ pr_debug(fmt, ##arg) +#endif /* pr_vdebug */ #else +#ifndef pr_vdebig #define pr_vdebug(fmt, arg...) 
\ ({ if (0) pr_debug(fmt, ##arg); }) +#endif /* pr_vdebug */ #endif /*-------------------------------------------------------------------------*/ diff --git a/trunk/drivers/usb/host/ehci-omap.c b/trunk/drivers/usb/host/ehci-omap.c index bb55eb4a7d48..d7fe287d0678 100644 --- a/trunk/drivers/usb/host/ehci-omap.c +++ b/trunk/drivers/usb/host/ehci-omap.c @@ -56,15 +56,6 @@ #define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 #define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 -/* Errata i693 */ -static struct clk *utmi_p1_fck; -static struct clk *utmi_p2_fck; -static struct clk *xclk60mhsp1_ck; -static struct clk *xclk60mhsp2_ck; -static struct clk *usbhost_p1_fck; -static struct clk *usbhost_p2_fck; -static struct clk *init_60m_fclk; - /*-------------------------------------------------------------------------*/ static const struct hc_driver ehci_omap_hc_driver; @@ -80,40 +71,6 @@ static inline u32 ehci_read(void __iomem *base, u32 reg) return __raw_readl(base + reg); } -/* Erratum i693 workaround sequence */ -static void omap_ehci_erratum_i693(struct ehci_hcd *ehci) -{ - int ret = 0; - - /* Switch to the internal 60 MHz clock */ - ret = clk_set_parent(utmi_p1_fck, init_60m_fclk); - if (ret != 0) - ehci_err(ehci, "init_60m_fclk set parent" - "failed error:%d\n", ret); - - ret = clk_set_parent(utmi_p2_fck, init_60m_fclk); - if (ret != 0) - ehci_err(ehci, "init_60m_fclk set parent" - "failed error:%d\n", ret); - - clk_enable(usbhost_p1_fck); - clk_enable(usbhost_p2_fck); - - /* Wait 1ms and switch back to the external clock */ - mdelay(1); - ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck); - if (ret != 0) - ehci_err(ehci, "xclk60mhsp1_ck set parent" - "failed error:%d\n", ret); - - ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck); - if (ret != 0) - ehci_err(ehci, "xclk60mhsp2_ck set parent" - "failed error:%d\n", ret); - - clk_disable(usbhost_p1_fck); - clk_disable(usbhost_p2_fck); -} static void omap_ehci_soft_phy_reset(struct usb_hcd *hcd, u8 port) { @@ -195,50 +152,6 @@ static int omap_ehci_init(struct usb_hcd *hcd) return rc; } -static int omap_ehci_hub_control( - struct usb_hcd *hcd, - u16 typeReq, - u16 wValue, - u16 wIndex, - char *buf, - u16 wLength -) -{ - struct ehci_hcd *ehci = hcd_to_ehci(hcd); - u32 __iomem *status_reg = &ehci->regs->port_status[ - (wIndex & 0xff) - 1]; - u32 temp; - unsigned long flags; - int retval = 0; - - spin_lock_irqsave(&ehci->lock, flags); - - if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) { - temp = ehci_readl(ehci, status_reg); - if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) { - retval = -EPIPE; - goto done; - } - - temp &= ~PORT_WKCONN_E; - temp |= PORT_WKDISC_E | PORT_WKOC_E; - ehci_writel(ehci, temp | PORT_SUSPEND, status_reg); - - omap_ehci_erratum_i693(ehci); - - set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports); - goto done; - } - - spin_unlock_irqrestore(&ehci->lock, flags); - - /* Handle the hub control events here */ - return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength); -done: - spin_unlock_irqrestore(&ehci->lock, flags); - return retval; -} - static void disable_put_regulator( struct ehci_hcd_omap_platform_data *pdata) { @@ -351,79 +264,9 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev) goto err_pm_runtime; } - /* get clocks */ - utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); - if (IS_ERR(utmi_p1_fck)) { - ret = PTR_ERR(utmi_p1_fck); - dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); - goto err_add_hcd; - } - - xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); - if (IS_ERR(xclk60mhsp1_ck)) { - 
ret = PTR_ERR(xclk60mhsp1_ck); - dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret); - goto err_utmi_p1_fck; - } - - utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk"); - if (IS_ERR(utmi_p2_fck)) { - ret = PTR_ERR(utmi_p2_fck); - dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret); - goto err_xclk60mhsp1_ck; - } - - xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck"); - if (IS_ERR(xclk60mhsp2_ck)) { - ret = PTR_ERR(xclk60mhsp2_ck); - dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret); - goto err_utmi_p2_fck; - } - - usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk"); - if (IS_ERR(usbhost_p1_fck)) { - ret = PTR_ERR(usbhost_p1_fck); - dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret); - goto err_xclk60mhsp2_ck; - } - - usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk"); - if (IS_ERR(usbhost_p2_fck)) { - ret = PTR_ERR(usbhost_p2_fck); - dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret); - goto err_usbhost_p1_fck; - } - - init_60m_fclk = clk_get(dev, "init_60m_fclk"); - if (IS_ERR(init_60m_fclk)) { - ret = PTR_ERR(init_60m_fclk); - dev_err(dev, "init_60m_fclk failed error:%d\n", ret); - goto err_usbhost_p2_fck; - } return 0; -err_usbhost_p2_fck: - clk_put(usbhost_p2_fck); - -err_usbhost_p1_fck: - clk_put(usbhost_p1_fck); - -err_xclk60mhsp2_ck: - clk_put(xclk60mhsp2_ck); - -err_utmi_p2_fck: - clk_put(utmi_p2_fck); - -err_xclk60mhsp1_ck: - clk_put(xclk60mhsp1_ck); - -err_utmi_p1_fck: - clk_put(utmi_p1_fck); - -err_add_hcd: - usb_remove_hcd(hcd); - err_pm_runtime: disable_put_regulator(pdata); pm_runtime_put_sync(dev); @@ -454,14 +297,6 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev) iounmap(hcd->regs); usb_put_hcd(hcd); - clk_put(utmi_p1_fck); - clk_put(utmi_p2_fck); - clk_put(xclk60mhsp1_ck); - clk_put(xclk60mhsp2_ck); - clk_put(usbhost_p1_fck); - clk_put(usbhost_p2_fck); - clk_put(init_60m_fclk); - pm_runtime_put_sync(dev); pm_runtime_disable(dev); @@ -532,7 +367,7 @@ static const struct hc_driver ehci_omap_hc_driver = { * root hub support */ .hub_status_data = ehci_hub_status_data, - .hub_control = omap_ehci_hub_control, + .hub_control = ehci_hub_control, .bus_suspend = ehci_bus_suspend, .bus_resume = ehci_bus_resume, diff --git a/trunk/drivers/usb/host/ehci-q.c b/trunk/drivers/usb/host/ehci-q.c index 9bc39ca460c8..4b66374bdc8e 100644 --- a/trunk/drivers/usb/host/ehci-q.c +++ b/trunk/drivers/usb/host/ehci-q.c @@ -128,9 +128,17 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh) else { qtd = list_entry (qh->qtd_list.next, struct ehci_qtd, qtd_list); - /* first qtd may already be partially processed */ - if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) + /* + * first qtd may already be partially processed. + * If we come here during unlink, the QH overlay region + * might have reference to the just unlinked qtd. The + * qtd is updated in qh_completions(). Update the QH + * overlay here. + */ + if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current) { + qh->hw->hw_qtd_next = qtd->hw_next; qtd = NULL; + } } if (qtd) diff --git a/trunk/drivers/usb/host/ehci-sead3.c b/trunk/drivers/usb/host/ehci-sead3.c index 58c96bd50d22..0c9e43cfaff5 100644 --- a/trunk/drivers/usb/host/ehci-sead3.c +++ b/trunk/drivers/usb/host/ehci-sead3.c @@ -40,7 +40,7 @@ static int ehci_sead3_setup(struct usb_hcd *hcd) ehci->need_io_watchdog = 0; /* Set burst length to 16 words. 
*/ - ehci_writel(ehci, 0x1010, &ehci->regs->reserved[1]); + ehci_writel(ehci, 0x1010, &ehci->regs->reserved1[1]); return ret; } diff --git a/trunk/drivers/usb/host/ehci-tegra.c b/trunk/drivers/usb/host/ehci-tegra.c index 950e95efa381..26dedb30ad0b 100644 --- a/trunk/drivers/usb/host/ehci-tegra.c +++ b/trunk/drivers/usb/host/ehci-tegra.c @@ -799,11 +799,12 @@ static int tegra_ehci_remove(struct platform_device *pdev) #endif usb_remove_hcd(hcd); - usb_put_hcd(hcd); tegra_usb_phy_close(tegra->phy); iounmap(hcd->regs); + usb_put_hcd(hcd); + clk_disable_unprepare(tegra->clk); clk_put(tegra->clk); diff --git a/trunk/drivers/usb/host/isp1362-hcd.c b/trunk/drivers/usb/host/isp1362-hcd.c index 2ed112d3e159..256326322cfd 100644 --- a/trunk/drivers/usb/host/isp1362-hcd.c +++ b/trunk/drivers/usb/host/isp1362-hcd.c @@ -543,12 +543,12 @@ static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid, short_ok ? "" : "not_", PTD_GET_COUNT(ptd), ep->maxpacket, len); + /* save the data underrun error code for later and + * proceed with the status stage + */ + urb->actual_length += PTD_GET_COUNT(ptd); if (usb_pipecontrol(urb->pipe)) { ep->nextpid = USB_PID_ACK; - /* save the data underrun error code for later and - * proceed with the status stage - */ - urb->actual_length += PTD_GET_COUNT(ptd); BUG_ON(urb->actual_length > urb->transfer_buffer_length); if (urb->status == -EINPROGRESS) diff --git a/trunk/drivers/usb/host/ohci-at91.c b/trunk/drivers/usb/host/ohci-at91.c index a665b3eaa746..0bf72f943b00 100644 --- a/trunk/drivers/usb/host/ohci-at91.c +++ b/trunk/drivers/usb/host/ohci-at91.c @@ -467,7 +467,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data) /* From the GPIO notifying the over-current situation, find * out the corresponding port */ at91_for_each_port(port) { - if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) { + if (gpio_is_valid(pdata->overcurrent_pin[port]) && + gpio_to_irq(pdata->overcurrent_pin[port]) == irq) { gpio = pdata->overcurrent_pin[port]; break; } @@ -570,6 +571,16 @@ static int __devinit ohci_hcd_at91_drv_probe(struct platform_device *pdev) if (pdata) { at91_for_each_port(i) { + /* + * do not configure PIO if not in relation with + * real USB port on board + */ + if (i >= pdata->ports) { + pdata->vbus_pin[i] = -EINVAL; + pdata->overcurrent_pin[i] = -EINVAL; + break; + } + if (!gpio_is_valid(pdata->vbus_pin[i])) continue; gpio = pdata->vbus_pin[i]; diff --git a/trunk/drivers/usb/host/ohci-omap.c b/trunk/drivers/usb/host/ohci-omap.c index e7d75d295988..f8b2d91851f7 100644 --- a/trunk/drivers/usb/host/ohci-omap.c +++ b/trunk/drivers/usb/host/ohci-omap.c @@ -403,8 +403,6 @@ static int usb_hcd_omap_probe (const struct hc_driver *driver, static inline void usb_hcd_omap_remove (struct usb_hcd *hcd, struct platform_device *pdev) { - struct ohci_hcd *ohci = hcd_to_ohci (hcd); - usb_remove_hcd(hcd); if (!IS_ERR_OR_NULL(hcd->phy)) { (void) otg_set_host(hcd->phy->otg, 0); diff --git a/trunk/drivers/usb/host/pci-quirks.c b/trunk/drivers/usb/host/pci-quirks.c index df0828cb2aa3..966d1484ee79 100644 --- a/trunk/drivers/usb/host/pci-quirks.c +++ b/trunk/drivers/usb/host/pci-quirks.c @@ -75,7 +75,9 @@ #define NB_PIF0_PWRDOWN_1 0x01100013 #define USB_INTEL_XUSB2PR 0xD0 +#define USB_INTEL_USB2PRM 0xD4 #define USB_INTEL_USB3_PSSEN 0xD8 +#define USB_INTEL_USB3PRM 0xDC static struct amd_chipset_info { struct pci_dev *nb_dev; @@ -772,10 +774,18 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) return; } - 
ports_available = 0xffffffff; + /* Read USB3PRM, the USB 3.0 Port Routing Mask Register + * Indicate the ports that can be changed from OS. + */ + pci_read_config_dword(xhci_pdev, USB_INTEL_USB3PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable ports to enable SuperSpeed: 0x%x\n", + ports_available); + /* Write USB3_PSSEN, the USB 3.0 Port SuperSpeed Enable - * Register, to turn on SuperSpeed terminations for all - * available ports. + * Register, to turn on SuperSpeed terminations for the + * switchable ports. */ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, cpu_to_le32(ports_available)); @@ -785,7 +795,16 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) dev_dbg(&xhci_pdev->dev, "USB 3.0 ports that are now enabled " "under xHCI: 0x%x\n", ports_available); - ports_available = 0xffffffff; + /* Read XUSB2PRM, xHCI USB 2.0 Port Routing Mask Register + * Indicate the USB 2.0 ports to be controlled by the xHCI host. + */ + + pci_read_config_dword(xhci_pdev, USB_INTEL_USB2PRM, + &ports_available); + + dev_dbg(&xhci_pdev->dev, "Configurable USB 2.0 ports to hand over to xCHI: 0x%x\n", + ports_available); + /* Write XUSB2PR, the xHC USB 2.0 Port Routing Register, to * switch the USB 2.0 power and data lines over to the xHCI * host. @@ -800,6 +819,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev) } EXPORT_SYMBOL_GPL(usb_enable_xhci_ports); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) +{ + pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0); + pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0); +} +EXPORT_SYMBOL_GPL(usb_disable_xhci_ports); + /** * PCI Quirks for xHCI. * @@ -815,12 +841,12 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) void __iomem *op_reg_base; u32 val; int timeout; + int len = pci_resource_len(pdev, 0); if (!mmio_resource_enabled(pdev, 0)) return; - base = ioremap_nocache(pci_resource_start(pdev, 0), - pci_resource_len(pdev, 0)); + base = ioremap_nocache(pci_resource_start(pdev, 0), len); if (base == NULL) return; @@ -830,9 +856,17 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) */ ext_cap_offset = xhci_find_next_cap_offset(base, XHCI_HCC_PARAMS_OFFSET); do { + if ((ext_cap_offset + sizeof(val)) > len) { + /* We're reading garbage from the controller */ + dev_warn(&pdev->dev, + "xHCI controller failing to respond"); + return; + } + if (!ext_cap_offset) /* We've reached the end of the extended capabilities */ goto hc_init; + val = readl(base + ext_cap_offset); if (XHCI_EXT_CAPS_ID(val) == XHCI_EXT_CAPS_LEGACY) break; @@ -863,9 +897,10 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev) /* Disable any BIOS SMIs and clear all SMI events*/ writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET); +hc_init: if (usb_is_intel_switchable_xhci(pdev)) usb_enable_xhci_ports(pdev); -hc_init: + op_reg_base = base + XHCI_HC_LENGTH(readl(base)); /* Wait for the host controller to be ready before writing any diff --git a/trunk/drivers/usb/host/pci-quirks.h b/trunk/drivers/usb/host/pci-quirks.h index b1002a8ef96f..7f69a39163ce 100644 --- a/trunk/drivers/usb/host/pci-quirks.h +++ b/trunk/drivers/usb/host/pci-quirks.h @@ -10,10 +10,12 @@ void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); bool usb_is_intel_switchable_xhci(struct pci_dev *pdev); void usb_enable_xhci_ports(struct pci_dev *xhci_pdev); +void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); #else static inline void usb_amd_quirk_pll_disable(void) {} static inline void 
usb_amd_quirk_pll_enable(void) {} static inline void usb_amd_dev_put(void) {} +static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} #endif /* CONFIG_PCI */ #endif /* __LINUX_USB_PCI_QUIRKS_H */ diff --git a/trunk/drivers/usb/host/xhci-hub.c b/trunk/drivers/usb/host/xhci-hub.c index 74bfc868b7ad..d5eb357aa5c4 100644 --- a/trunk/drivers/usb/host/xhci-hub.c +++ b/trunk/drivers/usb/host/xhci-hub.c @@ -493,11 +493,48 @@ static void xhci_hub_report_link_state(u32 *status, u32 status_reg) * when this bit is set. */ pls |= USB_PORT_STAT_CONNECTION; + } else { + /* + * If CAS bit isn't set but the Port is already at + * Compliance Mode, fake a connection so the USB core + * notices the Compliance state and resets the port. + * This resolves an issue generated by the SN65LVPE502CP + * in which sometimes the port enters compliance mode + * caused by a delay on the host-device negotiation. + */ + if (pls == USB_SS_PORT_LS_COMP_MOD) + pls |= USB_PORT_STAT_CONNECTION; } + /* update status field */ *status |= pls; } +/* + * Function for Compliance Mode Quirk. + * + * This Function verifies if all xhc USB3 ports have entered U0, if so, + * the compliance mode timer is deleted. A port won't enter + * compliance mode if it has previously entered U0. + */ +void xhci_del_comp_mod_timer(struct xhci_hcd *xhci, u32 status, u16 wIndex) +{ + u32 all_ports_seen_u0 = ((1 << xhci->num_usb3_ports)-1); + bool port_in_u0 = ((status & PORT_PLS_MASK) == XDEV_U0); + + if (!(xhci->quirks & XHCI_COMP_MODE_QUIRK)) + return; + + if ((xhci->port_status_u0 != all_ports_seen_u0) && port_in_u0) { + xhci->port_status_u0 |= 1 << wIndex; + if (xhci->port_status_u0 == all_ports_seen_u0) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg(xhci, "All USB3 ports have entered U0 already!\n"); + xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted.\n"); + } + } +} + int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { @@ -651,6 +688,11 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, /* Update Port Link State for super speed ports*/ if (hcd->speed == HCD_USB3) { xhci_hub_report_link_state(&status, temp); + /* + * Verify if all USB3 Ports Have entered U0 already. + * Delete Compliance Mode Timer if so. + */ + xhci_del_comp_mod_timer(xhci, temp, wIndex); } if (bus_state->port_c_suspend & (1 << wIndex)) status |= 1 << USB_PORT_FEAT_C_SUSPEND; diff --git a/trunk/drivers/usb/host/xhci-pci.c b/trunk/drivers/usb/host/xhci-pci.c index 18b231b0c5d3..9bfd4ca1153c 100644 --- a/trunk/drivers/usb/host/xhci-pci.c +++ b/trunk/drivers/usb/host/xhci-pci.c @@ -94,11 +94,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_EP_LIMIT_QUIRK; xhci->limit_active_eps = 64; xhci->quirks |= XHCI_SW_BW_CHECKING; + /* + * PPT desktop boards DH77EB and DH77DF will power back on after + * a few seconds of being shutdown. The fix for this is to + * switch the ports from xHCI to EHCI on shutdown. We can't use + * DMI information to find those particular boards (since each + * vendor will change the board name), so we have to key off all + * PPT chipsets. 
+ */ + xhci->quirks |= XHCI_SPURIOUS_REBOOT; } if (pdev->vendor == PCI_VENDOR_ID_ETRON && pdev->device == PCI_DEVICE_ID_ASROCK_P67) { xhci->quirks |= XHCI_RESET_ON_RESUME; xhci_dbg(xhci, "QUIRK: Resetting on resume\n"); + xhci->quirks |= XHCI_TRUST_TX_LENGTH; } if (pdev->vendor == PCI_VENDOR_ID_VIA) xhci->quirks |= XHCI_RESET_ON_RESUME; diff --git a/trunk/drivers/usb/host/xhci-plat.c b/trunk/drivers/usb/host/xhci-plat.c index 689bc18b051d..df90fe51b4aa 100644 --- a/trunk/drivers/usb/host/xhci-plat.c +++ b/trunk/drivers/usb/host/xhci-plat.c @@ -118,7 +118,7 @@ static int xhci_plat_probe(struct platform_device *pdev) goto put_hcd; } - hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); if (!hcd->regs) { dev_dbg(&pdev->dev, "error mapping memory\n"); ret = -EFAULT; diff --git a/trunk/drivers/usb/host/xhci-ring.c b/trunk/drivers/usb/host/xhci-ring.c index 8275645889da..643c2f3f3e73 100644 --- a/trunk/drivers/usb/host/xhci-ring.c +++ b/trunk/drivers/usb/host/xhci-ring.c @@ -145,29 +145,37 @@ static void next_trb(struct xhci_hcd *xhci, */ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring) { - union xhci_trb *next; unsigned long long addr; ring->deq_updates++; - /* If this is not event ring, there is one more usable TRB */ + /* + * If this is not event ring, and the dequeue pointer + * is not on a link TRB, there is one more usable TRB + */ if (ring->type != TYPE_EVENT && !last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) ring->num_trbs_free++; - next = ++(ring->dequeue); - /* Update the dequeue pointer further if that was a link TRB or we're at - * the end of an event ring segment (which doesn't have link TRBS) - */ - while (last_trb(xhci, ring, ring->deq_seg, next)) { - if (ring->type == TYPE_EVENT && last_trb_on_last_seg(xhci, - ring, ring->deq_seg, next)) { - ring->cycle_state = (ring->cycle_state ? 0 : 1); + do { + /* + * Update the dequeue pointer further if that was a link TRB or + * we're at the end of an event ring segment (which doesn't have + * link TRBS) + */ + if (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)) { + if (ring->type == TYPE_EVENT && + last_trb_on_last_seg(xhci, ring, + ring->deq_seg, ring->dequeue)) { + ring->cycle_state = (ring->cycle_state ? 
0 : 1); + } + ring->deq_seg = ring->deq_seg->next; + ring->dequeue = ring->deq_seg->trbs; + } else { + ring->dequeue++; } - ring->deq_seg = ring->deq_seg->next; - ring->dequeue = ring->deq_seg->trbs; - next = ring->dequeue; - } + } while (last_trb(xhci, ring, ring->deq_seg, ring->dequeue)); + addr = (unsigned long long) xhci_trb_virt_to_dma(ring->deq_seg, ring->dequeue); } @@ -2073,8 +2081,8 @@ static int handle_tx_event(struct xhci_hcd *xhci, if (xhci->quirks & XHCI_TRUST_TX_LENGTH) trb_comp_code = COMP_SHORT_TX; else - xhci_warn(xhci, "WARN Successful completion on short TX: " - "needs XHCI_TRUST_TX_LENGTH quirk?\n"); + xhci_warn_ratelimited(xhci, + "WARN Successful completion on short TX: needs XHCI_TRUST_TX_LENGTH quirk?\n"); case COMP_SHORT_TX: break; case COMP_STOP: diff --git a/trunk/drivers/usb/host/xhci.c b/trunk/drivers/usb/host/xhci.c index 7648b2d4b268..6ece0ed288d4 100644 --- a/trunk/drivers/usb/host/xhci.c +++ b/trunk/drivers/usb/host/xhci.c @@ -26,6 +26,7 @@ #include #include #include +#include #include "xhci.h" @@ -166,7 +167,7 @@ int xhci_reset(struct xhci_hcd *xhci) xhci_writel(xhci, command, &xhci->op_regs->command); ret = handshake(xhci, &xhci->op_regs->command, - CMD_RESET, 0, 250 * 1000); + CMD_RESET, 0, 10 * 1000 * 1000); if (ret) return ret; @@ -175,7 +176,8 @@ int xhci_reset(struct xhci_hcd *xhci) * xHCI cannot write to any doorbells or operational registers other * than status until the "Controller Not Ready" flag is cleared. */ - ret = handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000); + ret = handshake(xhci, &xhci->op_regs->status, + STS_CNR, 0, 10 * 1000 * 1000); for (i = 0; i < 2; ++i) { xhci->bus_state[i].port_c_suspend = 0; @@ -397,6 +399,95 @@ static void xhci_msix_sync_irqs(struct xhci_hcd *xhci) #endif +static void compliance_mode_recovery(unsigned long arg) +{ + struct xhci_hcd *xhci; + struct usb_hcd *hcd; + u32 temp; + int i; + + xhci = (struct xhci_hcd *)arg; + + for (i = 0; i < xhci->num_usb3_ports; i++) { + temp = xhci_readl(xhci, xhci->usb3_ports[i]); + if ((temp & PORT_PLS_MASK) == USB_SS_PORT_LS_COMP_MOD) { + /* + * Compliance Mode Detected. Letting USB Core + * handle the Warm Reset + */ + xhci_dbg(xhci, "Compliance Mode Detected->Port %d!\n", + i + 1); + xhci_dbg(xhci, "Attempting Recovery routine!\n"); + hcd = xhci->shared_hcd; + + if (hcd->state == HC_STATE_SUSPENDED) + usb_hcd_resume_root_hub(hcd); + + usb_hcd_poll_rh_status(hcd); + } + } + + if (xhci->port_status_u0 != ((1 << xhci->num_usb3_ports)-1)) + mod_timer(&xhci->comp_mode_recovery_timer, + jiffies + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); +} + +/* + * Quirk to work around issue generated by the SN65LVPE502CP USB3.0 re-driver + * that causes ports behind that hardware to enter compliance mode sometimes. + * The quirk creates a timer that polls every 2 seconds the link state of + * each host controller's port and recovers it by issuing a Warm reset + * if Compliance mode is detected, otherwise the port will become "dead" (no + * device connections or disconnections will be detected anymore). Becasue no + * status event is generated when entering compliance mode (per xhci spec), + * this quirk is needed on systems that have the failing hardware installed. 
+ */ +static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci) +{ + xhci->port_status_u0 = 0; + init_timer(&xhci->comp_mode_recovery_timer); + + xhci->comp_mode_recovery_timer.data = (unsigned long) xhci; + xhci->comp_mode_recovery_timer.function = compliance_mode_recovery; + xhci->comp_mode_recovery_timer.expires = jiffies + + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS); + + set_timer_slack(&xhci->comp_mode_recovery_timer, + msecs_to_jiffies(COMP_MODE_RCVRY_MSECS)); + add_timer(&xhci->comp_mode_recovery_timer); + xhci_dbg(xhci, "Compliance Mode Recovery Timer Initialized.\n"); +} + +/* + * This function identifies the systems that have installed the SN65LVPE502CP + * USB3.0 re-driver and that need the Compliance Mode Quirk. + * Systems: + * Vendor: Hewlett-Packard -> System Models: Z420, Z620 and Z820 + */ +static bool compliance_mode_recovery_timer_quirk_check(void) +{ + const char *dmi_product_name, *dmi_sys_vendor; + + dmi_product_name = dmi_get_system_info(DMI_PRODUCT_NAME); + dmi_sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR); + + if (!(strstr(dmi_sys_vendor, "Hewlett-Packard"))) + return false; + + if (strstr(dmi_product_name, "Z420") || + strstr(dmi_product_name, "Z620") || + strstr(dmi_product_name, "Z820")) + return true; + + return false; +} + +static int xhci_all_ports_seen_u0(struct xhci_hcd *xhci) +{ + return (xhci->port_status_u0 == ((1 << xhci->num_usb3_ports)-1)); +} + + /* * Initialize memory for HCD and xHC (one-time init). * @@ -420,6 +511,12 @@ int xhci_init(struct usb_hcd *hcd) retval = xhci_mem_init(xhci, GFP_KERNEL); xhci_dbg(xhci, "Finished xhci_init\n"); + /* Initializing Compliance Mode Recovery Data If Needed */ + if (compliance_mode_recovery_timer_quirk_check()) { + xhci->quirks |= XHCI_COMP_MODE_QUIRK; + compliance_mode_recovery_timer_init(xhci); + } + return retval; } @@ -628,6 +725,11 @@ void xhci_stop(struct usb_hcd *hcd) del_timer_sync(&xhci->event_ring_timer); #endif + /* Deleting Compliance Mode Recovery Timer */ + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (!(xhci_all_ports_seen_u0(xhci)))) + del_timer_sync(&xhci->comp_mode_recovery_timer); + if (xhci->quirks & XHCI_AMD_PLL_FIX) usb_amd_dev_put(); @@ -658,6 +760,9 @@ void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + if (xhci->quirks & XHCI_SPURIOUS_REBOOT) + usb_disable_xhci_ports(to_pci_dev(hcd->self.controller)); + spin_lock_irq(&xhci->lock); xhci_halt(xhci); spin_unlock_irq(&xhci->lock); @@ -802,6 +907,16 @@ int xhci_suspend(struct xhci_hcd *xhci) } spin_unlock_irq(&xhci->lock); + /* + * Deleting Compliance Mode Recovery Timer because the xHCI Host + * is about to be suspended. + */ + if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && + (!(xhci_all_ports_seen_u0(xhci)))) { + del_timer_sync(&xhci->comp_mode_recovery_timer); + xhci_dbg(xhci, "Compliance Mode Recovery Timer Deleted!\n"); + } + /* step 5: remove core well power */ /* synchronize irq when using MSI-X */ xhci_msix_sync_irqs(xhci); @@ -934,6 +1049,16 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) usb_hcd_resume_root_hub(hcd); usb_hcd_resume_root_hub(xhci->shared_hcd); } + + /* + * If system is subject to the Quirk, Compliance Mode Timer needs to + * be re-initialized Always after a system resume. Ports are subject + * to suffer the Compliance Mode issue again. It doesn't matter if + * ports have entered previously to U0 before system's suspension. 
+ */ + if (xhci->quirks & XHCI_COMP_MODE_QUIRK) + compliance_mode_recovery_timer_init(xhci); + return retval; } #endif /* CONFIG_PM */ diff --git a/trunk/drivers/usb/host/xhci.h b/trunk/drivers/usb/host/xhci.h index 55c0785810c9..1a05908c6673 100644 --- a/trunk/drivers/usb/host/xhci.h +++ b/trunk/drivers/usb/host/xhci.h @@ -1494,6 +1494,8 @@ struct xhci_hcd { #define XHCI_TRUST_TX_LENGTH (1 << 10) #define XHCI_LPM_SUPPORT (1 << 11) #define XHCI_INTEL_HOST (1 << 12) +#define XHCI_SPURIOUS_REBOOT (1 << 13) +#define XHCI_COMP_MODE_QUIRK (1 << 14) unsigned int num_active_eps; unsigned int limit_active_eps; /* There are two roothubs to keep track of bus suspend info for */ @@ -1510,6 +1512,11 @@ struct xhci_hcd { unsigned sw_lpm_support:1; /* support xHCI 1.0 spec USB2 hardware LPM */ unsigned hw_lpm_support:1; + /* Compliance Mode Recovery Data */ + struct timer_list comp_mode_recovery_timer; + u32 port_status_u0; +/* Compliance Mode Timer Triggered every 2 seconds */ +#define COMP_MODE_RCVRY_MSECS 2000 }; /* convert between an HCD pointer and the corresponding EHCI_HCD */ @@ -1537,6 +1544,8 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci) dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args) #define xhci_warn(xhci, fmt, args...) \ dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args) +#define xhci_warn_ratelimited(xhci, fmt, args...) \ + dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args) /* TODO: copied from ehci.h - can be refactored? */ /* xHCI spec says all registers are little endian */ diff --git a/trunk/drivers/usb/misc/emi62.c b/trunk/drivers/usb/misc/emi62.c index ff08015b230c..ae794b90766b 100644 --- a/trunk/drivers/usb/misc/emi62.c +++ b/trunk/drivers/usb/misc/emi62.c @@ -232,7 +232,7 @@ static int emi62_load_firmware (struct usb_device *dev) return err; } -static const struct usb_device_id id_table[] __devinitconst = { +static const struct usb_device_id id_table[] = { { USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) }, { } /* Terminating entry */ }; diff --git a/trunk/drivers/usb/musb/Kconfig b/trunk/drivers/usb/musb/Kconfig index ef0c3f9f0947..6259f0d99324 100644 --- a/trunk/drivers/usb/musb/Kconfig +++ b/trunk/drivers/usb/musb/Kconfig @@ -8,7 +8,7 @@ config USB_MUSB_HDRC tristate 'Inventra Highspeed Dual Role Controller (TI, ADI, ...)' depends on USB && USB_GADGET select NOP_USB_XCEIV if (ARCH_DAVINCI || MACH_OMAP3EVM || BLACKFIN) - select NOP_USB_XCEIV if (SOC_OMAPTI81XX || SOC_OMAPAM33XX) + select NOP_USB_XCEIV if (SOC_TI81XX || SOC_AM33XX) select TWL4030_USB if MACH_OMAP_3430SDP select TWL6030_USB if MACH_OMAP_4430SDP || MACH_OMAP4_PANDA select USB_OTG_UTILS @@ -57,7 +57,7 @@ config USB_MUSB_AM35X config USB_MUSB_DSPS tristate "TI DSPS platforms" - depends on SOC_OMAPTI81XX || SOC_OMAPAM33XX + depends on SOC_TI81XX || SOC_AM33XX config USB_MUSB_BLACKFIN tristate "Blackfin" diff --git a/trunk/drivers/usb/musb/musb_dsps.c b/trunk/drivers/usb/musb/musb_dsps.c index 217808d9fbe1..494772fc9e23 100644 --- a/trunk/drivers/usb/musb/musb_dsps.c +++ b/trunk/drivers/usb/musb/musb_dsps.c @@ -479,9 +479,9 @@ static int __devinit dsps_create_musb_pdev(struct dsps_glue *glue, u8 id) ret = -ENODEV; goto err0; } - strcpy((u8 *)res->name, "mc"); res->parent = NULL; resources[1] = *res; + resources[1].name = "mc"; /* allocate the child platform device */ musb = platform_device_alloc("musb-hdrc", -1); @@ -566,27 +566,28 @@ static int __devinit dsps_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, glue); - /* create the child 
platform device for first instances of musb */ - ret = dsps_create_musb_pdev(glue, 0); - if (ret != 0) { - dev_err(&pdev->dev, "failed to create child pdev\n"); - goto err2; - } - /* enable the usbss clocks */ pm_runtime_enable(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) { dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); + goto err2; + } + + /* create the child platform device for first instances of musb */ + ret = dsps_create_musb_pdev(glue, 0); + if (ret != 0) { + dev_err(&pdev->dev, "failed to create child pdev\n"); goto err3; } return 0; err3: - pm_runtime_disable(&pdev->dev); + pm_runtime_put(&pdev->dev); err2: + pm_runtime_disable(&pdev->dev); kfree(glue->wrp); err1: kfree(glue); diff --git a/trunk/drivers/usb/musb/musb_host.c b/trunk/drivers/usb/musb/musb_host.c index 4bb717d0bd41..1ae378d5fc6f 100644 --- a/trunk/drivers/usb/musb/musb_host.c +++ b/trunk/drivers/usb/musb/musb_host.c @@ -2049,7 +2049,7 @@ static int musb_urb_enqueue( * we only have work to do in the former case. */ spin_lock_irqsave(&musb->lock, flags); - if (hep->hcpriv) { + if (hep->hcpriv || !next_urb(qh)) { /* some concurrent activity submitted another urb to hep... * odd, rare, error prone, but legal. */ diff --git a/trunk/drivers/usb/musb/musbhsdma.c b/trunk/drivers/usb/musb/musbhsdma.c index 57a608584e16..c1be687e00ec 100644 --- a/trunk/drivers/usb/musb/musbhsdma.c +++ b/trunk/drivers/usb/musb/musbhsdma.c @@ -388,7 +388,7 @@ dma_controller_create(struct musb *musb, void __iomem *base) struct platform_device *pdev = to_platform_device(dev); int irq = platform_get_irq_byname(pdev, "dma"); - if (irq == 0) { + if (irq <= 0) { dev_err(dev, "No DMA interrupt line!\n"); return NULL; } diff --git a/trunk/drivers/usb/musb/tusb6010.c b/trunk/drivers/usb/musb/tusb6010.c index 1a1bd9cf40c5..341625442377 100644 --- a/trunk/drivers/usb/musb/tusb6010.c +++ b/trunk/drivers/usb/musb/tusb6010.c @@ -1215,7 +1215,7 @@ static int __devinit tusb_probe(struct platform_device *pdev) ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); - goto err1; + goto err2; } return 0; diff --git a/trunk/drivers/usb/renesas_usbhs/common.c b/trunk/drivers/usb/renesas_usbhs/common.c index 8c9bb1ad3069..681da06170c2 100644 --- a/trunk/drivers/usb/renesas_usbhs/common.c +++ b/trunk/drivers/usb/renesas_usbhs/common.c @@ -603,12 +603,12 @@ static int usbhsc_resume(struct device *dev) struct usbhs_priv *priv = dev_get_drvdata(dev); struct platform_device *pdev = usbhs_priv_to_pdev(priv); - usbhs_platform_call(priv, phy_reset, pdev); - if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) usbhsc_power_ctrl(priv, 1); - usbhsc_hotplug(priv); + usbhs_platform_call(priv, phy_reset, pdev); + + usbhsc_drvcllbck_notify_hotplug(pdev); return 0; } diff --git a/trunk/drivers/usb/renesas_usbhs/fifo.c b/trunk/drivers/usb/renesas_usbhs/fifo.c index ecd173032fd4..143c4e9e1be4 100644 --- a/trunk/drivers/usb/renesas_usbhs/fifo.c +++ b/trunk/drivers/usb/renesas_usbhs/fifo.c @@ -818,7 +818,7 @@ static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done) usbhs_pipe_is_dcp(pipe)) goto usbhsf_pio_prepare_push; - if (len % 4) /* 32bit alignment */ + if (len & 0x7) /* 8byte alignment */ goto usbhsf_pio_prepare_push; if ((uintptr_t)(pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */ @@ -905,7 +905,7 @@ static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done) /* use PIO if packet is less than pio_dma_border */ len = usbhsf_fifo_rcv_len(priv, fifo); len = min(pkt->length - pkt->actual, 
len); - if (len % 4) /* 32bit alignment */ + if (len & 0x7) /* 8byte alignment */ goto usbhsf_pio_prepare_pop_unselect; if (len < usbhs_get_dparam(priv, pio_dma_border)) diff --git a/trunk/drivers/usb/renesas_usbhs/mod_host.c b/trunk/drivers/usb/renesas_usbhs/mod_host.c index 1834cf50888c..9b69a1323294 100644 --- a/trunk/drivers/usb/renesas_usbhs/mod_host.c +++ b/trunk/drivers/usb/renesas_usbhs/mod_host.c @@ -1266,6 +1266,12 @@ static int usbhsh_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, return ret; } +static int usbhsh_bus_nop(struct usb_hcd *hcd) +{ + /* nothing to do */ + return 0; +} + static struct hc_driver usbhsh_driver = { .description = usbhsh_hcd_name, .hcd_priv_size = sizeof(struct usbhsh_hpriv), @@ -1290,6 +1296,8 @@ static struct hc_driver usbhsh_driver = { */ .hub_status_data = usbhsh_hub_status_data, .hub_control = usbhsh_hub_control, + .bus_suspend = usbhsh_bus_nop, + .bus_resume = usbhsh_bus_nop, }; /* diff --git a/trunk/drivers/usb/serial/bus.c b/trunk/drivers/usb/serial/bus.c index f398d1e34474..c15f2e7cefc7 100644 --- a/trunk/drivers/usb/serial/bus.c +++ b/trunk/drivers/usb/serial/bus.c @@ -61,18 +61,23 @@ static int usb_serial_device_probe(struct device *dev) goto exit; } + /* make sure suspend/resume doesn't race against port_probe */ + retval = usb_autopm_get_interface(port->serial->interface); + if (retval) + goto exit; + driver = port->serial->type; if (driver->port_probe) { retval = driver->port_probe(port); if (retval) - goto exit; + goto exit_with_autopm; } retval = device_create_file(dev, &dev_attr_port_number); if (retval) { if (driver->port_remove) retval = driver->port_remove(port); - goto exit; + goto exit_with_autopm; } minor = port->number; @@ -81,6 +86,8 @@ static int usb_serial_device_probe(struct device *dev) "%s converter now attached to ttyUSB%d\n", driver->description, minor); +exit_with_autopm: + usb_autopm_put_interface(port->serial->interface); exit: return retval; } @@ -96,6 +103,9 @@ static int usb_serial_device_remove(struct device *dev) if (!port) return -ENODEV; + /* make sure suspend/resume doesn't race against port_remove */ + usb_autopm_get_interface(port->serial->interface); + device_remove_file(&port->dev, &dev_attr_port_number); driver = port->serial->type; @@ -107,6 +117,7 @@ static int usb_serial_device_remove(struct device *dev) dev_info(dev, "%s converter now disconnected from ttyUSB%d\n", driver->description, minor); + usb_autopm_put_interface(port->serial->interface); return retval; } diff --git a/trunk/drivers/usb/serial/ftdi_sio.c b/trunk/drivers/usb/serial/ftdi_sio.c index bc912e5a3beb..f906b3aec217 100644 --- a/trunk/drivers/usb/serial/ftdi_sio.c +++ b/trunk/drivers/usb/serial/ftdi_sio.c @@ -704,6 +704,7 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_PCDJ_DAC2_PID) }, { USB_DEVICE(FTDI_VID, FTDI_RRCIRKITS_LOCOBUFFER_PID) }, { USB_DEVICE(FTDI_VID, FTDI_ASK_RDR400_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_NZR_SEM_USB_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_1_PID) }, { USB_DEVICE(ICOM_VID, ICOM_OPC_U_UC_PID) }, { USB_DEVICE(ICOM_VID, ICOM_ID_RP2C1_PID) }, @@ -804,13 +805,33 @@ static struct usb_device_id id_table_combined [] = { .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, - { USB_DEVICE(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID) }, + { USB_DEVICE_AND_INTERFACE_INFO(MICROCHIP_VID, MICROCHIP_USB_BOARD_PID, + USB_CLASS_VENDOR_SPEC, + USB_SUBCLASS_VENDOR_SPEC, 0x00) }, { USB_DEVICE(JETI_VID, 
JETI_SPC1201_PID) }, { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, { USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) }, { USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) }, + { USB_DEVICE(FTDI_VID, PI_C865_PID) }, + { USB_DEVICE(FTDI_VID, PI_C857_PID) }, + { USB_DEVICE(PI_VID, PI_C866_PID) }, + { USB_DEVICE(PI_VID, PI_C663_PID) }, + { USB_DEVICE(PI_VID, PI_C725_PID) }, + { USB_DEVICE(PI_VID, PI_E517_PID) }, + { USB_DEVICE(PI_VID, PI_C863_PID) }, { USB_DEVICE(PI_VID, PI_E861_PID) }, + { USB_DEVICE(PI_VID, PI_C867_PID) }, + { USB_DEVICE(PI_VID, PI_E609_PID) }, + { USB_DEVICE(PI_VID, PI_E709_PID) }, + { USB_DEVICE(PI_VID, PI_100F_PID) }, + { USB_DEVICE(PI_VID, PI_1011_PID) }, + { USB_DEVICE(PI_VID, PI_1012_PID) }, + { USB_DEVICE(PI_VID, PI_1013_PID) }, + { USB_DEVICE(PI_VID, PI_1014_PID) }, + { USB_DEVICE(PI_VID, PI_1015_PID) }, + { USB_DEVICE(PI_VID, PI_1016_PID) }, + { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) }, { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) }, { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, diff --git a/trunk/drivers/usb/serial/ftdi_sio_ids.h b/trunk/drivers/usb/serial/ftdi_sio_ids.h index 5661c7e2d415..41fe5826100c 100644 --- a/trunk/drivers/usb/serial/ftdi_sio_ids.h +++ b/trunk/drivers/usb/serial/ftdi_sio_ids.h @@ -75,6 +75,9 @@ #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB #define FTDI_OPENDCC_GBM_PID 0xBFDC +/* NZR SEM 16+ USB (http://www.nzr.de) */ +#define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */ + /* * RR-CirKits LocoBuffer USB (http://www.rr-cirkits.com) */ @@ -539,7 +542,10 @@ /* * Microchip Technology, Inc. * - * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are also used by: + * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are + * used by single function CDC ACM class based firmware demo + * applications. The VID/PID has also been used in firmware + * emulating FTDI serial chips by: * Hornby Elite - Digital Command Control Console * http://www.hornby.com/hornby-dcc/controllers/ */ @@ -791,8 +797,34 @@ * Physik Instrumente * http://www.physikinstrumente.com/en/products/ */ +/* These two devices use the VID of FTDI */ +#define PI_C865_PID 0xe0a0 /* PI C-865 Piezomotor Controller */ +#define PI_C857_PID 0xe0a1 /* PI Encoder Trigger Box */ + #define PI_VID 0x1a72 /* Vendor ID */ -#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */ +#define PI_C866_PID 0x1000 /* PI C-866 Piezomotor Controller */ +#define PI_C663_PID 0x1001 /* PI C-663 Mercury-Step */ +#define PI_C725_PID 0x1002 /* PI C-725 Piezomotor Controller */ +#define PI_E517_PID 0x1005 /* PI E-517 Digital Piezo Controller Operation Module */ +#define PI_C863_PID 0x1007 /* PI C-863 */ +#define PI_E861_PID 0x1008 /* PI E-861 Piezomotor Controller */ +#define PI_C867_PID 0x1009 /* PI C-867 Piezomotor Controller */ +#define PI_E609_PID 0x100D /* PI E-609 Digital Piezo Controller */ +#define PI_E709_PID 0x100E /* PI E-709 Digital Piezo Controller */ +#define PI_100F_PID 0x100F /* PI Digital Piezo Controller */ +#define PI_1011_PID 0x1011 /* PI Digital Piezo Controller */ +#define PI_1012_PID 0x1012 /* PI Motion Controller */ +#define PI_1013_PID 0x1013 /* PI Motion Controller */ +#define PI_1014_PID 0x1014 /* PI Device */ +#define PI_1015_PID 0x1015 /* PI Device */ +#define PI_1016_PID 0x1016 /* PI Digital Servo Module */ + +/* + * Kondo Kagaku Co.Ltd. 
+ * http://www.kondo-robot.com/EN + */ +#define KONDO_VID 0x165c +#define KONDO_USB_SERIAL_PID 0x0002 /* * Bayer Ascensia Contour blood glucose meter USB-converter cable. diff --git a/trunk/drivers/usb/serial/ipw.c b/trunk/drivers/usb/serial/ipw.c index 5811d34b6c6b..2cb30c535839 100644 --- a/trunk/drivers/usb/serial/ipw.c +++ b/trunk/drivers/usb/serial/ipw.c @@ -227,7 +227,6 @@ static void ipw_release(struct usb_serial *serial) { struct usb_wwan_intf_private *data = usb_get_serial_data(serial); - usb_wwan_release(serial); usb_set_serial_data(serial, NULL); kfree(data); } @@ -309,12 +308,12 @@ static struct usb_serial_driver ipw_device = { .description = "IPWireless converter", .id_table = id_table, .num_ports = 1, - .disconnect = usb_wwan_disconnect, .open = ipw_open, .close = ipw_close, .probe = ipw_probe, .attach = usb_wwan_startup, .release = ipw_release, + .port_remove = usb_wwan_port_remove, .dtr_rts = ipw_dtr_rts, .write = usb_wwan_write, }; diff --git a/trunk/drivers/usb/serial/mos7840.c b/trunk/drivers/usb/serial/mos7840.c index 57eca2448424..2f6da1e89bfa 100644 --- a/trunk/drivers/usb/serial/mos7840.c +++ b/trunk/drivers/usb/serial/mos7840.c @@ -82,8 +82,7 @@ * Defines used for sending commands to port */ -#define WAIT_FOR_EVER (HZ * 0) /* timeout urb is wait for ever */ -#define MOS_WDR_TIMEOUT (HZ * 5) /* default urb timeout */ +#define MOS_WDR_TIMEOUT 5000 /* default urb timeout */ #define MOS_PORT1 0x0200 #define MOS_PORT2 0x0300 @@ -1232,9 +1231,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty) return 0; spin_lock_irqsave(&mos7840_port->pool_lock, flags); - for (i = 0; i < NUM_URBS; ++i) - if (mos7840_port->busy[i]) - chars += URB_TRANSFER_BUFFER_SIZE; + for (i = 0; i < NUM_URBS; ++i) { + if (mos7840_port->busy[i]) { + struct urb *urb = mos7840_port->write_urb_pool[i]; + chars += urb->transfer_buffer_length; + } + } spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); dbg("%s - returns %d", __func__, chars); return chars; @@ -1344,7 +1346,7 @@ static void mos7840_close(struct usb_serial_port *port) static void mos7840_block_until_chase_response(struct tty_struct *tty, struct moschip_port *mos7840_port) { - int timeout = 1 * HZ; + int timeout = msecs_to_jiffies(1000); int wait = 10; int count; @@ -2672,7 +2674,7 @@ static int mos7840_startup(struct usb_serial *serial) /* setting configuration feature to one */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5 * HZ); + (__u8) 0x03, 0x00, 0x01, 0x00, NULL, 0x00, MOS_WDR_TIMEOUT); return 0; error: for (/* nothing */; i >= 0; i--) { diff --git a/trunk/drivers/usb/serial/option.c b/trunk/drivers/usb/serial/option.c index 08ff9b862049..5ce88d1bc6f1 100644 --- a/trunk/drivers/usb/serial/option.c +++ b/trunk/drivers/usb/serial/option.c @@ -80,85 +80,9 @@ static void option_instat_callback(struct urb *urb); #define OPTION_PRODUCT_GTM380_MODEM 0x7201 #define HUAWEI_VENDOR_ID 0x12D1 -#define HUAWEI_PRODUCT_E600 0x1001 -#define HUAWEI_PRODUCT_E220 0x1003 -#define HUAWEI_PRODUCT_E220BIS 0x1004 -#define HUAWEI_PRODUCT_E1401 0x1401 -#define HUAWEI_PRODUCT_E1402 0x1402 -#define HUAWEI_PRODUCT_E1403 0x1403 -#define HUAWEI_PRODUCT_E1404 0x1404 -#define HUAWEI_PRODUCT_E1405 0x1405 -#define HUAWEI_PRODUCT_E1406 0x1406 -#define HUAWEI_PRODUCT_E1407 0x1407 -#define HUAWEI_PRODUCT_E1408 0x1408 -#define HUAWEI_PRODUCT_E1409 0x1409 -#define HUAWEI_PRODUCT_E140A 0x140A -#define HUAWEI_PRODUCT_E140B 0x140B -#define HUAWEI_PRODUCT_E140C 0x140C -#define HUAWEI_PRODUCT_E140D 
0x140D -#define HUAWEI_PRODUCT_E140E 0x140E -#define HUAWEI_PRODUCT_E140F 0x140F -#define HUAWEI_PRODUCT_E1410 0x1410 -#define HUAWEI_PRODUCT_E1411 0x1411 -#define HUAWEI_PRODUCT_E1412 0x1412 -#define HUAWEI_PRODUCT_E1413 0x1413 -#define HUAWEI_PRODUCT_E1414 0x1414 -#define HUAWEI_PRODUCT_E1415 0x1415 -#define HUAWEI_PRODUCT_E1416 0x1416 -#define HUAWEI_PRODUCT_E1417 0x1417 -#define HUAWEI_PRODUCT_E1418 0x1418 -#define HUAWEI_PRODUCT_E1419 0x1419 -#define HUAWEI_PRODUCT_E141A 0x141A -#define HUAWEI_PRODUCT_E141B 0x141B -#define HUAWEI_PRODUCT_E141C 0x141C -#define HUAWEI_PRODUCT_E141D 0x141D -#define HUAWEI_PRODUCT_E141E 0x141E -#define HUAWEI_PRODUCT_E141F 0x141F -#define HUAWEI_PRODUCT_E1420 0x1420 -#define HUAWEI_PRODUCT_E1421 0x1421 -#define HUAWEI_PRODUCT_E1422 0x1422 -#define HUAWEI_PRODUCT_E1423 0x1423 -#define HUAWEI_PRODUCT_E1424 0x1424 -#define HUAWEI_PRODUCT_E1425 0x1425 -#define HUAWEI_PRODUCT_E1426 0x1426 -#define HUAWEI_PRODUCT_E1427 0x1427 -#define HUAWEI_PRODUCT_E1428 0x1428 -#define HUAWEI_PRODUCT_E1429 0x1429 -#define HUAWEI_PRODUCT_E142A 0x142A -#define HUAWEI_PRODUCT_E142B 0x142B -#define HUAWEI_PRODUCT_E142C 0x142C -#define HUAWEI_PRODUCT_E142D 0x142D -#define HUAWEI_PRODUCT_E142E 0x142E -#define HUAWEI_PRODUCT_E142F 0x142F -#define HUAWEI_PRODUCT_E1430 0x1430 -#define HUAWEI_PRODUCT_E1431 0x1431 -#define HUAWEI_PRODUCT_E1432 0x1432 -#define HUAWEI_PRODUCT_E1433 0x1433 -#define HUAWEI_PRODUCT_E1434 0x1434 -#define HUAWEI_PRODUCT_E1435 0x1435 -#define HUAWEI_PRODUCT_E1436 0x1436 -#define HUAWEI_PRODUCT_E1437 0x1437 -#define HUAWEI_PRODUCT_E1438 0x1438 -#define HUAWEI_PRODUCT_E1439 0x1439 -#define HUAWEI_PRODUCT_E143A 0x143A -#define HUAWEI_PRODUCT_E143B 0x143B -#define HUAWEI_PRODUCT_E143C 0x143C -#define HUAWEI_PRODUCT_E143D 0x143D -#define HUAWEI_PRODUCT_E143E 0x143E -#define HUAWEI_PRODUCT_E143F 0x143F #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 -#define HUAWEI_PRODUCT_E14AC 0x14AC -#define HUAWEI_PRODUCT_K3806 0x14AE #define HUAWEI_PRODUCT_K4605 0x14C6 -#define HUAWEI_PRODUCT_K5005 0x14C8 -#define HUAWEI_PRODUCT_K3770 0x14C9 -#define HUAWEI_PRODUCT_K3771 0x14CA -#define HUAWEI_PRODUCT_K4510 0x14CB -#define HUAWEI_PRODUCT_K4511 0x14CC -#define HUAWEI_PRODUCT_ETS1220 0x1803 -#define HUAWEI_PRODUCT_E353 0x1506 -#define HUAWEI_PRODUCT_E173S 0x1C05 #define QUANTA_VENDOR_ID 0x0408 #define QUANTA_PRODUCT_Q101 0xEA02 @@ -615,104 +539,123 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) }, { USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) 
}, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) }, - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */ - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */ - { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */ + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) }, + { 
USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 
0xff, 0x02, 0x33) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) }, + { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) }, + + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) }, @@ -1147,6 +1090,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) }, + { USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) }, + { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ @@ -1297,8 +1244,8 @@ static struct usb_serial_driver option_1port_device = { .tiocmset = usb_wwan_tiocmset, .ioctl = usb_wwan_ioctl, .attach = usb_wwan_startup, - .disconnect = usb_wwan_disconnect, .release = option_release, + .port_remove = usb_wwan_port_remove, .read_int_callback = option_instat_callback, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, @@ -1414,8 +1361,6 @@ static void option_release(struct usb_serial *serial) struct usb_wwan_intf_private *intfdata = usb_get_serial_data(serial); struct 
option_private *priv = intfdata->private; - usb_wwan_release(serial); - kfree(priv); kfree(intfdata); } diff --git a/trunk/drivers/usb/serial/qcserial.c b/trunk/drivers/usb/serial/qcserial.c index 8d103019d6aa..bfd50779f0c9 100644 --- a/trunk/drivers/usb/serial/qcserial.c +++ b/trunk/drivers/usb/serial/qcserial.c @@ -199,43 +199,49 @@ static int qcprobe(struct usb_serial *serial, const struct usb_device_id *id) /* default to enabling interface */ altsetting = 0; - switch (ifnum) { - /* Composite mode; don't bind to the QMI/net interface as that - * gets handled by other drivers. - */ + /* Composite mode; don't bind to the QMI/net interface as that + * gets handled by other drivers. + */ + + if (is_gobi1k) { /* Gobi 1K USB layout: * 0: serial port (doesn't respond) * 1: serial port (doesn't respond) * 2: AT-capable modem port * 3: QMI/net - * - * Gobi 2K+ USB layout: + */ + if (ifnum == 2) + dev_dbg(dev, "Modem port found\n"); + else + altsetting = -1; + } else { + /* Gobi 2K+ USB layout: * 0: QMI/net * 1: DM/DIAG (use libqcdm from ModemManager for communication) * 2: AT-capable modem port * 3: NMEA */ - - case 1: - if (is_gobi1k) + switch (ifnum) { + case 0: + /* Don't claim the QMI/net interface */ altsetting = -1; - else + break; + case 1: dev_dbg(dev, "Gobi 2K+ DM/DIAG interface found\n"); - break; - case 2: - dev_dbg(dev, "Modem port found\n"); - break; - case 3: - if (is_gobi1k) - altsetting = -1; - else + break; + case 2: + dev_dbg(dev, "Modem port found\n"); + break; + case 3: /* * NMEA (serial line 9600 8N1) * # echo "\$GPS_START" > /dev/ttyUSBx * # echo "\$GPS_STOP" > /dev/ttyUSBx */ dev_dbg(dev, "Gobi 2K+ NMEA GPS interface found\n"); + break; + } } done: @@ -262,8 +268,7 @@ static void qc_release(struct usb_serial *serial) { struct usb_wwan_intf_private *priv = usb_get_serial_data(serial); - /* Call usb_wwan release & free the private data allocated in qcprobe */ - usb_wwan_release(serial); + /* Free the private data allocated in qcprobe */ usb_set_serial_data(serial, NULL); kfree(priv); } @@ -283,8 +288,8 @@ static struct usb_serial_driver qcdevice = { .write_room = usb_wwan_write_room, .chars_in_buffer = usb_wwan_chars_in_buffer, .attach = usb_wwan_startup, - .disconnect = usb_wwan_disconnect, .release = qc_release, + .port_remove = usb_wwan_port_remove, #ifdef CONFIG_PM .suspend = usb_wwan_suspend, .resume = usb_wwan_resume, diff --git a/trunk/drivers/usb/serial/usb-wwan.h b/trunk/drivers/usb/serial/usb-wwan.h index c47b6ec03063..1f034d2397c6 100644 --- a/trunk/drivers/usb/serial/usb-wwan.h +++ b/trunk/drivers/usb/serial/usb-wwan.h @@ -9,8 +9,7 @@ extern void usb_wwan_dtr_rts(struct usb_serial_port *port, int on); extern int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port); extern void usb_wwan_close(struct usb_serial_port *port); extern int usb_wwan_startup(struct usb_serial *serial); -extern void usb_wwan_disconnect(struct usb_serial *serial); -extern void usb_wwan_release(struct usb_serial *serial); +extern int usb_wwan_port_remove(struct usb_serial_port *port); extern int usb_wwan_write_room(struct tty_struct *tty); extern void usb_wwan_set_termios(struct tty_struct *tty, struct usb_serial_port *port, diff --git a/trunk/drivers/usb/serial/usb_wwan.c b/trunk/drivers/usb/serial/usb_wwan.c index f35971dff4a5..6855d5ed0331 100644 --- a/trunk/drivers/usb/serial/usb_wwan.c +++ b/trunk/drivers/usb/serial/usb_wwan.c @@ -565,62 +565,52 @@ int usb_wwan_startup(struct usb_serial *serial) } EXPORT_SYMBOL(usb_wwan_startup); -static void stop_read_write_urbs(struct 
usb_serial *serial) +int usb_wwan_port_remove(struct usb_serial_port *port) { - int i, j; - struct usb_serial_port *port; + int i; struct usb_wwan_port_private *portdata; - /* Stop reading/writing urbs */ - for (i = 0; i < serial->num_ports; ++i) { - port = serial->port[i]; - portdata = usb_get_serial_port_data(port); - for (j = 0; j < N_IN_URB; j++) - usb_kill_urb(portdata->in_urbs[j]); - for (j = 0; j < N_OUT_URB; j++) - usb_kill_urb(portdata->out_urbs[j]); + portdata = usb_get_serial_port_data(port); + usb_set_serial_port_data(port, NULL); + + /* Stop reading/writing urbs and free them */ + for (i = 0; i < N_IN_URB; i++) { + usb_kill_urb(portdata->in_urbs[i]); + usb_free_urb(portdata->in_urbs[i]); + free_page((unsigned long)portdata->in_buffer[i]); + } + for (i = 0; i < N_OUT_URB; i++) { + usb_kill_urb(portdata->out_urbs[i]); + usb_free_urb(portdata->out_urbs[i]); + kfree(portdata->out_buffer[i]); } -} -void usb_wwan_disconnect(struct usb_serial *serial) -{ - stop_read_write_urbs(serial); + /* Now free port private data */ + kfree(portdata); + return 0; } -EXPORT_SYMBOL(usb_wwan_disconnect); +EXPORT_SYMBOL(usb_wwan_port_remove); -void usb_wwan_release(struct usb_serial *serial) +#ifdef CONFIG_PM +static void stop_read_write_urbs(struct usb_serial *serial) { int i, j; struct usb_serial_port *port; struct usb_wwan_port_private *portdata; - /* Now free them */ + /* Stop reading/writing urbs */ for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); - - for (j = 0; j < N_IN_URB; j++) { - usb_free_urb(portdata->in_urbs[j]); - free_page((unsigned long) - portdata->in_buffer[j]); - portdata->in_urbs[j] = NULL; - } - for (j = 0; j < N_OUT_URB; j++) { - usb_free_urb(portdata->out_urbs[j]); - kfree(portdata->out_buffer[j]); - portdata->out_urbs[j] = NULL; - } - } - - /* Now free per port private data */ - for (i = 0; i < serial->num_ports; i++) { - port = serial->port[i]; - kfree(usb_get_serial_port_data(port)); + if (!portdata) + continue; + for (j = 0; j < N_IN_URB; j++) + usb_kill_urb(portdata->in_urbs[j]); + for (j = 0; j < N_OUT_URB; j++) + usb_kill_urb(portdata->out_urbs[j]); } } -EXPORT_SYMBOL(usb_wwan_release); -#ifdef CONFIG_PM int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message) { struct usb_wwan_intf_private *intfdata = serial->private; @@ -712,7 +702,7 @@ int usb_wwan_resume(struct usb_serial *serial) /* skip closed ports */ spin_lock_irq(&intfdata->susp_lock); - if (!portdata->opened) { + if (!portdata || !portdata->opened) { spin_unlock_irq(&intfdata->susp_lock); continue; } diff --git a/trunk/drivers/vfio/pci/vfio_pci_intrs.c b/trunk/drivers/vfio/pci/vfio_pci_intrs.c index 211a4920b88a..d8dedc7d3910 100644 --- a/trunk/drivers/vfio/pci/vfio_pci_intrs.c +++ b/trunk/drivers/vfio/pci/vfio_pci_intrs.c @@ -76,9 +76,24 @@ static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key) schedule_work(&virqfd->inject); } - if (flags & POLLHUP) - /* The eventfd is closing, detach from VFIO */ - virqfd_deactivate(virqfd); + if (flags & POLLHUP) { + unsigned long flags; + spin_lock_irqsave(&virqfd->vdev->irqlock, flags); + + /* + * The eventfd is closing, if the virqfd has not yet been + * queued for release, as determined by testing whether the + * vdev pointer to it is still valid, queue it now. As + * with kvm irqfds, we know we won't race against the virqfd + * going away because we hold wqh->lock to get here. 
+ */ + if (*(virqfd->pvirqfd) == virqfd) { + *(virqfd->pvirqfd) = NULL; + virqfd_deactivate(virqfd); + } + + spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags); + } return 0; } @@ -93,7 +108,6 @@ static void virqfd_ptable_queue_proc(struct file *file, static void virqfd_shutdown(struct work_struct *work) { struct virqfd *virqfd = container_of(work, struct virqfd, shutdown); - struct virqfd **pvirqfd = virqfd->pvirqfd; u64 cnt; eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt); @@ -101,7 +115,6 @@ static void virqfd_shutdown(struct work_struct *work) eventfd_ctx_put(virqfd->eventfd); kfree(virqfd); - *pvirqfd = NULL; } static void virqfd_inject(struct work_struct *work) @@ -122,15 +135,11 @@ static int virqfd_enable(struct vfio_pci_device *vdev, int ret = 0; unsigned int events; - if (*pvirqfd) - return -EBUSY; - virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL); if (!virqfd) return -ENOMEM; virqfd->pvirqfd = pvirqfd; - *pvirqfd = virqfd; virqfd->vdev = vdev; virqfd->handler = handler; virqfd->thread = thread; @@ -153,6 +162,23 @@ static int virqfd_enable(struct vfio_pci_device *vdev, virqfd->eventfd = ctx; + /* + * virqfds can be released by closing the eventfd or directly + * through ioctl. These are both done through a workqueue, so + * we update the pointer to the virqfd under lock to avoid + * pushing multiple jobs to release the same virqfd. + */ + spin_lock_irq(&vdev->irqlock); + + if (*pvirqfd) { + spin_unlock_irq(&vdev->irqlock); + ret = -EBUSY; + goto fail; + } + *pvirqfd = virqfd; + + spin_unlock_irq(&vdev->irqlock); + /* * Install our own custom wake-up handling so we are notified via * a callback whenever someone signals the underlying eventfd. @@ -187,19 +213,29 @@ static int virqfd_enable(struct vfio_pci_device *vdev, fput(file); kfree(virqfd); - *pvirqfd = NULL; return ret; } -static void virqfd_disable(struct virqfd *virqfd) +static void virqfd_disable(struct vfio_pci_device *vdev, + struct virqfd **pvirqfd) { - if (!virqfd) - return; + unsigned long flags; + + spin_lock_irqsave(&vdev->irqlock, flags); + + if (*pvirqfd) { + virqfd_deactivate(*pvirqfd); + *pvirqfd = NULL; + } - virqfd_deactivate(virqfd); + spin_unlock_irqrestore(&vdev->irqlock, flags); - /* Block until we know all outstanding shutdown jobs have completed. */ + /* + * Block until we know all outstanding shutdown jobs have completed. + * Even if we don't queue the job, flush the wq to be sure it's + * been released. 
+ */ flush_workqueue(vfio_irqfd_cleanup_wq); } @@ -392,8 +428,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd) static void vfio_intx_disable(struct vfio_pci_device *vdev) { vfio_intx_set_signal(vdev, -1); - virqfd_disable(vdev->ctx[0].unmask); - virqfd_disable(vdev->ctx[0].mask); + virqfd_disable(vdev, &vdev->ctx[0].unmask); + virqfd_disable(vdev, &vdev->ctx[0].mask); vdev->irq_type = VFIO_PCI_NUM_IRQS; vdev->num_ctx = 0; kfree(vdev->ctx); @@ -539,8 +575,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix) vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix); for (i = 0; i < vdev->num_ctx; i++) { - virqfd_disable(vdev->ctx[i].unmask); - virqfd_disable(vdev->ctx[i].mask); + virqfd_disable(vdev, &vdev->ctx[i].unmask); + virqfd_disable(vdev, &vdev->ctx[i].mask); } if (msix) { @@ -577,7 +613,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev, vfio_send_intx_eventfd, NULL, &vdev->ctx[0].unmask, fd); - virqfd_disable(vdev->ctx[0].unmask); + virqfd_disable(vdev, &vdev->ctx[0].unmask); } return 0; diff --git a/trunk/drivers/vfio/vfio.c b/trunk/drivers/vfio/vfio.c index 9591e2b509d7..17830c9c7cc6 100644 --- a/trunk/drivers/vfio/vfio.c +++ b/trunk/drivers/vfio/vfio.c @@ -264,6 +264,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) return group; } +/* called with vfio.group_lock held */ static void vfio_group_release(struct kref *kref) { struct vfio_group *group = container_of(kref, struct vfio_group, kref); @@ -287,13 +288,7 @@ static void vfio_group_release(struct kref *kref) static void vfio_group_put(struct vfio_group *group) { - mutex_lock(&vfio.group_lock); - /* - * Release needs to unlock to unregister the notifier, so only - * unlock if not released. - */ - if (!kref_put(&group->kref, vfio_group_release)) - mutex_unlock(&vfio.group_lock); + kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); } /* Assume group_lock or group reference is held */ @@ -401,7 +396,6 @@ static void vfio_device_release(struct kref *kref) struct vfio_device, kref); struct vfio_group *group = device->group; - mutex_lock(&group->device_lock); list_del(&device->group_next); mutex_unlock(&group->device_lock); @@ -416,8 +410,9 @@ static void vfio_device_release(struct kref *kref) /* Device reference always implies a group reference */ static void vfio_device_put(struct vfio_device *device) { - kref_put(&device->kref, vfio_device_release); - vfio_group_put(device->group); + struct vfio_group *group = device->group; + kref_put_mutex(&device->kref, vfio_device_release, &group->device_lock); + vfio_group_put(group); } static void vfio_device_get(struct vfio_device *device) @@ -1116,10 +1111,10 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf) */ filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); - fd_install(ret, filep); - vfio_device_get(device); atomic_inc(&group->container_users); + + fd_install(ret, filep); break; } mutex_unlock(&group->device_lock); diff --git a/trunk/drivers/vhost/Kconfig b/trunk/drivers/vhost/Kconfig index e4e2fd1b5107..202bba6c997c 100644 --- a/trunk/drivers/vhost/Kconfig +++ b/trunk/drivers/vhost/Kconfig @@ -9,3 +9,6 @@ config VHOST_NET To compile this driver as a module, choose M here: the module will be called vhost_net. 
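The vfio.c hunk above converts vfio_group_put() and vfio_device_put() from the open-coded "lock, kref_put(), unlock only if not released" dance to kref_put_mutex(), which takes the mutex only when the count may reach zero and leaves the release callback responsible for dropping it. A rough userspace approximation of that helper, using C11 atomics and a pthread mutex; the names and the fast-path CAS loop are illustrative, not the kernel implementation:

/* Approximation of the kref_put_mutex() pattern: the final reference drop
 * happens with the lookup lock held, and release() must unlock it. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
	atomic_int refcount;
	void (*release)(struct object *obj);	/* called with obj_lock held, must unlock it */
};

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;

static int object_put_mutex(struct object *obj)
{
	int old = atomic_load(&obj->refcount);

	/* Fast path: clearly not the last reference, no lock needed. */
	while (old > 1) {
		if (atomic_compare_exchange_weak(&obj->refcount, &old, old - 1))
			return 0;
	}

	/* Possibly the last reference: serialize against lookups. */
	pthread_mutex_lock(&obj_lock);
	if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
		obj->release(obj);		/* release() drops obj_lock itself */
		return 1;
	}
	pthread_mutex_unlock(&obj_lock);
	return 0;
}

static void object_release(struct object *obj)
{
	/* Unlink from any lookup structures here, then unlock and free. */
	pthread_mutex_unlock(&obj_lock);
	free(obj);
}

int main(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;
	atomic_init(&obj->refcount, 2);
	obj->release = object_release;

	printf("first put released: %d\n", object_put_mutex(obj));	/* 0 */
	printf("final put released: %d\n", object_put_mutex(obj));	/* 1 */
	return 0;
}

The same file also moves fd_install() in vfio_group_get_device_fd() after vfio_device_get() and the container_users increment: once the fd is installed, userspace can close it immediately, so every reference the release path will drop has to exist before the fd becomes visible.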
+if STAGING +source "drivers/vhost/Kconfig.tcm" +endif diff --git a/trunk/drivers/vhost/Kconfig.tcm b/trunk/drivers/vhost/Kconfig.tcm new file mode 100644 index 000000000000..a9c6f76e3208 --- /dev/null +++ b/trunk/drivers/vhost/Kconfig.tcm @@ -0,0 +1,6 @@ +config TCM_VHOST + tristate "TCM_VHOST fabric module (EXPERIMENTAL)" + depends on TARGET_CORE && EVENTFD && EXPERIMENTAL && m + default n + ---help--- + Say M here to enable the TCM_VHOST fabric module for use with virtio-scsi guests diff --git a/trunk/drivers/vhost/Makefile b/trunk/drivers/vhost/Makefile index 72dd02050bb9..a27b053bc9ab 100644 --- a/trunk/drivers/vhost/Makefile +++ b/trunk/drivers/vhost/Makefile @@ -1,2 +1,4 @@ obj-$(CONFIG_VHOST_NET) += vhost_net.o vhost_net-y := vhost.o net.o + +obj-$(CONFIG_TCM_VHOST) += tcm_vhost.o diff --git a/trunk/drivers/vhost/tcm_vhost.c b/trunk/drivers/vhost/tcm_vhost.c new file mode 100644 index 000000000000..ed8e2e6c8df2 --- /dev/null +++ b/trunk/drivers/vhost/tcm_vhost.c @@ -0,0 +1,1649 @@ +/******************************************************************************* + * Vhost kernel TCM fabric driver for virtio SCSI initiators + * + * (C) Copyright 2010-2012 RisingTide Systems LLC. + * (C) Copyright 2010-2012 IBM Corp. + * + * Licensed to the Linux Foundation under the General Public License (GPL) version 2. + * + * Authors: Nicholas A. Bellinger + * Stefan Hajnoczi + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + ****************************************************************************/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* TODO vhost.h currently depends on this */ +#include + +#include "vhost.c" +#include "vhost.h" +#include "tcm_vhost.h" + +enum { + VHOST_SCSI_VQ_CTL = 0, + VHOST_SCSI_VQ_EVT = 1, + VHOST_SCSI_VQ_IO = 2, +}; + +struct vhost_scsi { + struct tcm_vhost_tpg *vs_tpg; /* Protected by vhost_scsi->dev.mutex */ + struct vhost_dev dev; + struct vhost_virtqueue vqs[3]; + + struct vhost_work vs_completion_work; /* cmd completion work item */ + struct list_head vs_completion_list; /* cmd completion queue */ + spinlock_t vs_completion_lock; /* protects s_completion_list */ +}; + +/* Local pointer to allocated TCM configfs fabric module */ +static struct target_fabric_configfs *tcm_vhost_fabric_configfs; + +static struct workqueue_struct *tcm_vhost_workqueue; + +/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */ +static DEFINE_MUTEX(tcm_vhost_mutex); +static LIST_HEAD(tcm_vhost_list); + +static int tcm_vhost_check_true(struct se_portal_group *se_tpg) +{ + return 1; +} + +static int tcm_vhost_check_false(struct se_portal_group *se_tpg) +{ + return 0; +} + +static char *tcm_vhost_get_fabric_name(void) +{ + return "vhost"; +} + +static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_FCP: + return fc_get_fabric_proto_ident(se_tpg); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_fabric_proto_ident(se_tpg); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_fabric_proto_ident(se_tpg); +} + +static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + return &tport->tport_name[0]; +} + +static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + return tpg->tport_tpgt; +} + +static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) +{ + return 1; +} + +static u32 tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code, + unsigned char *buf) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg, + format_code, buf); +} + +static u32 
tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl, + struct t10_pr_registration *pr_reg, + int *format_code) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_FCP: + return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + case SCSI_PROTOCOL_ISCSI: + return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg, + format_code); +} + +static char *tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, + const char *buf, + u32 *out_tid_len, + char **port_nexus_ptr) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport = tpg->tport; + + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_FCP: + return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + case SCSI_PROTOCOL_ISCSI: + return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); + default: + pr_err("Unknown tport_proto_id: 0x%02x, using" + " SAS emulation\n", tport->tport_proto_id); + break; + } + + return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len, + port_nexus_ptr); +} + +static struct se_node_acl *tcm_vhost_alloc_fabric_acl( + struct se_portal_group *se_tpg) +{ + struct tcm_vhost_nacl *nacl; + + nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL); + if (!nacl) { + pr_err("Unable to alocate struct tcm_vhost_nacl\n"); + return NULL; + } + + return &nacl->se_node_acl; +} + +static void tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, + struct se_node_acl *se_nacl) +{ + struct tcm_vhost_nacl *nacl = container_of(se_nacl, + struct tcm_vhost_nacl, se_node_acl); + kfree(nacl); +} + +static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg) +{ + return 1; +} + +static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) +{ + return; +} + +static int tcm_vhost_shutdown_session(struct se_session *se_sess) +{ + return 0; +} + +static void tcm_vhost_close_session(struct se_session *se_sess) +{ + return; +} + +static u32 tcm_vhost_sess_get_index(struct se_session *se_sess) +{ + return 0; +} + +static int tcm_vhost_write_pending(struct se_cmd *se_cmd) +{ + /* Go ahead and process the write immediately */ + target_execute_cmd(se_cmd); + return 0; +} + +static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd) +{ + return 0; +} + +static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl) +{ + return; +} + +static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd) +{ + return 0; +} + +static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) +{ + return 0; +} + +static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *tv_cmd) +{ + struct vhost_scsi *vs = tv_cmd->tvc_vhost; + + spin_lock_bh(&vs->vs_completion_lock); + list_add_tail(&tv_cmd->tvc_completion_list, &vs->vs_completion_list); + spin_unlock_bh(&vs->vs_completion_lock); + + vhost_work_queue(&vs->dev, &vs->vs_completion_work); +} + +static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) +{ + 
struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, + struct tcm_vhost_cmd, tvc_se_cmd); + vhost_scsi_complete_cmd(tv_cmd); + return 0; +} + +static int tcm_vhost_queue_status(struct se_cmd *se_cmd) +{ + struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, + struct tcm_vhost_cmd, tvc_se_cmd); + vhost_scsi_complete_cmd(tv_cmd); + return 0; +} + +static int tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) +{ + return 0; +} + +static u16 tcm_vhost_set_fabric_sense_len(struct se_cmd *se_cmd, + u32 sense_length) +{ + return 0; +} + +static u16 tcm_vhost_get_fabric_sense_len(void) +{ + return 0; +} + +static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *tv_cmd) +{ + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + + /* TODO locking against target/backend threads? */ + transport_generic_free_cmd(se_cmd, 1); + + if (tv_cmd->tvc_sgl_count) { + u32 i; + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + + kfree(tv_cmd->tvc_sgl); + } + + kfree(tv_cmd); +} + +/* Dequeue a command from the completion list */ +static struct tcm_vhost_cmd *vhost_scsi_get_cmd_from_completion( + struct vhost_scsi *vs) +{ + struct tcm_vhost_cmd *tv_cmd = NULL; + + spin_lock_bh(&vs->vs_completion_lock); + if (list_empty(&vs->vs_completion_list)) { + spin_unlock_bh(&vs->vs_completion_lock); + return NULL; + } + + list_for_each_entry(tv_cmd, &vs->vs_completion_list, + tvc_completion_list) { + list_del(&tv_cmd->tvc_completion_list); + break; + } + spin_unlock_bh(&vs->vs_completion_lock); + return tv_cmd; +} + +/* Fill in status and signal that we are done processing this command + * + * This is scheduled in the vhost work queue so we are called with the owner + * process mm and can access the vring. + */ +static void vhost_scsi_complete_cmd_work(struct vhost_work *work) +{ + struct vhost_scsi *vs = container_of(work, struct vhost_scsi, + vs_completion_work); + struct tcm_vhost_cmd *tv_cmd; + + while ((tv_cmd = vhost_scsi_get_cmd_from_completion(vs))) { + struct virtio_scsi_cmd_resp v_rsp; + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + int ret; + + pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, + tv_cmd, se_cmd->residual_count, se_cmd->scsi_status); + + memset(&v_rsp, 0, sizeof(v_rsp)); + v_rsp.resid = se_cmd->residual_count; + /* TODO is status_qualifier field needed? 
*/ + v_rsp.status = se_cmd->scsi_status; + v_rsp.sense_len = se_cmd->scsi_sense_length; + memcpy(v_rsp.sense, tv_cmd->tvc_sense_buf, + v_rsp.sense_len); + ret = copy_to_user(tv_cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); + if (likely(ret == 0)) + vhost_add_used(&vs->vqs[2], tv_cmd->tvc_vq_desc, 0); + else + pr_err("Faulted on virtio_scsi_cmd_resp\n"); + + vhost_scsi_free_cmd(tv_cmd); + } + + vhost_signal(&vs->dev, &vs->vqs[2]); +} + +static struct tcm_vhost_cmd *vhost_scsi_allocate_cmd( + struct tcm_vhost_tpg *tv_tpg, + struct virtio_scsi_cmd_req *v_req, + u32 exp_data_len, + int data_direction) +{ + struct tcm_vhost_cmd *tv_cmd; + struct tcm_vhost_nexus *tv_nexus; + struct se_portal_group *se_tpg = &tv_tpg->se_tpg; + struct se_session *se_sess; + struct se_cmd *se_cmd; + int sam_task_attr; + + tv_nexus = tv_tpg->tpg_nexus; + if (!tv_nexus) { + pr_err("Unable to locate active struct tcm_vhost_nexus\n"); + return ERR_PTR(-EIO); + } + se_sess = tv_nexus->tvn_se_sess; + + tv_cmd = kzalloc(sizeof(struct tcm_vhost_cmd), GFP_ATOMIC); + if (!tv_cmd) { + pr_err("Unable to allocate struct tcm_vhost_cmd\n"); + return ERR_PTR(-ENOMEM); + } + INIT_LIST_HEAD(&tv_cmd->tvc_completion_list); + tv_cmd->tvc_tag = v_req->tag; + + se_cmd = &tv_cmd->tvc_se_cmd; + /* + * Locate the SAM Task Attr from virtio_scsi_cmd_req + */ + sam_task_attr = v_req->task_attr; + /* + * Initialize struct se_cmd descriptor from TCM infrastructure + */ + transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, exp_data_len, + data_direction, sam_task_attr, + &tv_cmd->tvc_sense_buf[0]); + +#if 0 /* FIXME: vhost_scsi_allocate_cmd() BIDI operation */ + if (bidi) + se_cmd->se_cmd_flags |= SCF_BIDI; +#endif + return tv_cmd; +} + +/* + * Map a user memory range into a scatterlist + * + * Returns the number of scatterlist entries used or -errno on error. 
+ */ +static int vhost_scsi_map_to_sgl(struct scatterlist *sgl, + unsigned int sgl_count, void __user *ptr, size_t len, int write) +{ + struct scatterlist *sg = sgl; + unsigned int npages = 0; + int ret; + + while (len > 0) { + struct page *page; + unsigned int offset = (uintptr_t)ptr & ~PAGE_MASK; + unsigned int nbytes = min_t(unsigned int, + PAGE_SIZE - offset, len); + + if (npages == sgl_count) { + ret = -ENOBUFS; + goto err; + } + + ret = get_user_pages_fast((unsigned long)ptr, 1, write, &page); + BUG_ON(ret == 0); /* we should either get our page or fail */ + if (ret < 0) + goto err; + + sg_set_page(sg, page, nbytes, offset); + ptr += nbytes; + len -= nbytes; + sg++; + npages++; + } + return npages; + +err: + /* Put pages that we hold */ + for (sg = sgl; sg != &sgl[npages]; sg++) + put_page(sg_page(sg)); + return ret; +} + +static int vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *tv_cmd, + struct iovec *iov, unsigned int niov, int write) +{ + int ret; + unsigned int i; + u32 sgl_count; + struct scatterlist *sg; + + /* + * Find out how long sglist needs to be + */ + sgl_count = 0; + for (i = 0; i < niov; i++) { + sgl_count += (((uintptr_t)iov[i].iov_base + iov[i].iov_len + + PAGE_SIZE - 1) >> PAGE_SHIFT) - + ((uintptr_t)iov[i].iov_base >> PAGE_SHIFT); + } + /* TODO overflow checking */ + + sg = kmalloc(sizeof(tv_cmd->tvc_sgl[0]) * sgl_count, GFP_ATOMIC); + if (!sg) + return -ENOMEM; + pr_debug("%s sg %p sgl_count %u is_err %d\n", __func__, + sg, sgl_count, !sg); + sg_init_table(sg, sgl_count); + + tv_cmd->tvc_sgl = sg; + tv_cmd->tvc_sgl_count = sgl_count; + + pr_debug("Mapping %u iovecs for %u pages\n", niov, sgl_count); + for (i = 0; i < niov; i++) { + ret = vhost_scsi_map_to_sgl(sg, sgl_count, iov[i].iov_base, + iov[i].iov_len, write); + if (ret < 0) { + for (i = 0; i < tv_cmd->tvc_sgl_count; i++) + put_page(sg_page(&tv_cmd->tvc_sgl[i])); + kfree(tv_cmd->tvc_sgl); + tv_cmd->tvc_sgl = NULL; + tv_cmd->tvc_sgl_count = 0; + return ret; + } + + sg += ret; + sgl_count -= ret; + } + return 0; +} + +static void tcm_vhost_submission_work(struct work_struct *work) +{ + struct tcm_vhost_cmd *tv_cmd = + container_of(work, struct tcm_vhost_cmd, work); + struct se_cmd *se_cmd = &tv_cmd->tvc_se_cmd; + struct scatterlist *sg_ptr, *sg_bidi_ptr = NULL; + int rc, sg_no_bidi = 0; + /* + * Locate the struct se_lun pointer based on v_req->lun, and + * attach it to struct se_cmd + */ + rc = transport_lookup_cmd_lun(&tv_cmd->tvc_se_cmd, tv_cmd->tvc_lun); + if (rc < 0) { + pr_err("Failed to look up lun: %d\n", tv_cmd->tvc_lun); + transport_send_check_condition_and_sense(&tv_cmd->tvc_se_cmd, + tv_cmd->tvc_se_cmd.scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + + rc = target_setup_cmd_from_cdb(se_cmd, tv_cmd->tvc_cdb); + if (rc == -ENOMEM) { + transport_send_check_condition_and_sense(se_cmd, + TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } else if (rc < 0) { + if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT) + tcm_vhost_queue_status(se_cmd); + else + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + + if (tv_cmd->tvc_sgl_count) { + sg_ptr = tv_cmd->tvc_sgl; + /* + * For BIDI commands, pass in the extra READ buffer + * to transport_generic_map_mem_to_cmd() below.. 
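The sgl sizing above counts, for each iovec, how many pages the range [iov_base, iov_base + iov_len) touches: the rounded-up page index just past the last byte minus the page index of the first byte. A standalone check of that arithmetic, with a hard-coded 4 KiB page size standing in for the kernel's PAGE_SHIFT/PAGE_SIZE:

/* Pages touched by a user buffer, mirroring the sgl_count arithmetic above.
 * The page size is fixed here purely for illustration. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned long pages_spanned(uintptr_t base, size_t len)
{
	return ((base + len + PAGE_SIZE - 1) >> PAGE_SHIFT) - (base >> PAGE_SHIFT);
}

int main(void)
{
	/* 32 bytes straddling a page boundary -> 2 pages */
	printf("%lu\n", pages_spanned(0x1ff0, 32));
	/* one full aligned page -> 1 page */
	printf("%lu\n", pages_spanned(0x2000, PAGE_SIZE));
	/* 3 bytes in the middle of a page -> 1 page */
	printf("%lu\n", pages_spanned(0x2100, 3));
	return 0;
}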
+ */ +/* FIXME: Fix BIDI operation in tcm_vhost_submission_work() */ +#if 0 + if (se_cmd->se_cmd_flags & SCF_BIDI) { + sg_bidi_ptr = NULL; + sg_no_bidi = 0; + } +#endif + } else { + sg_ptr = NULL; + } + + rc = transport_generic_map_mem_to_cmd(se_cmd, sg_ptr, + tv_cmd->tvc_sgl_count, sg_bidi_ptr, + sg_no_bidi); + if (rc < 0) { + transport_send_check_condition_and_sense(se_cmd, + se_cmd->scsi_sense_reason, 0); + transport_generic_free_cmd(se_cmd, 0); + return; + } + transport_handle_cdb_direct(se_cmd); +} + +static void vhost_scsi_handle_vq(struct vhost_scsi *vs) +{ + struct vhost_virtqueue *vq = &vs->vqs[2]; + struct virtio_scsi_cmd_req v_req; + struct tcm_vhost_tpg *tv_tpg; + struct tcm_vhost_cmd *tv_cmd; + u32 exp_data_len, data_first, data_num, data_direction; + unsigned out, in, i; + int head, ret; + + /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */ + tv_tpg = vs->vs_tpg; + if (unlikely(!tv_tpg)) { + pr_err("%s endpoint not set\n", __func__); + return; + } + + mutex_lock(&vq->mutex); + vhost_disable_notify(&vs->dev, vq); + + for (;;) { + head = vhost_get_vq_desc(&vs->dev, vq, vq->iov, + ARRAY_SIZE(vq->iov), &out, &in, + NULL, NULL); + pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", + head, out, in); + /* On error, stop handling until the next kick. */ + if (unlikely(head < 0)) + break; + /* Nothing new? Wait for eventfd to tell us they refilled. */ + if (head == vq->num) { + if (unlikely(vhost_enable_notify(&vs->dev, vq))) { + vhost_disable_notify(&vs->dev, vq); + continue; + } + break; + } + +/* FIXME: BIDI operation */ + if (out == 1 && in == 1) { + data_direction = DMA_NONE; + data_first = 0; + data_num = 0; + } else if (out == 1 && in > 1) { + data_direction = DMA_FROM_DEVICE; + data_first = out + 1; + data_num = in - 1; + } else if (out > 1 && in == 1) { + data_direction = DMA_TO_DEVICE; + data_first = 1; + data_num = out - 1; + } else { + vq_err(vq, "Invalid buffer layout out: %u in: %u\n", + out, in); + break; + } + + /* + * Check for a sane resp buffer so we can report errors to + * the guest. 
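The descriptor-count test above classifies each request by how many device-readable (out) and device-writable (in) buffers the guest queued: one of each means no data phase, extra writable buffers carry read data, extra readable buffers carry write data, and anything else (e.g. BIDI) is rejected for now. A compact restatement of that decision table; the enum and helper name are illustrative, not vhost API:

/* Classify a virtio-scsi request by its descriptor counts, as the handler
 * above does: cmd + resp only means no data phase, extra writable buffers
 * carry read data, extra readable buffers carry write data. */
#include <stdio.h>

enum data_dir { DIR_NONE, DIR_FROM_DEVICE, DIR_TO_DEVICE, DIR_INVALID };

static enum data_dir classify(unsigned int out, unsigned int in)
{
	if (out == 1 && in == 1)
		return DIR_NONE;		/* command + response only */
	if (out == 1 && in > 1)
		return DIR_FROM_DEVICE;		/* guest read: payload is device-writable */
	if (out > 1 && in == 1)
		return DIR_TO_DEVICE;		/* guest write: payload is device-readable */
	return DIR_INVALID;			/* e.g. BIDI, unsupported here */
}

int main(void)
{
	printf("%d %d %d %d\n",
	       classify(1, 1),	/* 0: no data */
	       classify(1, 9),	/* 1: read    */
	       classify(9, 1),	/* 2: write   */
	       classify(2, 2));	/* 3: invalid */
	return 0;
}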
+ */ + if (unlikely(vq->iov[out].iov_len != + sizeof(struct virtio_scsi_cmd_resp))) { + vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" + " bytes\n", vq->iov[out].iov_len); + break; + } + + if (unlikely(vq->iov[0].iov_len != sizeof(v_req))) { + vq_err(vq, "Expecting virtio_scsi_cmd_req, got %zu" + " bytes\n", vq->iov[0].iov_len); + break; + } + pr_debug("Calling __copy_from_user: vq->iov[0].iov_base: %p," + " len: %zu\n", vq->iov[0].iov_base, sizeof(v_req)); + ret = __copy_from_user(&v_req, vq->iov[0].iov_base, + sizeof(v_req)); + if (unlikely(ret)) { + vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); + break; + } + + exp_data_len = 0; + for (i = 0; i < data_num; i++) + exp_data_len += vq->iov[data_first + i].iov_len; + + tv_cmd = vhost_scsi_allocate_cmd(tv_tpg, &v_req, + exp_data_len, data_direction); + if (IS_ERR(tv_cmd)) { + vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n", + PTR_ERR(tv_cmd)); + break; + } + pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" + ": %d\n", tv_cmd, exp_data_len, data_direction); + + tv_cmd->tvc_vhost = vs; + + if (unlikely(vq->iov[out].iov_len != + sizeof(struct virtio_scsi_cmd_resp))) { + vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" + " bytes, out: %d, in: %d\n", + vq->iov[out].iov_len, out, in); + break; + } + + tv_cmd->tvc_resp = vq->iov[out].iov_base; + + /* + * Copy in the recieved CDB descriptor into tv_cmd->tvc_cdb + * that will be used by tcm_vhost_new_cmd_map() and down into + * target_setup_cmd_from_cdb() + */ + memcpy(tv_cmd->tvc_cdb, v_req.cdb, TCM_VHOST_MAX_CDB_SIZE); + /* + * Check that the recieved CDB size does not exceeded our + * hardcoded max for tcm_vhost + */ + /* TODO what if cdb was too small for varlen cdb header? */ + if (unlikely(scsi_command_size(tv_cmd->tvc_cdb) > + TCM_VHOST_MAX_CDB_SIZE)) { + vq_err(vq, "Received SCSI CDB with command_size: %d that" + " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", + scsi_command_size(tv_cmd->tvc_cdb), + TCM_VHOST_MAX_CDB_SIZE); + break; /* TODO */ + } + tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; + + pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", + tv_cmd->tvc_cdb[0], tv_cmd->tvc_lun); + + if (data_direction != DMA_NONE) { + ret = vhost_scsi_map_iov_to_sgl(tv_cmd, + &vq->iov[data_first], data_num, + data_direction == DMA_TO_DEVICE); + if (unlikely(ret)) { + vq_err(vq, "Failed to map iov to sgl\n"); + break; /* TODO */ + } + } + + /* + * Save the descriptor from vhost_get_vq_desc() to be used to + * complete the virtio-scsi request in TCM callback context via + * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() + */ + tv_cmd->tvc_vq_desc = head; + /* + * Dispatch tv_cmd descriptor for cmwq execution in process + * context provided by tcm_vhost_workqueue. This also ensures + * tv_cmd is executed on the same kworker CPU as this vhost + * thread to gain positive L2 cache locality effects.. 
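The LUN extraction above takes bytes 2 and 3 of the 8-byte virtio-scsi lun field as a big-endian value and masks it to 14 bits, stripping the SAM flat-addressing flag that virtio-scsi sets for single-level LUNs. A quick standalone check of that unpacking; only the lun bytes are modelled here, not the full virtio_scsi_cmd_req layout:

/* Unpack a single-level LUN the way the handler above does:
 * big-endian bytes 2..3, masked to 14 bits. */
#include <stdint.h>
#include <stdio.h>

static uint16_t unpack_virtio_scsi_lun(const uint8_t lun[8])
{
	return (uint16_t)(((lun[2] << 8) | lun[3]) & 0x3FFF);
}

int main(void)
{
	/* Flat addressing of LUN 5: bytes 2..3 carry 0x4005. */
	uint8_t lun[8] = { 1, 0, 0x40, 0x05, 0, 0, 0, 0 };

	printf("lun=%u\n", unpack_virtio_scsi_lun(lun));	/* prints lun=5 */
	return 0;
}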
+ */ + INIT_WORK(&tv_cmd->work, tcm_vhost_submission_work); + queue_work(tcm_vhost_workqueue, &tv_cmd->work); + } + + mutex_unlock(&vq->mutex); +} + +static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) +{ + pr_debug("%s: The handling func for control queue.\n", __func__); +} + +static void vhost_scsi_evt_handle_kick(struct vhost_work *work) +{ + pr_debug("%s: The handling func for event queue.\n", __func__); +} + +static void vhost_scsi_handle_kick(struct vhost_work *work) +{ + struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue, + poll.work); + struct vhost_scsi *vs = container_of(vq->dev, struct vhost_scsi, dev); + + vhost_scsi_handle_vq(vs); +} + +/* + * Called from vhost_scsi_ioctl() context to walk the list of available + * tcm_vhost_tpg with an active struct tcm_vhost_nexus + */ +static int vhost_scsi_set_endpoint( + struct vhost_scsi *vs, + struct vhost_scsi_target *t) +{ + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tv_tpg; + int index; + + mutex_lock(&vs->dev.mutex); + /* Verify that ring has been setup correctly. */ + for (index = 0; index < vs->dev.nvqs; ++index) { + /* Verify that ring has been setup correctly. */ + if (!vhost_vq_access_ok(&vs->vqs[index])) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + } + mutex_unlock(&vs->dev.mutex); + + mutex_lock(&tcm_vhost_mutex); + list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) { + mutex_lock(&tv_tpg->tv_tpg_mutex); + if (!tv_tpg->tpg_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + continue; + } + if (tv_tpg->tv_tpg_vhost_count != 0) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + continue; + } + tv_tport = tv_tpg->tport; + + if (!strcmp(tv_tport->tport_name, t->vhost_wwpn) && + (tv_tpg->tport_tpgt == t->vhost_tpgt)) { + tv_tpg->tv_tpg_vhost_count++; + mutex_unlock(&tv_tpg->tv_tpg_mutex); + mutex_unlock(&tcm_vhost_mutex); + + mutex_lock(&vs->dev.mutex); + if (vs->vs_tpg) { + mutex_unlock(&vs->dev.mutex); + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_tpg->tv_tpg_vhost_count--; + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return -EEXIST; + } + + vs->vs_tpg = tv_tpg; + smp_mb__after_atomic_inc(); + mutex_unlock(&vs->dev.mutex); + return 0; + } + mutex_unlock(&tv_tpg->tv_tpg_mutex); + } + mutex_unlock(&tcm_vhost_mutex); + return -EINVAL; +} + +static int vhost_scsi_clear_endpoint( + struct vhost_scsi *vs, + struct vhost_scsi_target *t) +{ + struct tcm_vhost_tport *tv_tport; + struct tcm_vhost_tpg *tv_tpg; + int index, ret; + + mutex_lock(&vs->dev.mutex); + /* Verify that ring has been setup correctly. 
*/ + for (index = 0; index < vs->dev.nvqs; ++index) { + if (!vhost_vq_access_ok(&vs->vqs[index])) { + ret = -EFAULT; + goto err; + } + } + + if (!vs->vs_tpg) { + ret = -ENODEV; + goto err; + } + tv_tpg = vs->vs_tpg; + tv_tport = tv_tpg->tport; + + if (strcmp(tv_tport->tport_name, t->vhost_wwpn) || + (tv_tpg->tport_tpgt != t->vhost_tpgt)) { + pr_warn("tv_tport->tport_name: %s, tv_tpg->tport_tpgt: %hu" + " does not match t->vhost_wwpn: %s, t->vhost_tpgt: %hu\n", + tv_tport->tport_name, tv_tpg->tport_tpgt, + t->vhost_wwpn, t->vhost_tpgt); + ret = -EINVAL; + goto err; + } + tv_tpg->tv_tpg_vhost_count--; + vs->vs_tpg = NULL; + mutex_unlock(&vs->dev.mutex); + + return 0; + +err: + mutex_unlock(&vs->dev.mutex); + return ret; +} + +static int vhost_scsi_open(struct inode *inode, struct file *f) +{ + struct vhost_scsi *s; + int r; + + s = kzalloc(sizeof(*s), GFP_KERNEL); + if (!s) + return -ENOMEM; + + vhost_work_init(&s->vs_completion_work, vhost_scsi_complete_cmd_work); + INIT_LIST_HEAD(&s->vs_completion_list); + spin_lock_init(&s->vs_completion_lock); + + s->vqs[VHOST_SCSI_VQ_CTL].handle_kick = vhost_scsi_ctl_handle_kick; + s->vqs[VHOST_SCSI_VQ_EVT].handle_kick = vhost_scsi_evt_handle_kick; + s->vqs[VHOST_SCSI_VQ_IO].handle_kick = vhost_scsi_handle_kick; + r = vhost_dev_init(&s->dev, s->vqs, 3); + if (r < 0) { + kfree(s); + return r; + } + + f->private_data = s; + return 0; +} + +static int vhost_scsi_release(struct inode *inode, struct file *f) +{ + struct vhost_scsi *s = f->private_data; + + if (s->vs_tpg && s->vs_tpg->tport) { + struct vhost_scsi_target backend; + + memcpy(backend.vhost_wwpn, s->vs_tpg->tport->tport_name, + sizeof(backend.vhost_wwpn)); + backend.vhost_tpgt = s->vs_tpg->tport_tpgt; + vhost_scsi_clear_endpoint(s, &backend); + } + + vhost_dev_cleanup(&s->dev, false); + kfree(s); + return 0; +} + +static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index) +{ + vhost_poll_flush(&vs->dev.vqs[index].poll); +} + +static void vhost_scsi_flush(struct vhost_scsi *vs) +{ + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_CTL); + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_EVT); + vhost_scsi_flush_vq(vs, VHOST_SCSI_VQ_IO); +} + +static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features) +{ + if (features & ~VHOST_FEATURES) + return -EOPNOTSUPP; + + mutex_lock(&vs->dev.mutex); + if ((features & (1 << VHOST_F_LOG_ALL)) && + !vhost_log_access_ok(&vs->dev)) { + mutex_unlock(&vs->dev.mutex); + return -EFAULT; + } + vs->dev.acked_features = features; + smp_wmb(); + vhost_scsi_flush(vs); + mutex_unlock(&vs->dev.mutex); + return 0; +} + +static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + struct vhost_scsi *vs = f->private_data; + struct vhost_scsi_target backend; + void __user *argp = (void __user *)arg; + u64 __user *featurep = argp; + u64 features; + int r, abi_version = VHOST_SCSI_ABI_VERSION; + + switch (ioctl) { + case VHOST_SCSI_SET_ENDPOINT: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + if (backend.reserved != 0) + return -EOPNOTSUPP; + + return vhost_scsi_set_endpoint(vs, &backend); + case VHOST_SCSI_CLEAR_ENDPOINT: + if (copy_from_user(&backend, argp, sizeof backend)) + return -EFAULT; + if (backend.reserved != 0) + return -EOPNOTSUPP; + + return vhost_scsi_clear_endpoint(vs, &backend); + case VHOST_SCSI_GET_ABI_VERSION: + if (copy_to_user(argp, &abi_version, sizeof abi_version)) + return -EFAULT; + return 0; + case VHOST_GET_FEATURES: + features = VHOST_FEATURES; + if (copy_to_user(featurep, &features, sizeof 
features)) + return -EFAULT; + return 0; + case VHOST_SET_FEATURES: + if (copy_from_user(&features, featurep, sizeof features)) + return -EFAULT; + return vhost_scsi_set_features(vs, features); + default: + mutex_lock(&vs->dev.mutex); + r = vhost_dev_ioctl(&vs->dev, ioctl, arg); + mutex_unlock(&vs->dev.mutex); + return r; + } +} + +#ifdef CONFIG_COMPAT +static long vhost_scsi_compat_ioctl(struct file *f, unsigned int ioctl, + unsigned long arg) +{ + return vhost_scsi_ioctl(f, ioctl, (unsigned long)compat_ptr(arg)); +} +#endif + +static const struct file_operations vhost_scsi_fops = { + .owner = THIS_MODULE, + .release = vhost_scsi_release, + .unlocked_ioctl = vhost_scsi_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = vhost_scsi_compat_ioctl, +#endif + .open = vhost_scsi_open, + .llseek = noop_llseek, +}; + +static struct miscdevice vhost_scsi_misc = { + MISC_DYNAMIC_MINOR, + "vhost-scsi", + &vhost_scsi_fops, +}; + +static int __init vhost_scsi_register(void) +{ + return misc_register(&vhost_scsi_misc); +} + +static int vhost_scsi_deregister(void) +{ + return misc_deregister(&vhost_scsi_misc); +} + +static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) +{ + switch (tport->tport_proto_id) { + case SCSI_PROTOCOL_SAS: + return "SAS"; + case SCSI_PROTOCOL_FCP: + return "FCP"; + case SCSI_PROTOCOL_ISCSI: + return "iSCSI"; + default: + break; + } + + return "Unknown"; +} + +static int tcm_vhost_port_link(struct se_portal_group *se_tpg, + struct se_lun *lun) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_tpg->tv_tpg_port_count++; + mutex_unlock(&tv_tpg->tv_tpg_mutex); + + return 0; +} + +static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, + struct se_lun *se_lun) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_tpg->tv_tpg_port_count--; + mutex_unlock(&tv_tpg->tv_tpg_mutex); +} + +static struct se_node_acl *tcm_vhost_make_nodeacl( + struct se_portal_group *se_tpg, + struct config_group *group, + const char *name) +{ + struct se_node_acl *se_nacl, *se_nacl_new; + struct tcm_vhost_nacl *nacl; + u64 wwpn = 0; + u32 nexus_depth; + + /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); */ + se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); + if (!se_nacl_new) + return ERR_PTR(-ENOMEM); + + nexus_depth = 1; + /* + * se_nacl_new may be released by core_tpg_add_initiator_node_acl() + * when converting a NodeACL from demo mode -> explict + */ + se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, + name, nexus_depth); + if (IS_ERR(se_nacl)) { + tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); + return se_nacl; + } + /* + * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN + */ + nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); + nacl->iport_wwpn = wwpn; + + return se_nacl; +} + +static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) +{ + struct tcm_vhost_nacl *nacl = container_of(se_acl, + struct tcm_vhost_nacl, se_node_acl); + core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); + kfree(nacl); +} + +static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tv_tpg, + const char *name) +{ + struct se_portal_group *se_tpg; + struct tcm_vhost_nexus *tv_nexus; + + mutex_lock(&tv_tpg->tv_tpg_mutex); + if (tv_tpg->tpg_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_debug("tv_tpg->tpg_nexus already exists\n"); + return -EEXIST; + } + se_tpg = 
&tv_tpg->se_tpg; + + tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); + if (!tv_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_err("Unable to allocate struct tcm_vhost_nexus\n"); + return -ENOMEM; + } + /* + * Initialize the struct se_session pointer + */ + tv_nexus->tvn_se_sess = transport_init_session(); + if (IS_ERR(tv_nexus->tvn_se_sess)) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + kfree(tv_nexus); + return -ENOMEM; + } + /* + * Since we are running in 'demo mode' this call with generate a + * struct se_node_acl for the tcm_vhost struct se_portal_group with + * the SCSI Initiator port name of the passed configfs group 'name'. + */ + tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( + se_tpg, (unsigned char *)name); + if (!tv_nexus->tvn_se_sess->se_node_acl) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + pr_debug("core_tpg_check_initiator_node_acl() failed" + " for %s\n", name); + transport_free_session(tv_nexus->tvn_se_sess); + kfree(tv_nexus); + return -ENOMEM; + } + /* + * Now register the TCM vhost virtual I_T Nexus as active with the + * call to __transport_register_session() + */ + __transport_register_session(se_tpg, tv_nexus->tvn_se_sess->se_node_acl, + tv_nexus->tvn_se_sess, tv_nexus); + tv_tpg->tpg_nexus = tv_nexus; + + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return 0; +} + +static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) +{ + struct se_session *se_sess; + struct tcm_vhost_nexus *tv_nexus; + + mutex_lock(&tpg->tv_tpg_mutex); + tv_nexus = tpg->tpg_nexus; + if (!tv_nexus) { + mutex_unlock(&tpg->tv_tpg_mutex); + return -ENODEV; + } + + se_sess = tv_nexus->tvn_se_sess; + if (!se_sess) { + mutex_unlock(&tpg->tv_tpg_mutex); + return -ENODEV; + } + + if (tpg->tv_tpg_port_count != 0) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to remove TCM_vhost I_T Nexus with" + " active TPG port count: %d\n", + tpg->tv_tpg_port_count); + return -EBUSY; + } + + if (tpg->tv_tpg_vhost_count != 0) { + mutex_unlock(&tpg->tv_tpg_mutex); + pr_err("Unable to remove TCM_vhost I_T Nexus with" + " active TPG vhost count: %d\n", + tpg->tv_tpg_vhost_count); + return -EBUSY; + } + + pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" + " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), + tv_nexus->tvn_se_sess->se_node_acl->initiatorname); + /* + * Release the SCSI I_T Nexus to the emulated vhost Target Port + */ + transport_deregister_session(tv_nexus->tvn_se_sess); + tpg->tpg_nexus = NULL; + mutex_unlock(&tpg->tv_tpg_mutex); + + kfree(tv_nexus); + return 0; +} + +static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, + char *page) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_nexus *tv_nexus; + ssize_t ret; + + mutex_lock(&tv_tpg->tv_tpg_mutex); + tv_nexus = tv_tpg->tpg_nexus; + if (!tv_nexus) { + mutex_unlock(&tv_tpg->tv_tpg_mutex); + return -ENODEV; + } + ret = snprintf(page, PAGE_SIZE, "%s\n", + tv_nexus->tvn_se_sess->se_node_acl->initiatorname); + mutex_unlock(&tv_tpg->tv_tpg_mutex); + + return ret; +} + +static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, + const char *page, + size_t count) +{ + struct tcm_vhost_tpg *tv_tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + struct tcm_vhost_tport *tport_wwn = tv_tpg->tport; + unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; + int ret; + /* + * Shutdown the active I_T nexus if 'NULL' is passed.. 
+ */ + if (!strncmp(page, "NULL", 4)) { + ret = tcm_vhost_drop_nexus(tv_tpg); + return (!ret) ? count : ret; + } + /* + * Otherwise make sure the passed virtual Initiator port WWN matches + * the fabric protocol_id set in tcm_vhost_make_tport(), and call + * tcm_vhost_make_nexus(). + */ + if (strlen(page) >= TCM_VHOST_NAMELEN) { + pr_err("Emulated NAA Sas Address: %s, exceeds" + " max: %d\n", page, TCM_VHOST_NAMELEN); + return -EINVAL; + } + snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); + + ptr = strstr(i_port, "naa."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { + pr_err("Passed SAS Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + ptr = strstr(i_port, "fc."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { + pr_err("Passed FCP Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[3]; /* Skip over "fc." */ + goto check_newline; + } + ptr = strstr(i_port, "iqn."); + if (ptr) { + if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { + pr_err("Passed iSCSI Initiator Port %s does not" + " match target port protoid: %s\n", i_port, + tcm_vhost_dump_proto_id(tport_wwn)); + return -EINVAL; + } + port_ptr = &i_port[0]; + goto check_newline; + } + pr_err("Unable to locate prefix for emulated Initiator Port:" + " %s\n", i_port); + return -EINVAL; + /* + * Clear any trailing newline for the NAA WWN + */ +check_newline: + if (i_port[strlen(i_port)-1] == '\n') + i_port[strlen(i_port)-1] = '\0'; + + ret = tcm_vhost_make_nexus(tv_tpg, port_ptr); + if (ret < 0) + return ret; + + return count; +} + +TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); + +static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { + &tcm_vhost_tpg_nexus.attr, + NULL, +}; + +static struct se_portal_group *tcm_vhost_make_tpg(struct se_wwn *wwn, + struct config_group *group, + const char *name) +{ + struct tcm_vhost_tport *tport = container_of(wwn, + struct tcm_vhost_tport, tport_wwn); + + struct tcm_vhost_tpg *tpg; + unsigned long tpgt; + int ret; + + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + return ERR_PTR(-EINVAL); + + tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); + if (!tpg) { + pr_err("Unable to allocate struct tcm_vhost_tpg"); + return ERR_PTR(-ENOMEM); + } + mutex_init(&tpg->tv_tpg_mutex); + INIT_LIST_HEAD(&tpg->tv_tpg_list); + tpg->tport = tport; + tpg->tport_tpgt = tpgt; + + ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, + &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); + if (ret < 0) { + kfree(tpg); + return NULL; + } + mutex_lock(&tcm_vhost_mutex); + list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); + mutex_unlock(&tcm_vhost_mutex); + + return &tpg->se_tpg; +} + +static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) +{ + struct tcm_vhost_tpg *tpg = container_of(se_tpg, + struct tcm_vhost_tpg, se_tpg); + + mutex_lock(&tcm_vhost_mutex); + list_del(&tpg->tv_tpg_list); + mutex_unlock(&tcm_vhost_mutex); + /* + * Release the virtual I_T Nexus for this vhost TPG + */ + tcm_vhost_drop_nexus(tpg); + /* + * Deregister the se_tpg from TCM.. 
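The configfs store above, and tcm_vhost_make_tport() below, both key the emulated protocol off the prefix of the user-supplied name ("naa.", "fc.", "iqn.") and strip the trailing newline that echo leaves behind. A simplified userspace sketch of that classification; the kernel code additionally cross-checks the prefix against the tport's protocol id, and the enum and helper name here are illustrative:

/* Map a configfs-supplied endpoint name to a SCSI protocol by its prefix
 * and strip the trailing newline from a shell write. Simplified sketch of
 * the nexus/tport parsing, not the in-tree code. */
#include <stdio.h>
#include <string.h>

enum proto { PROTO_SAS, PROTO_FCP, PROTO_ISCSI, PROTO_UNKNOWN };

static enum proto classify_wwn(char *name)
{
	size_t len = strlen(name);

	if (len && name[len - 1] == '\n')	/* configfs writes usually keep the '\n' */
		name[len - 1] = '\0';

	if (!strncmp(name, "naa.", 4))
		return PROTO_SAS;
	if (!strncmp(name, "fc.", 3))
		return PROTO_FCP;
	if (!strncmp(name, "iqn.", 4))
		return PROTO_ISCSI;
	return PROTO_UNKNOWN;
}

int main(void)
{
	char sas[]   = "naa.600140554cf3a18e\n";
	char iscsi[] = "iqn.1994-05.com.example:host\n";

	printf("%d %d\n", classify_wwn(sas), classify_wwn(iscsi));	/* 0 2 */
	return 0;
}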
+ */ + core_tpg_deregister(se_tpg); + kfree(tpg); +} + +static struct se_wwn *tcm_vhost_make_tport(struct target_fabric_configfs *tf, + struct config_group *group, + const char *name) +{ + struct tcm_vhost_tport *tport; + char *ptr; + u64 wwpn = 0; + int off = 0; + + /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + return ERR_PTR(-EINVAL); */ + + tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); + if (!tport) { + pr_err("Unable to allocate struct tcm_vhost_tport"); + return ERR_PTR(-ENOMEM); + } + tport->tport_wwpn = wwpn; + /* + * Determine the emulated Protocol Identifier and Target Port Name + * based on the incoming configfs directory name. + */ + ptr = strstr(name, "naa."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_SAS; + goto check_len; + } + ptr = strstr(name, "fc."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_FCP; + off = 3; /* Skip over "fc." */ + goto check_len; + } + ptr = strstr(name, "iqn."); + if (ptr) { + tport->tport_proto_id = SCSI_PROTOCOL_ISCSI; + goto check_len; + } + + pr_err("Unable to locate prefix for emulated Target Port:" + " %s\n", name); + kfree(tport); + return ERR_PTR(-EINVAL); + +check_len: + if (strlen(name) >= TCM_VHOST_NAMELEN) { + pr_err("Emulated %s Address: %s, exceeds" + " max: %d\n", name, tcm_vhost_dump_proto_id(tport), + TCM_VHOST_NAMELEN); + kfree(tport); + return ERR_PTR(-EINVAL); + } + snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); + + pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" + " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); + + return &tport->tport_wwn; +} + +static void tcm_vhost_drop_tport(struct se_wwn *wwn) +{ + struct tcm_vhost_tport *tport = container_of(wwn, + struct tcm_vhost_tport, tport_wwn); + + pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" + " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), + tport->tport_name); + + kfree(tport); +} + +static ssize_t tcm_vhost_wwn_show_attr_version( + struct target_fabric_configfs *tf, + char *page) +{ + return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" + "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + utsname()->machine); +} + +TF_WWN_ATTR_RO(tcm_vhost, version); + +static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { + &tcm_vhost_wwn_version.attr, + NULL, +}; + +static struct target_core_fabric_ops tcm_vhost_ops = { + .get_fabric_name = tcm_vhost_get_fabric_name, + .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, + .tpg_get_wwn = tcm_vhost_get_fabric_wwn, + .tpg_get_tag = tcm_vhost_get_tag, + .tpg_get_default_depth = tcm_vhost_get_default_depth, + .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, + .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, + .tpg_check_demo_mode = tcm_vhost_check_true, + .tpg_check_demo_mode_cache = tcm_vhost_check_true, + .tpg_check_demo_mode_write_protect = tcm_vhost_check_false, + .tpg_check_prod_mode_write_protect = tcm_vhost_check_false, + .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, + .tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, + .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, + .release_cmd = tcm_vhost_release_cmd, + .shutdown_session = tcm_vhost_shutdown_session, + .close_session = tcm_vhost_close_session, + .sess_get_index = tcm_vhost_sess_get_index, + .sess_get_initiator_sid = NULL, + .write_pending = tcm_vhost_write_pending, + .write_pending_status = tcm_vhost_write_pending_status, + 
.set_default_node_attributes = tcm_vhost_set_default_node_attrs, + .get_task_tag = tcm_vhost_get_task_tag, + .get_cmd_state = tcm_vhost_get_cmd_state, + .queue_data_in = tcm_vhost_queue_data_in, + .queue_status = tcm_vhost_queue_status, + .queue_tm_rsp = tcm_vhost_queue_tm_rsp, + .get_fabric_sense_len = tcm_vhost_get_fabric_sense_len, + .set_fabric_sense_len = tcm_vhost_set_fabric_sense_len, + /* + * Setup callers for generic logic in target_core_fabric_configfs.c + */ + .fabric_make_wwn = tcm_vhost_make_tport, + .fabric_drop_wwn = tcm_vhost_drop_tport, + .fabric_make_tpg = tcm_vhost_make_tpg, + .fabric_drop_tpg = tcm_vhost_drop_tpg, + .fabric_post_link = tcm_vhost_port_link, + .fabric_pre_unlink = tcm_vhost_port_unlink, + .fabric_make_np = NULL, + .fabric_drop_np = NULL, + .fabric_make_nodeacl = tcm_vhost_make_nodeacl, + .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, +}; + +static int tcm_vhost_register_configfs(void) +{ + struct target_fabric_configfs *fabric; + int ret; + + pr_debug("TCM_VHOST fabric module %s on %s/%s" + " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + utsname()->machine); + /* + * Register the top level struct config_item_type with TCM core + */ + fabric = target_fabric_configfs_init(THIS_MODULE, "vhost"); + if (IS_ERR(fabric)) { + pr_err("target_fabric_configfs_init() failed\n"); + return PTR_ERR(fabric); + } + /* + * Setup fabric->tf_ops from our local tcm_vhost_ops + */ + fabric->tf_ops = tcm_vhost_ops; + /* + * Setup default attribute lists for various fabric->tf_cit_tmpl + */ + TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; + TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL; + TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL; + /* + * Register the fabric for use within TCM + */ + ret = target_fabric_configfs_register(fabric); + if (ret < 0) { + pr_err("target_fabric_configfs_register() failed" + " for TCM_VHOST\n"); + return ret; + } + /* + * Setup our local pointer to *fabric + */ + tcm_vhost_fabric_configfs = fabric; + pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); + return 0; +}; + +static void tcm_vhost_deregister_configfs(void) +{ + if (!tcm_vhost_fabric_configfs) + return; + + target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); + tcm_vhost_fabric_configfs = NULL; + pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); +}; + +static int __init tcm_vhost_init(void) +{ + int ret = -ENOMEM; + /* + * Use our own dedicated workqueue for submitting I/O into + * target core to avoid contention within system_wq. 
+ */ + tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); + if (!tcm_vhost_workqueue) + goto out; + + ret = vhost_scsi_register(); + if (ret < 0) + goto out_destroy_workqueue; + + ret = tcm_vhost_register_configfs(); + if (ret < 0) + goto out_vhost_scsi_deregister; + + return 0; + +out_vhost_scsi_deregister: + vhost_scsi_deregister(); +out_destroy_workqueue: + destroy_workqueue(tcm_vhost_workqueue); +out: + return ret; +}; + +static void tcm_vhost_exit(void) +{ + tcm_vhost_deregister_configfs(); + vhost_scsi_deregister(); + destroy_workqueue(tcm_vhost_workqueue); +}; + +MODULE_DESCRIPTION("TCM_VHOST series fabric driver"); +MODULE_LICENSE("GPL"); +module_init(tcm_vhost_init); +module_exit(tcm_vhost_exit); diff --git a/trunk/drivers/vhost/tcm_vhost.h b/trunk/drivers/vhost/tcm_vhost.h new file mode 100644 index 000000000000..d9e93557d669 --- /dev/null +++ b/trunk/drivers/vhost/tcm_vhost.h @@ -0,0 +1,103 @@ +#define TCM_VHOST_VERSION "v0.1" +#define TCM_VHOST_NAMELEN 256 +#define TCM_VHOST_MAX_CDB_SIZE 32 + +struct tcm_vhost_cmd { + /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ + int tvc_vq_desc; + /* The Tag from include/linux/virtio_scsi.h:struct virtio_scsi_cmd_req */ + u64 tvc_tag; + /* The number of scatterlists associated with this cmd */ + u32 tvc_sgl_count; + /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ + u32 tvc_lun; + /* Pointer to the SGL formatted memory from virtio-scsi */ + struct scatterlist *tvc_sgl; + /* Pointer to response */ + struct virtio_scsi_cmd_resp __user *tvc_resp; + /* Pointer to vhost_scsi for our device */ + struct vhost_scsi *tvc_vhost; + /* The TCM I/O descriptor that is accessed via container_of() */ + struct se_cmd tvc_se_cmd; + /* work item used for cmwq dispatch to tcm_vhost_submission_work() */ + struct work_struct work; + /* Copy of the incoming SCSI command descriptor block (CDB) */ + unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE]; + /* Sense buffer that will be mapped into outgoing status */ + unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; + /* Completed commands list, serviced from vhost worker thread */ + struct list_head tvc_completion_list; +}; + +struct tcm_vhost_nexus { + /* Pointer to TCM session for I_T Nexus */ + struct se_session *tvn_se_sess; +}; + +struct tcm_vhost_nacl { + /* Binary World Wide unique Port Name for Vhost Initiator port */ + u64 iport_wwpn; + /* ASCII formatted WWPN for Sas Initiator port */ + char iport_name[TCM_VHOST_NAMELEN]; + /* Returned by tcm_vhost_make_nodeacl() */ + struct se_node_acl se_node_acl; +}; + +struct tcm_vhost_tpg { + /* Vhost port target portal group tag for TCM */ + u16 tport_tpgt; + /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ + int tv_tpg_port_count; + /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ + int tv_tpg_vhost_count; + /* list for tcm_vhost_list */ + struct list_head tv_tpg_list; + /* Used to protect access for tpg_nexus */ + struct mutex tv_tpg_mutex; + /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ + struct tcm_vhost_nexus *tpg_nexus; + /* Pointer back to tcm_vhost_tport */ + struct tcm_vhost_tport *tport; + /* Returned by tcm_vhost_make_tpg() */ + struct se_portal_group se_tpg; +}; + +struct tcm_vhost_tport { + /* SCSI protocol the tport is providing */ + u8 tport_proto_id; + /* Binary World Wide unique Port Name for Vhost Target port */ + u64 tport_wwpn; + /* ASCII formatted WWPN for Vhost Target port */ + char tport_name[TCM_VHOST_NAMELEN]; + /* Returned 
by tcm_vhost_make_tport() */ + struct se_wwn tport_wwn; +}; + +/* + * As per request from MST, keep TCM_VHOST related ioctl defines out of + * linux/vhost.h (user-space) for now.. + */ + +#include + +/* + * Used by QEMU userspace to ensure a consistent vhost-scsi ABI. + * + * ABI Rev 0: July 2012 version starting point for v3.6-rc merge candidate + + * RFC-v2 vhost-scsi userspace. Add GET_ABI_VERSION ioctl usage + */ + +#define VHOST_SCSI_ABI_VERSION 0 + +struct vhost_scsi_target { + int abi_version; + char vhost_wwpn[TRANSPORT_IQN_LEN]; + unsigned short vhost_tpgt; + unsigned short reserved; +}; + +/* VHOST_SCSI specific defines */ +#define VHOST_SCSI_SET_ENDPOINT _IOW(VHOST_VIRTIO, 0x40, struct vhost_scsi_target) +#define VHOST_SCSI_CLEAR_ENDPOINT _IOW(VHOST_VIRTIO, 0x41, struct vhost_scsi_target) +/* Changing this breaks userspace. */ +#define VHOST_SCSI_GET_ABI_VERSION _IOW(VHOST_VIRTIO, 0x42, int) diff --git a/trunk/drivers/video/auo_k190x.c b/trunk/drivers/video/auo_k190x.c index 77da6a2f43dc..c03ecdd31e4c 100644 --- a/trunk/drivers/video/auo_k190x.c +++ b/trunk/drivers/video/auo_k190x.c @@ -987,7 +987,6 @@ int __devinit auok190x_common_probe(struct platform_device *pdev, fb_dealloc_cmap(&info->cmap); err_cmap: fb_deferred_io_cleanup(info); - kfree(info->fbdefio); err_defio: vfree((void *)info->screen_base); err_irq: @@ -1022,7 +1021,6 @@ int __devexit auok190x_common_remove(struct platform_device *pdev) fb_dealloc_cmap(&info->cmap); fb_deferred_io_cleanup(info); - kfree(info->fbdefio); vfree((void *)info->screen_base); diff --git a/trunk/drivers/video/backlight/88pm860x_bl.c b/trunk/drivers/video/backlight/88pm860x_bl.c index f75da8758adc..f49181c73113 100644 --- a/trunk/drivers/video/backlight/88pm860x_bl.c +++ b/trunk/drivers/video/backlight/88pm860x_bl.c @@ -228,7 +228,6 @@ static int pm860x_backlight_probe(struct platform_device *pdev) data->port = pdata->flags; if (data->port < 0) { dev_err(&pdev->dev, "wrong platform data is assigned"); - kfree(data); return -EINVAL; } diff --git a/trunk/drivers/video/console/bitblit.c b/trunk/drivers/video/console/bitblit.c index 28b1a834906b..61b182bf32a2 100644 --- a/trunk/drivers/video/console/bitblit.c +++ b/trunk/drivers/video/console/bitblit.c @@ -162,7 +162,7 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info, image.depth = 1; if (attribute) { - buf = kmalloc(cellsize, GFP_KERNEL); + buf = kmalloc(cellsize, GFP_ATOMIC); if (!buf) return; } diff --git a/trunk/drivers/video/console/fbcon.c b/trunk/drivers/video/console/fbcon.c index 2e471c22abf5..fdefa8fd72c4 100644 --- a/trunk/drivers/video/console/fbcon.c +++ b/trunk/drivers/video/console/fbcon.c @@ -372,8 +372,15 @@ static void fb_flashcursor(struct work_struct *work) struct vc_data *vc = NULL; int c; int mode; + int ret; + + /* FIXME: we should sort out the unbind locking instead */ + /* instead we just fail to flash the cursor if we can't get + * the lock instead of blocking fbcon deinit */ + ret = console_trylock(); + if (ret == 0) + return; - console_lock(); if (ops && ops->currcon != -1) vc = vc_cons[ops->currcon].d; @@ -442,7 +449,7 @@ static int __init fb_console_setup(char *this_opt) while ((options = strsep(&this_opt, ",")) != NULL) { if (!strncmp(options, "font:", 5)) - strcpy(fontname, options + 5); + strlcpy(fontname, options + 5, sizeof(fontname)); if (!strncmp(options, "scrollback:", 11)) { options += 11; diff --git a/trunk/drivers/video/efifb.c b/trunk/drivers/video/efifb.c index b4a632ada401..932abaa58a89 100644 --- a/trunk/drivers/video/efifb.c +++ 
b/trunk/drivers/video/efifb.c @@ -553,7 +553,9 @@ static int __init efifb_init(void) int ret; char *option = NULL; - dmi_check_system(dmi_system_table); + if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI || + !(screen_info.capabilities & VIDEO_CAPABILITY_SKIP_QUIRKS)) + dmi_check_system(dmi_system_table); if (screen_info.orig_video_isVGA != VIDEO_TYPE_EFI) return -ENODEV; diff --git a/trunk/drivers/video/exynos/exynos_mipi_dsi.c b/trunk/drivers/video/exynos/exynos_mipi_dsi.c index 4bc2b8a5dd8b..663c308d0e73 100644 --- a/trunk/drivers/video/exynos/exynos_mipi_dsi.c +++ b/trunk/drivers/video/exynos/exynos_mipi_dsi.c @@ -461,7 +461,7 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev) done: platform_set_drvdata(pdev, dsim); - dev_dbg(&pdev->dev, "%s() completed sucessfuly (%s mode)\n", __func__, + dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__, dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB"); return 0; diff --git a/trunk/drivers/video/mb862xx/mb862xxfbdrv.c b/trunk/drivers/video/mb862xx/mb862xxfbdrv.c index 00ce1f34b496..57d940be5f3d 100644 --- a/trunk/drivers/video/mb862xx/mb862xxfbdrv.c +++ b/trunk/drivers/video/mb862xx/mb862xxfbdrv.c @@ -328,6 +328,8 @@ static int mb862xxfb_ioctl(struct fb_info *fbi, unsigned int cmd, case MB862XX_L1_SET_CFG: if (copy_from_user(l1_cfg, argp, sizeof(*l1_cfg))) return -EFAULT; + if (l1_cfg->dh == 0 || l1_cfg->dw == 0) + return -EINVAL; if ((l1_cfg->sw >= l1_cfg->dw) && (l1_cfg->sh >= l1_cfg->dh)) { /* downscaling */ outreg(cap, GC_CAP_CSC, diff --git a/trunk/drivers/video/omap2/dss/sdi.c b/trunk/drivers/video/omap2/dss/sdi.c index 5d31699fbd3c..f43bfe17b3b6 100644 --- a/trunk/drivers/video/omap2/dss/sdi.c +++ b/trunk/drivers/video/omap2/dss/sdi.c @@ -105,6 +105,20 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) sdi_config_lcd_manager(dssdev); + /* + * LCLK and PCLK divisors are located in shadow registers, and we + * normally write them to DISPC registers when enabling the output. + * However, SDI uses pck-free as source clock for its PLL, and pck-free + * is affected by the divisors. And as we need the PLL before enabling + * the output, we need to write the divisors early. + * + * It seems just writing to the DISPC register is enough, and we don't + * need to care about the shadow register mechanism for pck-free. The + * exact reason for this is unknown. 
+ */ + dispc_mgr_set_clock_div(dssdev->manager->id, + &sdi.mgr_config.clock_info); + dss_sdi_init(dssdev->phy.sdi.datapairs); r = dss_sdi_enable(); if (r) diff --git a/trunk/drivers/video/omap2/omapfb/omapfb-main.c b/trunk/drivers/video/omap2/omapfb/omapfb-main.c index 08ec1a7103f2..fc671d3d8004 100644 --- a/trunk/drivers/video/omap2/omapfb/omapfb-main.c +++ b/trunk/drivers/video/omap2/omapfb/omapfb-main.c @@ -1192,7 +1192,7 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green, break; if (regno < 16) { - u16 pal; + u32 pal; pal = ((red >> (16 - var->red.length)) << var->red.offset) | ((green >> (16 - var->green.length)) << diff --git a/trunk/drivers/video/tmiofb.c b/trunk/drivers/video/tmiofb.c index 8e4a446b5ed1..b244f060f151 100644 --- a/trunk/drivers/video/tmiofb.c +++ b/trunk/drivers/video/tmiofb.c @@ -694,6 +694,10 @@ static int __devinit tmiofb_probe(struct platform_device *dev) dev_err(&dev->dev, "NULL platform data!\n"); return -EINVAL; } + if (ccr == NULL || lcr == NULL || vram == NULL || irq < 0) { + dev_err(&dev->dev, "missing resources\n"); + return -EINVAL; + } info = framebuffer_alloc(sizeof(struct tmiofb_par), &dev->dev); diff --git a/trunk/drivers/w1/masters/ds1wm.c b/trunk/drivers/w1/masters/ds1wm.c index 530a2d309063..7c294f4dc0ed 100644 --- a/trunk/drivers/w1/masters/ds1wm.c +++ b/trunk/drivers/w1/masters/ds1wm.c @@ -349,7 +349,7 @@ static void ds1wm_search(void *data, struct w1_master *master_dev, "pass: %d entering ASM\n", pass); ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA); dev_dbg(&ds1wm_data->pdev->dev, - "pass: %d begining nibble loop\n", pass); + "pass: %d beginning nibble loop\n", pass); r_prime = 0; d = 0; diff --git a/trunk/drivers/w1/slaves/w1_therm.c b/trunk/drivers/w1/slaves/w1_therm.c index d90062b211f8..92d08e7fcba2 100644 --- a/trunk/drivers/w1/slaves/w1_therm.c +++ b/trunk/drivers/w1/slaves/w1_therm.c @@ -91,6 +91,11 @@ static struct w1_family w1_therm_family_DS28EA00 = { .fops = &w1_therm_fops, }; +static struct w1_family w1_therm_family_DS1825 = { + .fid = W1_THERM_DS1825, + .fops = &w1_therm_fops, +}; + struct w1_therm_family_converter { u8 broken; @@ -120,6 +125,10 @@ static struct w1_therm_family_converter w1_therm_families[] = { .f = &w1_therm_family_DS28EA00, .convert = w1_DS18B20_convert_temp }, + { + .f = &w1_therm_family_DS1825, + .convert = w1_DS18B20_convert_temp + } }; static inline int w1_DS18B20_convert_temp(u8 rom[9]) diff --git a/trunk/drivers/w1/w1_family.h b/trunk/drivers/w1/w1_family.h index b00ada44a89b..a1f0ce151d53 100644 --- a/trunk/drivers/w1/w1_family.h +++ b/trunk/drivers/w1/w1_family.h @@ -39,6 +39,7 @@ #define W1_EEPROM_DS2431 0x2D #define W1_FAMILY_DS2760 0x30 #define W1_FAMILY_DS2780 0x32 +#define W1_THERM_DS1825 0x3B #define W1_FAMILY_DS2781 0x3D #define W1_THERM_DS28EA00 0x42 diff --git a/trunk/drivers/watchdog/booke_wdt.c b/trunk/drivers/watchdog/booke_wdt.c index 3fe82d0e8caa..5b06d31ab6a9 100644 --- a/trunk/drivers/watchdog/booke_wdt.c +++ b/trunk/drivers/watchdog/booke_wdt.c @@ -166,18 +166,17 @@ static long booke_wdt_ioctl(struct file *file, switch (cmd) { case WDIOC_GETSUPPORT: - if (copy_to_user((void *)arg, &ident, sizeof(ident))) - return -EFAULT; + return copy_to_user(p, &ident, sizeof(ident)) ? -EFAULT : 0; case WDIOC_GETSTATUS: return put_user(0, p); case WDIOC_GETBOOTSTATUS: /* XXX: something is clearing TSR */ tmp = mfspr(SPRN_TSR) & TSR_WRS(3); /* returns CARDRESET if last reset was caused by the WDT */ - return (tmp ? WDIOF_CARDRESET : 0); + return put_user((tmp ? 
WDIOF_CARDRESET : 0), p); case WDIOC_SETOPTIONS: if (get_user(tmp, p)) - return -EINVAL; + return -EFAULT; if (tmp == WDIOS_ENABLECARD) { booke_wdt_ping(); break; diff --git a/trunk/drivers/watchdog/da9052_wdt.c b/trunk/drivers/watchdog/da9052_wdt.c index 3f75129eb0a9..f7abbaeebcaf 100644 --- a/trunk/drivers/watchdog/da9052_wdt.c +++ b/trunk/drivers/watchdog/da9052_wdt.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include diff --git a/trunk/drivers/watchdog/hpwdt.c b/trunk/drivers/watchdog/hpwdt.c index 1eff743ec497..ae60406ea8a1 100644 --- a/trunk/drivers/watchdog/hpwdt.c +++ b/trunk/drivers/watchdog/hpwdt.c @@ -814,6 +814,9 @@ static int __devinit hpwdt_init_one(struct pci_dev *dev, hpwdt_timer_reg = pci_mem_addr + 0x70; hpwdt_timer_con = pci_mem_addr + 0x72; + /* Make sure that timer is disabled until /dev/watchdog is opened */ + hpwdt_stop(); + /* Make sure that we have a valid soft_margin */ if (hpwdt_change_timer(soft_margin)) hpwdt_change_timer(DEFAULT_MARGIN); diff --git a/trunk/drivers/watchdog/watchdog_core.c b/trunk/drivers/watchdog/watchdog_core.c index 6aa46a90ff02..3796434991fa 100644 --- a/trunk/drivers/watchdog/watchdog_core.c +++ b/trunk/drivers/watchdog/watchdog_core.c @@ -128,11 +128,12 @@ EXPORT_SYMBOL_GPL(watchdog_register_device); void watchdog_unregister_device(struct watchdog_device *wdd) { int ret; - int devno = wdd->cdev.dev; + int devno; if (wdd == NULL) return; + devno = wdd->cdev.dev; ret = watchdog_dev_unregister(wdd); if (ret) pr_err("error unregistering /dev/watchdog (err=%d)\n", ret); diff --git a/trunk/drivers/xen/gntdev.c b/trunk/drivers/xen/gntdev.c index 1ffd03bf8e10..7f1241608489 100644 --- a/trunk/drivers/xen/gntdev.c +++ b/trunk/drivers/xen/gntdev.c @@ -314,8 +314,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages) } } - err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, - pages, true); + err = gnttab_unmap_refs(map->unmap_ops + offset, + use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset, + pages); if (err) return err; diff --git a/trunk/drivers/xen/grant-table.c b/trunk/drivers/xen/grant-table.c index 0bfc1ef11259..006726688baf 100644 --- a/trunk/drivers/xen/grant-table.c +++ b/trunk/drivers/xen/grant-table.c @@ -870,7 +870,8 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, EXPORT_SYMBOL_GPL(gnttab_map_refs); int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, - struct page **pages, unsigned int count, bool clear_pte) + struct gnttab_map_grant_ref *kmap_ops, + struct page **pages, unsigned int count) { int i, ret; bool lazy = false; @@ -888,7 +889,8 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, } for (i = 0; i < count; i++) { - ret = m2p_remove_override(pages[i], clear_pte); + ret = m2p_remove_override(pages[i], kmap_ops ? 
+ &kmap_ops[i] : NULL); if (ret) return ret; } diff --git a/trunk/drivers/xen/platform-pci.c b/trunk/drivers/xen/platform-pci.c index d4c50d63acbc..97ca359ae2bd 100644 --- a/trunk/drivers/xen/platform-pci.c +++ b/trunk/drivers/xen/platform-pci.c @@ -101,19 +101,6 @@ static int platform_pci_resume(struct pci_dev *pdev) return 0; } -static void __devinit prepare_shared_info(void) -{ -#ifdef CONFIG_KEXEC - unsigned long addr; - struct shared_info *hvm_shared_info; - - addr = alloc_xen_mmio(PAGE_SIZE); - hvm_shared_info = ioremap(addr, PAGE_SIZE); - memset(hvm_shared_info, 0, PAGE_SIZE); - xen_hvm_prepare_kexec(hvm_shared_info, addr >> PAGE_SHIFT); -#endif -} - static int __devinit platform_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -151,8 +138,6 @@ static int __devinit platform_pci_init(struct pci_dev *pdev, platform_mmio = mmio_addr; platform_mmiolen = mmio_len; - prepare_shared_info(); - if (!xen_have_vector_callback) { ret = xen_allocate_irq(pdev); if (ret) { diff --git a/trunk/drivers/xen/swiotlb-xen.c b/trunk/drivers/xen/swiotlb-xen.c index 1afb4fba11b4..4d519488d304 100644 --- a/trunk/drivers/xen/swiotlb-xen.c +++ b/trunk/drivers/xen/swiotlb-xen.c @@ -232,7 +232,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size, return ret; if (hwdev && hwdev->coherent_dma_mask) - dma_mask = hwdev->coherent_dma_mask; + dma_mask = dma_alloc_coherent_mask(hwdev, flags); phys = virt_to_phys(ret); dev_addr = xen_phys_to_bus(phys); diff --git a/trunk/drivers/xen/xen-pciback/pci_stub.c b/trunk/drivers/xen/xen-pciback/pci_stub.c index 097e536e8672..03342728bf23 100644 --- a/trunk/drivers/xen/xen-pciback/pci_stub.c +++ b/trunk/drivers/xen/xen-pciback/pci_stub.c @@ -353,16 +353,16 @@ static int __devinit pcistub_init_device(struct pci_dev *dev) if (err) goto config_release; - dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); - __pci_reset_function_locked(dev); - /* We need the device active to save the state. 
*/ dev_dbg(&dev->dev, "save state of device\n"); pci_save_state(dev); dev_data->pci_saved_state = pci_store_saved_state(dev); if (!dev_data->pci_saved_state) dev_err(&dev->dev, "Could not store PCI conf saved state!\n"); - + else { + dev_dbg(&dev->dev, "reseting (FLR, D3, etc) the device\n"); + __pci_reset_function_locked(dev); + } /* Now disable the device (this also ensures some private device * data is setup before we export) */ diff --git a/trunk/drivers/zorro/zorro.c b/trunk/drivers/zorro/zorro.c index 181fa8158a8b..858c9714b2f3 100644 --- a/trunk/drivers/zorro/zorro.c +++ b/trunk/drivers/zorro/zorro.c @@ -37,7 +37,6 @@ struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; */ struct zorro_bus { - struct list_head devices; /* list of devices on this bus */ struct device dev; }; @@ -136,7 +135,6 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) if (!bus) return -ENOMEM; - INIT_LIST_HEAD(&bus->devices); bus->dev.parent = &pdev->dev; dev_set_name(&bus->dev, "zorro"); error = device_register(&bus->dev); diff --git a/trunk/fs/autofs4/expire.c b/trunk/fs/autofs4/expire.c index 1feb68ecef95..842d00048a65 100644 --- a/trunk/fs/autofs4/expire.c +++ b/trunk/fs/autofs4/expire.c @@ -94,25 +94,21 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev, { struct autofs_sb_info *sbi = autofs4_sbi(root->d_sb); struct list_head *next; - struct dentry *p, *q; + struct dentry *q; spin_lock(&sbi->lookup_lock); + spin_lock(&root->d_lock); - if (prev == NULL) { - spin_lock(&root->d_lock); + if (prev) + next = prev->d_u.d_child.next; + else { prev = dget_dlock(root); next = prev->d_subdirs.next; - p = prev; - goto start; } - p = prev; - spin_lock(&p->d_lock); -again: - next = p->d_u.d_child.next; -start: +cont: if (next == &root->d_subdirs) { - spin_unlock(&p->d_lock); + spin_unlock(&root->d_lock); spin_unlock(&sbi->lookup_lock); dput(prev); return NULL; @@ -121,16 +117,15 @@ static struct dentry *get_next_positive_subdir(struct dentry *prev, q = list_entry(next, struct dentry, d_u.d_child); spin_lock_nested(&q->d_lock, DENTRY_D_LOCK_NESTED); - /* Negative dentry - try next */ - if (!simple_positive(q)) { - spin_unlock(&p->d_lock); - lock_set_subclass(&q->d_lock.dep_map, 0, _RET_IP_); - p = q; - goto again; + /* Already gone or negative dentry (under construction) - try next */ + if (q->d_count == 0 || !simple_positive(q)) { + spin_unlock(&q->d_lock); + next = q->d_u.d_child.next; + goto cont; } dget_dlock(q); spin_unlock(&q->d_lock); - spin_unlock(&p->d_lock); + spin_unlock(&root->d_lock); spin_unlock(&sbi->lookup_lock); dput(prev); @@ -404,11 +399,6 @@ struct dentry *autofs4_expire_indirect(struct super_block *sb, DPRINTK("checking mountpoint %p %.*s", dentry, (int)dentry->d_name.len, dentry->d_name.name); - /* Path walk currently on this dentry? 
*/ - ino_count = atomic_read(&ino->count) + 2; - if (dentry->d_count > ino_count) - goto next; - /* Can we umount this guy */ if (autofs4_mount_busy(mnt, dentry)) goto next; diff --git a/trunk/fs/bio.c b/trunk/fs/bio.c index 73922abba832..71072ab99128 100644 --- a/trunk/fs/bio.c +++ b/trunk/fs/bio.c @@ -73,7 +73,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) { unsigned int sz = sizeof(struct bio) + extra_size; struct kmem_cache *slab = NULL; - struct bio_slab *bslab; + struct bio_slab *bslab, *new_bio_slabs; unsigned int i, entry = -1; mutex_lock(&bio_slab_lock); @@ -97,11 +97,12 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size) if (bio_slab_nr == bio_slab_max && entry == -1) { bio_slab_max <<= 1; - bio_slabs = krealloc(bio_slabs, - bio_slab_max * sizeof(struct bio_slab), - GFP_KERNEL); - if (!bio_slabs) + new_bio_slabs = krealloc(bio_slabs, + bio_slab_max * sizeof(struct bio_slab), + GFP_KERNEL); + if (!new_bio_slabs) goto out_unlock; + bio_slabs = new_bio_slabs; } if (entry == -1) entry = bio_slab_nr++; @@ -1312,7 +1313,7 @@ EXPORT_SYMBOL(bio_copy_kern); * Note that this code is very hard to test under normal circumstances because * direct-io pins the pages with get_user_pages(). This makes * is_page_cache_freeable return false, and the VM will not clean the pages. - * But other code (eg, pdflush) could clean the pages if they are mapped + * But other code (eg, flusher threads) could clean the pages if they are mapped * pagecache. * * Simply disabling the call to bio_set_pages_dirty() is a good way to test the diff --git a/trunk/fs/block_dev.c b/trunk/fs/block_dev.c index 1e519195d45b..38e721b35d45 100644 --- a/trunk/fs/block_dev.c +++ b/trunk/fs/block_dev.c @@ -1578,10 +1578,12 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct file *file = iocb->ki_filp; + struct blk_plug plug; ssize_t ret; BUG_ON(iocb->ki_pos != pos); + blk_start_plug(&plug); ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); if (ret > 0 || ret == -EIOCBQUEUED) { ssize_t err; @@ -1590,6 +1592,7 @@ ssize_t blkdev_aio_write(struct kiocb *iocb, const struct iovec *iov, if (err < 0 && ret > 0) ret = err; } + blk_finish_plug(&plug); return ret; } EXPORT_SYMBOL_GPL(blkdev_aio_write); diff --git a/trunk/fs/btrfs/backref.c b/trunk/fs/btrfs/backref.c index a256f3b2a845..ff6475f409d6 100644 --- a/trunk/fs/btrfs/backref.c +++ b/trunk/fs/btrfs/backref.c @@ -1438,10 +1438,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info, ret = extent_from_logical(fs_info, logical, path, &found_key); btrfs_release_path(path); - if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) - ret = -EINVAL; if (ret < 0) return ret; + if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) + return -EINVAL; extent_item_pos = logical - found_key.objectid; ret = iterate_extent_inodes(fs_info, found_key.objectid, diff --git a/trunk/fs/btrfs/compression.c b/trunk/fs/btrfs/compression.c index 86eff48dab78..43d1c5a3a030 100644 --- a/trunk/fs/btrfs/compression.c +++ b/trunk/fs/btrfs/compression.c @@ -818,6 +818,7 @@ static void free_workspace(int type, struct list_head *workspace) btrfs_compress_op[idx]->free_workspace(workspace); atomic_dec(alloc_workspace); wake: + smp_mb(); if (waitqueue_active(workspace_wait)) wake_up(workspace_wait); } diff --git a/trunk/fs/btrfs/ctree.c b/trunk/fs/btrfs/ctree.c index 9d7621f271ff..6d183f60d63a 100644 --- a/trunk/fs/btrfs/ctree.c +++ b/trunk/fs/btrfs/ctree.c @@ -420,12 +420,6 @@ void 
btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info, } spin_unlock(&fs_info->tree_mod_seq_lock); - /* - * we removed the lowest blocker from the blocker list, so there may be - * more processible delayed refs. - */ - wake_up(&fs_info->tree_mod_seq_wait); - /* * anything that's lower than the lowest existing (read: blocked) * sequence number can be removed from the tree. @@ -631,6 +625,9 @@ __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb) u32 nritems; int ret; + if (btrfs_header_level(eb) == 0) + return; + nritems = btrfs_header_nritems(eb); for (i = nritems - 1; i >= 0; i--) { ret = tree_mod_log_insert_key_locked(fs_info, eb, i, diff --git a/trunk/fs/btrfs/ctree.h b/trunk/fs/btrfs/ctree.h index 4bab807227ad..9821b672f5a2 100644 --- a/trunk/fs/btrfs/ctree.h +++ b/trunk/fs/btrfs/ctree.h @@ -116,7 +116,7 @@ struct btrfs_ordered_sum; #define BTRFS_FREE_SPACE_OBJECTID -11ULL /* - * The inode number assigned to the special inode for sotring + * The inode number assigned to the special inode for storing * free ino cache */ #define BTRFS_FREE_INO_OBJECTID -12ULL @@ -1252,7 +1252,6 @@ struct btrfs_fs_info { atomic_t tree_mod_seq; struct list_head tree_mod_seq_list; struct seq_list tree_mod_seq_elem; - wait_queue_head_t tree_mod_seq_wait; /* this protects tree_mod_log */ rwlock_t tree_mod_log_lock; @@ -3192,7 +3191,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, struct bio *bio, u32 *dst); int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, - struct bio *bio, u64 logical_offset, u32 *dst); + struct bio *bio, u64 logical_offset); int btrfs_insert_file_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 objectid, u64 pos, diff --git a/trunk/fs/btrfs/delayed-inode.c b/trunk/fs/btrfs/delayed-inode.c index 335605c8ceab..07d5eeb1e6f1 100644 --- a/trunk/fs/btrfs/delayed-inode.c +++ b/trunk/fs/btrfs/delayed-inode.c @@ -512,8 +512,8 @@ static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) rb_erase(&delayed_item->rb_node, root); delayed_item->delayed_node->count--; - atomic_dec(&delayed_root->items); - if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && + if (atomic_dec_return(&delayed_root->items) < + BTRFS_DELAYED_BACKGROUND && waitqueue_active(&delayed_root->wait)) wake_up(&delayed_root->wait); } @@ -1028,9 +1028,10 @@ static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, btrfs_release_delayed_item(prev); ret = 0; btrfs_release_path(path); - if (curr) + if (curr) { + mutex_unlock(&node->mutex); goto do_again; - else + } else goto delete_fail; } @@ -1055,8 +1056,7 @@ static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) delayed_node->count--; delayed_root = delayed_node->root->fs_info->delayed_root; - atomic_dec(&delayed_root->items); - if (atomic_read(&delayed_root->items) < + if (atomic_dec_return(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && waitqueue_active(&delayed_root->wait)) wake_up(&delayed_root->wait); diff --git a/trunk/fs/btrfs/delayed-ref.c b/trunk/fs/btrfs/delayed-ref.c index da7419ed01bb..ae9411773397 100644 --- a/trunk/fs/btrfs/delayed-ref.c +++ b/trunk/fs/btrfs/delayed-ref.c @@ -38,17 +38,14 @@ static int comp_tree_refs(struct btrfs_delayed_tree_ref *ref2, struct btrfs_delayed_tree_ref *ref1) { - if (ref1->node.type == BTRFS_TREE_BLOCK_REF_KEY) { - if (ref1->root < ref2->root) - return -1; - if (ref1->root > ref2->root) - return 1; - } else { - 
if (ref1->parent < ref2->parent) - return -1; - if (ref1->parent > ref2->parent) - return 1; - } + if (ref1->root < ref2->root) + return -1; + if (ref1->root > ref2->root) + return 1; + if (ref1->parent < ref2->parent) + return -1; + if (ref1->parent > ref2->parent) + return 1; return 0; } @@ -85,7 +82,8 @@ static int comp_data_refs(struct btrfs_delayed_data_ref *ref2, * type of the delayed backrefs and content of delayed backrefs. */ static int comp_entry(struct btrfs_delayed_ref_node *ref2, - struct btrfs_delayed_ref_node *ref1) + struct btrfs_delayed_ref_node *ref1, + bool compare_seq) { if (ref1->bytenr < ref2->bytenr) return -1; @@ -102,10 +100,12 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2, if (ref1->type > ref2->type) return 1; /* merging of sequenced refs is not allowed */ - if (ref1->seq < ref2->seq) - return -1; - if (ref1->seq > ref2->seq) - return 1; + if (compare_seq) { + if (ref1->seq < ref2->seq) + return -1; + if (ref1->seq > ref2->seq) + return 1; + } if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY || ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) { return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2), @@ -139,7 +139,7 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root, entry = rb_entry(parent_node, struct btrfs_delayed_ref_node, rb_node); - cmp = comp_entry(entry, ins); + cmp = comp_entry(entry, ins, 1); if (cmp < 0) p = &(*p)->rb_left; else if (cmp > 0) @@ -233,6 +233,114 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, return 0; } +static void inline drop_delayed_ref(struct btrfs_trans_handle *trans, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_node *ref) +{ + rb_erase(&ref->rb_node, &delayed_refs->root); + ref->in_tree = 0; + btrfs_put_delayed_ref(ref); + delayed_refs->num_entries--; + if (trans->delayed_ref_updates) + trans->delayed_ref_updates--; +} + +static int merge_ref(struct btrfs_trans_handle *trans, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_node *ref, u64 seq) +{ + struct rb_node *node; + int merged = 0; + int mod = 0; + int done = 0; + + node = rb_prev(&ref->rb_node); + while (node) { + struct btrfs_delayed_ref_node *next; + + next = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); + node = rb_prev(node); + if (next->bytenr != ref->bytenr) + break; + if (seq && next->seq >= seq) + break; + if (comp_entry(ref, next, 0)) + continue; + + if (ref->action == next->action) { + mod = next->ref_mod; + } else { + if (ref->ref_mod < next->ref_mod) { + struct btrfs_delayed_ref_node *tmp; + + tmp = ref; + ref = next; + next = tmp; + done = 1; + } + mod = -next->ref_mod; + } + + merged++; + drop_delayed_ref(trans, delayed_refs, next); + ref->ref_mod += mod; + if (ref->ref_mod == 0) { + drop_delayed_ref(trans, delayed_refs, ref); + break; + } else { + /* + * You can't have multiples of the same ref on a tree + * block. 
+ */ + WARN_ON(ref->type == BTRFS_TREE_BLOCK_REF_KEY || + ref->type == BTRFS_SHARED_BLOCK_REF_KEY); + } + + if (done) + break; + node = rb_prev(&ref->rb_node); + } + + return merged; +} + +void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_head *head) +{ + struct rb_node *node; + u64 seq = 0; + + spin_lock(&fs_info->tree_mod_seq_lock); + if (!list_empty(&fs_info->tree_mod_seq_list)) { + struct seq_list *elem; + + elem = list_first_entry(&fs_info->tree_mod_seq_list, + struct seq_list, list); + seq = elem->seq; + } + spin_unlock(&fs_info->tree_mod_seq_lock); + + node = rb_prev(&head->node.rb_node); + while (node) { + struct btrfs_delayed_ref_node *ref; + + ref = rb_entry(node, struct btrfs_delayed_ref_node, + rb_node); + if (ref->bytenr != head->node.bytenr) + break; + + /* We can't merge refs that are outside of our seq count */ + if (seq && ref->seq >= seq) + break; + if (merge_ref(trans, delayed_refs, ref, seq)) + node = rb_prev(&head->node.rb_node); + else + node = rb_prev(node); + } +} + int btrfs_check_delayed_seq(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs, u64 seq) @@ -336,18 +444,11 @@ update_existing_ref(struct btrfs_trans_handle *trans, * every changing the extent allocation tree. */ existing->ref_mod--; - if (existing->ref_mod == 0) { - rb_erase(&existing->rb_node, - &delayed_refs->root); - existing->in_tree = 0; - btrfs_put_delayed_ref(existing); - delayed_refs->num_entries--; - if (trans->delayed_ref_updates) - trans->delayed_ref_updates--; - } else { + if (existing->ref_mod == 0) + drop_delayed_ref(trans, delayed_refs, existing); + else WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || existing->type == BTRFS_SHARED_BLOCK_REF_KEY); - } } else { WARN_ON(existing->type == BTRFS_TREE_BLOCK_REF_KEY || existing->type == BTRFS_SHARED_BLOCK_REF_KEY); @@ -662,9 +763,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, num_bytes, parent, ref_root, level, action, for_cow); - if (!need_ref_seq(for_cow, ref_root) && - waitqueue_active(&fs_info->tree_mod_seq_wait)) - wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); if (need_ref_seq(for_cow, ref_root)) btrfs_qgroup_record_ref(trans, &ref->node, extent_op); @@ -713,9 +811,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info, add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, num_bytes, parent, ref_root, owner, offset, action, for_cow); - if (!need_ref_seq(for_cow, ref_root) && - waitqueue_active(&fs_info->tree_mod_seq_wait)) - wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); if (need_ref_seq(for_cow, ref_root)) btrfs_qgroup_record_ref(trans, &ref->node, extent_op); @@ -744,8 +839,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, num_bytes, BTRFS_UPDATE_DELAYED_HEAD, extent_op->is_data); - if (waitqueue_active(&fs_info->tree_mod_seq_wait)) - wake_up(&fs_info->tree_mod_seq_wait); spin_unlock(&delayed_refs->lock); return 0; } diff --git a/trunk/fs/btrfs/delayed-ref.h b/trunk/fs/btrfs/delayed-ref.h index 0d7c90c366b6..c9d703693df0 100644 --- a/trunk/fs/btrfs/delayed-ref.h +++ b/trunk/fs/btrfs/delayed-ref.h @@ -18,7 +18,7 @@ #ifndef __DELAYED_REF__ #define __DELAYED_REF__ -/* these are the possible values of struct btrfs_delayed_ref->action */ +/* these are the possible values of struct btrfs_delayed_ref_node->action */ #define 
BTRFS_ADD_DELAYED_REF 1 /* add one backref to the tree */ #define BTRFS_DROP_DELAYED_REF 2 /* delete one backref from the tree */ #define BTRFS_ADD_DELAYED_EXTENT 3 /* record a full extent allocation */ @@ -167,6 +167,10 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, struct btrfs_trans_handle *trans, u64 bytenr, u64 num_bytes, struct btrfs_delayed_extent_op *extent_op); +void btrfs_merge_delayed_refs(struct btrfs_trans_handle *trans, + struct btrfs_fs_info *fs_info, + struct btrfs_delayed_ref_root *delayed_refs, + struct btrfs_delayed_ref_head *head); struct btrfs_delayed_ref_head * btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); diff --git a/trunk/fs/btrfs/disk-io.c b/trunk/fs/btrfs/disk-io.c index 62e0cafd6e25..22e98e04c2ea 100644 --- a/trunk/fs/btrfs/disk-io.c +++ b/trunk/fs/btrfs/disk-io.c @@ -377,9 +377,13 @@ static int btree_read_extent_buffer_pages(struct btrfs_root *root, ret = read_extent_buffer_pages(io_tree, eb, start, WAIT_COMPLETE, btree_get_extent, mirror_num); - if (!ret && !verify_parent_transid(io_tree, eb, + if (!ret) { + if (!verify_parent_transid(io_tree, eb, parent_transid, 0)) - break; + break; + else + ret = -EIO; + } /* * This buffer's crc is fine, but its contents are corrupted, so @@ -754,9 +758,7 @@ static void run_one_async_done(struct btrfs_work *work) limit = btrfs_async_submit_limit(fs_info); limit = limit * 2 / 3; - atomic_dec(&fs_info->nr_async_submits); - - if (atomic_read(&fs_info->nr_async_submits) < limit && + if (atomic_dec_return(&fs_info->nr_async_submits) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); @@ -2032,8 +2034,6 @@ int open_ctree(struct super_block *sb, fs_info->free_chunk_space = 0; fs_info->tree_mod_log = RB_ROOT; - init_waitqueue_head(&fs_info->tree_mod_seq_wait); - /* readahead state */ INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); spin_lock_init(&fs_info->reada_lock); @@ -2528,8 +2528,7 @@ int open_ctree(struct super_block *sb, goto fail_trans_kthread; /* do not make disk changes in broken FS */ - if (btrfs_super_log_root(disk_super) != 0 && - !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) { + if (btrfs_super_log_root(disk_super) != 0) { u64 bytenr = btrfs_super_log_root(disk_super); if (fs_devices->rw_devices == 0) { @@ -3189,30 +3188,14 @@ int close_ctree(struct btrfs_root *root) /* clear out the rbtree of defraggable inodes */ btrfs_run_defrag_inodes(fs_info); - /* - * Here come 2 situations when btrfs is broken to flip readonly: - * - * 1. when btrfs flips readonly somewhere else before - * btrfs_commit_super, sb->s_flags has MS_RDONLY flag, - * and btrfs will skip to write sb directly to keep - * ERROR state on disk. - * - * 2. when btrfs flips readonly just in btrfs_commit_super, - * and in such case, btrfs cannot write sb via btrfs_commit_super, - * and since fs_state has been set BTRFS_SUPER_FLAG_ERROR flag, - * btrfs will cleanup all FS resources first and write sb then. 
- */ if (!(fs_info->sb->s_flags & MS_RDONLY)) { ret = btrfs_commit_super(root); if (ret) printk(KERN_ERR "btrfs: commit super ret %d\n", ret); } - if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { - ret = btrfs_error_commit_super(root); - if (ret) - printk(KERN_ERR "btrfs: commit super ret %d\n", ret); - } + if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) + btrfs_error_commit_super(root); btrfs_put_block_group_cache(fs_info); @@ -3434,18 +3417,11 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, if (read_only) return 0; - if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { - printk(KERN_WARNING "warning: mount fs with errors, " - "running btrfsck is recommended\n"); - } - return 0; } -int btrfs_error_commit_super(struct btrfs_root *root) +void btrfs_error_commit_super(struct btrfs_root *root) { - int ret; - mutex_lock(&root->fs_info->cleaner_mutex); btrfs_run_delayed_iputs(root); mutex_unlock(&root->fs_info->cleaner_mutex); @@ -3455,10 +3431,6 @@ int btrfs_error_commit_super(struct btrfs_root *root) /* cleanup FS via transaction */ btrfs_cleanup_transaction(root); - - ret = write_ctree_super(NULL, root, 0); - - return ret; } static void btrfs_destroy_ordered_operations(struct btrfs_root *root) @@ -3782,14 +3754,17 @@ int btrfs_cleanup_transaction(struct btrfs_root *root) /* FIXME: cleanup wait for commit */ t->in_commit = 1; t->blocked = 1; + smp_mb(); if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) wake_up(&root->fs_info->transaction_blocked_wait); t->blocked = 0; + smp_mb(); if (waitqueue_active(&root->fs_info->transaction_wait)) wake_up(&root->fs_info->transaction_wait); t->commit_done = 1; + smp_mb(); if (waitqueue_active(&t->commit_wait)) wake_up(&t->commit_wait); diff --git a/trunk/fs/btrfs/disk-io.h b/trunk/fs/btrfs/disk-io.h index 95e147eea239..c5b00a735fef 100644 --- a/trunk/fs/btrfs/disk-io.h +++ b/trunk/fs/btrfs/disk-io.h @@ -54,7 +54,7 @@ int write_ctree_super(struct btrfs_trans_handle *trans, struct btrfs_root *root, int max_mirrors); struct buffer_head *btrfs_read_dev_super(struct block_device *bdev); int btrfs_commit_super(struct btrfs_root *root); -int btrfs_error_commit_super(struct btrfs_root *root); +void btrfs_error_commit_super(struct btrfs_root *root); struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, diff --git a/trunk/fs/btrfs/extent-tree.c b/trunk/fs/btrfs/extent-tree.c index 4e1b153b7c47..ba58024d40d3 100644 --- a/trunk/fs/btrfs/extent-tree.c +++ b/trunk/fs/btrfs/extent-tree.c @@ -2251,6 +2251,16 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, } } + /* + * We need to try and merge add/drops of the same ref since we + * can run into issues with relocate dropping the implicit ref + * and then it being added back again before the drop can + * finish. If we merged anything we need to re-loop so we can + * get a good ref. + */ + btrfs_merge_delayed_refs(trans, fs_info, delayed_refs, + locked_ref); + /* * locked_ref is the head node, so we have to go one * node back for any delayed ref updates @@ -2318,12 +2328,23 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, ref->in_tree = 0; rb_erase(&ref->rb_node, &delayed_refs->root); delayed_refs->num_entries--; - /* - * we modified num_entries, but as we're currently running - * delayed refs, skip - * wake_up(&delayed_refs->seq_wait); - * here. 
- */ + if (locked_ref) { + /* + * when we play the delayed ref, also correct the + * ref_mod on head + */ + switch (ref->action) { + case BTRFS_ADD_DELAYED_REF: + case BTRFS_ADD_DELAYED_EXTENT: + locked_ref->node.ref_mod -= ref->ref_mod; + break; + case BTRFS_DROP_DELAYED_REF: + locked_ref->node.ref_mod += ref->ref_mod; + break; + default: + WARN_ON(1); + } + } spin_unlock(&delayed_refs->lock); ret = run_one_delayed_ref(trans, root, ref, extent_op, @@ -2350,22 +2371,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans, return count; } -static void wait_for_more_refs(struct btrfs_fs_info *fs_info, - struct btrfs_delayed_ref_root *delayed_refs, - unsigned long num_refs, - struct list_head *first_seq) -{ - spin_unlock(&delayed_refs->lock); - pr_debug("waiting for more refs (num %ld, first %p)\n", - num_refs, first_seq); - wait_event(fs_info->tree_mod_seq_wait, - num_refs != delayed_refs->num_entries || - fs_info->tree_mod_seq_list.next != first_seq); - pr_debug("done waiting for more refs (num %ld, first %p)\n", - delayed_refs->num_entries, fs_info->tree_mod_seq_list.next); - spin_lock(&delayed_refs->lock); -} - #ifdef SCRAMBLE_DELAYED_REFS /* * Normally delayed refs get processed in ascending bytenr order. This @@ -2460,13 +2465,11 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_delayed_ref_node *ref; struct list_head cluster; - struct list_head *first_seq = NULL; int ret; u64 delayed_start; int run_all = count == (unsigned long)-1; int run_most = 0; - unsigned long num_refs = 0; - int consider_waiting; + int loops; /* We'll clean this up in btrfs_cleanup_transaction */ if (trans->aborted) @@ -2484,7 +2487,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, delayed_refs = &trans->transaction->delayed_refs; INIT_LIST_HEAD(&cluster); again: - consider_waiting = 0; + loops = 0; spin_lock(&delayed_refs->lock); #ifdef SCRAMBLE_DELAYED_REFS @@ -2512,31 +2515,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, if (ret) break; - if (delayed_start >= delayed_refs->run_delayed_start) { - if (consider_waiting == 0) { - /* - * btrfs_find_ref_cluster looped. let's do one - * more cycle. if we don't run any delayed ref - * during that cycle (because we can't because - * all of them are blocked) and if the number of - * refs doesn't change, we avoid busy waiting. - */ - consider_waiting = 1; - num_refs = delayed_refs->num_entries; - first_seq = root->fs_info->tree_mod_seq_list.next; - } else { - wait_for_more_refs(root->fs_info, delayed_refs, - num_refs, first_seq); - /* - * after waiting, things have changed. we - * dropped the lock and someone else might have - * run some refs, built new clusters and so on. - * therefore, we restart staleness detection. - */ - consider_waiting = 0; - } - } - ret = run_clustered_refs(trans, root, &cluster); if (ret < 0) { spin_unlock(&delayed_refs->lock); @@ -2549,9 +2527,26 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, if (count == 0) break; - if (ret || delayed_refs->run_delayed_start == 0) { + if (delayed_start >= delayed_refs->run_delayed_start) { + if (loops == 0) { + /* + * btrfs_find_ref_cluster looped. let's do one + * more cycle. if we don't run any delayed ref + * during that cycle (because we can't because + * all of them are blocked), bail out. 
+ */ + loops = 1; + } else { + /* + * no runnable refs left, stop trying + */ + BUG_ON(run_all); + break; + } + } + if (ret) { /* refs were run, let's reset staleness detection */ - consider_waiting = 0; + loops = 0; } } @@ -3007,17 +3002,16 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, } spin_unlock(&block_group->lock); - num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024); + /* + * Try to preallocate enough space based on how big the block group is. + * Keep in mind this has to include any pinned space which could end up + * taking up quite a bit since it's not folded into the other space + * cache. + */ + num_pages = (int)div64_u64(block_group->key.offset, 256 * 1024 * 1024); if (!num_pages) num_pages = 1; - /* - * Just to make absolutely sure we have enough space, we're going to - * preallocate 12 pages worth of space for each block group. In - * practice we ought to use at most 8, but we need extra space so we can - * add our header and have a terminator between the extents and the - * bitmaps. - */ num_pages *= 16; num_pages *= PAGE_CACHE_SIZE; @@ -4571,8 +4565,10 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) if (root->fs_info->quota_enabled) { ret = btrfs_qgroup_reserve(root, num_bytes + nr_extents * root->leafsize); - if (ret) + if (ret) { + mutex_unlock(&BTRFS_I(inode)->delalloc_mutex); return ret; + } } ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush); @@ -5294,9 +5290,6 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans, rb_erase(&head->node.rb_node, &delayed_refs->root); delayed_refs->num_entries--; - smp_mb(); - if (waitqueue_active(&root->fs_info->tree_mod_seq_wait)) - wake_up(&root->fs_info->tree_mod_seq_wait); /* * we don't take a ref on the node because we're removing it from the diff --git a/trunk/fs/btrfs/extent_io.c b/trunk/fs/btrfs/extent_io.c index 45c81bb4ac82..4c878476bb91 100644 --- a/trunk/fs/btrfs/extent_io.c +++ b/trunk/fs/btrfs/extent_io.c @@ -2330,23 +2330,10 @@ static void end_bio_extent_readpage(struct bio *bio, int err) if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { ret = tree->ops->readpage_end_io_hook(page, start, end, state, mirror); - if (ret) { - /* no IO indicated but software detected errors - * in the block, either checksum errors or - * issues with the contents */ - struct btrfs_root *root = - BTRFS_I(page->mapping->host)->root; - struct btrfs_device *device; - + if (ret) uptodate = 0; - device = btrfs_find_device_for_logical( - root, start, mirror); - if (device) - btrfs_dev_stat_inc_and_print(device, - BTRFS_DEV_STAT_CORRUPTION_ERRS); - } else { + else clean_io_failure(start, page); - } } if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { diff --git a/trunk/fs/btrfs/file-item.c b/trunk/fs/btrfs/file-item.c index b45b9de0c21d..857d93cd01dc 100644 --- a/trunk/fs/btrfs/file-item.c +++ b/trunk/fs/btrfs/file-item.c @@ -272,9 +272,9 @@ int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode, } int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, - struct bio *bio, u64 offset, u32 *dst) + struct bio *bio, u64 offset) { - return __btrfs_lookup_bio_sums(root, inode, bio, offset, dst, 1); + return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1); } int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, diff --git a/trunk/fs/btrfs/inode.c b/trunk/fs/btrfs/inode.c index 83baec24946d..316b07a866d2 100644 --- a/trunk/fs/btrfs/inode.c +++ 
b/trunk/fs/btrfs/inode.c @@ -324,7 +324,8 @@ static noinline int add_async_extent(struct async_cow *cow, * If this code finds it can't get good compression, it puts an * entry onto the work queue to write the uncompressed bytes. This * makes sure that both compressed inodes and uncompressed inodes - * are written in the same order that pdflush sent them down. + * are written in the same order that the flusher thread sent them + * down. */ static noinline int compress_file_range(struct inode *inode, struct page *locked_page, @@ -1007,9 +1008,7 @@ static noinline void async_cow_submit(struct btrfs_work *work) nr_pages = (async_cow->end - async_cow->start + PAGE_CACHE_SIZE) >> PAGE_CACHE_SHIFT; - atomic_sub(nr_pages, &root->fs_info->async_delalloc_pages); - - if (atomic_read(&root->fs_info->async_delalloc_pages) < + if (atomic_sub_return(nr_pages, &root->fs_info->async_delalloc_pages) < 5 * 1024 * 1024 && waitqueue_active(&root->fs_info->async_submit_wait)) wake_up(&root->fs_info->async_submit_wait); @@ -1884,8 +1883,11 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) trans = btrfs_join_transaction_nolock(root); else trans = btrfs_join_transaction(root); - if (IS_ERR(trans)) - return PTR_ERR(trans); + if (IS_ERR(trans)) { + ret = PTR_ERR(trans); + trans = NULL; + goto out; + } trans->block_rsv = &root->fs_info->delalloc_block_rsv; ret = btrfs_update_inode_fallback(trans, root, inode); if (ret) /* -ENOMEM or corruption */ @@ -1969,8 +1971,8 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent) ordered_extent->len - 1, NULL, GFP_NOFS); /* - * This needs to be dont to make sure anybody waiting knows we are done - * upating everything for this ordered extent. + * This needs to be done to make sure anybody waiting knows we are done + * updating everything for this ordered extent. */ btrfs_remove_ordered_extent(inode, ordered_extent); @@ -3173,7 +3175,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, btrfs_i_size_write(dir, dir->i_size - name_len * 2); inode_inc_iversion(dir); dir->i_mtime = dir->i_ctime = CURRENT_TIME; - ret = btrfs_update_inode(trans, root, dir); + ret = btrfs_update_inode_fallback(trans, root, dir); if (ret) btrfs_abort_transaction(trans, root, ret); out: @@ -5773,18 +5775,112 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, return ret; } +static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend, + struct extent_state **cached_state, int writing) +{ + struct btrfs_ordered_extent *ordered; + int ret = 0; + + while (1) { + lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, + 0, cached_state); + /* + * We're concerned with the entire range that we're going to be + * doing DIO to, so we need to make sure theres no ordered + * extents in this range. + */ + ordered = btrfs_lookup_ordered_range(inode, lockstart, + lockend - lockstart + 1); + + /* + * We need to make sure there are no buffered pages in this + * range either, we could have raced between the invalidate in + * generic_file_direct_write and locking the extent. The + * invalidate needs to happen so that reads after a write do not + * get stale data. 
+ */ + if (!ordered && (!writing || + !test_range_bit(&BTRFS_I(inode)->io_tree, + lockstart, lockend, EXTENT_UPTODATE, 0, + *cached_state))) + break; + + unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, + cached_state, GFP_NOFS); + + if (ordered) { + btrfs_start_ordered_extent(inode, ordered, 1); + btrfs_put_ordered_extent(ordered); + } else { + /* Screw you mmap */ + ret = filemap_write_and_wait_range(inode->i_mapping, + lockstart, + lockend); + if (ret) + break; + + /* + * If we found a page that couldn't be invalidated just + * fall back to buffered. + */ + ret = invalidate_inode_pages2_range(inode->i_mapping, + lockstart >> PAGE_CACHE_SHIFT, + lockend >> PAGE_CACHE_SHIFT); + if (ret) + break; + } + + cond_resched(); + } + + return ret; +} + static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create) { struct extent_map *em; struct btrfs_root *root = BTRFS_I(inode)->root; + struct extent_state *cached_state = NULL; u64 start = iblock << inode->i_blkbits; + u64 lockstart, lockend; u64 len = bh_result->b_size; struct btrfs_trans_handle *trans; + int unlock_bits = EXTENT_LOCKED; + int ret; + + if (create) { + ret = btrfs_delalloc_reserve_space(inode, len); + if (ret) + return ret; + unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY; + } else { + len = min_t(u64, len, root->sectorsize); + } + + lockstart = start; + lockend = start + len - 1; + + /* + * If this errors out it's because we couldn't invalidate pagecache for + * this range and we need to fallback to buffered. + */ + if (lock_extent_direct(inode, lockstart, lockend, &cached_state, create)) + return -ENOTBLK; + + if (create) { + ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockend, EXTENT_DELALLOC, NULL, + &cached_state, GFP_NOFS); + if (ret) + goto unlock_err; + } em = btrfs_get_extent(inode, NULL, 0, start, len, 0); - if (IS_ERR(em)) - return PTR_ERR(em); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto unlock_err; + } /* * Ok for INLINE and COMPRESSED extents we need to fallback on buffered @@ -5803,17 +5899,16 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) || em->block_start == EXTENT_MAP_INLINE) { free_extent_map(em); - return -ENOTBLK; + ret = -ENOTBLK; + goto unlock_err; } /* Just a good old fashioned hole, return */ if (!create && (em->block_start == EXTENT_MAP_HOLE || test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) { free_extent_map(em); - /* DIO will do one hole at a time, so just unlock a sector */ - unlock_extent(&BTRFS_I(inode)->io_tree, start, - start + root->sectorsize - 1); - return 0; + ret = 0; + goto unlock_err; } /* @@ -5826,8 +5921,9 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, * */ if (!create) { - len = em->len - (start - em->start); - goto map; + len = min(len, em->len - (start - em->start)); + lockstart = start + len; + goto unlock; } if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || @@ -5859,7 +5955,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, btrfs_end_transaction(trans, root); if (ret) { free_extent_map(em); - return ret; + goto unlock_err; } goto unlock; } @@ -5872,14 +5968,12 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, */ len = bh_result->b_size; em = btrfs_new_extent_direct(inode, em, start, len); - if (IS_ERR(em)) - return PTR_ERR(em); + if (IS_ERR(em)) { + ret = PTR_ERR(em); + goto unlock_err; + } len = min(len, em->len - (start - em->start)); 
unlock: - clear_extent_bit(&BTRFS_I(inode)->io_tree, start, start + len - 1, - EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DIRTY, 1, - 0, NULL, GFP_NOFS); -map: bh_result->b_blocknr = (em->block_start + (start - em->start)) >> inode->i_blkbits; bh_result->b_size = len; @@ -5897,9 +5991,44 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, i_size_write(inode, start + len); } + /* + * In the case of write we need to clear and unlock the entire range, + * in the case of read we need to unlock only the end area that we + * aren't using if there is any left over space. + */ + if (lockstart < lockend) { + if (create && len < lockend - lockstart) { + clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockstart + len - 1, unlock_bits, 1, 0, + &cached_state, GFP_NOFS); + /* + * Beside unlock, we also need to cleanup reserved space + * for the left range by attaching EXTENT_DO_ACCOUNTING. + */ + clear_extent_bit(&BTRFS_I(inode)->io_tree, + lockstart + len, lockend, + unlock_bits | EXTENT_DO_ACCOUNTING, + 1, 0, NULL, GFP_NOFS); + } else { + clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, + lockend, unlock_bits, 1, 0, + &cached_state, GFP_NOFS); + } + } else { + free_extent_state(cached_state); + } + free_extent_map(em); return 0; + +unlock_err: + if (create) + unlock_bits |= EXTENT_DO_ACCOUNTING; + + clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, + unlock_bits, 1, 0, &cached_state, GFP_NOFS); + return ret; } struct btrfs_dio_private { @@ -5907,7 +6036,6 @@ struct btrfs_dio_private { u64 logical_offset; u64 disk_bytenr; u64 bytes; - u32 *csums; void *private; /* number of bios pending for this dio */ @@ -5927,7 +6055,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) struct inode *inode = dip->inode; struct btrfs_root *root = BTRFS_I(inode)->root; u64 start; - u32 *private = dip->csums; start = dip->logical_offset; do { @@ -5935,8 +6062,12 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) struct page *page = bvec->bv_page; char *kaddr; u32 csum = ~(u32)0; + u64 private = ~(u32)0; unsigned long flags; + if (get_state_private(&BTRFS_I(inode)->io_tree, + start, &private)) + goto failed; local_irq_save(flags); kaddr = kmap_atomic(page); csum = btrfs_csum_data(root, kaddr + bvec->bv_offset, @@ -5946,18 +6077,18 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) local_irq_restore(flags); flush_dcache_page(bvec->bv_page); - if (csum != *private) { + if (csum != private) { +failed: printk(KERN_ERR "btrfs csum failed ino %llu off" " %llu csum %u private %u\n", (unsigned long long)btrfs_ino(inode), (unsigned long long)start, - csum, *private); + csum, (unsigned)private); err = -EIO; } } start += bvec->bv_len; - private++; bvec++; } while (bvec <= bvec_end); @@ -5965,7 +6096,6 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) dip->logical_offset + dip->bytes - 1); bio->bi_private = dip->private; - kfree(dip->csums); kfree(dip); /* If we had a csum failure make sure to clear the uptodate flag */ @@ -6071,7 +6201,7 @@ static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev, static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, int rw, u64 file_offset, int skip_sum, - u32 *csums, int async_submit) + int async_submit) { int write = rw & REQ_WRITE; struct btrfs_root *root = BTRFS_I(inode)->root; @@ -6104,8 +6234,7 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, if (ret) goto err; } else if (!skip_sum) { - ret = btrfs_lookup_bio_sums_dio(root, 
inode, bio, - file_offset, csums); + ret = btrfs_lookup_bio_sums_dio(root, inode, bio, file_offset); if (ret) goto err; } @@ -6131,10 +6260,8 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, u64 submit_len = 0; u64 map_length; int nr_pages = 0; - u32 *csums = dip->csums; int ret = 0; int async_submit = 0; - int write = rw & REQ_WRITE; map_length = orig_bio->bi_size; ret = btrfs_map_block(map_tree, READ, start_sector << 9, @@ -6170,16 +6297,13 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, atomic_inc(&dip->pending_bios); ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums, async_submit); + async_submit); if (ret) { bio_put(bio); atomic_dec(&dip->pending_bios); goto out_err; } - /* Write's use the ordered csums */ - if (!write && !skip_sum) - csums = csums + nr_pages; start_sector += submit_len >> 9; file_offset += submit_len; @@ -6209,7 +6333,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, submit: ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum, - csums, async_submit); + async_submit); if (!ret) return 0; @@ -6245,17 +6369,6 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, ret = -ENOMEM; goto free_ordered; } - dip->csums = NULL; - - /* Write's use the ordered csum stuff, so we don't need dip->csums */ - if (!write && !skip_sum) { - dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); - if (!dip->csums) { - kfree(dip); - ret = -ENOMEM; - goto free_ordered; - } - } dip->private = bio->bi_private; dip->inode = inode; @@ -6340,132 +6453,22 @@ static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *io out: return retval; } + static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t offset, unsigned long nr_segs) { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - struct btrfs_ordered_extent *ordered; - struct extent_state *cached_state = NULL; - u64 lockstart, lockend; - ssize_t ret; - int writing = rw & WRITE; - int write_bits = 0; - size_t count = iov_length(iov, nr_segs); if (check_direct_IO(BTRFS_I(inode)->root, rw, iocb, iov, - offset, nr_segs)) { + offset, nr_segs)) return 0; - } - - lockstart = offset; - lockend = offset + count - 1; - - if (writing) { - ret = btrfs_delalloc_reserve_space(inode, count); - if (ret) - goto out; - } - - while (1) { - lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, - 0, &cached_state); - /* - * We're concerned with the entire range that we're going to be - * doing DIO to, so we need to make sure theres no ordered - * extents in this range. - */ - ordered = btrfs_lookup_ordered_range(inode, lockstart, - lockend - lockstart + 1); - - /* - * We need to make sure there are no buffered pages in this - * range either, we could have raced between the invalidate in - * generic_file_direct_write and locking the extent. The - * invalidate needs to happen so that reads after a write do not - * get stale data. 
- */ - if (!ordered && (!writing || - !test_range_bit(&BTRFS_I(inode)->io_tree, - lockstart, lockend, EXTENT_UPTODATE, 0, - cached_state))) - break; - - unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, - &cached_state, GFP_NOFS); - - if (ordered) { - btrfs_start_ordered_extent(inode, ordered, 1); - btrfs_put_ordered_extent(ordered); - } else { - /* Screw you mmap */ - ret = filemap_write_and_wait_range(file->f_mapping, - lockstart, - lockend); - if (ret) - goto out; - - /* - * If we found a page that couldn't be invalidated just - * fall back to buffered. - */ - ret = invalidate_inode_pages2_range(file->f_mapping, - lockstart >> PAGE_CACHE_SHIFT, - lockend >> PAGE_CACHE_SHIFT); - if (ret) { - if (ret == -EBUSY) - ret = 0; - goto out; - } - } - - cond_resched(); - } - /* - * we don't use btrfs_set_extent_delalloc because we don't want - * the dirty or uptodate bits - */ - if (writing) { - write_bits = EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING; - ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend, - EXTENT_DELALLOC, NULL, &cached_state, - GFP_NOFS); - if (ret) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, - lockend, EXTENT_LOCKED | write_bits, - 1, 0, &cached_state, GFP_NOFS); - goto out; - } - } - - free_extent_state(cached_state); - cached_state = NULL; - - ret = __blockdev_direct_IO(rw, iocb, inode, + return __blockdev_direct_IO(rw, iocb, inode, BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev, iov, offset, nr_segs, btrfs_get_blocks_direct, NULL, btrfs_submit_direct, 0); - - if (ret < 0 && ret != -EIOCBQUEUED) { - clear_extent_bit(&BTRFS_I(inode)->io_tree, offset, - offset + iov_length(iov, nr_segs) - 1, - EXTENT_LOCKED | write_bits, 1, 0, - &cached_state, GFP_NOFS); - } else if (ret >= 0 && ret < iov_length(iov, nr_segs)) { - /* - * We're falling back to buffered, unlock the section we didn't - * do IO on. 
- */ - clear_extent_bit(&BTRFS_I(inode)->io_tree, offset + ret, - offset + iov_length(iov, nr_segs) - 1, - EXTENT_LOCKED | write_bits, 1, 0, - &cached_state, GFP_NOFS); - } -out: - free_extent_state(cached_state); - return ret; } static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, diff --git a/trunk/fs/btrfs/ioctl.c b/trunk/fs/btrfs/ioctl.c index bc2f6ffff3cf..9df50fa8a078 100644 --- a/trunk/fs/btrfs/ioctl.c +++ b/trunk/fs/btrfs/ioctl.c @@ -424,7 +424,7 @@ static noinline int create_subvol(struct btrfs_root *root, uuid_le_gen(&new_uuid); memcpy(root_item.uuid, new_uuid.b, BTRFS_UUID_SIZE); root_item.otime.sec = cpu_to_le64(cur_time.tv_sec); - root_item.otime.nsec = cpu_to_le64(cur_time.tv_nsec); + root_item.otime.nsec = cpu_to_le32(cur_time.tv_nsec); root_item.ctime = root_item.otime; btrfs_set_root_ctransid(&root_item, trans->transid); btrfs_set_root_otransid(&root_item, trans->transid); @@ -664,10 +664,6 @@ static noinline int btrfs_mksubvol(struct path *parent, struct dentry *dentry; int error; - error = mnt_want_write(parent->mnt); - if (error) - return error; - mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT); dentry = lookup_one_len(name, parent->dentry, namelen); @@ -703,7 +699,6 @@ static noinline int btrfs_mksubvol(struct path *parent, dput(dentry); out_unlock: mutex_unlock(&dir->i_mutex); - mnt_drop_write(parent->mnt); return error; } diff --git a/trunk/fs/btrfs/locking.c b/trunk/fs/btrfs/locking.c index a44eff074805..2a1762c66041 100644 --- a/trunk/fs/btrfs/locking.c +++ b/trunk/fs/btrfs/locking.c @@ -67,7 +67,7 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw) { if (eb->lock_nested) { read_lock(&eb->lock); - if (&eb->lock_nested && current->pid == eb->lock_owner) { + if (eb->lock_nested && current->pid == eb->lock_owner) { read_unlock(&eb->lock); return; } diff --git a/trunk/fs/btrfs/ordered-data.c b/trunk/fs/btrfs/ordered-data.c index 643335a4fe3c..051c7fe551dd 100644 --- a/trunk/fs/btrfs/ordered-data.c +++ b/trunk/fs/btrfs/ordered-data.c @@ -596,7 +596,7 @@ void btrfs_start_ordered_extent(struct inode *inode, /* * pages in the range can be dirty, clean or writeback. 
We * start IO on any dirty ones so the wait doesn't stall waiting - * for pdflush to find them + * for the flusher thread to find them */ if (!test_bit(BTRFS_ORDERED_DIRECT, &entry->flags)) filemap_fdatawrite_range(inode->i_mapping, start, end); diff --git a/trunk/fs/btrfs/qgroup.c b/trunk/fs/btrfs/qgroup.c index bc424ae5a81a..b65015581744 100644 --- a/trunk/fs/btrfs/qgroup.c +++ b/trunk/fs/btrfs/qgroup.c @@ -1364,8 +1364,10 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, spin_lock(&fs_info->qgroup_lock); dstgroup = add_qgroup_rb(fs_info, objectid); - if (!dstgroup) + if (IS_ERR(dstgroup)) { + ret = PTR_ERR(dstgroup); goto unlock; + } if (srcid) { srcgroup = find_qgroup_rb(fs_info, srcid); diff --git a/trunk/fs/btrfs/root-tree.c b/trunk/fs/btrfs/root-tree.c index 6bb465cca20f..10d8e4d88071 100644 --- a/trunk/fs/btrfs/root-tree.c +++ b/trunk/fs/btrfs/root-tree.c @@ -544,8 +544,8 @@ void btrfs_update_root_times(struct btrfs_trans_handle *trans, struct timespec ct = CURRENT_TIME; spin_lock(&root->root_times_lock); - item->ctransid = trans->transid; + item->ctransid = cpu_to_le64(trans->transid); item->ctime.sec = cpu_to_le64(ct.tv_sec); - item->ctime.nsec = cpu_to_le64(ct.tv_nsec); + item->ctime.nsec = cpu_to_le32(ct.tv_nsec); spin_unlock(&root->root_times_lock); } diff --git a/trunk/fs/btrfs/super.c b/trunk/fs/btrfs/super.c index 8c6e61d6eed5..83d6f9f9c220 100644 --- a/trunk/fs/btrfs/super.c +++ b/trunk/fs/btrfs/super.c @@ -100,10 +100,6 @@ static void __save_error_info(struct btrfs_fs_info *fs_info) fs_info->fs_state = BTRFS_SUPER_FLAG_ERROR; } -/* NOTE: - * We move write_super stuff at umount in order to avoid deadlock - * for umount hold all lock. - */ static void save_error_info(struct btrfs_fs_info *fs_info) { __save_error_info(fs_info); @@ -842,7 +838,6 @@ int btrfs_sync_fs(struct super_block *sb, int wait) struct btrfs_trans_handle *trans; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *root = fs_info->tree_root; - int ret; trace_btrfs_sync_fs(wait); @@ -853,11 +848,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait) btrfs_wait_ordered_extents(root, 0, 0); - trans = btrfs_start_transaction(root, 0); + spin_lock(&fs_info->trans_lock); + if (!fs_info->running_transaction) { + spin_unlock(&fs_info->trans_lock); + return 0; + } + spin_unlock(&fs_info->trans_lock); + + trans = btrfs_join_transaction(root); if (IS_ERR(trans)) return PTR_ERR(trans); - ret = btrfs_commit_transaction(trans, root); - return ret; + return btrfs_commit_transaction(trans, root); } static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry) @@ -1534,6 +1535,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root) while (cur_devices) { head = &cur_devices->devices; list_for_each_entry(dev, head, dev_list) { + if (dev->missing) + continue; if (!first_dev || dev->devid < first_dev->devid) first_dev = dev; } diff --git a/trunk/fs/btrfs/transaction.c b/trunk/fs/btrfs/transaction.c index 17be3dedacba..27c26004e050 100644 --- a/trunk/fs/btrfs/transaction.c +++ b/trunk/fs/btrfs/transaction.c @@ -1031,6 +1031,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, btrfs_i_size_write(parent_inode, parent_inode->i_size + dentry->d_name.len * 2); + parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; ret = btrfs_update_inode(trans, parent_root, parent_inode); if (ret) goto abort_trans_dput; @@ -1066,7 +1067,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, 
memcpy(new_root_item->parent_uuid, root->root_item.uuid, BTRFS_UUID_SIZE); new_root_item->otime.sec = cpu_to_le64(cur_time.tv_sec); - new_root_item->otime.nsec = cpu_to_le64(cur_time.tv_nsec); + new_root_item->otime.nsec = cpu_to_le32(cur_time.tv_nsec); btrfs_set_root_otransid(new_root_item, trans->transid); memset(&new_root_item->stime, 0, sizeof(new_root_item->stime)); memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime)); diff --git a/trunk/fs/btrfs/volumes.c b/trunk/fs/btrfs/volumes.c index b8708f994e67..88b969aeeb71 100644 --- a/trunk/fs/btrfs/volumes.c +++ b/trunk/fs/btrfs/volumes.c @@ -227,9 +227,8 @@ static noinline void run_scheduled_bios(struct btrfs_device *device) cur = pending; pending = pending->bi_next; cur->bi_next = NULL; - atomic_dec(&fs_info->nr_async_bios); - if (atomic_read(&fs_info->nr_async_bios) < limit && + if (atomic_dec_return(&fs_info->nr_async_bios) < limit && waitqueue_active(&fs_info->async_submit_wait)) wake_up(&fs_info->async_submit_wait); @@ -569,9 +568,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) memcpy(new_device, device, sizeof(*new_device)); /* Safe because we are under uuid_mutex */ - name = rcu_string_strdup(device->name->str, GFP_NOFS); - BUG_ON(device->name && !name); /* -ENOMEM */ - rcu_assign_pointer(new_device->name, name); + if (device->name) { + name = rcu_string_strdup(device->name->str, GFP_NOFS); + BUG_ON(device->name && !name); /* -ENOMEM */ + rcu_assign_pointer(new_device->name, name); + } new_device->bdev = NULL; new_device->writeable = 0; new_device->in_fs_metadata = 0; @@ -1744,10 +1745,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) device->fs_devices = root->fs_info->fs_devices; - /* - * we don't want write_supers to jump in here with our device - * half setup - */ mutex_lock(&root->fs_info->fs_devices->device_list_mutex); list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); list_add(&device->dev_alloc_list, @@ -4609,28 +4606,6 @@ int btrfs_read_sys_array(struct btrfs_root *root) return ret; } -struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root, - u64 logical, int mirror_num) -{ - struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; - int ret; - u64 map_length = 0; - struct btrfs_bio *bbio = NULL; - struct btrfs_device *device; - - BUG_ON(mirror_num == 0); - ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio, - mirror_num); - if (ret) { - BUG_ON(bbio != NULL); - return NULL; - } - BUG_ON(mirror_num != bbio->mirror_num); - device = bbio->stripes[mirror_num - 1].dev; - kfree(bbio); - return device; -} - int btrfs_read_chunk_tree(struct btrfs_root *root) { struct btrfs_path *path; diff --git a/trunk/fs/btrfs/volumes.h b/trunk/fs/btrfs/volumes.h index 5479325987b3..53c06af92e8d 100644 --- a/trunk/fs/btrfs/volumes.h +++ b/trunk/fs/btrfs/volumes.h @@ -289,8 +289,6 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info); int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, u64 *start, u64 *max_avail); -struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root, - u64 logical, int mirror_num); void btrfs_dev_stat_print_on_error(struct btrfs_device *device); void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index); int btrfs_get_dev_stats(struct btrfs_root *root, diff --git a/trunk/fs/buffer.c b/trunk/fs/buffer.c index 9f6d2e41281d..58e2e7b77372 100644 --- a/trunk/fs/buffer.c +++ 
b/trunk/fs/buffer.c @@ -914,7 +914,7 @@ link_dev_buffers(struct page *page, struct buffer_head *head) /* * Initialise the state of a blockdev page's buffers. */ -static void +static sector_t init_page_buffers(struct page *page, struct block_device *bdev, sector_t block, int size) { @@ -936,33 +936,41 @@ init_page_buffers(struct page *page, struct block_device *bdev, block++; bh = bh->b_this_page; } while (bh != head); + + /* + * Caller needs to validate requested block against end of device. + */ + return end_block; } /* * Create the page-cache page that contains the requested block. * - * This is user purely for blockdev mappings. + * This is used purely for blockdev mappings. */ -static struct page * +static int grow_dev_page(struct block_device *bdev, sector_t block, - pgoff_t index, int size) + pgoff_t index, int size, int sizebits) { struct inode *inode = bdev->bd_inode; struct page *page; struct buffer_head *bh; + sector_t end_block; + int ret = 0; /* Will call free_more_memory() */ page = find_or_create_page(inode->i_mapping, index, (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS)|__GFP_MOVABLE); if (!page) - return NULL; + return ret; BUG_ON(!PageLocked(page)); if (page_has_buffers(page)) { bh = page_buffers(page); if (bh->b_size == size) { - init_page_buffers(page, bdev, block, size); - return page; + end_block = init_page_buffers(page, bdev, + index << sizebits, size); + goto done; } if (!try_to_free_buffers(page)) goto failed; @@ -982,14 +990,14 @@ grow_dev_page(struct block_device *bdev, sector_t block, */ spin_lock(&inode->i_mapping->private_lock); link_dev_buffers(page, bh); - init_page_buffers(page, bdev, block, size); + end_block = init_page_buffers(page, bdev, index << sizebits, size); spin_unlock(&inode->i_mapping->private_lock); - return page; - +done: + ret = (block < end_block) ? 1 : -ENXIO; failed: unlock_page(page); page_cache_release(page); - return NULL; + return ret; } /* @@ -999,7 +1007,6 @@ grow_dev_page(struct block_device *bdev, sector_t block, static int grow_buffers(struct block_device *bdev, sector_t block, int size) { - struct page *page; pgoff_t index; int sizebits; @@ -1023,22 +1030,14 @@ grow_buffers(struct block_device *bdev, sector_t block, int size) bdevname(bdev, b)); return -EIO; } - block = index << sizebits; + /* Create a page with the proper size buffers.. */ - page = grow_dev_page(bdev, block, index, size); - if (!page) - return 0; - unlock_page(page); - page_cache_release(page); - return 1; + return grow_dev_page(bdev, block, index, size, sizebits); } static struct buffer_head * __getblk_slow(struct block_device *bdev, sector_t block, int size) { - int ret; - struct buffer_head *bh; - /* Size must be multiple of hard sectorsize */ if (unlikely(size & (bdev_logical_block_size(bdev)-1) || (size < 512 || size > PAGE_SIZE))) { @@ -1051,21 +1050,20 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size) return NULL; } -retry: - bh = __find_get_block(bdev, block, size); - if (bh) - return bh; + for (;;) { + struct buffer_head *bh; + int ret; - ret = grow_buffers(bdev, block, size); - if (ret == 0) { - free_more_memory(); - goto retry; - } else if (ret > 0) { bh = __find_get_block(bdev, block, size); if (bh) return bh; + + ret = grow_buffers(bdev, block, size); + if (ret < 0) + return NULL; + if (ret == 0) + free_more_memory(); } - return NULL; } /* @@ -1321,10 +1319,6 @@ EXPORT_SYMBOL(__find_get_block); * which corresponds to the passed block_device, block and size. The * returned buffer has its reference count incremented. 
* - * __getblk() cannot fail - it just keeps trying. If you pass it an - * illegal block number, __getblk() will happily return a buffer_head - * which represents the non-existent block. Very weird. - * * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() * attempt is failing. FIXME, perhaps? */ diff --git a/trunk/fs/ceph/debugfs.c b/trunk/fs/ceph/debugfs.c index fb962efdacee..6d59006bfa27 100644 --- a/trunk/fs/ceph/debugfs.c +++ b/trunk/fs/ceph/debugfs.c @@ -201,6 +201,7 @@ int ceph_fs_debugfs_init(struct ceph_fs_client *fsc) int err = -ENOMEM; dout("ceph_fs_debugfs_init\n"); + BUG_ON(!fsc->client->debugfs_dir); fsc->debugfs_congestion_kb = debugfs_create_file("writeback_congestion_kb", 0600, diff --git a/trunk/fs/ceph/inode.c b/trunk/fs/ceph/inode.c index 9fff9f3b17e4..4b5762ef7c2b 100644 --- a/trunk/fs/ceph/inode.c +++ b/trunk/fs/ceph/inode.c @@ -992,11 +992,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, if (rinfo->head->is_dentry) { struct inode *dir = req->r_locked_dir; - err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, - session, req->r_request_started, -1, - &req->r_caps_reservation); - if (err < 0) - return err; + if (dir) { + err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag, + session, req->r_request_started, -1, + &req->r_caps_reservation); + if (err < 0) + return err; + } else { + WARN_ON_ONCE(1); + } } /* @@ -1004,6 +1008,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, * will have trouble splicing in the virtual snapdir later */ if (rinfo->head->is_dentry && !req->r_aborted && + req->r_locked_dir && (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name, fsc->mount_options->snapdir_name, req->r_dentry->d_name.len))) { diff --git a/trunk/fs/ceph/ioctl.c b/trunk/fs/ceph/ioctl.c index 8e3fb69fbe62..1396ceb46797 100644 --- a/trunk/fs/ceph/ioctl.c +++ b/trunk/fs/ceph/ioctl.c @@ -42,7 +42,8 @@ static long __validate_layout(struct ceph_mds_client *mdsc, /* validate striping parameters */ if ((l->object_size & ~PAGE_MASK) || (l->stripe_unit & ~PAGE_MASK) || - ((unsigned)l->object_size % (unsigned)l->stripe_unit)) + (l->stripe_unit != 0 && + ((unsigned)l->object_size % (unsigned)l->stripe_unit))) return -EINVAL; /* make sure it's a valid data pool */ diff --git a/trunk/fs/cifs/cifs_unicode.c b/trunk/fs/cifs/cifs_unicode.c index 7dab9c04ad52..53cf2aabce87 100644 --- a/trunk/fs/cifs/cifs_unicode.c +++ b/trunk/fs/cifs/cifs_unicode.c @@ -328,7 +328,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen, } ctoUTF16_out: - return i; + return j; } #ifdef CONFIG_CIFS_SMB2 diff --git a/trunk/fs/cifs/cifssmb.c b/trunk/fs/cifs/cifssmb.c index 074923ce593d..f0cf934ba877 100644 --- a/trunk/fs/cifs/cifssmb.c +++ b/trunk/fs/cifs/cifssmb.c @@ -1576,9 +1576,14 @@ cifs_readv_callback(struct mid_q_entry *mid) /* result already set, check signature */ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { - if (cifs_verify_signature(rdata->iov, rdata->nr_iov, - server, mid->sequence_number + 1)) - cERROR(1, "Unexpected SMB signature"); + int rc = 0; + + rc = cifs_verify_signature(rdata->iov, rdata->nr_iov, + server, + mid->sequence_number + 1); + if (rc) + cERROR(1, "SMB signature verification returned " + "error = %d", rc); } /* FIXME: should this be counted toward the initiating task? 
*/ task_io_account_read(rdata->bytes); diff --git a/trunk/fs/cifs/dir.c b/trunk/fs/cifs/dir.c index cbe709ad6663..781025be48bc 100644 --- a/trunk/fs/cifs/dir.c +++ b/trunk/fs/cifs/dir.c @@ -356,19 +356,12 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid, cifs_create_set_dentry: if (rc != 0) { cFYI(1, "Create worked, get_inode_info failed rc = %d", rc); + CIFSSMBClose(xid, tcon, *fileHandle); goto out; } d_drop(direntry); d_add(direntry, newinode); - /* ENOENT for create? How weird... */ - rc = -ENOENT; - if (!newinode) { - CIFSSMBClose(xid, tcon, *fileHandle); - goto out; - } - rc = 0; - out: kfree(buf); kfree(full_path); diff --git a/trunk/fs/cifs/file.c b/trunk/fs/cifs/file.c index 9154192b0683..71e9ad9f5961 100644 --- a/trunk/fs/cifs/file.c +++ b/trunk/fs/cifs/file.c @@ -917,7 +917,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile) if (!buf) { mutex_unlock(&cinode->lock_mutex); free_xid(xid); - return rc; + return -ENOMEM; } for (i = 0; i < 2; i++) { diff --git a/trunk/fs/cifs/inode.c b/trunk/fs/cifs/inode.c index 7354877fa3bd..cb79c7edecb0 100644 --- a/trunk/fs/cifs/inode.c +++ b/trunk/fs/cifs/inode.c @@ -124,10 +124,10 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) { struct cifsInodeInfo *cifs_i = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); - unsigned long oldtime = cifs_i->time; cifs_revalidate_cache(inode, fattr); + spin_lock(&inode->i_lock); inode->i_atime = fattr->cf_atime; inode->i_mtime = fattr->cf_mtime; inode->i_ctime = fattr->cf_ctime; @@ -148,9 +148,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) else cifs_i->time = jiffies; - cFYI(1, "inode 0x%p old_time=%ld new_time=%ld", inode, - oldtime, cifs_i->time); - cifs_i->delete_pending = fattr->cf_flags & CIFS_FATTR_DELETE_PENDING; cifs_i->server_eof = fattr->cf_eof; @@ -158,7 +155,6 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) * Can't safely change the file size here if the client is writing to * it due to potential races. 
*/ - spin_lock(&inode->i_lock); if (is_size_safe_to_change(cifs_i, fattr->cf_eof)) { i_size_write(inode, fattr->cf_eof); @@ -859,12 +855,14 @@ struct inode *cifs_root_iget(struct super_block *sb) if (rc && tcon->ipc) { cFYI(1, "ipc connection - fake read inode"); + spin_lock(&inode->i_lock); inode->i_mode |= S_IFDIR; set_nlink(inode, 2); inode->i_op = &cifs_ipc_inode_ops; inode->i_fop = &simple_dir_operations; inode->i_uid = cifs_sb->mnt_uid; inode->i_gid = cifs_sb->mnt_gid; + spin_unlock(&inode->i_lock); } else if (rc) { iget_failed(inode); inode = ERR_PTR(rc); @@ -1110,6 +1108,15 @@ cifs_rename_pending_delete(char *full_path, struct dentry *dentry, goto out_close; } +/* copied from fs/nfs/dir.c with small changes */ +static void +cifs_drop_nlink(struct inode *inode) +{ + spin_lock(&inode->i_lock); + if (inode->i_nlink > 0) + drop_nlink(inode); + spin_unlock(&inode->i_lock); +} /* * If dentry->d_inode is null (usually meaning the cached dentry @@ -1166,13 +1173,13 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) psx_del_no_retry: if (!rc) { if (inode) - drop_nlink(inode); + cifs_drop_nlink(inode); } else if (rc == -ENOENT) { d_drop(dentry); } else if (rc == -ETXTBSY) { rc = cifs_rename_pending_delete(full_path, dentry, xid); if (rc == 0) - drop_nlink(inode); + cifs_drop_nlink(inode); } else if ((rc == -EACCES) && (dosattr == 0) && inode) { attrs = kzalloc(sizeof(*attrs), GFP_KERNEL); if (attrs == NULL) { @@ -1241,9 +1248,10 @@ cifs_mkdir_qinfo(struct inode *inode, struct dentry *dentry, umode_t mode, * setting nlink not necessary except in cases where we failed to get it * from the server or was set bogus */ + spin_lock(&dentry->d_inode->i_lock); if ((dentry->d_inode) && (dentry->d_inode->i_nlink < 2)) set_nlink(dentry->d_inode, 2); - + spin_unlock(&dentry->d_inode->i_lock); mode &= ~current_umask(); /* must turn on setgid bit if parent dir has it */ if (inode->i_mode & S_ISGID) diff --git a/trunk/fs/cifs/link.c b/trunk/fs/cifs/link.c index 09e4b3ae4564..e6ce3b112875 100644 --- a/trunk/fs/cifs/link.c +++ b/trunk/fs/cifs/link.c @@ -433,7 +433,9 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, if (old_file->d_inode) { cifsInode = CIFS_I(old_file->d_inode); if (rc == 0) { + spin_lock(&old_file->d_inode->i_lock); inc_nlink(old_file->d_inode); + spin_unlock(&old_file->d_inode->i_lock); /* BB should we make this contingent on superblock flag NOATIME? */ /* old_file->d_inode->i_ctime = CURRENT_TIME;*/ /* parent dir timestamps will update from srv diff --git a/trunk/fs/cifs/smb2misc.c b/trunk/fs/cifs/smb2misc.c index a4ff5d547554..e4d3b9964167 100644 --- a/trunk/fs/cifs/smb2misc.c +++ b/trunk/fs/cifs/smb2misc.c @@ -52,7 +52,8 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid) cERROR(1, "Bad protocol string signature header %x", *(unsigned int *) hdr->ProtocolId); if (mid != hdr->MessageId) - cERROR(1, "Mids do not match"); + cERROR(1, "Mids do not match: %llu and %llu", mid, + hdr->MessageId); } cERROR(1, "Bad SMB detected. 
The Mid=%llu", hdr->MessageId); return 1; @@ -107,7 +108,7 @@ smb2_check_message(char *buf, unsigned int length) * ie Validate the wct via smb2_struct_sizes table above */ - if (length < 2 + sizeof(struct smb2_hdr)) { + if (length < sizeof(struct smb2_pdu)) { if ((length >= sizeof(struct smb2_hdr)) && (hdr->Status != 0)) { pdu->StructureSize2 = 0; /* @@ -121,15 +122,15 @@ smb2_check_message(char *buf, unsigned int length) return 1; } if (len > CIFSMaxBufSize + MAX_SMB2_HDR_SIZE - 4) { - cERROR(1, "SMB length greater than maximum, mid=%lld", mid); + cERROR(1, "SMB length greater than maximum, mid=%llu", mid); return 1; } if (check_smb2_hdr(hdr, mid)) return 1; - if (hdr->StructureSize != SMB2_HEADER_SIZE) { - cERROR(1, "Illegal structure size %d", + if (hdr->StructureSize != SMB2_HEADER_STRUCTURE_SIZE) { + cERROR(1, "Illegal structure size %u", le16_to_cpu(hdr->StructureSize)); return 1; } @@ -161,8 +162,9 @@ smb2_check_message(char *buf, unsigned int length) if (4 + len != clc_len) { cFYI(1, "Calculated size %u length %u mismatch mid %llu", clc_len, 4 + len, mid); - if (clc_len == 4 + len + 1) /* BB FIXME (fix samba) */ - return 0; /* BB workaround Samba 3 bug SessSetup rsp */ + /* server can return one byte more */ + if (clc_len == 4 + len + 1) + return 0; return 1; } return 0; diff --git a/trunk/fs/cifs/smb2pdu.h b/trunk/fs/cifs/smb2pdu.h index f37a1b41b402..15dc8eea8273 100644 --- a/trunk/fs/cifs/smb2pdu.h +++ b/trunk/fs/cifs/smb2pdu.h @@ -87,10 +87,6 @@ #define SMB2_PROTO_NUMBER __constant_cpu_to_le32(0x424d53fe) -#define SMB2_HEADER_SIZE __constant_le16_to_cpu(64) - -#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_le16_to_cpu(9) - /* * SMB2 Header Definition * @@ -99,6 +95,9 @@ * "PDU" : "Protocol Data Unit" (ie a network "frame") * */ + +#define SMB2_HEADER_STRUCTURE_SIZE __constant_cpu_to_le16(64) + struct smb2_hdr { __be32 smb2_buf_length; /* big endian on wire */ /* length is only two or three bytes - with @@ -140,6 +139,9 @@ struct smb2_pdu { * command code name for the struct. Note that structures must be packed. * */ + +#define SMB2_ERROR_STRUCTURE_SIZE2 __constant_cpu_to_le16(9) + struct smb2_err_rsp { struct smb2_hdr hdr; __le16 StructureSize; diff --git a/trunk/fs/cifs/transport.c b/trunk/fs/cifs/transport.c index 83867ef348df..d9b639b95fa8 100644 --- a/trunk/fs/cifs/transport.c +++ b/trunk/fs/cifs/transport.c @@ -503,13 +503,16 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, /* convert the length into a more usable form */ if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { struct kvec iov; + int rc = 0; iov.iov_base = mid->resp_buf; iov.iov_len = len; /* FIXME: add code to kill session */ - if (cifs_verify_signature(&iov, 1, server, - mid->sequence_number + 1) != 0) - cERROR(1, "Unexpected SMB signature"); + rc = cifs_verify_signature(&iov, 1, server, + mid->sequence_number + 1); + if (rc) + cERROR(1, "SMB signature verification returned error = " + "%d", rc); } /* BB special case reconnect tid and uid here? 
*/ diff --git a/trunk/fs/compat.c b/trunk/fs/compat.c index 6161255fac45..1bdb350ea5d3 100644 --- a/trunk/fs/compat.c +++ b/trunk/fs/compat.c @@ -1155,11 +1155,14 @@ compat_sys_readv(unsigned long fd, const struct compat_iovec __user *vec, struct file *file; int fput_needed; ssize_t ret; + loff_t pos; file = fget_light(fd, &fput_needed); if (!file) return -EBADF; - ret = compat_readv(file, vec, vlen, &file->f_pos); + pos = file->f_pos; + ret = compat_readv(file, vec, vlen, &pos); + file->f_pos = pos; fput_light(file, fput_needed); return ret; } @@ -1221,11 +1224,14 @@ compat_sys_writev(unsigned long fd, const struct compat_iovec __user *vec, struct file *file; int fput_needed; ssize_t ret; + loff_t pos; file = fget_light(fd, &fput_needed); if (!file) return -EBADF; - ret = compat_writev(file, vec, vlen, &file->f_pos); + pos = file->f_pos; + ret = compat_writev(file, vec, vlen, &pos); + file->f_pos = pos; fput_light(file, fput_needed); return ret; } diff --git a/trunk/fs/dcache.c b/trunk/fs/dcache.c index 8086636bf796..693f95bf1cae 100644 --- a/trunk/fs/dcache.c +++ b/trunk/fs/dcache.c @@ -389,7 +389,7 @@ static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent) * Inform try_to_ascend() that we are no longer attached to the * dentry tree */ - dentry->d_flags |= DCACHE_DISCONNECTED; + dentry->d_flags |= DCACHE_DENTRY_KILLED; if (parent) spin_unlock(&parent->d_lock); dentry_iput(dentry); @@ -1048,7 +1048,7 @@ static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq * or deletion */ if (new != old->d_parent || - (old->d_flags & DCACHE_DISCONNECTED) || + (old->d_flags & DCACHE_DENTRY_KILLED) || (!locked && read_seqretry(&rename_lock, seq))) { spin_unlock(&new->d_lock); new = NULL; @@ -1134,6 +1134,8 @@ int have_submounts(struct dentry *parent) return 1; rename_retry: + if (locked) + goto again; locked = 1; write_seqlock(&rename_lock); goto again; @@ -1141,7 +1143,7 @@ int have_submounts(struct dentry *parent) EXPORT_SYMBOL(have_submounts); /* - * Search the dentry child list for the specified parent, + * Search the dentry child list of the specified parent, * and move any unused dentries to the end of the unused * list for prune_dcache(). We descend to the next level * whenever the d_subdirs list is non-empty and continue @@ -1236,6 +1238,8 @@ static int select_parent(struct dentry *parent, struct list_head *dispose) rename_retry: if (found) return found; + if (locked) + goto again; locked = 1; write_seqlock(&rename_lock); goto again; @@ -3035,6 +3039,8 @@ void d_genocide(struct dentry *root) return; rename_retry: + if (locked) + goto again; locked = 1; write_seqlock(&rename_lock); goto again; diff --git a/trunk/fs/debugfs/file.c b/trunk/fs/debugfs/file.c index 2340f6978d6e..c5ca6ae5a30c 100644 --- a/trunk/fs/debugfs/file.c +++ b/trunk/fs/debugfs/file.c @@ -526,73 +526,51 @@ struct array_data { u32 elements; }; -static int u32_array_open(struct inode *inode, struct file *file) -{ - file->private_data = NULL; - return nonseekable_open(inode, file); -} - -static size_t format_array(char *buf, size_t bufsize, const char *fmt, - u32 *array, u32 array_size) +static size_t u32_format_array(char *buf, size_t bufsize, + u32 *array, int array_size) { size_t ret = 0; - u32 i; - for (i = 0; i < array_size; i++) { + while (--array_size >= 0) { size_t len; + char term = array_size ? 
' ' : '\n'; - len = snprintf(buf, bufsize, fmt, array[i]); - len++; /* ' ' or '\n' */ + len = snprintf(buf, bufsize, "%u%c", *array++, term); ret += len; - if (buf) { - buf += len; - bufsize -= len; - buf[-1] = (i == array_size-1) ? '\n' : ' '; - } + buf += len; + bufsize -= len; } - - ret++; /* \0 */ - if (buf) - *buf = '\0'; - return ret; } -static char *format_array_alloc(const char *fmt, u32 *array, - u32 array_size) +static int u32_array_open(struct inode *inode, struct file *file) { - size_t len = format_array(NULL, 0, fmt, array, array_size); - char *ret; - - ret = kmalloc(len, GFP_KERNEL); - if (ret == NULL) - return NULL; + struct array_data *data = inode->i_private; + int size, elements = data->elements; + char *buf; + + /* + * Max size: + * - 10 digits + ' '/'\n' = 11 bytes per number + * - terminating NUL character + */ + size = elements*11; + buf = kmalloc(size+1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + buf[size] = 0; + + file->private_data = buf; + u32_format_array(buf, size, data->array, data->elements); - format_array(ret, len, fmt, array, array_size); - return ret; + return nonseekable_open(inode, file); } static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len, loff_t *ppos) { - struct inode *inode = file->f_path.dentry->d_inode; - struct array_data *data = inode->i_private; - size_t size; - - if (*ppos == 0) { - if (file->private_data) { - kfree(file->private_data); - file->private_data = NULL; - } - - file->private_data = format_array_alloc("%u", data->array, - data->elements); - } - - size = 0; - if (file->private_data) - size = strlen(file->private_data); + size_t size = strlen(file->private_data); return simple_read_from_buffer(buf, len, ppos, file->private_data, size); diff --git a/trunk/fs/direct-io.c b/trunk/fs/direct-io.c index 1faf4cb56f39..f86c720dba0e 100644 --- a/trunk/fs/direct-io.c +++ b/trunk/fs/direct-io.c @@ -1062,6 +1062,7 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, unsigned long user_addr; size_t bytes; struct buffer_head map_bh = { 0, }; + struct blk_plug plug; if (rw & WRITE) rw = WRITE_ODIRECT; @@ -1177,6 +1178,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, PAGE_SIZE - user_addr / PAGE_SIZE); } + blk_start_plug(&plug); + for (seg = 0; seg < nr_segs; seg++) { user_addr = (unsigned long)iov[seg].iov_base; sdio.size += bytes = iov[seg].iov_len; @@ -1235,6 +1238,8 @@ do_blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, if (sdio.bio) dio_bio_submit(dio, &sdio); + blk_finish_plug(&plug); + /* * It is possible that, we return short IO due to end of file. * In that case, we need to release all the pages we got hold on. diff --git a/trunk/fs/ecryptfs/file.c b/trunk/fs/ecryptfs/file.c index 44ce5c6a541d..d45ba4568128 100644 --- a/trunk/fs/ecryptfs/file.c +++ b/trunk/fs/ecryptfs/file.c @@ -275,8 +275,14 @@ static int ecryptfs_open(struct inode *inode, struct file *file) static int ecryptfs_flush(struct file *file, fl_owner_t td) { - return file->f_mode & FMODE_WRITE - ? 
filemap_write_and_wait(file->f_mapping) : 0; + struct file *lower_file = ecryptfs_file_to_lower(file); + + if (lower_file->f_op && lower_file->f_op->flush) { + filemap_write_and_wait(file->f_mapping); + return lower_file->f_op->flush(lower_file, td); + } + + return 0; } static int ecryptfs_release(struct inode *inode, struct file *file) diff --git a/trunk/fs/ecryptfs/inode.c b/trunk/fs/ecryptfs/inode.c index 534b129ea676..cc7709e7c508 100644 --- a/trunk/fs/ecryptfs/inode.c +++ b/trunk/fs/ecryptfs/inode.c @@ -619,6 +619,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct dentry *lower_old_dir_dentry; struct dentry *lower_new_dir_dentry; struct dentry *trap = NULL; + struct inode *target_inode; lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); @@ -626,6 +627,7 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, dget(lower_new_dentry); lower_old_dir_dentry = dget_parent(lower_old_dentry); lower_new_dir_dentry = dget_parent(lower_new_dentry); + target_inode = new_dentry->d_inode; trap = lock_rename(lower_old_dir_dentry, lower_new_dir_dentry); /* source should not be ancestor of target */ if (trap == lower_old_dentry) { @@ -641,6 +643,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, lower_new_dir_dentry->d_inode, lower_new_dentry); if (rc) goto out_lock; + if (target_inode) + fsstack_copy_attr_all(target_inode, + ecryptfs_inode_to_lower(target_inode)); fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode); if (new_dir != old_dir) fsstack_copy_attr_all(old_dir, lower_old_dir_dentry->d_inode); diff --git a/trunk/fs/ecryptfs/main.c b/trunk/fs/ecryptfs/main.c index 2768138eefee..9b627c15010a 100644 --- a/trunk/fs/ecryptfs/main.c +++ b/trunk/fs/ecryptfs/main.c @@ -162,6 +162,7 @@ void ecryptfs_put_lower_file(struct inode *inode) inode_info = ecryptfs_inode_to_private(inode); if (atomic_dec_and_mutex_lock(&inode_info->lower_file_count, &inode_info->lower_file_mutex)) { + filemap_write_and_wait(inode->i_mapping); fput(inode_info->lower_file); inode_info->lower_file = NULL; mutex_unlock(&inode_info->lower_file_mutex); diff --git a/trunk/fs/eventpoll.c b/trunk/fs/eventpoll.c index 1c8b55670804..eedec84c1809 100644 --- a/trunk/fs/eventpoll.c +++ b/trunk/fs/eventpoll.c @@ -1654,8 +1654,8 @@ SYSCALL_DEFINE1(epoll_create1, int, flags) error = PTR_ERR(file); goto out_free_fd; } - fd_install(fd, file); ep->file = file; + fd_install(fd, file); return fd; out_free_fd: diff --git a/trunk/fs/exofs/inode.c b/trunk/fs/exofs/inode.c index 5badb0c039de..1562c27a2fab 100644 --- a/trunk/fs/exofs/inode.c +++ b/trunk/fs/exofs/inode.c @@ -37,15 +37,12 @@ #define EXOFS_DBGMSG2(M...) do {} while (0) -enum {MAX_PAGES_KMALLOC = PAGE_SIZE / sizeof(struct page *), }; - unsigned exofs_max_io_pages(struct ore_layout *layout, unsigned expected_pages) { - unsigned pages = min_t(unsigned, expected_pages, MAX_PAGES_KMALLOC); + unsigned pages = min_t(unsigned, expected_pages, + layout->max_io_length / PAGE_SIZE); - /* TODO: easily support bio chaining */ - pages = min_t(unsigned, pages, layout->max_io_length / PAGE_SIZE); return pages; } @@ -101,7 +98,8 @@ static void _pcol_reset(struct page_collect *pcol) * it might not end here. 
don't be left with nothing */ if (!pcol->expected_pages) - pcol->expected_pages = MAX_PAGES_KMALLOC; + pcol->expected_pages = + exofs_max_io_pages(&pcol->sbi->layout, ~0); } static int pcol_try_alloc(struct page_collect *pcol) @@ -389,6 +387,8 @@ static int readpage_strip(void *data, struct page *page) size_t len; int ret; + BUG_ON(!PageLocked(page)); + /* FIXME: Just for debugging, will be removed */ if (PageUptodate(page)) EXOFS_ERR("PageUptodate(0x%lx, 0x%lx)\n", pcol->inode->i_ino, @@ -572,8 +572,16 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate) if (!pcol->that_locked_page || (pcol->that_locked_page->index != index)) { - struct page *page = find_get_page(pcol->inode->i_mapping, index); + struct page *page; + loff_t i_size = i_size_read(pcol->inode); + + if (offset >= i_size) { + *uptodate = true; + EXOFS_DBGMSG("offset >= i_size index=0x%lx\n", index); + return ZERO_PAGE(0); + } + page = find_get_page(pcol->inode->i_mapping, index); if (!page) { page = find_or_create_page(pcol->inode->i_mapping, index, GFP_NOFS); @@ -602,12 +610,13 @@ static void __r4w_put_page(void *priv, struct page *page) { struct page_collect *pcol = priv; - if (pcol->that_locked_page != page) { + if ((pcol->that_locked_page != page) && (ZERO_PAGE(0) != page)) { EXOFS_DBGMSG("index=0x%lx\n", page->index); page_cache_release(page); return; } - EXOFS_DBGMSG("that_locked_page index=0x%lx\n", page->index); + EXOFS_DBGMSG("that_locked_page index=0x%lx\n", + ZERO_PAGE(0) == page ? -1 : page->index); } static const struct _ore_r4w_op _r4w_op = { diff --git a/trunk/fs/exofs/ore.c b/trunk/fs/exofs/ore.c index 24a49d47e935..1585db1aa365 100644 --- a/trunk/fs/exofs/ore.c +++ b/trunk/fs/exofs/ore.c @@ -837,11 +837,11 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp) bio->bi_rw |= REQ_WRITE; } - osd_req_write(or, _ios_obj(ios, dev), per_dev->offset, - bio, per_dev->length); + osd_req_write(or, _ios_obj(ios, cur_comp), + per_dev->offset, bio, per_dev->length); ORE_DBGMSG("write(0x%llx) offset=0x%llx " "length=0x%llx dev=%d\n", - _LLU(_ios_obj(ios, dev)->id), + _LLU(_ios_obj(ios, cur_comp)->id), _LLU(per_dev->offset), _LLU(per_dev->length), dev); } else if (ios->kern_buff) { @@ -853,20 +853,20 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp) (ios->si.unit_off + ios->length > ios->layout->stripe_unit)); - ret = osd_req_write_kern(or, _ios_obj(ios, per_dev->dev), + ret = osd_req_write_kern(or, _ios_obj(ios, cur_comp), per_dev->offset, ios->kern_buff, ios->length); if (unlikely(ret)) goto out; ORE_DBGMSG2("write_kern(0x%llx) offset=0x%llx " "length=0x%llx dev=%d\n", - _LLU(_ios_obj(ios, dev)->id), + _LLU(_ios_obj(ios, cur_comp)->id), _LLU(per_dev->offset), _LLU(ios->length), per_dev->dev); } else { - osd_req_set_attributes(or, _ios_obj(ios, dev)); + osd_req_set_attributes(or, _ios_obj(ios, cur_comp)); ORE_DBGMSG2("obj(0x%llx) set_attributes=%d dev=%d\n", - _LLU(_ios_obj(ios, dev)->id), + _LLU(_ios_obj(ios, cur_comp)->id), ios->out_attr_len, dev); } diff --git a/trunk/fs/exofs/super.c b/trunk/fs/exofs/super.c index 433783624d10..dde41a75c7c8 100644 --- a/trunk/fs/exofs/super.c +++ b/trunk/fs/exofs/super.c @@ -400,8 +400,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait) ret = ore_write(ios); if (unlikely(ret)) EXOFS_ERR("%s: ore_write failed.\n", __func__); - else - sb->s_dirt = 0; unlock_super(sb); @@ -412,14 +410,6 @@ static int exofs_sync_fs(struct super_block *sb, int wait) return ret; } -static void exofs_write_super(struct super_block *sb) -{ - if 
(!(sb->s_flags & MS_RDONLY)) - exofs_sync_fs(sb, 1); - else - sb->s_dirt = 0; -} - static void _exofs_print_device(const char *msg, const char *dev_path, struct osd_dev *od, u64 pid) { @@ -952,7 +942,6 @@ static const struct super_operations exofs_sops = { .write_inode = exofs_write_inode, .evict_inode = exofs_evict_inode, .put_super = exofs_put_super, - .write_super = exofs_write_super, .sync_fs = exofs_sync_fs, .statfs = exofs_statfs, }; diff --git a/trunk/fs/ext2/balloc.c b/trunk/fs/ext2/balloc.c index 376aa77f3ca7..2616d0ea5c5c 100644 --- a/trunk/fs/ext2/balloc.c +++ b/trunk/fs/ext2/balloc.c @@ -479,7 +479,7 @@ void ext2_discard_reservation(struct inode *inode) /** * ext2_free_blocks() -- Free given blocks and update quota and i_blocks * @inode: inode - * @block: start physcial block to free + * @block: start physical block to free * @count: number of blocks to free */ void ext2_free_blocks (struct inode * inode, unsigned long block, diff --git a/trunk/fs/ext3/balloc.c b/trunk/fs/ext3/balloc.c index 90d901f0486b..7320a66e958f 100644 --- a/trunk/fs/ext3/balloc.c +++ b/trunk/fs/ext3/balloc.c @@ -483,7 +483,7 @@ void ext3_discard_reservation(struct inode *inode) * ext3_free_blocks_sb() -- Free given blocks and update quota * @handle: handle to this transaction * @sb: super block - * @block: start physcial block to free + * @block: start physical block to free * @count: number of blocks to free * @pdquot_freed_blocks: pointer to quota */ diff --git a/trunk/fs/ext3/inode.c b/trunk/fs/ext3/inode.c index 9a4a5c48b1c9..7e87e37a372a 100644 --- a/trunk/fs/ext3/inode.c +++ b/trunk/fs/ext3/inode.c @@ -3072,6 +3072,8 @@ static int ext3_do_update_inode(handle_t *handle, struct ext3_inode_info *ei = EXT3_I(inode); struct buffer_head *bh = iloc->bh; int err = 0, rc, block; + int need_datasync = 0; + __le32 disksize; uid_t i_uid; gid_t i_gid; @@ -3113,7 +3115,11 @@ static int ext3_do_update_inode(handle_t *handle, raw_inode->i_gid_high = 0; } raw_inode->i_links_count = cpu_to_le16(inode->i_nlink); - raw_inode->i_size = cpu_to_le32(ei->i_disksize); + disksize = cpu_to_le32(ei->i_disksize); + if (disksize != raw_inode->i_size) { + need_datasync = 1; + raw_inode->i_size = disksize; + } raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec); raw_inode->i_ctime = cpu_to_le32(inode->i_ctime.tv_sec); raw_inode->i_mtime = cpu_to_le32(inode->i_mtime.tv_sec); @@ -3129,8 +3135,11 @@ static int ext3_do_update_inode(handle_t *handle, if (!S_ISREG(inode->i_mode)) { raw_inode->i_dir_acl = cpu_to_le32(ei->i_dir_acl); } else { - raw_inode->i_size_high = - cpu_to_le32(ei->i_disksize >> 32); + disksize = cpu_to_le32(ei->i_disksize >> 32); + if (disksize != raw_inode->i_size_high) { + raw_inode->i_size_high = disksize; + need_datasync = 1; + } if (ei->i_disksize > 0x7fffffffULL) { struct super_block *sb = inode->i_sb; if (!EXT3_HAS_RO_COMPAT_FEATURE(sb, @@ -3183,6 +3192,8 @@ static int ext3_do_update_inode(handle_t *handle, ext3_clear_inode_state(inode, EXT3_STATE_NEW); atomic_set(&ei->i_sync_tid, handle->h_transaction->t_tid); + if (need_datasync) + atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid); out_brelse: brelse (bh); ext3_std_error(inode->i_sb, err); @@ -3196,7 +3207,7 @@ static int ext3_do_update_inode(handle_t *handle, * * - Within generic_file_write() for O_SYNC files. * Here, there will be no transaction running. We wait for any running - * trasnaction to commit. + * transaction to commit. * * - Within sys_sync(), kupdate and such. * We wait on commit, if tol to. 
@@ -3459,14 +3470,6 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode, * inode out, but prune_icache isn't a user-visible syncing function. * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. - * - * Is this efficient/effective? Well, we're being nice to the system - * by cleaning up our inodes proactively so they can be reaped - * without I/O. But we are potentially leaving up to five seconds' - * worth of inodes floating about which prune_icache wants us to - * write out. One way to fix that would be to get prune_icache() - * to do a write_super() to free up some memory. It has the desired - * effect. */ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) { diff --git a/trunk/fs/ext3/super.c b/trunk/fs/ext3/super.c index ff9bcdc5b0d5..8c892e93d8e7 100644 --- a/trunk/fs/ext3/super.c +++ b/trunk/fs/ext3/super.c @@ -64,11 +64,6 @@ static int ext3_freeze(struct super_block *sb); /* * Wrappers for journal_start/end. - * - * The only special thing we need to do here is to make sure that all - * journal_end calls result in the superblock being marked dirty, so - * that sync() will call the filesystem's write_super callback if - * appropriate. */ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) { @@ -90,12 +85,6 @@ handle_t *ext3_journal_start_sb(struct super_block *sb, int nblocks) return journal_start(journal, nblocks); } -/* - * The only special thing we need to do here is to make sure that all - * journal_stop calls result in the superblock being marked dirty, so - * that sync() will call the filesystem's write_super callback if - * appropriate. - */ int __ext3_journal_stop(const char *where, handle_t *handle) { struct super_block *sb; diff --git a/trunk/fs/ext4/balloc.c b/trunk/fs/ext4/balloc.c index d23b31ca9d7a..1b5089067d01 100644 --- a/trunk/fs/ext4/balloc.c +++ b/trunk/fs/ext4/balloc.c @@ -280,14 +280,18 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb, return desc; } -static int ext4_valid_block_bitmap(struct super_block *sb, - struct ext4_group_desc *desc, - unsigned int block_group, - struct buffer_head *bh) +/* + * Return the block number which was discovered to be invalid, or 0 if + * the block bitmap is valid. + */ +static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb, + struct ext4_group_desc *desc, + unsigned int block_group, + struct buffer_head *bh) { ext4_grpblk_t offset; ext4_grpblk_t next_zero_bit; - ext4_fsblk_t bitmap_blk; + ext4_fsblk_t blk; ext4_fsblk_t group_first_block; if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) { @@ -297,37 +301,33 @@ static int ext4_valid_block_bitmap(struct super_block *sb, * or it has to also read the block group where the bitmaps * are located to verify they are set. 
*/ - return 1; + return 0; } group_first_block = ext4_group_first_block_no(sb, block_group); /* check whether block bitmap block number is set */ - bitmap_blk = ext4_block_bitmap(sb, desc); - offset = bitmap_blk - group_first_block; + blk = ext4_block_bitmap(sb, desc); + offset = blk - group_first_block; if (!ext4_test_bit(offset, bh->b_data)) /* bad block bitmap */ - goto err_out; + return blk; /* check whether the inode bitmap block number is set */ - bitmap_blk = ext4_inode_bitmap(sb, desc); - offset = bitmap_blk - group_first_block; + blk = ext4_inode_bitmap(sb, desc); + offset = blk - group_first_block; if (!ext4_test_bit(offset, bh->b_data)) /* bad block bitmap */ - goto err_out; + return blk; /* check whether the inode table block number is set */ - bitmap_blk = ext4_inode_table(sb, desc); - offset = bitmap_blk - group_first_block; + blk = ext4_inode_table(sb, desc); + offset = blk - group_first_block; next_zero_bit = ext4_find_next_zero_bit(bh->b_data, offset + EXT4_SB(sb)->s_itb_per_group, offset); - if (next_zero_bit >= offset + EXT4_SB(sb)->s_itb_per_group) - /* good bitmap for inode tables */ - return 1; - -err_out: - ext4_error(sb, "Invalid block bitmap - block_group = %d, block = %llu", - block_group, bitmap_blk); + if (next_zero_bit < offset + EXT4_SB(sb)->s_itb_per_group) + /* bad bitmap for inode tables */ + return blk; return 0; } @@ -336,14 +336,26 @@ void ext4_validate_block_bitmap(struct super_block *sb, unsigned int block_group, struct buffer_head *bh) { + ext4_fsblk_t blk; + if (buffer_verified(bh)) return; ext4_lock_group(sb, block_group); - if (ext4_valid_block_bitmap(sb, desc, block_group, bh) && - ext4_block_bitmap_csum_verify(sb, block_group, desc, bh, - EXT4_BLOCKS_PER_GROUP(sb) / 8)) - set_buffer_verified(bh); + blk = ext4_valid_block_bitmap(sb, desc, block_group, bh); + if (unlikely(blk != 0)) { + ext4_unlock_group(sb, block_group); + ext4_error(sb, "bg %u: block %llu: invalid block bitmap", + block_group, blk); + return; + } + if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group, + desc, bh, EXT4_BLOCKS_PER_GROUP(sb) / 8))) { + ext4_unlock_group(sb, block_group); + ext4_error(sb, "bg %u: bad block bitmap checksum", block_group); + return; + } + set_buffer_verified(bh); ext4_unlock_group(sb, block_group); } diff --git a/trunk/fs/ext4/bitmap.c b/trunk/fs/ext4/bitmap.c index f8716eab9995..5c2d1813ebe9 100644 --- a/trunk/fs/ext4/bitmap.c +++ b/trunk/fs/ext4/bitmap.c @@ -79,7 +79,6 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group, if (provided == calculated) return 1; - ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group); return 0; } diff --git a/trunk/fs/ext4/extents.c b/trunk/fs/ext4/extents.c index cd0c7ed06772..aabbb3f53683 100644 --- a/trunk/fs/ext4/extents.c +++ b/trunk/fs/ext4/extents.c @@ -2662,6 +2662,7 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, } path[0].p_depth = depth; path[0].p_hdr = ext_inode_hdr(inode); + i = 0; if (ext4_ext_check(inode, path[0].p_hdr, depth)) { err = -EIO; diff --git a/trunk/fs/ext4/inode.c b/trunk/fs/ext4/inode.c index 6324f74e0342..c862ee5fe79d 100644 --- a/trunk/fs/ext4/inode.c +++ b/trunk/fs/ext4/inode.c @@ -1970,7 +1970,7 @@ static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate); * This function can get called via... 
* - ext4_da_writepages after taking page lock (have journal handle) * - journal_submit_inode_data_buffers (no journal handle) - * - shrink_page_list via pdflush (no journal handle) + * - shrink_page_list via the kswapd/direct reclaim (no journal handle) * - grab_page_cache when doing write_begin (have journal handle) * * We don't do any block allocation in this function. If we have page with @@ -3313,7 +3313,7 @@ int ext4_discard_partial_page_buffers(handle_t *handle, * handle: The journal handle * inode: The files inode * page: A locked page that contains the offset "from" - * from: The starting byte offset (from the begining of the file) + * from: The starting byte offset (from the beginning of the file) * to begin discarding * len: The length of bytes to discard * flags: Optional flags that may be used: @@ -3321,11 +3321,11 @@ int ext4_discard_partial_page_buffers(handle_t *handle, * EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED * Only zero the regions of the page whose buffer heads * have already been unmapped. This flag is appropriate - * for updateing the contents of a page whose blocks may + * for updating the contents of a page whose blocks may * have already been released, and we only want to zero * out the regions that correspond to those released blocks. * - * Returns zero on sucess or negative on failure. + * Returns zero on success or negative on failure. */ static int ext4_discard_partial_page_buffers_no_lock(handle_t *handle, struct inode *inode, struct page *page, loff_t from, @@ -3486,7 +3486,7 @@ int ext4_can_truncate(struct inode *inode) * @offset: The offset where the hole will begin * @len: The length of the hole * - * Returns: 0 on sucess or negative on failure + * Returns: 0 on success or negative on failure */ int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) @@ -4008,7 +4008,7 @@ static int ext4_inode_blocks_set(handle_t *handle, if (i_blocks <= ~0U) { /* - * i_blocks can be represnted in a 32 bit variable + * i_blocks can be represented in a 32 bit variable * as multiple of 512 bytes */ raw_inode->i_blocks_lo = cpu_to_le32(i_blocks); @@ -4169,7 +4169,7 @@ static int ext4_do_update_inode(handle_t *handle, * * - Within generic_file_write() for O_SYNC files. * Here, there will be no transaction running. We wait for any running - * trasnaction to commit. + * transaction to commit. * * - Within sys_sync(), kupdate and such. * We wait on commit, if tol to. @@ -4413,7 +4413,7 @@ static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk) * worse case, the indexs blocks spread over different block groups * * If datablocks are discontiguous, they are possible to spread over - * different block groups too. If they are contiuguous, with flexbg, + * different block groups too. If they are contiguous, with flexbg, * they could still across block group boundary. * * Also account for superblock, inode, quota and xattr blocks @@ -4589,14 +4589,6 @@ static int ext4_expand_extra_isize(struct inode *inode, * inode out, but prune_icache isn't a user-visible syncing function. * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync) * we start and wait on commits. - * - * Is this efficient/effective? Well, we're being nice to the system - * by cleaning up our inodes proactively so they can be reaped - * without I/O. But we are potentially leaving up to five seconds' - * worth of inodes floating about which prune_icache wants us to - * write out. 
One way to fix that would be to get prune_icache() - * to do a write_super() to free up some memory. It has the desired - * effect. */ int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode) { diff --git a/trunk/fs/ext4/mballoc.c b/trunk/fs/ext4/mballoc.c index 8eae94771c45..08778f6cdfe9 100644 --- a/trunk/fs/ext4/mballoc.c +++ b/trunk/fs/ext4/mballoc.c @@ -4709,7 +4709,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, * ext4_group_add_blocks() -- Add given blocks to an existing group * @handle: handle to this transaction * @sb: super block - * @block: start physcial block to add to the block group + * @block: start physical block to add to the block group * @count: number of blocks to free * * This marks the blocks as free in the bitmap and buddy. diff --git a/trunk/fs/ext4/super.c b/trunk/fs/ext4/super.c index d76ec8277d3f..c6e0cb3d1f4a 100644 --- a/trunk/fs/ext4/super.c +++ b/trunk/fs/ext4/super.c @@ -326,11 +326,6 @@ static void ext4_put_nojournal(handle_t *handle) /* * Wrappers for jbd2_journal_start/end. - * - * The only special thing we need to do here is to make sure that all - * journal_end calls result in the superblock being marked dirty, so - * that sync() will call the filesystem's write_super callback if - * appropriate. */ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) { @@ -356,12 +351,6 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks) return jbd2_journal_start(journal, nblocks); } -/* - * The only special thing we need to do here is to make sure that all - * jbd2_journal_stop calls result in the superblock being marked dirty, so - * that sync() will call the filesystem's write_super callback if - * appropriate. - */ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle) { struct super_block *sb; @@ -959,6 +948,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) ei->i_reserved_meta_blocks = 0; ei->i_allocated_meta_blocks = 0; ei->i_da_metadata_calc_len = 0; + ei->i_da_metadata_calc_last_lblock = 0; spin_lock_init(&(ei->i_block_reservation_lock)); #ifdef CONFIG_QUOTA ei->i_reserved_quota = 0; @@ -3119,6 +3109,10 @@ static int count_overhead(struct super_block *sb, ext4_group_t grp, ext4_group_t i, ngroups = ext4_get_groups_count(sb); int s, j, count = 0; + if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) + return (ext4_bg_has_super(sb, grp) + ext4_bg_num_gdb(sb, grp) + + sbi->s_itb_per_group + 2); + first_block = le32_to_cpu(sbi->s_es->s_first_data_block) + (grp * EXT4_BLOCKS_PER_GROUP(sb)); last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1; @@ -4430,6 +4424,7 @@ static void ext4_clear_journal_err(struct super_block *sb, ext4_commit_super(sb, 1); jbd2_journal_clear_err(journal); + jbd2_journal_update_sb_errno(journal); } } diff --git a/trunk/fs/fs-writeback.c b/trunk/fs/fs-writeback.c index be3efc4f64f4..6d46c0d78338 100644 --- a/trunk/fs/fs-writeback.c +++ b/trunk/fs/fs-writeback.c @@ -577,10 +577,6 @@ static long writeback_chunk_size(struct backing_dev_info *bdi, /* * Write a portion of b_io inodes which belong to @sb. * - * If @only_this_sb is true, then find and write all such - * inodes. Otherwise write only ones which go sequentially - * in reverse order. - * * Return the number of pages and/or inodes written. 
*/ static long writeback_sb_inodes(struct super_block *sb, diff --git a/trunk/fs/fuse/control.c b/trunk/fs/fuse/control.c index 03ff5b1eba93..75a20c092dd4 100644 --- a/trunk/fs/fuse/control.c +++ b/trunk/fs/fuse/control.c @@ -117,7 +117,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned val; + unsigned uninitialized_var(val); ssize_t ret; ret = fuse_conn_limit_write(file, buf, count, ppos, &val, @@ -154,7 +154,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned val; + unsigned uninitialized_var(val); ssize_t ret; ret = fuse_conn_limit_write(file, buf, count, ppos, &val, diff --git a/trunk/fs/fuse/cuse.c b/trunk/fs/fuse/cuse.c index 3426521f3205..ee8d55042298 100644 --- a/trunk/fs/fuse/cuse.c +++ b/trunk/fs/fuse/cuse.c @@ -396,7 +396,7 @@ static void cuse_process_init_reply(struct fuse_conn *fc, struct fuse_req *req) err_region: unregister_chrdev_region(devt, 1); err: - fc->conn_error = 1; + fuse_conn_kill(fc); goto out; } @@ -532,8 +532,6 @@ static int cuse_channel_release(struct inode *inode, struct file *file) cdev_del(cc->cdev); } - /* kill connection and shutdown channel */ - fuse_conn_kill(&cc->fc); rc = fuse_dev_release(inode, file); /* puts the base reference */ return rc; diff --git a/trunk/fs/fuse/dev.c b/trunk/fs/fuse/dev.c index 7df2b5e8fbe1..f4246cfc8d87 100644 --- a/trunk/fs/fuse/dev.c +++ b/trunk/fs/fuse/dev.c @@ -1576,6 +1576,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode, req->pages[req->num_pages] = page; req->num_pages++; + offset = 0; num -= this_num; total_len += this_num; index++; diff --git a/trunk/fs/fuse/dir.c b/trunk/fs/fuse/dir.c index 8964cf3999b2..324bc0850534 100644 --- a/trunk/fs/fuse/dir.c +++ b/trunk/fs/fuse/dir.c @@ -383,6 +383,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, struct fuse_entry_out outentry; struct fuse_file *ff; + /* Userspace expects S_IFREG in create mode */ + BUG_ON((mode & S_IFMT) != S_IFREG); + forget = fuse_alloc_forget(); err = -ENOMEM; if (!forget) diff --git a/trunk/fs/fuse/file.c b/trunk/fs/fuse/file.c index 93d8d6c9494d..aba15f1b7ad2 100644 --- a/trunk/fs/fuse/file.c +++ b/trunk/fs/fuse/file.c @@ -703,13 +703,16 @@ static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = iocb->ki_filp->f_mapping->host; + struct fuse_conn *fc = get_fuse_conn(inode); - if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) { + /* + * In auto invalidate mode, always update attributes on read. + * Otherwise, only update if we attempt to read past EOF (to ensure + * i_size is up to date). + */ + if (fc->auto_inval_data || + (pos + iov_length(iov, nr_segs) > i_size_read(inode))) { int err; - /* - * If trying to read past EOF, make sure the i_size - * attribute is up-to-date. 
- */ err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL); if (err) return err; @@ -1700,7 +1703,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) size_t n; u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; - for (n = 0; n < count; n++) { + for (n = 0; n < count; n++, iov++) { if (iov->iov_len > (size_t) max) return -ENOMEM; max -= iov->iov_len; diff --git a/trunk/fs/fuse/fuse_i.h b/trunk/fs/fuse/fuse_i.h index 771fb6322c07..e24dd74e3068 100644 --- a/trunk/fs/fuse/fuse_i.h +++ b/trunk/fs/fuse/fuse_i.h @@ -484,6 +484,9 @@ struct fuse_conn { /** Is fallocate not implemented by fs? */ unsigned no_fallocate:1; + /** Use enhanced/automatic page cache invalidation. */ + unsigned auto_inval_data:1; + /** The number of requests waiting for completion */ atomic_t num_waiting; diff --git a/trunk/fs/fuse/inode.c b/trunk/fs/fuse/inode.c index 1cd61652018c..fca222dabe3c 100644 --- a/trunk/fs/fuse/inode.c +++ b/trunk/fs/fuse/inode.c @@ -197,6 +197,7 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, struct fuse_conn *fc = get_fuse_conn(inode); struct fuse_inode *fi = get_fuse_inode(inode); loff_t oldsize; + struct timespec old_mtime; spin_lock(&fc->lock); if (attr_version != 0 && fi->attr_version > attr_version) { @@ -204,15 +205,35 @@ void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, return; } + old_mtime = inode->i_mtime; fuse_change_attributes_common(inode, attr, attr_valid); oldsize = inode->i_size; i_size_write(inode, attr->size); spin_unlock(&fc->lock); - if (S_ISREG(inode->i_mode) && oldsize != attr->size) { - truncate_pagecache(inode, oldsize, attr->size); - invalidate_inode_pages2(inode->i_mapping); + if (S_ISREG(inode->i_mode)) { + bool inval = false; + + if (oldsize != attr->size) { + truncate_pagecache(inode, oldsize, attr->size); + inval = true; + } else if (fc->auto_inval_data) { + struct timespec new_mtime = { + .tv_sec = attr->mtime, + .tv_nsec = attr->mtimensec, + }; + + /* + * Auto inval mode also checks and invalidates if mtime + * has changed. 
+ */ + if (!timespec_equal(&old_mtime, &new_mtime)) + inval = true; + } + + if (inval) + invalidate_inode_pages2(inode->i_mapping); } } @@ -346,11 +367,6 @@ void fuse_conn_kill(struct fuse_conn *fc) wake_up_all(&fc->waitq); wake_up_all(&fc->blocked_waitq); wake_up_all(&fc->reserved_req_waitq); - mutex_lock(&fuse_mutex); - list_del(&fc->entry); - fuse_ctl_remove_conn(fc); - mutex_unlock(&fuse_mutex); - fuse_bdi_destroy(fc); } EXPORT_SYMBOL_GPL(fuse_conn_kill); @@ -359,7 +375,14 @@ static void fuse_put_super(struct super_block *sb) struct fuse_conn *fc = get_fuse_conn_super(sb); fuse_send_destroy(fc); + fuse_conn_kill(fc); + mutex_lock(&fuse_mutex); + list_del(&fc->entry); + fuse_ctl_remove_conn(fc); + mutex_unlock(&fuse_mutex); + fuse_bdi_destroy(fc); + fuse_conn_put(fc); } @@ -834,6 +857,8 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->big_writes = 1; if (arg->flags & FUSE_DONT_MASK) fc->dont_mask = 1; + if (arg->flags & FUSE_AUTO_INVAL_DATA) + fc->auto_inval_data = 1; } else { ra_pages = fc->max_read / PAGE_CACHE_SIZE; fc->no_lock = 1; @@ -859,7 +884,8 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req) arg->max_readahead = fc->bdi.ra_pages * PAGE_CACHE_SIZE; arg->flags |= FUSE_ASYNC_READ | FUSE_POSIX_LOCKS | FUSE_ATOMIC_O_TRUNC | FUSE_EXPORT_SUPPORT | FUSE_BIG_WRITES | FUSE_DONT_MASK | - FUSE_FLOCK_LOCKS; + FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ | + FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA; req->in.h.opcode = FUSE_INIT; req->in.numargs = 1; req->in.args[0].size = sizeof(*arg); diff --git a/trunk/fs/gfs2/aops.c b/trunk/fs/gfs2/aops.c index d6526347d386..01c4975da4bc 100644 --- a/trunk/fs/gfs2/aops.c +++ b/trunk/fs/gfs2/aops.c @@ -612,6 +612,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, struct gfs2_sbd *sdp = GFS2_SB(mapping->host); struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode); unsigned int data_blocks = 0, ind_blocks = 0, rblocks; + unsigned requested = 0; int alloc_required; int error = 0; pgoff_t index = pos >> PAGE_CACHE_SHIFT; @@ -641,7 +642,8 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, if (error) goto out_unlock; - error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks); + requested = data_blocks + ind_blocks; + error = gfs2_inplace_reserve(ip, requested); if (error) goto out_qunlock; } @@ -654,7 +656,7 @@ static int gfs2_write_begin(struct file *file, struct address_space *mapping, if (&ip->i_inode == sdp->sd_rindex) rblocks += 2 * RES_STATFS; if (alloc_required) - rblocks += gfs2_rg_blocks(ip); + rblocks += gfs2_rg_blocks(ip, requested); error = gfs2_trans_begin(sdp, rblocks, PAGE_CACHE_SIZE/sdp->sd_sb.sb_bsize); @@ -868,8 +870,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping, brelse(dibh); failed: gfs2_trans_end(sdp); - if (gfs2_mb_reserved(ip)) - gfs2_inplace_release(ip); + gfs2_inplace_release(ip); if (ip->i_res->rs_qa_qd_num) gfs2_quota_unlock(ip); if (inode == sdp->sd_rindex) { @@ -1023,7 +1024,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb, offset, nr_segs, gfs2_get_block_direct, NULL, NULL, 0); out: - gfs2_glock_dq_m(1, &gh); + gfs2_glock_dq(&gh); gfs2_holder_uninit(&gh); return rv; } diff --git a/trunk/fs/gfs2/bmap.c b/trunk/fs/gfs2/bmap.c index 49cd7dd4a9fa..1fd3ae237bdd 100644 --- a/trunk/fs/gfs2/bmap.c +++ b/trunk/fs/gfs2/bmap.c @@ -786,7 +786,7 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh, goto out_rlist; if 
(gfs2_rs_active(ip->i_res)) /* needs to be done with the rgrp glock held */ - gfs2_rs_deltree(ip->i_res); + gfs2_rs_deltree(ip, ip->i_res); error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT + RES_STATFS + RES_QUOTA, diff --git a/trunk/fs/gfs2/file.c b/trunk/fs/gfs2/file.c index d1d791ef38de..30e21997a1a1 100644 --- a/trunk/fs/gfs2/file.c +++ b/trunk/fs/gfs2/file.c @@ -322,6 +322,29 @@ static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) return -ENOTTY; } +/** + * gfs2_size_hint - Give a hint to the size of a write request + * @file: The struct file + * @offset: The file offset of the write + * @size: The length of the write + * + * When we are about to do a write, this function records the total + * write size in order to provide a suitable hint to the lower layers + * about how many blocks will be required. + * + */ + +static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size) +{ + struct inode *inode = filep->f_dentry->d_inode; + struct gfs2_sbd *sdp = GFS2_SB(inode); + struct gfs2_inode *ip = GFS2_I(inode); + size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift; + int hint = min_t(size_t, INT_MAX, blks); + + atomic_set(&ip->i_res->rs_sizehint, hint); +} + /** * gfs2_allocate_page_backing - Use bmap to allocate blocks * @page: The (locked) page to allocate backing for @@ -382,8 +405,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) if (ret) return ret; - atomic_set(&ip->i_res->rs_sizehint, - PAGE_CACHE_SIZE >> sdp->sd_sb.sb_bsize_shift); + gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE); gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); @@ -419,7 +441,7 @@ static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) rblocks += data_blocks ? data_blocks : 1; if (ind_blocks || data_blocks) { rblocks += RES_STATFS + RES_QUOTA; - rblocks += gfs2_rg_blocks(ip); + rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks); } ret = gfs2_trans_begin(sdp, rblocks, 0); if (ret) @@ -663,7 +685,8 @@ static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov, if (ret) return ret; - atomic_set(&ip->i_res->rs_sizehint, writesize >> sdp->sd_sb.sb_bsize_shift); + gfs2_size_hint(file, pos, writesize); + if (file->f_flags & O_APPEND) { struct gfs2_holder gh; @@ -789,7 +812,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, if (unlikely(error)) goto out_uninit; - atomic_set(&ip->i_res->rs_sizehint, len >> sdp->sd_sb.sb_bsize_shift); + gfs2_size_hint(file, offset, len); while (len > 0) { if (len < bytes) @@ -822,7 +845,7 @@ static long gfs2_fallocate(struct file *file, int mode, loff_t offset, &max_bytes, &data_blocks, &ind_blocks); rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA + - RES_RG_HDR + gfs2_rg_blocks(ip); + RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks); if (gfs2_is_jdata(ip)) rblocks += data_blocks ? data_blocks : 1; diff --git a/trunk/fs/gfs2/glock.c b/trunk/fs/gfs2/glock.c index 1ed81f40da0d..e6c2fd53cab2 100644 --- a/trunk/fs/gfs2/glock.c +++ b/trunk/fs/gfs2/glock.c @@ -185,20 +185,6 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl) spin_unlock(&lru_lock); } -/** - * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list - * @gl: the glock - * - * If the glock is demotable, then we add it (or move it) to the end - * of the glock LRU list. 
- */ - -static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) -{ - if (demote_ok(gl)) - gfs2_glock_add_to_lru(gl); -} - /** * gfs2_glock_put_nolock() - Decrement reference count on glock * @gl: The glock to put @@ -883,7 +869,14 @@ static int gfs2_glock_demote_wait(void *word) return 0; } -static void wait_on_holder(struct gfs2_holder *gh) +/** + * gfs2_glock_wait - wait on a glock acquisition + * @gh: the glock holder + * + * Returns: 0 on success + */ + +int gfs2_glock_wait(struct gfs2_holder *gh) { unsigned long time1 = jiffies; @@ -894,12 +887,7 @@ static void wait_on_holder(struct gfs2_holder *gh) gh->gh_gl->gl_hold_time = min(gh->gh_gl->gl_hold_time + GL_GLOCK_HOLD_INCR, GL_GLOCK_MAX_HOLD); -} - -static void wait_on_demote(struct gfs2_glock *gl) -{ - might_sleep(); - wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE); + return gh->gh_error; } /** @@ -929,19 +917,6 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state, trace_gfs2_demote_rq(gl); } -/** - * gfs2_glock_wait - wait on a glock acquisition - * @gh: the glock holder - * - * Returns: 0 on success - */ - -int gfs2_glock_wait(struct gfs2_holder *gh) -{ - wait_on_holder(gh); - return gh->gh_error; -} - void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...) { struct va_format vaf; @@ -979,7 +954,7 @@ __acquires(&gl->gl_spin) struct gfs2_sbd *sdp = gl->gl_sbd; struct list_head *insert_pt = NULL; struct gfs2_holder *gh2; - int try_lock = 0; + int try_futile = 0; BUG_ON(gh->gh_owner_pid == NULL); if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags)) @@ -987,7 +962,7 @@ __acquires(&gl->gl_spin) if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { if (test_bit(GLF_LOCK, &gl->gl_flags)) - try_lock = 1; + try_futile = !may_grant(gl, gh); if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) goto fail; } @@ -996,9 +971,8 @@ __acquires(&gl->gl_spin) if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid && (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK))) goto trap_recursive; - if (try_lock && - !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) && - !may_grant(gl, gh)) { + if (try_futile && + !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) { fail: gh->gh_error = GLR_TRYFAILED; gfs2_holder_wake(gh); @@ -1121,8 +1095,9 @@ void gfs2_glock_dq(struct gfs2_holder *gh) !test_bit(GLF_DEMOTE, &gl->gl_flags)) fast_path = 1; } - if (!test_bit(GLF_LFLUSH, &gl->gl_flags)) - __gfs2_glock_schedule_for_reclaim(gl); + if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl)) + gfs2_glock_add_to_lru(gl); + trace_gfs2_glock_queue(gh, 0); spin_unlock(&gl->gl_spin); if (likely(fast_path)) @@ -1141,7 +1116,8 @@ void gfs2_glock_dq_wait(struct gfs2_holder *gh) { struct gfs2_glock *gl = gh->gh_gl; gfs2_glock_dq(gh); - wait_on_demote(gl); + might_sleep(); + wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE); } /** diff --git a/trunk/fs/gfs2/glops.c b/trunk/fs/gfs2/glops.c index 4bdcf3784187..32cc4fde975c 100644 --- a/trunk/fs/gfs2/glops.c +++ b/trunk/fs/gfs2/glops.c @@ -94,6 +94,7 @@ static void gfs2_ail_empty_gl(struct gfs2_glock *gl) /* A shortened, inline version of gfs2_trans_begin() */ tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64)); tr.tr_ip = (unsigned long)__builtin_return_address(0); + sb_start_intwrite(sdp->sd_vfs); gfs2_log_reserve(sdp, tr.tr_reserved); BUG_ON(current->journal_info); current->journal_info = &tr; diff --git a/trunk/fs/gfs2/incore.h b/trunk/fs/gfs2/incore.h index aaecc8085fc5..3d469d37345e 100644 --- 
a/trunk/fs/gfs2/incore.h +++ b/trunk/fs/gfs2/incore.h @@ -99,9 +99,26 @@ struct gfs2_rgrpd { #define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */ spinlock_t rd_rsspin; /* protects reservation related vars */ struct rb_root rd_rstree; /* multi-block reservation tree */ - u32 rd_rs_cnt; /* count of current reservations */ }; +struct gfs2_rbm { + struct gfs2_rgrpd *rgd; + struct gfs2_bitmap *bi; /* Bitmap must belong to the rgd */ + u32 offset; /* The offset is bitmap relative */ +}; + +static inline u64 gfs2_rbm_to_block(const struct gfs2_rbm *rbm) +{ + return rbm->rgd->rd_data0 + (rbm->bi->bi_start * GFS2_NBBY) + rbm->offset; +} + +static inline bool gfs2_rbm_eq(const struct gfs2_rbm *rbm1, + const struct gfs2_rbm *rbm2) +{ + return (rbm1->rgd == rbm2->rgd) && (rbm1->bi == rbm2->bi) && + (rbm1->offset == rbm2->offset); +} + enum gfs2_state_bits { BH_Pinned = BH_PrivateStart, BH_Escaped = BH_PrivateStart + 1, @@ -250,18 +267,11 @@ struct gfs2_blkreserv { /* components used during write (step 1): */ atomic_t rs_sizehint; /* hint of the write size */ - /* components used during inplace_reserve (step 2): */ - u32 rs_requested; /* Filled in by caller of gfs2_inplace_reserve() */ - - /* components used during get_local_rgrp (step 3): */ - struct gfs2_rgrpd *rs_rgd; /* pointer to the gfs2_rgrpd */ struct gfs2_holder rs_rgd_gh; /* Filled in by get_local_rgrp */ struct rb_node rs_node; /* link to other block reservations */ - - /* components used during block searches and assignments (step 4): */ - struct gfs2_bitmap *rs_bi; /* bitmap for the current allocation */ - u32 rs_biblk; /* start block relative to the bi */ + struct gfs2_rbm rs_rbm; /* Start of reservation */ u32 rs_free; /* how many blocks are still free */ + u64 rs_inum; /* Inode number for reservation */ /* ancillary quota stuff */ struct gfs2_quota_data *rs_qa_qd[2 * MAXQUOTAS]; diff --git a/trunk/fs/gfs2/inode.c b/trunk/fs/gfs2/inode.c index 4ce22e547308..381893ceefa4 100644 --- a/trunk/fs/gfs2/inode.c +++ b/trunk/fs/gfs2/inode.c @@ -712,14 +712,9 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, if (error) goto fail_gunlock2; - /* The newly created inode needs a reservation so it can allocate - xattrs. At the same time, we want new blocks allocated to the new - dinode to be as contiguous as possible. Since we allocated the - dinode block under the directory's reservation, we transfer - ownership of that reservation to the new inode. The directory - doesn't need a reservation unless it needs a new allocation. */ - ip->i_res = dip->i_res; - dip->i_res = NULL; + error = gfs2_rs_alloc(ip); + if (error) + goto fail_gunlock2; error = gfs2_acl_create(dip, inode); if (error) @@ -737,10 +732,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry, brelse(bh); gfs2_trans_end(sdp); - /* Check if we reserved space in the rgrp. Function link_dinode may - not, depending on whether alloc is required. 
*/ - if (gfs2_mb_reserved(dip)) - gfs2_inplace_release(dip); + gfs2_inplace_release(dip); gfs2_quota_unlock(dip); mark_inode_dirty(inode); gfs2_glock_dq_uninit_m(2, ghs); @@ -897,7 +889,7 @@ static int gfs2_link(struct dentry *old_dentry, struct inode *dir, goto out_gunlock_q; error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + - gfs2_rg_blocks(dip) + + gfs2_rg_blocks(dip, sdp->sd_max_dirres) + 2 * RES_DINODE + RES_STATFS + RES_QUOTA, 0); if (error) @@ -1378,7 +1370,7 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry, goto out_gunlock_q; error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + - gfs2_rg_blocks(ndip) + + gfs2_rg_blocks(ndip, sdp->sd_max_dirres) + 4 * RES_DINODE + 4 * RES_LEAF + RES_STATFS + RES_QUOTA + 4, 0); if (error) @@ -1722,7 +1714,9 @@ static int gfs2_setxattr(struct dentry *dentry, const char *name, gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); if (ret == 0) { - ret = generic_setxattr(dentry, name, data, size, flags); + ret = gfs2_rs_alloc(ip); + if (ret == 0) + ret = generic_setxattr(dentry, name, data, size, flags); gfs2_glock_dq(&gh); } gfs2_holder_uninit(&gh); @@ -1757,7 +1751,9 @@ static int gfs2_removexattr(struct dentry *dentry, const char *name) gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); ret = gfs2_glock_nq(&gh); if (ret == 0) { - ret = generic_removexattr(dentry, name); + ret = gfs2_rs_alloc(ip); + if (ret == 0) + ret = generic_removexattr(dentry, name); gfs2_glock_dq(&gh); } gfs2_holder_uninit(&gh); diff --git a/trunk/fs/gfs2/meta_io.c b/trunk/fs/gfs2/meta_io.c index 3a56c8d94de0..22255d96b27e 100644 --- a/trunk/fs/gfs2/meta_io.c +++ b/trunk/fs/gfs2/meta_io.c @@ -52,7 +52,7 @@ static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wb /* * If it's a fully non-blocking write attempt and we cannot * lock the buffer then redirty the page. Note that this can - * potentially cause a busy-wait loop from pdflush and kswapd + * potentially cause a busy-wait loop from flusher thread and kswapd * activity, but those code paths have their own higher-level * throttling. */ diff --git a/trunk/fs/gfs2/ops_fstype.c b/trunk/fs/gfs2/ops_fstype.c index e5af9dc420ef..e443966c8106 100644 --- a/trunk/fs/gfs2/ops_fstype.c +++ b/trunk/fs/gfs2/ops_fstype.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "gfs2.h" #include "incore.h" @@ -766,6 +767,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo) return error; } +static struct lock_class_key gfs2_quota_imutex_key; static int init_inodes(struct gfs2_sbd *sdp, int undo) { @@ -803,6 +805,12 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo) fs_err(sdp, "can't get quota file inode: %d\n", error); goto fail_rindex; } + /* + * i_mutex on quota files is special. Since this inode is hidden system + * file, we are safe to define locking ourselves. 
+ */ + lockdep_set_class(&sdp->sd_quota_inode->i_mutex, + &gfs2_quota_imutex_key); error = gfs2_rindex_update(sdp); if (error) diff --git a/trunk/fs/gfs2/quota.c b/trunk/fs/gfs2/quota.c index a3bde91645c2..4021deca61ef 100644 --- a/trunk/fs/gfs2/quota.c +++ b/trunk/fs/gfs2/quota.c @@ -765,6 +765,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) struct gfs2_holder *ghs, i_gh; unsigned int qx, x; struct gfs2_quota_data *qd; + unsigned reserved; loff_t offset; unsigned int nalloc = 0, blocks; int error; @@ -781,7 +782,7 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) return -ENOMEM; sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL); - mutex_lock_nested(&ip->i_inode.i_mutex, I_MUTEX_QUOTA); + mutex_lock(&ip->i_inode.i_mutex); for (qx = 0; qx < num_qd; qx++) { error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE, GL_NOCACHE, &ghs[qx]); @@ -811,13 +812,13 @@ static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda) * two blocks need to be updated instead of 1 */ blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3; - error = gfs2_inplace_reserve(ip, 1 + - (nalloc * (data_blocks + ind_blocks))); + reserved = 1 + (nalloc * (data_blocks + ind_blocks)); + error = gfs2_inplace_reserve(ip, reserved); if (error) goto out_alloc; if (nalloc) - blocks += gfs2_rg_blocks(ip) + nalloc * ind_blocks + RES_STATFS; + blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS; error = gfs2_trans_begin(sdp, blocks, 0); if (error) @@ -1598,7 +1599,7 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id, error = gfs2_inplace_reserve(ip, blocks); if (error) goto out_i; - blocks += gfs2_rg_blocks(ip); + blocks += gfs2_rg_blocks(ip, blocks); } /* Some quotas span block boundaries and can update two blocks, diff --git a/trunk/fs/gfs2/rgrp.c b/trunk/fs/gfs2/rgrp.c index 4d34887a601d..3cc402ce6fea 100644 --- a/trunk/fs/gfs2/rgrp.c +++ b/trunk/fs/gfs2/rgrp.c @@ -35,9 +35,6 @@ #define BFITNOENT ((u32)~0) #define NO_BLOCK ((u64)~0) -#define RSRV_CONTENTION_FACTOR 4 -#define RGRP_RSRV_MAX_CONTENDERS 2 - #if BITS_PER_LONG == 32 #define LBITMASK (0x55555555UL) #define LBITSKIP55 (0x55555555UL) @@ -67,53 +64,48 @@ static const char valid_change[16] = { 1, 0, 0, 0 }; -static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, - unsigned char old_state, - struct gfs2_bitmap **rbi); +static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext, + const struct gfs2_inode *ip, bool nowrap); + /** * gfs2_setbit - Set a bit in the bitmaps - * @rgd: the resource group descriptor - * @buf2: the clone buffer that holds the bitmaps - * @bi: the bitmap structure - * @block: the block to set + * @rbm: The position of the bit to set + * @do_clone: Also set the clone bitmap, if it exists * @new_state: the new state of the block * */ -static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2, - struct gfs2_bitmap *bi, u32 block, +static inline void gfs2_setbit(const struct gfs2_rbm *rbm, bool do_clone, unsigned char new_state) { unsigned char *byte1, *byte2, *end, cur_state; - unsigned int buflen = bi->bi_len; - const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; + unsigned int buflen = rbm->bi->bi_len; + const unsigned int bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE; - byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY); - end = bi->bi_bh->b_data + bi->bi_offset + buflen; + byte1 = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY); + end = 
rbm->bi->bi_bh->b_data + rbm->bi->bi_offset + buflen; BUG_ON(byte1 >= end); cur_state = (*byte1 >> bit) & GFS2_BIT_MASK; if (unlikely(!valid_change[new_state * 4 + cur_state])) { - printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, " - "new_state=%d\n", - (unsigned long long)block, cur_state, new_state); - printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n", - (unsigned long long)rgd->rd_addr, - (unsigned long)bi->bi_start); - printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n", - (unsigned long)bi->bi_offset, - (unsigned long)bi->bi_len); + printk(KERN_WARNING "GFS2: buf_blk = 0x%x old_state=%d, " + "new_state=%d\n", rbm->offset, cur_state, new_state); + printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%x\n", + (unsigned long long)rbm->rgd->rd_addr, + rbm->bi->bi_start); + printk(KERN_WARNING "GFS2: bi_offset=0x%x bi_len=0x%x\n", + rbm->bi->bi_offset, rbm->bi->bi_len); dump_stack(); - gfs2_consist_rgrpd(rgd); + gfs2_consist_rgrpd(rbm->rgd); return; } *byte1 ^= (cur_state ^ new_state) << bit; - if (buf2) { - byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY); + if (do_clone && rbm->bi->bi_clone) { + byte2 = rbm->bi->bi_clone + rbm->bi->bi_offset + (rbm->offset / GFS2_NBBY); cur_state = (*byte2 >> bit) & GFS2_BIT_MASK; *byte2 ^= (cur_state ^ new_state) << bit; } @@ -121,30 +113,21 @@ static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2, /** * gfs2_testbit - test a bit in the bitmaps - * @rgd: the resource group descriptor - * @buffer: the buffer that holds the bitmaps - * @buflen: the length (in bytes) of the buffer - * @block: the block to read + * @rbm: The bit to test * + * Returns: The two bit block state of the requested bit */ -static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd, - const unsigned char *buffer, - unsigned int buflen, u32 block) +static inline u8 gfs2_testbit(const struct gfs2_rbm *rbm) { - const unsigned char *byte, *end; - unsigned char cur_state; + const u8 *buffer = rbm->bi->bi_bh->b_data + rbm->bi->bi_offset; + const u8 *byte; unsigned int bit; - byte = buffer + (block / GFS2_NBBY); - bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE; - end = buffer + buflen; - - gfs2_assert(rgd->rd_sbd, byte < end); + byte = buffer + (rbm->offset / GFS2_NBBY); + bit = (rbm->offset % GFS2_NBBY) * GFS2_BIT_SIZE; - cur_state = (*byte >> bit) & GFS2_BIT_MASK; - - return cur_state; + return (*byte >> bit) & GFS2_BIT_MASK; } /** @@ -192,7 +175,7 @@ static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state) */ static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs) { - u64 startblk = gfs2_rs_startblk(rs); + u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm); if (blk >= startblk + rs->rs_free) return 1; @@ -201,36 +184,6 @@ static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs) return 0; } -/** - * rs_find - Find a rgrp multi-block reservation that contains a given block - * @rgd: The rgrp - * @rgblk: The block we're looking for, relative to the rgrp - */ -static struct gfs2_blkreserv *rs_find(struct gfs2_rgrpd *rgd, u32 rgblk) -{ - struct rb_node **newn; - int rc; - u64 fsblk = rgblk + rgd->rd_data0; - - spin_lock(&rgd->rd_rsspin); - newn = &rgd->rd_rstree.rb_node; - while (*newn) { - struct gfs2_blkreserv *cur = - rb_entry(*newn, struct gfs2_blkreserv, rs_node); - rc = rs_cmp(fsblk, 1, cur); - if (rc < 0) - newn = &((*newn)->rb_left); - else if (rc > 0) - newn = &((*newn)->rb_right); - else { - spin_unlock(&rgd->rd_rsspin); - return cur; - } - } - spin_unlock(&rgd->rd_rsspin); - return NULL; -} - /** * 
gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing * a block in a given allocation state. @@ -262,8 +215,6 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len, u64 mask = 0x5555555555555555ULL; u32 bit; - BUG_ON(state > 3); - /* Mask off bits we don't care about at the start of the search */ mask <<= spoint; tmp = gfs2_bit_search(ptr, mask, state); @@ -284,6 +235,131 @@ static u32 gfs2_bitfit(const u8 *buf, const unsigned int len, return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit; } +/** + * gfs2_rbm_from_block - Set the rbm based upon rgd and block number + * @rbm: The rbm with rgd already set correctly + * @block: The block number (filesystem relative) + * + * This sets the bi and offset members of an rbm based on a + * resource group and a filesystem relative block number. The + * resource group must be set in the rbm on entry, the bi and + * offset members will be set by this function. + * + * Returns: 0 on success, or an error code + */ + +static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block) +{ + u64 rblock = block - rbm->rgd->rd_data0; + u32 goal = (u32)rblock; + int x; + + if (WARN_ON_ONCE(rblock > UINT_MAX)) + return -EINVAL; + if (block >= rbm->rgd->rd_data0 + rbm->rgd->rd_data) + return -E2BIG; + + for (x = 0; x < rbm->rgd->rd_length; x++) { + rbm->bi = rbm->rgd->rd_bits + x; + if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) { + rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY); + break; + } + } + + return 0; +} + +/** + * gfs2_unaligned_extlen - Look for free blocks which are not byte aligned + * @rbm: Position to search (value/result) + * @n_unaligned: Number of unaligned blocks to check + * @len: Decremented for each block found (terminate on zero) + * + * Returns: true if a non-free block is encountered + */ + +static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *len) +{ + u64 block; + u32 n; + u8 res; + + for (n = 0; n < n_unaligned; n++) { + res = gfs2_testbit(rbm); + if (res != GFS2_BLKST_FREE) + return true; + (*len)--; + if (*len == 0) + return true; + block = gfs2_rbm_to_block(rbm); + if (gfs2_rbm_from_block(rbm, block + 1)) + return true; + } + + return false; +} + +/** + * gfs2_free_extlen - Return extent length of free blocks + * @rbm: Starting position + * @len: Max length to check + * + * Starting at the block specified by the rbm, see how many free blocks + * there are, not reading more than len blocks ahead. This can be done + * using memchr_inv when the blocks are byte aligned, but has to be done + * on a block by block basis in case of unaligned blocks. Also this + * function can cope with bitmap boundaries (although it must stop on + * a resource group boundary) + * + * Returns: Number of free blocks in the extent + */ + +static u32 gfs2_free_extlen(const struct gfs2_rbm *rrbm, u32 len) +{ + struct gfs2_rbm rbm = *rrbm; + u32 n_unaligned = rbm.offset & 3; + u32 size = len; + u32 bytes; + u32 chunk_size; + u8 *ptr, *start, *end; + u64 block; + + if (n_unaligned && + gfs2_unaligned_extlen(&rbm, 4 - n_unaligned, &len)) + goto out; + + n_unaligned = len & 3; + /* Start is now byte aligned */ + while (len > 3) { + start = rbm.bi->bi_bh->b_data; + if (rbm.bi->bi_clone) + start = rbm.bi->bi_clone; + end = start + rbm.bi->bi_bh->b_size; + start += rbm.bi->bi_offset; + BUG_ON(rbm.offset & 3); + start += (rbm.offset / GFS2_NBBY); + bytes = min_t(u32, len / GFS2_NBBY, (end - start)); + ptr = memchr_inv(start, 0, bytes); + chunk_size = ((ptr == NULL) ? 
bytes : (ptr - start)); + chunk_size *= GFS2_NBBY; + BUG_ON(len < chunk_size); + len -= chunk_size; + block = gfs2_rbm_to_block(&rbm); + gfs2_rbm_from_block(&rbm, block + chunk_size); + n_unaligned = 3; + if (ptr) + break; + n_unaligned = len & 3; + } + + /* Deal with any bits left over at the end */ + if (n_unaligned) + gfs2_unaligned_extlen(&rbm, n_unaligned, &len); +out: + return size - len; +} + /** * gfs2_bitcount - count the number of bits in a certain state * @rgd: the resource group descriptor @@ -487,6 +563,8 @@ int gfs2_rs_alloc(struct gfs2_inode *ip) if (!res) error = -ENOMEM; + RB_CLEAR_NODE(&res->rs_node); + down_write(&ip->i_rw_mutex); if (ip->i_res) kmem_cache_free(gfs2_rsrv_cachep, res); @@ -496,11 +574,12 @@ int gfs2_rs_alloc(struct gfs2_inode *ip) return error; } -static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs) +static void dump_rs(struct seq_file *seq, const struct gfs2_blkreserv *rs) { - gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n", - rs->rs_rgd->rd_addr, gfs2_rs_startblk(rs), rs->rs_biblk, - rs->rs_free); + gfs2_print_dbg(seq, " B: n:%llu s:%llu b:%u f:%u\n", + (unsigned long long)rs->rs_inum, + (unsigned long long)gfs2_rbm_to_block(&rs->rs_rbm), + rs->rs_rbm.offset, rs->rs_free); } /** @@ -508,41 +587,26 @@ static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs) * @rs: The reservation to remove * */ -static void __rs_deltree(struct gfs2_blkreserv *rs) +static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs) { struct gfs2_rgrpd *rgd; if (!gfs2_rs_active(rs)) return; - rgd = rs->rs_rgd; - /* We can't do this: The reason is that when the rgrp is invalidated, - it's in the "middle" of acquiring the glock, but the HOLDER bit - isn't set yet: - BUG_ON(!gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl));*/ - trace_gfs2_rs(NULL, rs, TRACE_RS_TREEDEL); - - if (!RB_EMPTY_ROOT(&rgd->rd_rstree)) - rb_erase(&rs->rs_node, &rgd->rd_rstree); - BUG_ON(!rgd->rd_rs_cnt); - rgd->rd_rs_cnt--; + rgd = rs->rs_rbm.rgd; + trace_gfs2_rs(rs, TRACE_RS_TREEDEL); + rb_erase(&rs->rs_node, &rgd->rd_rstree); + RB_CLEAR_NODE(&rs->rs_node); if (rs->rs_free) { /* return reserved blocks to the rgrp and the ip */ - BUG_ON(rs->rs_rgd->rd_reserved < rs->rs_free); - rs->rs_rgd->rd_reserved -= rs->rs_free; + BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free); + rs->rs_rbm.rgd->rd_reserved -= rs->rs_free; rs->rs_free = 0; - clear_bit(GBF_FULL, &rs->rs_bi->bi_flags); + clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags); smp_mb__after_clear_bit(); } - /* We can't change any of the step 1 or step 2 components of the rs. - E.g. We can't set rs_rgd to NULL because the rgd glock is held and - dequeued through this pointer. 
- Can't: atomic_set(&rs->rs_sizehint, 0); - Can't: rs->rs_requested = 0; - Can't: rs->rs_rgd = NULL;*/ - rs->rs_bi = NULL; - rs->rs_biblk = 0; } /** @@ -550,17 +614,16 @@ static void __rs_deltree(struct gfs2_blkreserv *rs) * @rs: The reservation to remove * */ -void gfs2_rs_deltree(struct gfs2_blkreserv *rs) +void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs) { struct gfs2_rgrpd *rgd; - if (!gfs2_rs_active(rs)) - return; - - rgd = rs->rs_rgd; - spin_lock(&rgd->rd_rsspin); - __rs_deltree(rs); - spin_unlock(&rgd->rd_rsspin); + rgd = rs->rs_rbm.rgd; + if (rgd) { + spin_lock(&rgd->rd_rsspin); + __rs_deltree(ip, rs); + spin_unlock(&rgd->rd_rsspin); + } } /** @@ -572,8 +635,7 @@ void gfs2_rs_delete(struct gfs2_inode *ip) { down_write(&ip->i_rw_mutex); if (ip->i_res) { - gfs2_rs_deltree(ip->i_res); - trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE); + gfs2_rs_deltree(ip, ip->i_res); BUG_ON(ip->i_res->rs_free); kmem_cache_free(gfs2_rsrv_cachep, ip->i_res); ip->i_res = NULL; @@ -597,7 +659,7 @@ static void return_all_reservations(struct gfs2_rgrpd *rgd) spin_lock(&rgd->rd_rsspin); while ((n = rb_first(&rgd->rd_rstree))) { rs = rb_entry(n, struct gfs2_blkreserv, rs_node); - __rs_deltree(rs); + __rs_deltree(NULL, rs); } spin_unlock(&rgd->rd_rsspin); } @@ -1270,211 +1332,276 @@ int gfs2_fitrim(struct file *filp, void __user *argp) /** * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree - * @bi: the bitmap with the blocks * @ip: the inode structure - * @biblk: the 32-bit block number relative to the start of the bitmap - * @amount: the number of blocks to reserve * - * Returns: NULL - reservation was already taken, so not inserted - * pointer to the inserted reservation */ -static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi, - struct gfs2_inode *ip, u32 biblk, - int amount) +static void rs_insert(struct gfs2_inode *ip) { struct rb_node **newn, *parent = NULL; int rc; struct gfs2_blkreserv *rs = ip->i_res; - struct gfs2_rgrpd *rgd = rs->rs_rgd; - u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0; + struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd; + u64 fsblock = gfs2_rbm_to_block(&rs->rs_rbm); + + BUG_ON(gfs2_rs_active(rs)); spin_lock(&rgd->rd_rsspin); newn = &rgd->rd_rstree.rb_node; - BUG_ON(!ip->i_res); - BUG_ON(gfs2_rs_active(rs)); - /* Figure out where to put new node */ - /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/ while (*newn) { struct gfs2_blkreserv *cur = rb_entry(*newn, struct gfs2_blkreserv, rs_node); parent = *newn; - rc = rs_cmp(fsblock, amount, cur); + rc = rs_cmp(fsblock, rs->rs_free, cur); if (rc > 0) newn = &((*newn)->rb_right); else if (rc < 0) newn = &((*newn)->rb_left); else { spin_unlock(&rgd->rd_rsspin); - return NULL; /* reservation already in use */ + WARN_ON(1); + return; } } - /* Do our reservation work */ - rs = ip->i_res; - rs->rs_free = amount; - rs->rs_biblk = biblk; - rs->rs_bi = bi; rb_link_node(&rs->rs_node, parent, newn); rb_insert_color(&rs->rs_node, &rgd->rd_rstree); - /* Do our inode accounting for the reservation */ - /*BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));*/ - /* Do our rgrp accounting for the reservation */ - rgd->rd_reserved += amount; /* blocks reserved */ - rgd->rd_rs_cnt++; /* number of in-tree reservations */ + rgd->rd_reserved += rs->rs_free; /* blocks reserved */ spin_unlock(&rgd->rd_rsspin); - trace_gfs2_rs(ip, rs, TRACE_RS_INSERT); - return rs; + trace_gfs2_rs(rs, TRACE_RS_INSERT); } /** - * unclaimed_blocks - return number of blocks that aren't spoken for - */ -static u32 
unclaimed_blocks(struct gfs2_rgrpd *rgd) -{ - return rgd->rd_free_clone - rgd->rd_reserved; -} - -/** - * rg_mblk_search - find a group of multiple free blocks + * rg_mblk_search - find a group of multiple free blocks to form a reservation * @rgd: the resource group descriptor - * @rs: the block reservation * @ip: pointer to the inode for which we're reserving blocks + * @requested: number of blocks required for this allocation * - * This is very similar to rgblk_search, except we're looking for whole - * 64-bit words that represent a chunk of 32 free blocks. I'm only focusing - * on aligned dwords for speed's sake. - * - * Returns: 0 if successful or BFITNOENT if there isn't enough free space */ -static int rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) +static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, + unsigned requested) { - struct gfs2_bitmap *bi = rgd->rd_bits; - const u32 length = rgd->rd_length; - u32 blk; - unsigned int buf, x, search_bytes; - u8 *buffer = NULL; - u8 *ptr, *end, *nonzero; - u32 goal, rsv_bytes; - struct gfs2_blkreserv *rs; - u32 best_rs_bytes, unclaimed; - int best_rs_blocks; + struct gfs2_rbm rbm = { .rgd = rgd, }; + u64 goal; + struct gfs2_blkreserv *rs = ip->i_res; + u32 extlen; + u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved; + int ret; + + extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested); + extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks); + if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen)) + return; /* Find bitmap block that contains bits for goal block */ if (rgrp_contains_block(rgd, ip->i_goal)) - goal = ip->i_goal - rgd->rd_data0; + goal = ip->i_goal; else - goal = rgd->rd_last_alloc; - for (buf = 0; buf < length; buf++) { - bi = rgd->rd_bits + buf; - /* Convert scope of "goal" from rgrp-wide to within - found bit block */ - if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) { - goal -= bi->bi_start * GFS2_NBBY; - goto do_search; - } + goal = rgd->rd_last_alloc + rgd->rd_data0; + + if (WARN_ON(gfs2_rbm_from_block(&rbm, goal))) + return; + + ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, extlen, ip, true); + if (ret == 0) { + rs->rs_rbm = rbm; + rs->rs_free = extlen; + rs->rs_inum = ip->i_no_addr; + rs_insert(ip); } - buf = 0; - goal = 0; - -do_search: - best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint), - (RGRP_RSRV_MINBLKS * rgd->rd_length)); - best_rs_bytes = (best_rs_blocks * - (1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) / - GFS2_NBBY; /* 1 + is for our not-yet-created reservation */ - best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64)); - unclaimed = unclaimed_blocks(rgd); - if (best_rs_bytes * GFS2_NBBY > unclaimed) - best_rs_bytes = unclaimed >> GFS2_BIT_SIZE; - - for (x = 0; x <= length; x++) { - bi = rgd->rd_bits + buf; +} - if (test_bit(GBF_FULL, &bi->bi_flags)) - goto skip; +/** + * gfs2_next_unreserved_block - Return next block that is not reserved + * @rgd: The resource group + * @block: The starting block + * @length: The required length + * @ip: Ignore any reservations for this inode + * + * If the block does not appear in any reservation, then return the + * block number unchanged. If it does appear in the reservation, then + * keep looking through the tree of reservations in order to find the + * first block number which is not reserved. 
+ */ - WARN_ON(!buffer_uptodate(bi->bi_bh)); - if (bi->bi_clone) - buffer = bi->bi_clone + bi->bi_offset; +static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block, + u32 length, + const struct gfs2_inode *ip) +{ + struct gfs2_blkreserv *rs; + struct rb_node *n; + int rc; + + spin_lock(&rgd->rd_rsspin); + n = rgd->rd_rstree.rb_node; + while (n) { + rs = rb_entry(n, struct gfs2_blkreserv, rs_node); + rc = rs_cmp(block, length, rs); + if (rc < 0) + n = n->rb_left; + else if (rc > 0) + n = n->rb_right; else - buffer = bi->bi_bh->b_data + bi->bi_offset; - - /* We have to keep the reservations aligned on u64 boundaries - otherwise we could get situations where a byte can't be - used because it's after a reservation, but a free bit still - is within the reservation's area. */ - ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64)); - end = (buffer + bi->bi_len); - while (ptr < end) { - rsv_bytes = 0; - if ((ptr + best_rs_bytes) <= end) - search_bytes = best_rs_bytes; - else - search_bytes = end - ptr; - BUG_ON(!search_bytes); - nonzero = memchr_inv(ptr, 0, search_bytes); - /* If the lot is all zeroes, reserve the whole size. If - there's enough zeroes to satisfy the request, use - what we can. If there's not enough, keep looking. */ - if (nonzero == NULL) - rsv_bytes = search_bytes; - else if ((nonzero - ptr) * GFS2_NBBY >= - ip->i_res->rs_requested) - rsv_bytes = (nonzero - ptr); - - if (rsv_bytes) { - blk = ((ptr - buffer) * GFS2_NBBY); - BUG_ON(blk >= bi->bi_len * GFS2_NBBY); - rs = rs_insert(bi, ip, blk, - rsv_bytes * GFS2_NBBY); - if (IS_ERR(rs)) - return PTR_ERR(rs); - if (rs) - return 0; - } - ptr += ALIGN(search_bytes, sizeof(u64)); + break; + } + + if (n) { + while ((rs_cmp(block, length, rs) == 0) && (ip->i_res != rs)) { + block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free; + n = n->rb_right; + if (n == NULL) + break; + rs = rb_entry(n, struct gfs2_blkreserv, rs_node); } -skip: - /* Try next bitmap block (wrap back to rgrp header - if at end) */ - buf++; - buf %= length; - goal = 0; } - return BFITNOENT; + spin_unlock(&rgd->rd_rsspin); + return block; } /** - * try_rgrp_fit - See if a given reservation will fit in a given RG - * @rgd: the RG data - * @ip: the inode + * gfs2_reservation_check_and_update - Check for reservations during block alloc + * @rbm: The current position in the resource group + * @ip: The inode for which we are searching for blocks + * @minext: The minimum extent length * - * If there's room for the requested blocks to be allocated from the RG: - * This will try to get a multi-block reservation first, and if that doesn't - * fit, it will take what it can. + * This checks the current position in the rgrp to see whether there is + * a reservation covering this block. If not then this function is a + * no-op. If there is, then the position is moved to the end of the + * contiguous reservation(s) so that we are pointing at the first + * non-reserved block. 
* - * Returns: 1 on success (it fits), 0 on failure (it doesn't fit) + * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error */ -static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) +static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm, + const struct gfs2_inode *ip, + u32 minext) { - struct gfs2_blkreserv *rs = ip->i_res; + u64 block = gfs2_rbm_to_block(rbm); + u32 extlen = 1; + u64 nblock; + int ret; - if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR)) + /* + * If we have a minimum extent length, then skip over any extent + * which is less than the min extent length in size. + */ + if (minext) { + extlen = gfs2_free_extlen(rbm, minext); + nblock = block + extlen; + if (extlen < minext) + goto fail; + } + + /* + * Check the extent which has been found against the reservations + * and skip if parts of it are already reserved + */ + nblock = gfs2_next_unreserved_block(rbm->rgd, block, extlen, ip); + if (nblock == block) return 0; - /* Look for a multi-block reservation. */ - if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS && - rg_mblk_search(rgd, ip) != BFITNOENT) - return 1; - if (unclaimed_blocks(rgd) >= rs->rs_requested) - return 1; +fail: + ret = gfs2_rbm_from_block(rbm, nblock); + if (ret < 0) + return ret; + return 1; +} - return 0; +/** + * gfs2_rbm_find - Look for blocks of a particular state + * @rbm: Value/result starting position and final position + * @state: The state which we want to find + * @minext: The requested extent length (0 for a single block) + * @ip: If set, check for reservations + * @nowrap: Stop looking at the end of the rgrp, rather than wrapping + * around until we've reached the starting point. + * + * Side effects: + * - If looking for free blocks, we set GBF_FULL on each bitmap which + * has no free blocks in it. + * + * Returns: 0 on success, -ENOSPC if there is no block of the requested state + */ + +static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state, u32 minext, + const struct gfs2_inode *ip, bool nowrap) +{ + struct buffer_head *bh; + struct gfs2_bitmap *initial_bi; + u32 initial_offset; + u32 offset; + u8 *buffer; + int index; + int n = 0; + int iters = rbm->rgd->rd_length; + int ret; + + /* If we are not starting at the beginning of a bitmap, then we + * need to add one to the bitmap count to ensure that we search + * the starting bitmap twice. 
+ */ + if (rbm->offset != 0) + iters++; + + while(1) { + if (test_bit(GBF_FULL, &rbm->bi->bi_flags) && + (state == GFS2_BLKST_FREE)) + goto next_bitmap; + + bh = rbm->bi->bi_bh; + buffer = bh->b_data + rbm->bi->bi_offset; + WARN_ON(!buffer_uptodate(bh)); + if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone) + buffer = rbm->bi->bi_clone + rbm->bi->bi_offset; + initial_offset = rbm->offset; + offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state); + if (offset == BFITNOENT) + goto bitmap_full; + rbm->offset = offset; + if (ip == NULL) + return 0; + + initial_bi = rbm->bi; + ret = gfs2_reservation_check_and_update(rbm, ip, minext); + if (ret == 0) + return 0; + if (ret > 0) { + n += (rbm->bi - initial_bi); + goto next_iter; + } + if (ret == -E2BIG) { + index = 0; + rbm->offset = 0; + n += (rbm->bi - initial_bi); + goto res_covered_end_of_rgrp; + } + return ret; + +bitmap_full: /* Mark bitmap as full and fall through */ + if ((state == GFS2_BLKST_FREE) && initial_offset == 0) + set_bit(GBF_FULL, &rbm->bi->bi_flags); + +next_bitmap: /* Find next bitmap in the rgrp */ + rbm->offset = 0; + index = rbm->bi - rbm->rgd->rd_bits; + index++; + if (index == rbm->rgd->rd_length) + index = 0; +res_covered_end_of_rgrp: + rbm->bi = &rbm->rgd->rd_bits[index]; + if ((index == 0) && nowrap) + break; + n++; +next_iter: + if (n >= iters) + break; + } + + return -ENOSPC; } /** @@ -1489,34 +1616,33 @@ static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip) static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip) { - u32 goal = 0, block; - u64 no_addr; + u64 block; struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_glock *gl; struct gfs2_inode *ip; int error; int found = 0; - struct gfs2_bitmap *bi; + struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 }; - while (goal < rgd->rd_data) { + while (1) { down_write(&sdp->sd_log_flush_lock); - block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED, &bi); + error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, 0, NULL, true); up_write(&sdp->sd_log_flush_lock); - if (block == BFITNOENT) + if (error == -ENOSPC) + break; + if (WARN_ON_ONCE(error)) break; - block = gfs2_bi2rgd_blk(bi, block); - /* rgblk_search can return a block < goal, so we need to - keep it marching forward. 
*/ - no_addr = block + rgd->rd_data0; - goal = max(block + 1, goal + 1); - if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked) + block = gfs2_rbm_to_block(&rbm); + if (gfs2_rbm_from_block(&rbm, block + 1)) + break; + if (*last_unlinked != NO_BLOCK && block <= *last_unlinked) continue; - if (no_addr == skip) + if (block == skip) continue; - *last_unlinked = no_addr; + *last_unlinked = block; - error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl); + error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl); if (error) continue; @@ -1543,6 +1669,19 @@ static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip return; } +static bool gfs2_select_rgrp(struct gfs2_rgrpd **pos, const struct gfs2_rgrpd *begin) +{ + struct gfs2_rgrpd *rgd = *pos; + + rgd = gfs2_rgrpd_get_next(rgd); + if (rgd == NULL) + rgd = gfs2_rgrpd_get_next(NULL); + *pos = rgd; + if (rgd != begin) /* If we didn't wrap */ + return true; + return false; +} + /** * gfs2_inplace_reserve - Reserve space in the filesystem * @ip: the inode to reserve space for @@ -1562,103 +1701,96 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested) if (sdp->sd_args.ar_rgrplvb) flags |= GL_SKIP; - rs->rs_requested = requested; - if (gfs2_assert_warn(sdp, requested)) { - error = -EINVAL; - goto out; - } + if (gfs2_assert_warn(sdp, requested)) + return -EINVAL; if (gfs2_rs_active(rs)) { - begin = rs->rs_rgd; + begin = rs->rs_rbm.rgd; flags = 0; /* Yoda: Do or do not. There is no try */ } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) { - rs->rs_rgd = begin = ip->i_rgd; + rs->rs_rbm.rgd = begin = ip->i_rgd; } else { - rs->rs_rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); + rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); } - if (rs->rs_rgd == NULL) + if (rs->rs_rbm.rgd == NULL) return -EBADSLT; while (loops < 3) { - rg_locked = 0; - - if (gfs2_glock_is_locked_by_me(rs->rs_rgd->rd_gl)) { - rg_locked = 1; - error = 0; - } else if (!loops && !gfs2_rs_active(rs) && - rs->rs_rgd->rd_rs_cnt > RGRP_RSRV_MAX_CONTENDERS) { - /* If the rgrp already is maxed out for contenders, - we can eliminate it as a "first pass" without even - requesting the rgrp glock. */ - error = GLR_TRYFAILED; - } else { - error = gfs2_glock_nq_init(rs->rs_rgd->rd_gl, + rg_locked = 1; + + if (!gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) { + rg_locked = 0; + error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl, LM_ST_EXCLUSIVE, flags, &rs->rs_rgd_gh); - if (!error && sdp->sd_args.ar_rgrplvb) { - error = update_rgrp_lvb(rs->rs_rgd); - if (error) { + if (error == GLR_TRYFAILED) + goto next_rgrp; + if (unlikely(error)) + return error; + if (sdp->sd_args.ar_rgrplvb) { + error = update_rgrp_lvb(rs->rs_rbm.rgd); + if (unlikely(error)) { gfs2_glock_dq_uninit(&rs->rs_rgd_gh); return error; } } } - switch (error) { - case 0: - if (gfs2_rs_active(rs)) { - if (unclaimed_blocks(rs->rs_rgd) + - rs->rs_free >= rs->rs_requested) { - ip->i_rgd = rs->rs_rgd; - return 0; - } - /* We have a multi-block reservation, but the - rgrp doesn't have enough free blocks to - satisfy the request. Free the reservation - and look for a suitable rgrp. 
*/ - gfs2_rs_deltree(rs); - } - if (try_rgrp_fit(rs->rs_rgd, ip)) { - if (sdp->sd_args.ar_rgrplvb) - gfs2_rgrp_bh_get(rs->rs_rgd); - ip->i_rgd = rs->rs_rgd; - return 0; - } - if (rs->rs_rgd->rd_flags & GFS2_RDF_CHECK) { - if (sdp->sd_args.ar_rgrplvb) - gfs2_rgrp_bh_get(rs->rs_rgd); - try_rgrp_unlink(rs->rs_rgd, &last_unlinked, - ip->i_no_addr); - } - if (!rg_locked) - gfs2_glock_dq_uninit(&rs->rs_rgd_gh); - /* fall through */ - case GLR_TRYFAILED: - rs->rs_rgd = gfs2_rgrpd_get_next(rs->rs_rgd); - rs->rs_rgd = rs->rs_rgd ? : begin; /* if NULL, wrap */ - if (rs->rs_rgd != begin) /* If we didn't wrap */ - break; - flags &= ~LM_FLAG_TRY; - loops++; - /* Check that fs hasn't grown if writing to rindex */ - if (ip == GFS2_I(sdp->sd_rindex) && - !sdp->sd_rindex_uptodate) { - error = gfs2_ri_update(ip); - if (error) - goto out; - } else if (loops == 2) - /* Flushing the log may release space */ - gfs2_log_flush(sdp, NULL); - break; - default: - goto out; + /* Skip unuseable resource groups */ + if (rs->rs_rbm.rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR)) + goto skip_rgrp; + + if (sdp->sd_args.ar_rgrplvb) + gfs2_rgrp_bh_get(rs->rs_rbm.rgd); + + /* Get a reservation if we don't already have one */ + if (!gfs2_rs_active(rs)) + rg_mblk_search(rs->rs_rbm.rgd, ip, requested); + + /* Skip rgrps when we can't get a reservation on first pass */ + if (!gfs2_rs_active(rs) && (loops < 1)) + goto check_rgrp; + + /* If rgrp has enough free space, use it */ + if (rs->rs_rbm.rgd->rd_free_clone >= requested) { + ip->i_rgd = rs->rs_rbm.rgd; + return 0; + } + + /* Drop reservation, if we couldn't use reserved rgrp */ + if (gfs2_rs_active(rs)) + gfs2_rs_deltree(ip, rs); +check_rgrp: + /* Check for unlinked inodes which can be reclaimed */ + if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) + try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked, + ip->i_no_addr); +skip_rgrp: + /* Unlock rgrp if required */ + if (!rg_locked) + gfs2_glock_dq_uninit(&rs->rs_rgd_gh); +next_rgrp: + /* Find the next rgrp, and continue looking */ + if (gfs2_select_rgrp(&rs->rs_rbm.rgd, begin)) + continue; + + /* If we've scanned all the rgrps, but found no free blocks + * then this checks for some less likely conditions before + * trying again. 
+ */ + flags &= ~LM_FLAG_TRY; + loops++; + /* Check that fs hasn't grown if writing to rindex */ + if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) { + error = gfs2_ri_update(ip); + if (error) + return error; } + /* Flushing the log may release space */ + if (loops == 2) + gfs2_log_flush(sdp, NULL); } - error = -ENOSPC; -out: - if (error) - rs->rs_requested = 0; - return error; + return -ENOSPC; } /** @@ -1672,15 +1804,8 @@ void gfs2_inplace_release(struct gfs2_inode *ip) { struct gfs2_blkreserv *rs = ip->i_res; - if (!rs) - return; - - if (!rs->rs_free) - gfs2_rs_deltree(rs); - if (rs->rs_rgd_gh.gh_gl) gfs2_glock_dq_uninit(&rs->rs_rgd_gh); - rs->rs_requested = 0; } /** @@ -1693,173 +1818,47 @@ void gfs2_inplace_release(struct gfs2_inode *ip) static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block) { - struct gfs2_bitmap *bi = NULL; - u32 length, rgrp_block, buf_block; - unsigned int buf; - unsigned char type; - - length = rgd->rd_length; - rgrp_block = block - rgd->rd_data0; - - for (buf = 0; buf < length; buf++) { - bi = rgd->rd_bits + buf; - if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY) - break; - } + struct gfs2_rbm rbm = { .rgd = rgd, }; + int ret; - gfs2_assert(rgd->rd_sbd, buf < length); - buf_block = rgrp_block - bi->bi_start * GFS2_NBBY; + ret = gfs2_rbm_from_block(&rbm, block); + WARN_ON_ONCE(ret != 0); - type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset, - bi->bi_len, buf_block); - - return type; + return gfs2_testbit(&rbm); } -/** - * rgblk_search - find a block in @state - * @rgd: the resource group descriptor - * @goal: the goal block within the RG (start here to search for avail block) - * @state: GFS2_BLKST_XXX the before-allocation state to find - * @rbi: address of the pointer to the bitmap containing the block found - * - * Walk rgrp's bitmap to find bits that represent a block in @state. - * - * This function never fails, because we wouldn't call it unless we - * know (from reservation results, etc.) that a block is available. - * - * Scope of @goal is just within rgrp, not the whole filesystem. - * Scope of @returned block is just within bitmap, not the whole filesystem. - * - * Returns: the block number found relative to the bitmap rbi - */ - -static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal, unsigned char state, - struct gfs2_bitmap **rbi) -{ - struct gfs2_bitmap *bi = NULL; - const u32 length = rgd->rd_length; - u32 biblk = BFITNOENT; - unsigned int buf, x; - const u8 *buffer = NULL; - - *rbi = NULL; - /* Find bitmap block that contains bits for goal block */ - for (buf = 0; buf < length; buf++) { - bi = rgd->rd_bits + buf; - /* Convert scope of "goal" from rgrp-wide to within found bit block */ - if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) { - goal -= bi->bi_start * GFS2_NBBY; - goto do_search; - } - } - buf = 0; - goal = 0; - -do_search: - /* Search (up to entire) bitmap in this rgrp for allocatable block. - "x <= length", instead of "x < length", because we typically start - the search in the middle of a bit block, but if we can't find an - allocatable block anywhere else, we want to be able wrap around and - search in the first part of our first-searched bit block. */ - for (x = 0; x <= length; x++) { - bi = rgd->rd_bits + buf; - - if (test_bit(GBF_FULL, &bi->bi_flags) && - (state == GFS2_BLKST_FREE)) - goto skip; - - /* The GFS2_BLKST_UNLINKED state doesn't apply to the clone - bitmaps, so we must search the originals for that. 
*/ - buffer = bi->bi_bh->b_data + bi->bi_offset; - WARN_ON(!buffer_uptodate(bi->bi_bh)); - if (state != GFS2_BLKST_UNLINKED && bi->bi_clone) - buffer = bi->bi_clone + bi->bi_offset; - - while (1) { - struct gfs2_blkreserv *rs; - u32 rgblk; - - biblk = gfs2_bitfit(buffer, bi->bi_len, goal, state); - if (biblk == BFITNOENT) - break; - /* Check if this block is reserved() */ - rgblk = gfs2_bi2rgd_blk(bi, biblk); - rs = rs_find(rgd, rgblk); - if (rs == NULL) - break; - - BUG_ON(rs->rs_bi != bi); - biblk = BFITNOENT; - /* This should jump to the first block after the - reservation. */ - goal = rs->rs_biblk + rs->rs_free; - if (goal >= bi->bi_len * GFS2_NBBY) - break; - } - if (biblk != BFITNOENT) - break; - - if ((goal == 0) && (state == GFS2_BLKST_FREE)) - set_bit(GBF_FULL, &bi->bi_flags); - - /* Try next bitmap block (wrap back to rgrp header if at end) */ -skip: - buf++; - buf %= length; - goal = 0; - } - - if (biblk != BFITNOENT) - *rbi = bi; - - return biblk; -} /** * gfs2_alloc_extent - allocate an extent from a given bitmap - * @rgd: the resource group descriptor - * @bi: the bitmap within the rgrp - * @blk: the block within the bitmap + * @rbm: the resource group information * @dinode: TRUE if the first block we allocate is for a dinode - * @n: The extent length + * @n: The extent length (value/result) * - * Add the found bitmap buffer to the transaction. + * Add the bitmap buffer to the transaction. * Set the found bits to @new_state to change block's allocation state. - * Returns: starting block number of the extent (fs scope) */ -static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi, - u32 blk, bool dinode, unsigned int *n) +static void gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode, + unsigned int *n) { + struct gfs2_rbm pos = { .rgd = rbm->rgd, }; const unsigned int elen = *n; - u32 goal, rgblk; - const u8 *buffer = NULL; - struct gfs2_blkreserv *rs; - - *n = 0; - buffer = bi->bi_bh->b_data + bi->bi_offset; - gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); - gfs2_setbit(rgd, bi->bi_clone, bi, blk, - dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); - (*n)++; - goal = blk; + u64 block; + int ret; + + *n = 1; + block = gfs2_rbm_to_block(rbm); + gfs2_trans_add_bh(rbm->rgd->rd_gl, rbm->bi->bi_bh, 1); + gfs2_setbit(rbm, true, dinode ? 
GFS2_BLKST_DINODE : GFS2_BLKST_USED); + block++; while (*n < elen) { - goal++; - if (goal >= (bi->bi_len * GFS2_NBBY)) - break; - rgblk = gfs2_bi2rgd_blk(bi, goal); - rs = rs_find(rgd, rgblk); - if (rs) /* Oops, we bumped into someone's reservation */ - break; - if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) != - GFS2_BLKST_FREE) + ret = gfs2_rbm_from_block(&pos, block); + if (ret || gfs2_testbit(&pos) != GFS2_BLKST_FREE) break; - gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED); + gfs2_trans_add_bh(pos.rgd->rd_gl, pos.bi->bi_bh, 1); + gfs2_setbit(&pos, true, GFS2_BLKST_USED); (*n)++; + block++; } - blk = gfs2_bi2rgd_blk(bi, blk); - rgd->rd_last_alloc = blk + *n - 1; - return rgd->rd_data0 + blk; } /** @@ -1875,46 +1874,30 @@ static u64 gfs2_alloc_extent(struct gfs2_rgrpd *rgd, struct gfs2_bitmap *bi, static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart, u32 blen, unsigned char new_state) { - struct gfs2_rgrpd *rgd; - struct gfs2_bitmap *bi = NULL; - u32 length, rgrp_blk, buf_blk; - unsigned int buf; + struct gfs2_rbm rbm; - rgd = gfs2_blk2rgrpd(sdp, bstart, 1); - if (!rgd) { + rbm.rgd = gfs2_blk2rgrpd(sdp, bstart, 1); + if (!rbm.rgd) { if (gfs2_consist(sdp)) fs_err(sdp, "block = %llu\n", (unsigned long long)bstart); return NULL; } - length = rgd->rd_length; - - rgrp_blk = bstart - rgd->rd_data0; - while (blen--) { - for (buf = 0; buf < length; buf++) { - bi = rgd->rd_bits + buf; - if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY) - break; - } - - gfs2_assert(rgd->rd_sbd, buf < length); - - buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY; - rgrp_blk++; - - if (!bi->bi_clone) { - bi->bi_clone = kmalloc(bi->bi_bh->b_size, - GFP_NOFS | __GFP_NOFAIL); - memcpy(bi->bi_clone + bi->bi_offset, - bi->bi_bh->b_data + bi->bi_offset, - bi->bi_len); + gfs2_rbm_from_block(&rbm, bstart); + bstart++; + if (!rbm.bi->bi_clone) { + rbm.bi->bi_clone = kmalloc(rbm.bi->bi_bh->b_size, + GFP_NOFS | __GFP_NOFAIL); + memcpy(rbm.bi->bi_clone + rbm.bi->bi_offset, + rbm.bi->bi_bh->b_data + rbm.bi->bi_offset, + rbm.bi->bi_len); } - gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); - gfs2_setbit(rgd, NULL, bi, buf_blk, new_state); + gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.bi->bi_bh, 1); + gfs2_setbit(&rbm, false, new_state); } - return rgd; + return rbm.rgd; } /** @@ -1956,56 +1939,41 @@ static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd) } /** - * claim_reserved_blks - Claim previously reserved blocks - * @ip: the inode that's claiming the reservation - * @dinode: 1 if this block is a dinode block, otherwise data block - * @nblocks: desired extent length + * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation + * @ip: The inode we have just allocated blocks for + * @rbm: The start of the allocated blocks + * @len: The extent length * - * Lay claim to previously allocated block reservation blocks. - * Returns: Starting block number of the blocks claimed. - * Sets *nblocks to the actual extent length allocated. + * Adjusts a reservation after an allocation has taken place. If the + * reservation does not match the allocation, or if it is now empty + * then it is removed. 
*/ -static u64 claim_reserved_blks(struct gfs2_inode *ip, bool dinode, - unsigned int *nblocks) + +static void gfs2_adjust_reservation(struct gfs2_inode *ip, + const struct gfs2_rbm *rbm, unsigned len) { struct gfs2_blkreserv *rs = ip->i_res; - struct gfs2_rgrpd *rgd = rs->rs_rgd; - struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - struct gfs2_bitmap *bi; - u64 start_block = gfs2_rs_startblk(rs); - const unsigned int elen = *nblocks; - - /*BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));*/ - gfs2_assert_withdraw(sdp, rgd); - /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/ - bi = rs->rs_bi; - gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1); - - for (*nblocks = 0; *nblocks < elen && rs->rs_free; (*nblocks)++) { - /* Make sure the bitmap hasn't changed */ - gfs2_setbit(rgd, bi->bi_clone, bi, rs->rs_biblk, - dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED); - rs->rs_biblk++; - rs->rs_free--; - - BUG_ON(!rgd->rd_reserved); - rgd->rd_reserved--; - dinode = false; - trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM); - } - - if (!rs->rs_free) { - struct gfs2_rgrpd *rgd = ip->i_res->rs_rgd; + struct gfs2_rgrpd *rgd = rbm->rgd; + unsigned rlen; + u64 block; + int ret; - gfs2_rs_deltree(rs); - /* -nblocks because we haven't returned to do the math yet. - I'm doing the math backwards to prevent negative numbers, - but think of it as: - if (unclaimed_blocks(rgd) - *nblocks >= RGRP_RSRV_MINBLKS */ - if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS + *nblocks) - rg_mblk_search(rgd, ip); + spin_lock(&rgd->rd_rsspin); + if (gfs2_rs_active(rs)) { + if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) { + block = gfs2_rbm_to_block(rbm); + ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len); + rlen = min(rs->rs_free, len); + rs->rs_free -= rlen; + rgd->rd_reserved -= rlen; + trace_gfs2_rs(rs, TRACE_RS_CLAIM); + if (rs->rs_free && !ret) + goto out; + } + __rs_deltree(ip, rs); } - return start_block; +out: + spin_unlock(&rgd->rd_rsspin); } /** @@ -2024,47 +1992,40 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head *dibh; - struct gfs2_rgrpd *rgd; + struct gfs2_rbm rbm = { .rgd = ip->i_rgd, }; unsigned int ndata; - u32 goal, blk; /* block, within the rgrp scope */ + u64 goal; u64 block; /* block, within the file system scope */ int error; - struct gfs2_bitmap *bi; - /* Only happens if there is a bug in gfs2, return something distinctive - * to ensure that it is noticed. - */ - if (ip->i_res->rs_requested == 0) - return -ECANCELED; - - /* Check if we have a multi-block reservation, and if so, claim the - next free block from it. 
*/ - if (gfs2_rs_active(ip->i_res)) { - BUG_ON(!ip->i_res->rs_free); - rgd = ip->i_res->rs_rgd; - block = claim_reserved_blks(ip, dinode, nblocks); - } else { - rgd = ip->i_rgd; + if (gfs2_rs_active(ip->i_res)) + goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm); + else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal)) + goal = ip->i_goal; + else + goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0; - if (!dinode && rgrp_contains_block(rgd, ip->i_goal)) - goal = ip->i_goal - rgd->rd_data0; - else - goal = rgd->rd_last_alloc; - - blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, &bi); - - /* Since all blocks are reserved in advance, this shouldn't - happen */ - if (blk == BFITNOENT) { - printk(KERN_WARNING "BFITNOENT, nblocks=%u\n", - *nblocks); - printk(KERN_WARNING "FULL=%d\n", - test_bit(GBF_FULL, &rgd->rd_bits->bi_flags)); - goto rgrp_error; - } + gfs2_rbm_from_block(&rbm, goal); + error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, ip, false); - block = gfs2_alloc_extent(rgd, bi, blk, dinode, nblocks); + if (error == -ENOSPC) { + gfs2_rbm_from_block(&rbm, goal); + error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, 0, NULL, false); + } + + /* Since all blocks are reserved in advance, this shouldn't happen */ + if (error) { + fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d\n", + (unsigned long long)ip->i_no_addr, error, *nblocks, + test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags)); + goto rgrp_error; } + + gfs2_alloc_extent(&rbm, dinode, nblocks); + block = gfs2_rbm_to_block(&rbm); + rbm.rgd->rd_last_alloc = block - rbm.rgd->rd_data0; + if (gfs2_rs_active(ip->i_res)) + gfs2_adjust_reservation(ip, &rbm, *nblocks); ndata = *nblocks; if (dinode) ndata--; @@ -2081,22 +2042,22 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, brelse(dibh); } } - if (rgd->rd_free < *nblocks) { + if (rbm.rgd->rd_free < *nblocks) { printk(KERN_WARNING "nblocks=%u\n", *nblocks); goto rgrp_error; } - rgd->rd_free -= *nblocks; + rbm.rgd->rd_free -= *nblocks; if (dinode) { - rgd->rd_dinodes++; - *generation = rgd->rd_igeneration++; + rbm.rgd->rd_dinodes++; + *generation = rbm.rgd->rd_igeneration++; if (*generation == 0) - *generation = rgd->rd_igeneration++; + *generation = rbm.rgd->rd_igeneration++; } - gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); - gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); - gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data); + gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1); + gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data); + gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data); gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0); if (dinode) @@ -2110,14 +2071,14 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks, gfs2_quota_change(ip, ndata, ip->i_inode.i_uid, ip->i_inode.i_gid); - rgd->rd_free_clone -= *nblocks; - trace_gfs2_block_alloc(ip, rgd, block, *nblocks, + rbm.rgd->rd_free_clone -= *nblocks; + trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks, dinode ? 
GFS2_BLKST_DINODE : GFS2_BLKST_USED); *bn = block; return 0; rgrp_error: - gfs2_rgrp_error(rgd); + gfs2_rgrp_error(rbm.rgd); return -EIO; } diff --git a/trunk/fs/gfs2/rgrp.h b/trunk/fs/gfs2/rgrp.h index ca6e26729b86..24077958dcf6 100644 --- a/trunk/fs/gfs2/rgrp.h +++ b/trunk/fs/gfs2/rgrp.h @@ -46,7 +46,7 @@ extern int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *n, bool dinode, u64 *generation); extern int gfs2_rs_alloc(struct gfs2_inode *ip); -extern void gfs2_rs_deltree(struct gfs2_blkreserv *rs); +extern void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs); extern void gfs2_rs_delete(struct gfs2_inode *ip); extern void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta); extern void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen); @@ -73,30 +73,10 @@ extern int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed); extern int gfs2_fitrim(struct file *filp, void __user *argp); -/* This is how to tell if a multi-block reservation is "inplace" reserved: */ -static inline int gfs2_mb_reserved(struct gfs2_inode *ip) +/* This is how to tell if a reservation is in the rgrp tree: */ +static inline bool gfs2_rs_active(struct gfs2_blkreserv *rs) { - if (ip->i_res && ip->i_res->rs_requested) - return 1; - return 0; -} - -/* This is how to tell if a multi-block reservation is in the rgrp tree: */ -static inline int gfs2_rs_active(struct gfs2_blkreserv *rs) -{ - if (rs && rs->rs_bi) - return 1; - return 0; -} - -static inline u32 gfs2_bi2rgd_blk(const struct gfs2_bitmap *bi, u32 blk) -{ - return (bi->bi_start * GFS2_NBBY) + blk; -} - -static inline u64 gfs2_rs_startblk(const struct gfs2_blkreserv *rs) -{ - return gfs2_bi2rgd_blk(rs->rs_bi, rs->rs_biblk) + rs->rs_rgd->rd_data0; + return rs && !RB_EMPTY_NODE(&rs->rs_node); } #endif /* __RGRP_DOT_H__ */ diff --git a/trunk/fs/gfs2/super.c b/trunk/fs/gfs2/super.c index fc3168f47a14..a8d90f2f576c 100644 --- a/trunk/fs/gfs2/super.c +++ b/trunk/fs/gfs2/super.c @@ -1366,6 +1366,8 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) val = sdp->sd_tune.gt_statfs_quantum; if (val != 30) seq_printf(s, ",statfs_quantum=%d", val); + else if (sdp->sd_tune.gt_statfs_slow) + seq_puts(s, ",statfs_quantum=0"); val = sdp->sd_tune.gt_quota_quantum; if (val != 60) seq_printf(s, ",quota_quantum=%d", val); @@ -1543,6 +1545,11 @@ static void gfs2_evict_inode(struct inode *inode) out_truncate: gfs2_log_flush(sdp, ip->i_gl); + if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) { + struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl); + filemap_fdatawrite(metamapping); + filemap_fdatawait(metamapping); + } write_inode_now(inode, 1); gfs2_ail_flush(ip->i_gl, 0); @@ -1557,7 +1564,7 @@ static void gfs2_evict_inode(struct inode *inode) out_unlock: /* Error path for case 1 */ if (gfs2_rs_active(ip->i_res)) - gfs2_rs_deltree(ip->i_res); + gfs2_rs_deltree(ip, ip->i_res); if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) gfs2_glock_dq(&ip->i_iopen_gh); diff --git a/trunk/fs/gfs2/trace_gfs2.h b/trunk/fs/gfs2/trace_gfs2.h index a25c252fe412..bbdc78af60ca 100644 --- a/trunk/fs/gfs2/trace_gfs2.h +++ b/trunk/fs/gfs2/trace_gfs2.h @@ -509,10 +509,9 @@ TRACE_EVENT(gfs2_block_alloc, /* Keep track of multi-block reservations as they are allocated/freed */ TRACE_EVENT(gfs2_rs, - TP_PROTO(const struct gfs2_inode *ip, const struct gfs2_blkreserv *rs, - u8 func), + TP_PROTO(const struct gfs2_blkreserv *rs, u8 func), - TP_ARGS(ip, rs, 
func), + TP_ARGS(rs, func), TP_STRUCT__entry( __field( dev_t, dev ) @@ -526,18 +525,17 @@ TRACE_EVENT(gfs2_rs, ), TP_fast_assign( - __entry->dev = rs->rs_rgd ? rs->rs_rgd->rd_sbd->sd_vfs->s_dev : 0; - __entry->rd_addr = rs->rs_rgd ? rs->rs_rgd->rd_addr : 0; - __entry->rd_free_clone = rs->rs_rgd ? rs->rs_rgd->rd_free_clone : 0; - __entry->rd_reserved = rs->rs_rgd ? rs->rs_rgd->rd_reserved : 0; - __entry->inum = ip ? ip->i_no_addr : 0; - __entry->start = gfs2_rs_startblk(rs); + __entry->dev = rs->rs_rbm.rgd->rd_sbd->sd_vfs->s_dev; + __entry->rd_addr = rs->rs_rbm.rgd->rd_addr; + __entry->rd_free_clone = rs->rs_rbm.rgd->rd_free_clone; + __entry->rd_reserved = rs->rs_rbm.rgd->rd_reserved; + __entry->inum = rs->rs_inum; + __entry->start = gfs2_rbm_to_block(&rs->rs_rbm); __entry->free = rs->rs_free; __entry->func = func; ), - TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s " - "f:%lu", + TP_printk("%u,%u bmap %llu resrv %llu rg:%llu rf:%lu rr:%lu %s f:%lu", MAJOR(__entry->dev), MINOR(__entry->dev), (unsigned long long)__entry->inum, (unsigned long long)__entry->start, diff --git a/trunk/fs/gfs2/trans.h b/trunk/fs/gfs2/trans.h index 41f42cdccbb8..bf2ae9aeee7a 100644 --- a/trunk/fs/gfs2/trans.h +++ b/trunk/fs/gfs2/trans.h @@ -28,11 +28,10 @@ struct gfs2_glock; /* reserve either the number of blocks to be allocated plus the rg header * block, or all of the blocks in the rg, whichever is smaller */ -static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip) +static inline unsigned int gfs2_rg_blocks(const struct gfs2_inode *ip, unsigned requested) { - const struct gfs2_blkreserv *rs = ip->i_res; - if (rs && rs->rs_requested < ip->i_rgd->rd_length) - return rs->rs_requested + 1; + if (requested < ip->i_rgd->rd_length) + return requested + 1; return ip->i_rgd->rd_length; } diff --git a/trunk/fs/gfs2/xattr.c b/trunk/fs/gfs2/xattr.c index 27a0b4a901f5..db330e5518cd 100644 --- a/trunk/fs/gfs2/xattr.c +++ b/trunk/fs/gfs2/xattr.c @@ -448,17 +448,18 @@ ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size) } /** - * ea_get_unstuffed - actually copies the unstuffed data into the - * request buffer + * ea_iter_unstuffed - copies the unstuffed xattr data to/from the + * request buffer * @ip: The GFS2 inode * @ea: The extended attribute header structure - * @data: The data to be copied + * @din: The data to be copied in + * @dout: The data to be copied out (one of din,dout will be NULL) * * Returns: errno */ -static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, - char *data) +static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, + const char *din, char *dout) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); struct buffer_head **bh; @@ -467,6 +468,8 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, __be64 *dataptrs = GFS2_EA2DATAPTRS(ea); unsigned int x; int error = 0; + unsigned char *pos; + unsigned cp_size; bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS); if (!bh) @@ -497,12 +500,21 @@ static int ea_get_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, goto out; } - memcpy(data, bh[x]->b_data + sizeof(struct gfs2_meta_header), - (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize); + pos = bh[x]->b_data + sizeof(struct gfs2_meta_header); + cp_size = (sdp->sd_jbsize > amount) ? 
amount : sdp->sd_jbsize; - amount -= sdp->sd_jbsize; - data += sdp->sd_jbsize; + if (dout) { + memcpy(dout, pos, cp_size); + dout += sdp->sd_jbsize; + } + + if (din) { + gfs2_trans_add_bh(ip->i_gl, bh[x], 1); + memcpy(pos, din, cp_size); + din += sdp->sd_jbsize; + } + amount -= sdp->sd_jbsize; brelse(bh[x]); } @@ -523,7 +535,7 @@ static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, memcpy(data, GFS2_EA2DATA(el->el_ea), len); return len; } - ret = ea_get_unstuffed(ip, el->el_ea, data); + ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data); if (ret < 0) return ret; return len; @@ -727,7 +739,7 @@ static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, goto out_gunlock_q; error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), - blks + gfs2_rg_blocks(ip) + + blks + gfs2_rg_blocks(ip, blks) + RES_DINODE + RES_STATFS + RES_QUOTA, 0); if (error) goto out_ipres; @@ -1220,69 +1232,23 @@ static int gfs2_xattr_set(struct dentry *dentry, const char *name, size, flags, type); } + static int ea_acl_chmod_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, char *data) { struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); - struct buffer_head **bh; unsigned int amount = GFS2_EA_DATA_LEN(ea); unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); - __be64 *dataptrs = GFS2_EA2DATAPTRS(ea); - unsigned int x; - int error; - - bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS); - if (!bh) - return -ENOMEM; - - error = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0); - if (error) - goto out; - - for (x = 0; x < nptrs; x++) { - error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, - bh + x); - if (error) { - while (x--) - brelse(bh[x]); - goto fail; - } - dataptrs++; - } - - for (x = 0; x < nptrs; x++) { - error = gfs2_meta_wait(sdp, bh[x]); - if (error) { - for (; x < nptrs; x++) - brelse(bh[x]); - goto fail; - } - if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) { - for (; x < nptrs; x++) - brelse(bh[x]); - error = -EIO; - goto fail; - } - - gfs2_trans_add_bh(ip->i_gl, bh[x], 1); - - memcpy(bh[x]->b_data + sizeof(struct gfs2_meta_header), data, - (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize); - - amount -= sdp->sd_jbsize; - data += sdp->sd_jbsize; - - brelse(bh[x]); - } + int ret; -out: - kfree(bh); - return error; + ret = gfs2_trans_begin(sdp, nptrs + RES_DINODE, 0); + if (ret) + return ret; -fail: + ret = gfs2_iter_unstuffed(ip, ea, data, NULL); gfs2_trans_end(sdp); - kfree(bh); - return error; + + return ret; } int gfs2_xattr_acl_chmod(struct gfs2_inode *ip, struct iattr *attr, char *data) diff --git a/trunk/fs/hfs/mdb.c b/trunk/fs/hfs/mdb.c index 5fd51a5833ff..b7ec224910c5 100644 --- a/trunk/fs/hfs/mdb.c +++ b/trunk/fs/hfs/mdb.c @@ -236,10 +236,10 @@ int hfs_mdb_get(struct super_block *sb) * hfs_mdb_commit() * * Description: - * This updates the MDB on disk (look also at hfs_write_super()). + * This updates the MDB on disk. * It does not check, if the superblock has been modified, or * if the filesystem has been mounted read-only. It is mainly - * called by hfs_write_super() and hfs_btree_extend(). + * called by hfs_sync_fs() and flush_mdb(). 
* Input Variable(s): * struct hfs_mdb *mdb: Pointer to the hfs MDB * int backup; diff --git a/trunk/fs/jbd/journal.c b/trunk/fs/jbd/journal.c index 425c2f2cf170..a2862339323b 100644 --- a/trunk/fs/jbd/journal.c +++ b/trunk/fs/jbd/journal.c @@ -534,8 +534,8 @@ int journal_start_commit(journal_t *journal, tid_t *ptid) ret = 1; } else if (journal->j_committing_transaction) { /* - * If ext3_write_super() recently started a commit, then we - * have to wait for completion of that transaction + * If commit has been started, then we have to wait for + * completion of that transaction. */ if (ptid) *ptid = journal->j_committing_transaction->t_tid; @@ -1113,6 +1113,11 @@ static void mark_journal_empty(journal_t *journal) BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex)); spin_lock(&journal->j_state_lock); + /* Is it already empty? */ + if (sb->s_start == 0) { + spin_unlock(&journal->j_state_lock); + return; + } jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n", journal->j_tail_sequence); diff --git a/trunk/fs/jbd2/journal.c b/trunk/fs/jbd2/journal.c index e9a3c4c85594..e149b99a7ffb 100644 --- a/trunk/fs/jbd2/journal.c +++ b/trunk/fs/jbd2/journal.c @@ -612,8 +612,8 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid) ret = 1; } else if (journal->j_committing_transaction) { /* - * If ext3_write_super() recently started a commit, then we - * have to wait for completion of that transaction + * If commit has been started, then we have to wait for + * completion of that transaction. */ if (ptid) *ptid = journal->j_committing_transaction->t_tid; @@ -1377,7 +1377,7 @@ static void jbd2_mark_journal_empty(journal_t *journal) * Update a journal's errno. Write updated superblock to disk waiting for IO * to complete. */ -static void jbd2_journal_update_sb_errno(journal_t *journal) +void jbd2_journal_update_sb_errno(journal_t *journal) { journal_superblock_t *sb = journal->j_superblock; @@ -1390,6 +1390,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal) jbd2_write_superblock(journal, WRITE_SYNC); } +EXPORT_SYMBOL(jbd2_journal_update_sb_errno); /* * Read the superblock for a given journal, performing initial diff --git a/trunk/fs/libfs.c b/trunk/fs/libfs.c index a74cb1725ac6..7cc37ca19cd8 100644 --- a/trunk/fs/libfs.c +++ b/trunk/fs/libfs.c @@ -874,7 +874,7 @@ struct dentry *generic_fh_to_dentry(struct super_block *sb, struct fid *fid, EXPORT_SYMBOL_GPL(generic_fh_to_dentry); /** - * generic_fh_to_dentry - generic helper for the fh_to_parent export operation + * generic_fh_to_parent - generic helper for the fh_to_parent export operation * @sb: filesystem to do the file handle conversion on * @fid: file handle to convert * @fh_len: length of the file handle in bytes diff --git a/trunk/fs/lockd/svclock.c b/trunk/fs/lockd/svclock.c index fb1a2bedbe97..8d80c990dffd 100644 --- a/trunk/fs/lockd/svclock.c +++ b/trunk/fs/lockd/svclock.c @@ -289,7 +289,6 @@ static void nlmsvc_free_block(struct kref *kref) dprintk("lockd: freeing block %p...\n", block); /* Remove block from file's list of blocks */ - mutex_lock(&file->f_mutex); list_del_init(&block->b_flist); mutex_unlock(&file->f_mutex); @@ -303,7 +302,7 @@ static void nlmsvc_free_block(struct kref *kref) static void nlmsvc_release_block(struct nlm_block *block) { if (block != NULL) - kref_put(&block->b_count, nlmsvc_free_block); + kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex); } /* diff --git a/trunk/fs/logfs/dev_bdev.c b/trunk/fs/logfs/dev_bdev.c index df0de27c2733..e784a217b500 100644 --- 
a/trunk/fs/logfs/dev_bdev.c +++ b/trunk/fs/logfs/dev_bdev.c @@ -26,6 +26,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw) struct completion complete; bio_init(&bio); + bio.bi_max_vecs = 1; bio.bi_io_vec = &bio_vec; bio_vec.bv_page = page; bio_vec.bv_len = PAGE_SIZE; @@ -95,12 +96,11 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index, struct address_space *mapping = super->s_mapping_inode->i_mapping; struct bio *bio; struct page *page; - struct request_queue *q = bdev_get_queue(sb->s_bdev); - unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); + unsigned int max_pages; int i; - if (max_pages > BIO_MAX_PAGES) - max_pages = BIO_MAX_PAGES; + max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); + bio = bio_alloc(GFP_NOFS, max_pages); BUG_ON(!bio); @@ -190,12 +190,11 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index, { struct logfs_super *super = logfs_super(sb); struct bio *bio; - struct request_queue *q = bdev_get_queue(sb->s_bdev); - unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9); + unsigned int max_pages; int i; - if (max_pages > BIO_MAX_PAGES) - max_pages = BIO_MAX_PAGES; + max_pages = min(nr_pages, (size_t) bio_get_nr_vecs(super->s_bdev)); + bio = bio_alloc(GFP_NOFS, max_pages); BUG_ON(!bio); diff --git a/trunk/fs/logfs/inode.c b/trunk/fs/logfs/inode.c index a422f42238b2..6984562738d3 100644 --- a/trunk/fs/logfs/inode.c +++ b/trunk/fs/logfs/inode.c @@ -156,10 +156,26 @@ static void __logfs_destroy_inode(struct inode *inode) call_rcu(&inode->i_rcu, logfs_i_callback); } +static void __logfs_destroy_meta_inode(struct inode *inode) +{ + struct logfs_inode *li = logfs_inode(inode); + BUG_ON(li->li_block); + call_rcu(&inode->i_rcu, logfs_i_callback); +} + static void logfs_destroy_inode(struct inode *inode) { struct logfs_inode *li = logfs_inode(inode); + if (inode->i_ino < LOGFS_RESERVED_INOS) { + /* + * The reserved inodes are never destroyed unless we are in + * unmont path. 
+ */ + __logfs_destroy_meta_inode(inode); + return; + } + BUG_ON(list_empty(&li->li_freeing_list)); spin_lock(&logfs_inode_lock); li->li_refcount--; @@ -373,8 +389,8 @@ static void logfs_put_super(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); /* kill the meta-inodes */ - iput(super->s_master_inode); iput(super->s_segfile_inode); + iput(super->s_master_inode); iput(super->s_mapping_inode); } diff --git a/trunk/fs/logfs/journal.c b/trunk/fs/logfs/journal.c index 1e1c369df22b..2a09b8d73989 100644 --- a/trunk/fs/logfs/journal.c +++ b/trunk/fs/logfs/journal.c @@ -565,7 +565,7 @@ static void write_wbuf(struct super_block *sb, struct logfs_area *area, index = ofs >> PAGE_SHIFT; page_ofs = ofs & (PAGE_SIZE - 1); - page = find_lock_page(mapping, index); + page = find_or_create_page(mapping, index, GFP_NOFS); BUG_ON(!page); memcpy(wbuf, page_address(page) + page_ofs, super->s_writesize); unlock_page(page); diff --git a/trunk/fs/logfs/readwrite.c b/trunk/fs/logfs/readwrite.c index f1cb512c5019..5be0abef603d 100644 --- a/trunk/fs/logfs/readwrite.c +++ b/trunk/fs/logfs/readwrite.c @@ -2189,7 +2189,6 @@ void logfs_evict_inode(struct inode *inode) return; } - BUG_ON(inode->i_ino < LOGFS_RESERVED_INOS); page = inode_to_page(inode); BUG_ON(!page); /* FIXME: Use emergency page */ logfs_put_write_page(page); diff --git a/trunk/fs/logfs/segment.c b/trunk/fs/logfs/segment.c index e28d090c98d6..038da0991794 100644 --- a/trunk/fs/logfs/segment.c +++ b/trunk/fs/logfs/segment.c @@ -886,7 +886,7 @@ static struct logfs_area *alloc_area(struct super_block *sb) static void map_invalidatepage(struct page *page, unsigned long l) { - BUG(); + return; } static int map_releasepage(struct page *page, gfp_t g) diff --git a/trunk/fs/namei.c b/trunk/fs/namei.c index 1b464390dde8..dd1ed1b8e98e 100644 --- a/trunk/fs/namei.c +++ b/trunk/fs/namei.c @@ -352,6 +352,7 @@ int __inode_permission(struct inode *inode, int mask) /** * sb_permission - Check superblock-level permissions * @sb: Superblock of inode to check permission on + * @inode: Inode to check permission on * @mask: Right to check for (%MAY_READ, %MAY_WRITE, %MAY_EXEC) * * Separate out file-system wide checks from inode-specific permission checks. 
@@ -656,6 +657,7 @@ int sysctl_protected_hardlinks __read_mostly = 1; /** * may_follow_link - Check symlink following for unsafe situations * @link: The path of the symlink + * @nd: nameidata pathwalk data * * In the case of the sysctl_protected_symlinks sysctl being enabled, * CAP_DAC_OVERRIDE needs to be specifically ignored if the symlink is @@ -2414,7 +2416,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, goto out; } - mode = op->mode & S_IALLUGO; + mode = op->mode; if ((open_flag & O_CREAT) && !IS_POSIXACL(dir)) mode &= ~current_umask(); @@ -2452,7 +2454,7 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, } if (open_flag & O_CREAT) { - error = may_o_create(&nd->path, dentry, op->mode); + error = may_o_create(&nd->path, dentry, mode); if (error) { create_error = error; if (open_flag & O_EXCL) @@ -2489,6 +2491,10 @@ static int atomic_open(struct nameidata *nd, struct dentry *dentry, dput(dentry); dentry = file->f_path.dentry; } + if (create_error && dentry->d_inode == NULL) { + error = create_error; + goto out; + } goto looked_up; } diff --git a/trunk/fs/namespace.c b/trunk/fs/namespace.c index 4d31f73e2561..7bdf7907413f 100644 --- a/trunk/fs/namespace.c +++ b/trunk/fs/namespace.c @@ -1886,8 +1886,14 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags) return err; err = -EINVAL; - if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt))) - goto unlock; + if (unlikely(!check_mnt(real_mount(path->mnt)))) { + /* that's acceptable only for automounts done in private ns */ + if (!(mnt_flags & MNT_SHRINKABLE)) + goto unlock; + /* ... and for those we'd better have mountpoint still alive */ + if (!real_mount(path->mnt)->mnt_ns) + goto unlock; + } /* Refuse the same filesystem on the same mount point */ err = -EBUSY; diff --git a/trunk/fs/nfs/Makefile b/trunk/fs/nfs/Makefile index 8bf3a3f6925a..b7db60897f91 100644 --- a/trunk/fs/nfs/Makefile +++ b/trunk/fs/nfs/Makefile @@ -12,19 +12,19 @@ nfs-$(CONFIG_ROOT_NFS) += nfsroot.o nfs-$(CONFIG_SYSCTL) += sysctl.o nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o -obj-$(CONFIG_NFS_V2) += nfs2.o -nfs2-y := nfs2super.o proc.o nfs2xdr.o +obj-$(CONFIG_NFS_V2) += nfsv2.o +nfsv2-y := nfs2super.o proc.o nfs2xdr.o -obj-$(CONFIG_NFS_V3) += nfs3.o -nfs3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o -nfs3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o +obj-$(CONFIG_NFS_V3) += nfsv3.o +nfsv3-y := nfs3super.o nfs3client.o nfs3proc.o nfs3xdr.o +nfsv3-$(CONFIG_NFS_V3_ACL) += nfs3acl.o -obj-$(CONFIG_NFS_V4) += nfs4.o -nfs4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \ +obj-$(CONFIG_NFS_V4) += nfsv4.o +nfsv4-y := nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o nfs4super.o nfs4file.o \ delegation.o idmap.o callback.o callback_xdr.o callback_proc.o \ nfs4namespace.o nfs4getroot.o nfs4client.o -nfs4-$(CONFIG_SYSCTL) += nfs4sysctl.o -nfs4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o +nfsv4-$(CONFIG_SYSCTL) += nfs4sysctl.o +nfsv4-$(CONFIG_NFS_V4_1) += pnfs.o pnfs_dev.o obj-$(CONFIG_PNFS_FILE_LAYOUT) += nfs_layout_nfsv41_files.o nfs_layout_nfsv41_files-y := nfs4filelayout.o nfs4filelayoutdev.o diff --git a/trunk/fs/nfs/client.c b/trunk/fs/nfs/client.c index 9fc0d9dfc91b..99694442b93f 100644 --- a/trunk/fs/nfs/client.c +++ b/trunk/fs/nfs/client.c @@ -105,7 +105,7 @@ struct nfs_subversion *get_nfs_version(unsigned int version) if (IS_ERR(nfs)) { mutex_lock(&nfs_version_mutex); - request_module("nfs%d", version); + request_module("nfsv%d", version); nfs = 
find_nfs_version(version); mutex_unlock(&nfs_version_mutex); } diff --git a/trunk/fs/nfs/file.c b/trunk/fs/nfs/file.c index 75d6d0a3d32e..6a7fcab7ecb3 100644 --- a/trunk/fs/nfs/file.c +++ b/trunk/fs/nfs/file.c @@ -287,10 +287,12 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) struct inode *inode = file->f_path.dentry->d_inode; ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret != 0) + goto out; mutex_lock(&inode->i_mutex); ret = nfs_file_fsync_commit(file, start, end, datasync); mutex_unlock(&inode->i_mutex); - +out: return ret; } diff --git a/trunk/fs/nfs/idmap.c b/trunk/fs/nfs/idmap.c index b701358c39c3..a850079467d8 100644 --- a/trunk/fs/nfs/idmap.c +++ b/trunk/fs/nfs/idmap.c @@ -61,6 +61,12 @@ struct idmap { struct mutex idmap_mutex; }; +struct idmap_legacy_upcalldata { + struct rpc_pipe_msg pipe_msg; + struct idmap_msg idmap_msg; + struct idmap *idmap; +}; + /** * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields * @fattr: fully initialised struct nfs_fattr @@ -324,6 +330,7 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen, ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, name, namelen, type, data, data_size, idmap); + idmap->idmap_key_cons = NULL; mutex_unlock(&idmap->idmap_mutex); } return ret; @@ -380,11 +387,13 @@ static const match_table_t nfs_idmap_tokens = { static int nfs_idmap_legacy_upcall(struct key_construction *, const char *, void *); static ssize_t idmap_pipe_downcall(struct file *, const char __user *, size_t); +static void idmap_release_pipe(struct inode *); static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *); static const struct rpc_pipe_ops idmap_upcall_ops = { .upcall = rpc_pipe_generic_upcall, .downcall = idmap_pipe_downcall, + .release_pipe = idmap_release_pipe, .destroy_msg = idmap_pipe_destroy_msg, }; @@ -616,7 +625,8 @@ void nfs_idmap_quit(void) nfs_idmap_quit_keyring(); } -static int nfs_idmap_prepare_message(char *desc, struct idmap_msg *im, +static int nfs_idmap_prepare_message(char *desc, struct idmap *idmap, + struct idmap_msg *im, struct rpc_pipe_msg *msg) { substring_t substr; @@ -659,6 +669,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, const char *op, void *aux) { + struct idmap_legacy_upcalldata *data; struct rpc_pipe_msg *msg; struct idmap_msg *im; struct idmap *idmap = (struct idmap *)aux; @@ -666,15 +677,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, int ret = -ENOMEM; /* msg and im are freed in idmap_pipe_destroy_msg */ - msg = kmalloc(sizeof(*msg), GFP_KERNEL); - if (!msg) - goto out0; - - im = kmalloc(sizeof(*im), GFP_KERNEL); - if (!im) + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) goto out1; - ret = nfs_idmap_prepare_message(key->description, im, msg); + msg = &data->pipe_msg; + im = &data->idmap_msg; + data->idmap = idmap; + + ret = nfs_idmap_prepare_message(key->description, idmap, im, msg); if (ret < 0) goto out2; @@ -683,15 +694,15 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons, ret = rpc_queue_upcall(idmap->idmap_pipe, msg); if (ret < 0) - goto out2; + goto out3; return ret; +out3: + idmap->idmap_key_cons = NULL; out2: - kfree(im); + kfree(data); out1: - kfree(msg); -out0: complete_request_key(cons, ret); return ret; } @@ -749,9 +760,8 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) } if (!(im.im_status & IDMAP_STATUS_SUCCESS)) { - ret = mlen; - complete_request_key(cons, -ENOKEY); - goto out_incomplete; + ret = 
-ENOKEY; + goto out; } namelen_in = strnlen(im.im_name, IDMAP_NAMESZ); @@ -768,16 +778,32 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) out: complete_request_key(cons, ret); -out_incomplete: return ret; } static void idmap_pipe_destroy_msg(struct rpc_pipe_msg *msg) { + struct idmap_legacy_upcalldata *data = container_of(msg, + struct idmap_legacy_upcalldata, + pipe_msg); + struct idmap *idmap = data->idmap; + struct key_construction *cons; + if (msg->errno) { + cons = ACCESS_ONCE(idmap->idmap_key_cons); + idmap->idmap_key_cons = NULL; + complete_request_key(cons, msg->errno); + } /* Free memory allocated in nfs_idmap_legacy_upcall() */ - kfree(msg->data); - kfree(msg); + kfree(data); +} + +static void +idmap_release_pipe(struct inode *inode) +{ + struct rpc_inode *rpci = RPC_I(inode); + struct idmap *idmap = (struct idmap *)rpci->private; + idmap->idmap_key_cons = NULL; } int nfs_map_name_to_uid(const struct nfs_server *server, const char *name, size_t namelen, __u32 *uid) diff --git a/trunk/fs/nfs/inode.c b/trunk/fs/nfs/inode.c index c6e895f0fbf3..9b47610338f5 100644 --- a/trunk/fs/nfs/inode.c +++ b/trunk/fs/nfs/inode.c @@ -154,7 +154,7 @@ static void nfs_zap_caches_locked(struct inode *inode) nfsi->attrtimeo = NFS_MINATTRTIMEO(inode); nfsi->attrtimeo_timestamp = jiffies; - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode))); + memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf)); if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE; else diff --git a/trunk/fs/nfs/nfs3proc.c b/trunk/fs/nfs/nfs3proc.c index 0952c791df36..69322096c325 100644 --- a/trunk/fs/nfs/nfs3proc.c +++ b/trunk/fs/nfs/nfs3proc.c @@ -69,7 +69,7 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle, nfs_fattr_init(info->fattr); status = rpc_call_sync(client, &msg, 0); dprintk("%s: reply fsinfo: %d\n", __func__, status); - if (!(info->fattr->valid & NFS_ATTR_FATTR)) { + if (status == 0 && !(info->fattr->valid & NFS_ATTR_FATTR)) { msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR]; msg.rpc_resp = info->fattr; status = rpc_call_sync(client, &msg, 0); @@ -643,7 +643,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, u64 cookie, struct page **pages, unsigned int count, int plus) { struct inode *dir = dentry->d_inode; - __be32 *verf = NFS_COOKIEVERF(dir); + __be32 *verf = NFS_I(dir)->cookieverf; struct nfs3_readdirargs arg = { .fh = NFS_FH(dir), .cookie = cookie, diff --git a/trunk/fs/nfs/nfs4_fs.h b/trunk/fs/nfs/nfs4_fs.h index 3b950dd81e81..da0618aeeadb 100644 --- a/trunk/fs/nfs/nfs4_fs.h +++ b/trunk/fs/nfs/nfs4_fs.h @@ -205,6 +205,9 @@ extern const struct dentry_operations nfs4_dentry_operations; int nfs_atomic_open(struct inode *, struct dentry *, struct file *, unsigned, umode_t, int *); +/* super.c */ +extern struct file_system_type nfs4_fs_type; + /* nfs4namespace.c */ rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *); struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *); diff --git a/trunk/fs/nfs/nfs4client.c b/trunk/fs/nfs/nfs4client.c index cbcdfaf32505..24eb663f8ed5 100644 --- a/trunk/fs/nfs/nfs4client.c +++ b/trunk/fs/nfs/nfs4client.c @@ -74,7 +74,7 @@ struct nfs_client *nfs4_alloc_client(const struct nfs_client_initdata *cl_init) return clp; error: - kfree(clp); + nfs_free_client(clp); return ERR_PTR(err); } diff --git 
a/trunk/fs/nfs/nfs4file.c b/trunk/fs/nfs/nfs4file.c index acb65e7887f8..eb5eb8eef4d3 100644 --- a/trunk/fs/nfs/nfs4file.c +++ b/trunk/fs/nfs/nfs4file.c @@ -96,13 +96,15 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync) struct inode *inode = file->f_path.dentry->d_inode; ret = filemap_write_and_wait_range(inode->i_mapping, start, end); + if (ret != 0) + goto out; mutex_lock(&inode->i_mutex); ret = nfs_file_fsync_commit(file, start, end, datasync); if (!ret && !datasync) /* application has asked for meta-data sync */ ret = pnfs_layoutcommit_inode(inode, true); mutex_unlock(&inode->i_mutex); - +out: return ret; } diff --git a/trunk/fs/nfs/nfs4proc.c b/trunk/fs/nfs/nfs4proc.c index a99a8d948721..1e50326d00dd 100644 --- a/trunk/fs/nfs/nfs4proc.c +++ b/trunk/fs/nfs/nfs4proc.c @@ -3215,11 +3215,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, dentry->d_parent->d_name.name, dentry->d_name.name, (unsigned long long)cookie); - nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args); + nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args); res.pgbase = args.pgbase; status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0); if (status >= 0) { - memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE); + memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE); status += args.pgbase; } @@ -3653,11 +3653,11 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server) && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL); } -/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that - * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on +/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that + * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on * the stack. */ -#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT) +#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE) static int buf_to_pages_noslab(const void *buf, size_t buflen, struct page **pages, unsigned int *pgbase) @@ -3668,7 +3668,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen, spages = pages; do { - len = min_t(size_t, PAGE_CACHE_SIZE, buflen); + len = min_t(size_t, PAGE_SIZE, buflen); newpage = alloc_page(GFP_KERNEL); if (newpage == NULL) @@ -3737,9 +3737,10 @@ static inline ssize_t nfs4_read_cached_acl(struct inode *inode, char *buf, size_ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size_t pgbase, size_t acl_len) { struct nfs4_cached_acl *acl; + size_t buflen = sizeof(*acl) + acl_len; - if (pages && acl_len <= PAGE_SIZE) { - acl = kmalloc(sizeof(*acl) + acl_len, GFP_KERNEL); + if (buflen <= PAGE_SIZE) { + acl = kmalloc(buflen, GFP_KERNEL); if (acl == NULL) goto out; acl->cached = 1; @@ -3781,17 +3782,15 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu .rpc_argp = &args, .rpc_resp = &res, }; - int ret = -ENOMEM, npages, i; - size_t acl_len = 0; + unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); + int ret = -ENOMEM, i; - npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT; /* As long as we're doing a round trip to the server anyway, * let's be prepared for a page of acl data. 
*/ if (npages == 0) npages = 1; - - /* Add an extra page to handle the bitmap returned */ - npages++; + if (npages > ARRAY_SIZE(pages)) + return -ERANGE; for (i = 0; i < npages; i++) { pages[i] = alloc_page(GFP_KERNEL); @@ -3807,11 +3806,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu args.acl_len = npages * PAGE_SIZE; args.acl_pgbase = 0; - /* Let decode_getfacl know not to fail if the ACL data is larger than - * the page we send as a guess */ - if (buf == NULL) - res.acl_flags |= NFS4_ACL_LEN_REQUEST; - dprintk("%s buf %p buflen %zu npages %d args.acl_len %zu\n", __func__, buf, buflen, npages, args.acl_len); ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode), @@ -3819,20 +3813,19 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu if (ret) goto out_free; - acl_len = res.acl_len - res.acl_data_offset; - if (acl_len > args.acl_len) - nfs4_write_cached_acl(inode, NULL, 0, acl_len); - else - nfs4_write_cached_acl(inode, pages, res.acl_data_offset, - acl_len); - if (buf) { + /* Handle the case where the passed-in buffer is too short */ + if (res.acl_flags & NFS4_ACL_TRUNC) { + /* Did the user only issue a request for the acl length? */ + if (buf == NULL) + goto out_ok; ret = -ERANGE; - if (acl_len > buflen) - goto out_free; - _copy_from_pages(buf, pages, res.acl_data_offset, - acl_len); + goto out_free; } - ret = acl_len; + nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len); + if (buf) + _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len); +out_ok: + ret = res.acl_len; out_free: for (i = 0; i < npages; i++) if (pages[i]) @@ -3890,10 +3883,13 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl .rpc_argp = &arg, .rpc_resp = &res, }; + unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE); int ret, i; if (!nfs4_server_supports_acls(server)) return -EOPNOTSUPP; + if (npages > ARRAY_SIZE(pages)) + return -ERANGE; i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); if (i < 0) return i; @@ -6223,11 +6219,58 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) dprintk("<-- %s\n", __func__); } +static size_t max_response_pages(struct nfs_server *server) +{ + u32 max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; + return nfs_page_array_len(0, max_resp_sz); +} + +static void nfs4_free_pages(struct page **pages, size_t size) +{ + int i; + + if (!pages) + return; + + for (i = 0; i < size; i++) { + if (!pages[i]) + break; + __free_page(pages[i]); + } + kfree(pages); +} + +static struct page **nfs4_alloc_pages(size_t size, gfp_t gfp_flags) +{ + struct page **pages; + int i; + + pages = kcalloc(size, sizeof(struct page *), gfp_flags); + if (!pages) { + dprintk("%s: can't alloc array of %zu pages\n", __func__, size); + return NULL; + } + + for (i = 0; i < size; i++) { + pages[i] = alloc_page(gfp_flags); + if (!pages[i]) { + dprintk("%s: failed to allocate page\n", __func__); + nfs4_free_pages(pages, size); + return NULL; + } + } + + return pages; +} + static void nfs4_layoutget_release(void *calldata) { struct nfs4_layoutget *lgp = calldata; + struct nfs_server *server = NFS_SERVER(lgp->args.inode); + size_t max_pages = max_response_pages(server); dprintk("--> %s\n", __func__); + nfs4_free_pages(lgp->args.layout.pages, max_pages); put_nfs_open_context(lgp->args.ctx); kfree(calldata); dprintk("<-- %s\n", __func__); @@ -6239,9 +6282,10 @@ static const struct rpc_call_ops nfs4_layoutget_call_ops = { .rpc_release 
= nfs4_layoutget_release, }; -int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) +void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags) { struct nfs_server *server = NFS_SERVER(lgp->args.inode); + size_t max_pages = max_response_pages(server); struct rpc_task *task; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_LAYOUTGET], @@ -6259,12 +6303,19 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) dprintk("--> %s\n", __func__); + lgp->args.layout.pages = nfs4_alloc_pages(max_pages, gfp_flags); + if (!lgp->args.layout.pages) { + nfs4_layoutget_release(lgp); + return; + } + lgp->args.layout.pglen = max_pages * PAGE_SIZE; + lgp->res.layoutp = &lgp->args.layout; lgp->res.seq_res.sr_slot = NULL; nfs41_init_sequence(&lgp->args.seq_args, &lgp->res.seq_res, 0); task = rpc_run_task(&task_setup_data); if (IS_ERR(task)) - return PTR_ERR(task); + return; status = nfs4_wait_for_completion_rpc_task(task); if (status == 0) status = task->tk_status; @@ -6272,7 +6323,7 @@ int nfs4_proc_layoutget(struct nfs4_layoutget *lgp) status = pnfs_layout_process(lgp); rpc_put_task(task); dprintk("<-- %s status=%d\n", __func__, status); - return status; + return; } static void @@ -6304,12 +6355,8 @@ static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata) return; } spin_lock(&lo->plh_inode->i_lock); - if (task->tk_status == 0) { - if (lrp->res.lrs_present) { - pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); - } else - BUG_ON(!list_empty(&lo->plh_segs)); - } + if (task->tk_status == 0 && lrp->res.lrs_present) + pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); lo->plh_block_lgets--; spin_unlock(&lo->plh_inode->i_lock); dprintk("<-- %s\n", __func__); diff --git a/trunk/fs/nfs/nfs4super.c b/trunk/fs/nfs/nfs4super.c index 12a31a9dbcdd..bd61221ad2c5 100644 --- a/trunk/fs/nfs/nfs4super.c +++ b/trunk/fs/nfs/nfs4super.c @@ -23,14 +23,6 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type, static struct dentry *nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *raw_data); -static struct file_system_type nfs4_fs_type = { - .owner = THIS_MODULE, - .name = "nfs4", - .mount = nfs_fs_mount, - .kill_sb = nfs_kill_super, - .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA, -}; - static struct file_system_type nfs4_remote_fs_type = { .owner = THIS_MODULE, .name = "nfs4", @@ -344,14 +336,8 @@ static int __init init_nfs_v4(void) if (err) goto out1; - err = register_filesystem(&nfs4_fs_type); - if (err < 0) - goto out2; - register_nfs_version(&nfs_v4); return 0; -out2: - nfs4_unregister_sysctl(); out1: nfs_idmap_quit(); out: @@ -361,7 +347,6 @@ static int __init init_nfs_v4(void) static void __exit exit_nfs_v4(void) { unregister_nfs_version(&nfs_v4); - unregister_filesystem(&nfs4_fs_type); nfs4_unregister_sysctl(); nfs_idmap_quit(); } diff --git a/trunk/fs/nfs/nfs4xdr.c b/trunk/fs/nfs/nfs4xdr.c index ca13483edd60..8dba6bd48557 100644 --- a/trunk/fs/nfs/nfs4xdr.c +++ b/trunk/fs/nfs/nfs4xdr.c @@ -5045,22 +5045,19 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, struct nfs_getaclres *res) { unsigned int savep; - __be32 *bm_p; uint32_t attrlen, bitmap[3] = {0}; int status; - size_t page_len = xdr->buf->page_len; + unsigned int pg_offset; res->acl_len = 0; if ((status = decode_op_hdr(xdr, OP_GETATTR)) != 0) goto out; - bm_p = xdr->p; - res->acl_data_offset = be32_to_cpup(bm_p) + 2; - res->acl_data_offset <<= 2; - /* Check if the acl data starts beyond the 
allocated buffer */ - if (res->acl_data_offset > page_len) - return -ERANGE; + xdr_enter_page(xdr, xdr->buf->page_len); + + /* Calculate the offset of the page data */ + pg_offset = xdr->buf->head[0].iov_len; if ((status = decode_attr_bitmap(xdr, bitmap)) != 0) goto out; @@ -5074,23 +5071,16 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req, /* The bitmap (xdr len + bitmaps) and the attr xdr len words * are stored with the acl data to handle the problem of * variable length bitmaps.*/ - xdr->p = bm_p; - - /* We ignore &savep and don't do consistency checks on - * the attr length. Let userspace figure it out.... */ - attrlen += res->acl_data_offset; - if (attrlen > page_len) { - if (res->acl_flags & NFS4_ACL_LEN_REQUEST) { - /* getxattr interface called with a NULL buf */ - res->acl_len = attrlen; - goto out; - } - dprintk("NFS: acl reply: attrlen %u > page_len %zu\n", - attrlen, page_len); - return -EINVAL; - } - xdr_read_pages(xdr, attrlen); + res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset; res->acl_len = attrlen; + + /* Check for receive buffer overflow */ + if (res->acl_len > (xdr->nwords << 2) || + res->acl_len + res->acl_data_offset > xdr->buf->page_len) { + res->acl_flags |= NFS4_ACL_TRUNC; + dprintk("NFS: acl reply: attrlen %u > page_len %u\n", + attrlen, xdr->nwords << 2); + } } else status = -EOPNOTSUPP; @@ -6235,7 +6225,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr, status = decode_open(xdr, res); if (status) goto out; - if (decode_getfh(xdr, &res->fh) != 0) + status = decode_getfh(xdr, &res->fh); + if (status) goto out; decode_getfattr(xdr, res->f_attr, res->server); out: diff --git a/trunk/fs/nfs/objlayout/objio_osd.c b/trunk/fs/nfs/objlayout/objio_osd.c index f50d3e8d6f22..ea6d111b03e9 100644 --- a/trunk/fs/nfs/objlayout/objio_osd.c +++ b/trunk/fs/nfs/objlayout/objio_osd.c @@ -570,17 +570,66 @@ static bool objio_pg_test(struct nfs_pageio_descriptor *pgio, return false; return pgio->pg_count + req->wb_bytes <= - OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length; + (unsigned long)pgio->pg_layout_private; +} + +void objio_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) +{ + pnfs_generic_pg_init_read(pgio, req); + if (unlikely(pgio->pg_lseg == NULL)) + return; /* Not pNFS */ + + pgio->pg_layout_private = (void *) + OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length; +} + +static bool aligned_on_raid_stripe(u64 offset, struct ore_layout *layout, + unsigned long *stripe_end) +{ + u32 stripe_off; + unsigned stripe_size; + + if (layout->raid_algorithm == PNFS_OSD_RAID_0) + return true; + + stripe_size = layout->stripe_unit * + (layout->group_width - layout->parity); + + div_u64_rem(offset, stripe_size, &stripe_off); + if (!stripe_off) + return true; + + *stripe_end = stripe_size - stripe_off; + return false; +} + +void objio_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) +{ + unsigned long stripe_end = 0; + + pnfs_generic_pg_init_write(pgio, req); + if (unlikely(pgio->pg_lseg == NULL)) + return; /* Not pNFS */ + + if (req->wb_offset || + !aligned_on_raid_stripe(req->wb_index * PAGE_SIZE, + &OBJIO_LSEG(pgio->pg_lseg)->layout, + &stripe_end)) { + pgio->pg_layout_private = (void *)stripe_end; + } else { + pgio->pg_layout_private = (void *) + OBJIO_LSEG(pgio->pg_lseg)->layout.max_io_length; + } } static const struct nfs_pageio_ops objio_pg_read_ops = { - .pg_init = pnfs_generic_pg_init_read, + .pg_init = objio_init_read, .pg_test = objio_pg_test, .pg_doio = pnfs_generic_pg_readpages, }; 
static const struct nfs_pageio_ops objio_pg_write_ops = { - .pg_init = pnfs_generic_pg_init_write, + .pg_init = objio_init_write, .pg_test = objio_pg_test, .pg_doio = pnfs_generic_pg_writepages, }; diff --git a/trunk/fs/nfs/pagelist.c b/trunk/fs/nfs/pagelist.c index 1a6732ed04a4..311a79681e2b 100644 --- a/trunk/fs/nfs/pagelist.c +++ b/trunk/fs/nfs/pagelist.c @@ -49,6 +49,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc, hdr->io_start = req_offset(hdr->req); hdr->good_bytes = desc->pg_count; hdr->dreq = desc->pg_dreq; + hdr->layout_private = desc->pg_layout_private; hdr->release = release; hdr->completion_ops = desc->pg_completion_ops; if (hdr->completion_ops->init_hdr) @@ -268,6 +269,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc, desc->pg_error = 0; desc->pg_lseg = NULL; desc->pg_dreq = NULL; + desc->pg_layout_private = NULL; } EXPORT_SYMBOL_GPL(nfs_pageio_init); diff --git a/trunk/fs/nfs/pnfs.c b/trunk/fs/nfs/pnfs.c index 76875bfcf19c..2e00feacd4be 100644 --- a/trunk/fs/nfs/pnfs.c +++ b/trunk/fs/nfs/pnfs.c @@ -583,9 +583,6 @@ send_layoutget(struct pnfs_layout_hdr *lo, struct nfs_server *server = NFS_SERVER(ino); struct nfs4_layoutget *lgp; struct pnfs_layout_segment *lseg = NULL; - struct page **pages = NULL; - int i; - u32 max_resp_sz, max_pages; dprintk("--> %s\n", __func__); @@ -594,20 +591,6 @@ send_layoutget(struct pnfs_layout_hdr *lo, if (lgp == NULL) return NULL; - /* allocate pages for xdr post processing */ - max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz; - max_pages = nfs_page_array_len(0, max_resp_sz); - - pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags); - if (!pages) - goto out_err_free; - - for (i = 0; i < max_pages; i++) { - pages[i] = alloc_page(gfp_flags); - if (!pages[i]) - goto out_err_free; - } - lgp->args.minlength = PAGE_CACHE_SIZE; if (lgp->args.minlength > range->length) lgp->args.minlength = range->length; @@ -616,39 +599,19 @@ send_layoutget(struct pnfs_layout_hdr *lo, lgp->args.type = server->pnfs_curr_ld->id; lgp->args.inode = ino; lgp->args.ctx = get_nfs_open_context(ctx); - lgp->args.layout.pages = pages; - lgp->args.layout.pglen = max_pages * PAGE_SIZE; lgp->lsegpp = &lseg; lgp->gfp_flags = gfp_flags; /* Synchronously retrieve layout information from server and * store in lseg. 
*/ - nfs4_proc_layoutget(lgp); + nfs4_proc_layoutget(lgp, gfp_flags); if (!lseg) { /* remember that LAYOUTGET failed and suspend trying */ set_bit(lo_fail_bit(range->iomode), &lo->plh_flags); } - /* free xdr pages */ - for (i = 0; i < max_pages; i++) - __free_page(pages[i]); - kfree(pages); - return lseg; - -out_err_free: - /* free any allocated xdr pages, lgp as it's not used */ - if (pages) { - for (i = 0; i < max_pages; i++) { - if (!pages[i]) - break; - __free_page(pages[i]); - } - kfree(pages); - } - kfree(lgp); - return NULL; } /* diff --git a/trunk/fs/nfs/pnfs.h b/trunk/fs/nfs/pnfs.h index 2c6c80503ba4..745aa1b39e7c 100644 --- a/trunk/fs/nfs/pnfs.h +++ b/trunk/fs/nfs/pnfs.h @@ -172,7 +172,7 @@ extern int nfs4_proc_getdevicelist(struct nfs_server *server, struct pnfs_devicelist *devlist); extern int nfs4_proc_getdeviceinfo(struct nfs_server *server, struct pnfs_device *dev); -extern int nfs4_proc_layoutget(struct nfs4_layoutget *lgp); +extern void nfs4_proc_layoutget(struct nfs4_layoutget *lgp, gfp_t gfp_flags); extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp); /* pnfs.c */ diff --git a/trunk/fs/nfs/super.c b/trunk/fs/nfs/super.c index ac6a3c55dce4..d2c7f5db0847 100644 --- a/trunk/fs/nfs/super.c +++ b/trunk/fs/nfs/super.c @@ -319,6 +319,34 @@ EXPORT_SYMBOL_GPL(nfs_sops); static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *); static int nfs4_validate_mount_data(void *options, struct nfs_parsed_mount_data *args, const char *dev_name); + +struct file_system_type nfs4_fs_type = { + .owner = THIS_MODULE, + .name = "nfs4", + .mount = nfs_fs_mount, + .kill_sb = nfs_kill_super, + .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA, +}; +EXPORT_SYMBOL_GPL(nfs4_fs_type); + +static int __init register_nfs4_fs(void) +{ + return register_filesystem(&nfs4_fs_type); +} + +static void unregister_nfs4_fs(void) +{ + unregister_filesystem(&nfs4_fs_type); +} +#else +static int __init register_nfs4_fs(void) +{ + return 0; +} + +static void unregister_nfs4_fs(void) +{ +} #endif static struct shrinker acl_shrinker = { @@ -337,12 +365,18 @@ int __init register_nfs_fs(void) if (ret < 0) goto error_0; - ret = nfs_register_sysctl(); + ret = register_nfs4_fs(); if (ret < 0) goto error_1; + + ret = nfs_register_sysctl(); + if (ret < 0) + goto error_2; register_shrinker(&acl_shrinker); return 0; +error_2: + unregister_nfs4_fs(); error_1: unregister_filesystem(&nfs_fs_type); error_0: @@ -356,6 +390,7 @@ void __exit unregister_nfs_fs(void) { unregister_shrinker(&acl_shrinker); nfs_unregister_sysctl(); + unregister_nfs4_fs(); unregister_filesystem(&nfs_fs_type); } @@ -1502,7 +1537,7 @@ static int nfs_parse_mount_options(char *raw, /* * verify that any proto=/mountproto= options match the address - * familiies in the addr=/mountaddr= options. + * families in the addr=/mountaddr= options. 
*/ if (protofamily != AF_UNSPEC && protofamily != mnt->nfs_server.address.ss_family) @@ -1832,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options, memcpy(sap, &data->addr, sizeof(data->addr)); args->nfs_server.addrlen = sizeof(data->addr); + args->nfs_server.port = ntohs(data->addr.sin_port); if (!nfs_verify_server_address(sap)) goto out_no_address; @@ -2529,6 +2565,7 @@ static int nfs4_validate_mount_data(void *options, return -EFAULT; if (!nfs_verify_server_address(sap)) goto out_no_address; + args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port); if (data->auth_flavourlen) { if (data->auth_flavourlen > 1) @@ -2645,4 +2682,6 @@ MODULE_PARM_DESC(max_session_slots, "Maximum number of outstanding NFSv4.1 " module_param(send_implementation_id, ushort, 0644); MODULE_PARM_DESC(send_implementation_id, "Send implementation ID with NFSv4.1 exchange_id"); +MODULE_ALIAS("nfs4"); + #endif /* CONFIG_NFS_V4 */ diff --git a/trunk/fs/nfs/write.c b/trunk/fs/nfs/write.c index 5829d0ce7cfb..e3b55372726c 100644 --- a/trunk/fs/nfs/write.c +++ b/trunk/fs/nfs/write.c @@ -1814,19 +1814,19 @@ int __init nfs_init_writepagecache(void) nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, nfs_wdata_cachep); if (nfs_wdata_mempool == NULL) - return -ENOMEM; + goto out_destroy_write_cache; nfs_cdata_cachep = kmem_cache_create("nfs_commit_data", sizeof(struct nfs_commit_data), 0, SLAB_HWCACHE_ALIGN, NULL); if (nfs_cdata_cachep == NULL) - return -ENOMEM; + goto out_destroy_write_mempool; nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, nfs_wdata_cachep); if (nfs_commit_mempool == NULL) - return -ENOMEM; + goto out_destroy_commit_cache; /* * NFS congestion size, scale with available memory. @@ -1849,11 +1849,20 @@ int __init nfs_init_writepagecache(void) nfs_congestion_kb = 256*1024; return 0; + +out_destroy_commit_cache: + kmem_cache_destroy(nfs_cdata_cachep); +out_destroy_write_mempool: + mempool_destroy(nfs_wdata_mempool); +out_destroy_write_cache: + kmem_cache_destroy(nfs_wdata_cachep); + return -ENOMEM; } void nfs_destroy_writepagecache(void) { mempool_destroy(nfs_commit_mempool); + kmem_cache_destroy(nfs_cdata_cachep); mempool_destroy(nfs_wdata_mempool); kmem_cache_destroy(nfs_wdata_cachep); } diff --git a/trunk/fs/nfsd/nfs4callback.c b/trunk/fs/nfsd/nfs4callback.c index cbaf4f8bb7b7..4c7bd35b1876 100644 --- a/trunk/fs/nfsd/nfs4callback.c +++ b/trunk/fs/nfsd/nfs4callback.c @@ -651,12 +651,12 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c if (clp->cl_minorversion == 0) { if (!clp->cl_cred.cr_principal && - (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) + (clp->cl_cred.cr_flavor >= RPC_AUTH_GSS_KRB5)) return -EINVAL; args.client_name = clp->cl_cred.cr_principal; args.prognumber = conn->cb_prog, args.protocol = XPRT_TRANSPORT_TCP; - args.authflavor = clp->cl_flavor; + args.authflavor = clp->cl_cred.cr_flavor; clp->cl_cb_ident = conn->cb_ident; } else { if (!conn->cb_xprt) diff --git a/trunk/fs/nfsd/state.h b/trunk/fs/nfsd/state.h index e6173147f982..22bd0a66c356 100644 --- a/trunk/fs/nfsd/state.h +++ b/trunk/fs/nfsd/state.h @@ -231,7 +231,6 @@ struct nfs4_client { nfs4_verifier cl_verifier; /* generated by client */ time_t cl_time; /* time of last lease renewal */ struct sockaddr_storage cl_addr; /* client ipaddress */ - u32 cl_flavor; /* setclientid pseudoflavor */ struct svc_cred cl_cred; /* setclientid principal */ clientid_t cl_clientid; /* generated by server */ nfs4_verifier cl_confirm; /* generated by server */ diff --git 
a/trunk/fs/nilfs2/super.c b/trunk/fs/nilfs2/super.c index 6522cac6057c..6a10812711c1 100644 --- a/trunk/fs/nilfs2/super.c +++ b/trunk/fs/nilfs2/super.c @@ -676,17 +676,13 @@ static const struct super_operations nilfs_sops = { .alloc_inode = nilfs_alloc_inode, .destroy_inode = nilfs_destroy_inode, .dirty_inode = nilfs_dirty_inode, - /* .write_inode = nilfs_write_inode, */ - /* .drop_inode = nilfs_drop_inode, */ .evict_inode = nilfs_evict_inode, .put_super = nilfs_put_super, - /* .write_super = nilfs_write_super, */ .sync_fs = nilfs_sync_fs, .freeze_fs = nilfs_freeze, .unfreeze_fs = nilfs_unfreeze, .statfs = nilfs_statfs, .remount_fs = nilfs_remount, - /* .umount_begin */ .show_options = nilfs_show_options }; diff --git a/trunk/fs/nilfs2/the_nilfs.h b/trunk/fs/nilfs2/the_nilfs.h index 6eee4177807b..be1267a34cea 100644 --- a/trunk/fs/nilfs2/the_nilfs.h +++ b/trunk/fs/nilfs2/the_nilfs.h @@ -107,8 +107,6 @@ struct the_nilfs { * used for * - loading the latest checkpoint exclusively. * - allocating a new full segment. - * - protecting s_dirt in the super_block struct - * (see nilfs_write_super) and the following fields. */ struct buffer_head *ns_sbh[2]; struct nilfs_super_block *ns_sbp[2]; diff --git a/trunk/fs/open.c b/trunk/fs/open.c index f3d96e7e7b19..e1f2cdb91a4d 100644 --- a/trunk/fs/open.c +++ b/trunk/fs/open.c @@ -717,7 +717,7 @@ static int do_dentry_open(struct file *f, * here, so just reset the state. */ file_reset_write(f); - mnt_drop_write(f->f_path.mnt); + __mnt_drop_write(f->f_path.mnt); } } cleanup_file: @@ -852,9 +852,10 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o int lookup_flags = 0; int acc_mode; - if (!(flags & O_CREAT)) - mode = 0; - op->mode = mode; + if (flags & O_CREAT) + op->mode = (mode & S_IALLUGO) | S_IFREG; + else + op->mode = 0; /* Must never be set by userspace */ flags &= ~FMODE_NONOTIFY; diff --git a/trunk/fs/proc/proc_sysctl.c b/trunk/fs/proc/proc_sysctl.c index dfafeb2b05a0..eb7cc91b7258 100644 --- a/trunk/fs/proc/proc_sysctl.c +++ b/trunk/fs/proc/proc_sysctl.c @@ -462,9 +462,6 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, err = ERR_PTR(-ENOMEM); inode = proc_sys_make_inode(dir->i_sb, h ? 
h : head, p); - if (h) - sysctl_head_finish(h); - if (!inode) goto out; @@ -473,6 +470,8 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry, d_add(dentry, inode); out: + if (h) + sysctl_head_finish(h); sysctl_head_finish(head); return err; } diff --git a/trunk/fs/quota/dquot.c b/trunk/fs/quota/dquot.c index 36a29b753c79..c495a3055e2a 100644 --- a/trunk/fs/quota/dquot.c +++ b/trunk/fs/quota/dquot.c @@ -1589,10 +1589,10 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags) goto out; } - down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); for (cnt = 0; cnt < MAXQUOTAS; cnt++) warn[cnt].w_type = QUOTA_NL_NOWARN; + down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); spin_lock(&dq_data_lock); for (cnt = 0; cnt < MAXQUOTAS; cnt++) { if (!dquots[cnt]) diff --git a/trunk/fs/reiserfs/bitmap.c b/trunk/fs/reiserfs/bitmap.c index 4c0c7d163d15..a98b7740a0fc 100644 --- a/trunk/fs/reiserfs/bitmap.c +++ b/trunk/fs/reiserfs/bitmap.c @@ -1334,9 +1334,7 @@ struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb, else if (bitmap == 0) block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1; - reiserfs_write_unlock(sb); bh = sb_bread(sb, block); - reiserfs_write_lock(sb); if (bh == NULL) reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) " "reading failed", __func__, block); diff --git a/trunk/fs/reiserfs/inode.c b/trunk/fs/reiserfs/inode.c index a6d4268fb6c1..855da58db145 100644 --- a/trunk/fs/reiserfs/inode.c +++ b/trunk/fs/reiserfs/inode.c @@ -76,10 +76,10 @@ void reiserfs_evict_inode(struct inode *inode) ; } out: + reiserfs_write_unlock_once(inode->i_sb, depth); clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */ dquot_drop(inode); inode->i_blocks = 0; - reiserfs_write_unlock_once(inode->i_sb, depth); return; no_delete: diff --git a/trunk/fs/stat.c b/trunk/fs/stat.c index b6ff11825fc8..40780229a032 100644 --- a/trunk/fs/stat.c +++ b/trunk/fs/stat.c @@ -58,7 +58,7 @@ EXPORT_SYMBOL(vfs_getattr); int vfs_fstat(unsigned int fd, struct kstat *stat) { int fput_needed; - struct file *f = fget_light(fd, &fput_needed); + struct file *f = fget_raw_light(fd, &fput_needed); int error = -EBADF; if (f) { diff --git a/trunk/fs/super.c b/trunk/fs/super.c index b05cf47463d0..0902cfa6a12e 100644 --- a/trunk/fs/super.c +++ b/trunk/fs/super.c @@ -536,46 +536,6 @@ void drop_super(struct super_block *sb) EXPORT_SYMBOL(drop_super); -/** - * sync_supers - helper for periodic superblock writeback - * - * Call the write_super method if present on all dirty superblocks in - * the system. This is for the periodic writeback used by most older - * filesystems. For data integrity superblock writeback use - * sync_filesystems() instead. - * - * Note: check the dirty flag before waiting, so we don't - * hold up the sync while mounting a device. (The newly - * mounted device won't need syncing.) 
- */ -void sync_supers(void) -{ - struct super_block *sb, *p = NULL; - - spin_lock(&sb_lock); - list_for_each_entry(sb, &super_blocks, s_list) { - if (hlist_unhashed(&sb->s_instances)) - continue; - if (sb->s_op->write_super && sb->s_dirt) { - sb->s_count++; - spin_unlock(&sb_lock); - - down_read(&sb->s_umount); - if (sb->s_root && sb->s_dirt && (sb->s_flags & MS_BORN)) - sb->s_op->write_super(sb); - up_read(&sb->s_umount); - - spin_lock(&sb_lock); - if (p) - __put_super(p); - p = sb; - } - } - if (p) - __put_super(p); - spin_unlock(&sb_lock); -} - /** * iterate_supers - call function for all active superblocks * @f: function to call diff --git a/trunk/fs/ubifs/debug.h b/trunk/fs/ubifs/debug.h index 8b8cc4e945f4..760de723dadb 100644 --- a/trunk/fs/ubifs/debug.h +++ b/trunk/fs/ubifs/debug.h @@ -167,7 +167,7 @@ struct ubifs_global_debug_info { #define ubifs_dbg_msg(type, fmt, ...) \ pr_debug("UBIFS DBG " type ": " fmt "\n", ##__VA_ARGS__) -#define DBG_KEY_BUF_LEN 32 +#define DBG_KEY_BUF_LEN 48 #define ubifs_dbg_msg_key(type, key, fmt, ...) do { \ char __tmp_key_buf[DBG_KEY_BUF_LEN]; \ pr_debug("UBIFS DBG " type ": " fmt "%s\n", ##__VA_ARGS__, \ diff --git a/trunk/fs/ubifs/file.c b/trunk/fs/ubifs/file.c index 35389ca2d267..7bd6e72afd11 100644 --- a/trunk/fs/ubifs/file.c +++ b/trunk/fs/ubifs/file.c @@ -37,11 +37,11 @@ * * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we * implement. However, this is not true for 'ubifs_writepage()', which may be - * called with @i_mutex unlocked. For example, when pdflush is doing background - * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal" - * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the - * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()' - * we are only guaranteed that the page is locked. + * called with @i_mutex unlocked. For example, when flusher thread is doing + * background write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. + * At "normal" work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. + * in the "sys_write -> alloc_pages -> direct reclaim path". So, in + * 'ubifs_writepage()' we are only guaranteed that the page is locked. 
* * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the * read-ahead path does not lock it ("sys_read -> generic_file_aio_read -> diff --git a/trunk/fs/ubifs/lpt.c b/trunk/fs/ubifs/lpt.c index ce33b2beb151..8640920766ed 100644 --- a/trunk/fs/ubifs/lpt.c +++ b/trunk/fs/ubifs/lpt.c @@ -1749,7 +1749,10 @@ int ubifs_lpt_init(struct ubifs_info *c, int rd, int wr) return 0; out_err: - ubifs_lpt_free(c, 0); + if (wr) + ubifs_lpt_free(c, 1); + if (rd) + ubifs_lpt_free(c, 0); return err; } diff --git a/trunk/fs/ubifs/recovery.c b/trunk/fs/ubifs/recovery.c index c30d976b4be8..edeec499c048 100644 --- a/trunk/fs/ubifs/recovery.c +++ b/trunk/fs/ubifs/recovery.c @@ -788,7 +788,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, corrupted_rescan: /* Re-scan the corrupted data with verbose messages */ - ubifs_err("corruptio %d", ret); + ubifs_err("corruption %d", ret); ubifs_scan_a_node(c, buf, len, lnum, offs, 1); corrupted: ubifs_scanned_corruption(c, lnum, offs, buf); diff --git a/trunk/fs/ubifs/replay.c b/trunk/fs/ubifs/replay.c index eba46d4a7619..94d78fc5d4e0 100644 --- a/trunk/fs/ubifs/replay.c +++ b/trunk/fs/ubifs/replay.c @@ -1026,7 +1026,6 @@ int ubifs_replay_journal(struct ubifs_info *c) c->replaying = 1; lnum = c->ltail_lnum = c->lhead_lnum; - lnum = UBIFS_LOG_LNUM; do { err = replay_log_leb(c, lnum, 0, c->sbuf); if (err == 1) @@ -1035,7 +1034,7 @@ int ubifs_replay_journal(struct ubifs_info *c) if (err) goto out; lnum = ubifs_next_log_lnum(c, lnum); - } while (lnum != UBIFS_LOG_LNUM); + } while (lnum != c->ltail_lnum); err = replay_buds(c); if (err) diff --git a/trunk/fs/ubifs/super.c b/trunk/fs/ubifs/super.c index 1c766c39c038..71a197f0f93d 100644 --- a/trunk/fs/ubifs/super.c +++ b/trunk/fs/ubifs/super.c @@ -303,7 +303,7 @@ static int ubifs_write_inode(struct inode *inode, struct writeback_control *wbc) mutex_lock(&ui->ui_mutex); /* * Due to races between write-back forced by budgeting - * (see 'sync_some_inodes()') and pdflush write-back, the inode may + * (see 'sync_some_inodes()') and background write-back, the inode may * have already been synchronized, do not do this again. This might * also happen if it was synchronized in an VFS operation, e.g. * 'ubifs_link()'. @@ -1157,9 +1157,6 @@ static int check_free_space(struct ubifs_info *c) * * This function mounts UBIFS file system. Returns zero in case of success and * a negative error code in case of failure. - * - * Note, the function does not de-allocate resources it it fails half way - * through, and the caller has to do this instead. 
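In the ubifs_lpt_init() hunk above, the error path no longer calls ubifs_lpt_free(c, 0) unconditionally; it frees the write-side LPT state when wr was requested and the read-side state when rd was requested, i.e. it undoes exactly what this call brought up. A generic, hedged sketch of that teardown pattern, with invented names (ctx, init_rd_part(), free_rd_part(), ...):

#include <linux/errno.h>

struct ctx { int rd_up; };

/* hypothetical helpers standing in for the rd/wr specific init and free */
int  init_rd_part(struct ctx *c);
int  init_wr_part(struct ctx *c);
void free_rd_part(struct ctx *c);

static int ctx_init(struct ctx *c, int rd, int wr)
{
	int err;

	if (rd) {
		err = init_rd_part(c);
		if (err)
			return err;		/* nothing to undo yet */
		c->rd_up = 1;
	}
	if (wr) {
		err = init_wr_part(c);
		if (err)
			goto out_err;
	}
	return 0;

out_err:
	/* undo only what this function actually set up */
	if (c->rd_up) {
		free_rd_part(c);
		c->rd_up = 0;
	}
	return err;
}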
*/ static int mount_ubifs(struct ubifs_info *c) { diff --git a/trunk/fs/udf/file.c b/trunk/fs/udf/file.c index 7f3f7ba3df6e..d1c6093fd3d3 100644 --- a/trunk/fs/udf/file.c +++ b/trunk/fs/udf/file.c @@ -39,20 +39,24 @@ #include "udf_i.h" #include "udf_sb.h" -static int udf_adinicb_readpage(struct file *file, struct page *page) +static void __udf_adinicb_readpage(struct page *page) { struct inode *inode = page->mapping->host; char *kaddr; struct udf_inode_info *iinfo = UDF_I(inode); - BUG_ON(!PageLocked(page)); - kaddr = kmap(page); - memset(kaddr, 0, PAGE_CACHE_SIZE); memcpy(kaddr, iinfo->i_ext.i_data + iinfo->i_lenEAttr, inode->i_size); + memset(kaddr + inode->i_size, 0, PAGE_CACHE_SIZE - inode->i_size); flush_dcache_page(page); SetPageUptodate(page); kunmap(page); +} + +static int udf_adinicb_readpage(struct file *file, struct page *page) +{ + BUG_ON(!PageLocked(page)); + __udf_adinicb_readpage(page); unlock_page(page); return 0; @@ -77,6 +81,25 @@ static int udf_adinicb_writepage(struct page *page, return 0; } +static int udf_adinicb_write_begin(struct file *file, + struct address_space *mapping, loff_t pos, + unsigned len, unsigned flags, struct page **pagep, + void **fsdata) +{ + struct page *page; + + if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE)) + return -EIO; + page = grab_cache_page_write_begin(mapping, 0, flags); + if (!page) + return -ENOMEM; + *pagep = page; + + if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) + __udf_adinicb_readpage(page); + return 0; +} + static int udf_adinicb_write_end(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, @@ -98,8 +121,8 @@ static int udf_adinicb_write_end(struct file *file, const struct address_space_operations udf_adinicb_aops = { .readpage = udf_adinicb_readpage, .writepage = udf_adinicb_writepage, - .write_begin = simple_write_begin, - .write_end = udf_adinicb_write_end, + .write_begin = udf_adinicb_write_begin, + .write_end = udf_adinicb_write_end, }; static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, diff --git a/trunk/fs/udf/inode.c b/trunk/fs/udf/inode.c index fafaad795cd6..aa233469b3c1 100644 --- a/trunk/fs/udf/inode.c +++ b/trunk/fs/udf/inode.c @@ -1124,14 +1124,17 @@ int udf_setsize(struct inode *inode, loff_t newsize) if (err) return err; down_write(&iinfo->i_data_sem); - } else + } else { iinfo->i_lenAlloc = newsize; + goto set_size; + } } err = udf_extend_file(inode, newsize); if (err) { up_write(&iinfo->i_data_sem); return err; } +set_size: truncate_setsize(inode, newsize); up_write(&iinfo->i_data_sem); } else { diff --git a/trunk/fs/udf/super.c b/trunk/fs/udf/super.c index dcbf98722afc..18fc038a438d 100644 --- a/trunk/fs/udf/super.c +++ b/trunk/fs/udf/super.c @@ -1344,6 +1344,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, udf_err(sb, "error loading logical volume descriptor: " "Partition table too long (%u > %lu)\n", table_len, sb->s_blocksize - sizeof(*lvd)); + ret = 1; goto out_bh; } @@ -1388,8 +1389,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) { if (udf_load_sparable_map(sb, map, - (struct sparablePartitionMap *)gpm) < 0) + (struct sparablePartitionMap *)gpm) < 0) { + ret = 1; goto out_bh; + } } else if (!strncmp(upm2->partIdent.ident, UDF_ID_METADATA, strlen(UDF_ID_METADATA))) { @@ -2000,6 +2003,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent) if (!silent) pr_notice("Rescanning with blocksize %d\n", UDF_DEFAULT_BLOCKSIZE); 
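The udf/file.c hunk above stops using simple_write_begin() for files whose data lives inside the inode (the in-ICB, or adinicb, case) and adds udf_adinicb_write_begin() instead: such a file can only ever occupy page index 0, and a partial write into a page that is not yet Uptodate has to be populated from the in-inode bytes before the user's data is copied in. A condensed sketch of that shape; fill_page_from_inode() is a hypothetical stand-in for __udf_adinicb_readpage():

#include <linux/fs.h>
#include <linux/pagemap.h>

/* hypothetical: copies the inline data into @page and marks it Uptodate */
void fill_page_from_inode(struct page *page);

static int inline_write_begin(struct file *file, struct address_space *mapping,
			      loff_t pos, unsigned len, unsigned flags,
			      struct page **pagep, void **fsdata)
{
	struct page *page;

	/* inline files never extend past one page */
	if (WARN_ON_ONCE(pos >= PAGE_CACHE_SIZE))
		return -EIO;

	page = grab_cache_page_write_begin(mapping, 0, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	/* a partial write must see the existing inline bytes first */
	if (!PageUptodate(page) && len != PAGE_CACHE_SIZE)
		fill_page_from_inode(page);
	return 0;
}

The aops table above pairs this with udf_adinicb_write_end() for the copy-back half of the update.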
+ brelse(sbi->s_lvid_bh); + sbi->s_lvid_bh = NULL; uopt.blocksize = UDF_DEFAULT_BLOCKSIZE; ret = udf_load_vrs(sb, &uopt, silent, &fileset); } diff --git a/trunk/fs/xfs/xfs_buf.c b/trunk/fs/xfs/xfs_buf.c index d7a9dd735e1e..933b7930b863 100644 --- a/trunk/fs/xfs/xfs_buf.c +++ b/trunk/fs/xfs/xfs_buf.c @@ -96,6 +96,7 @@ xfs_buf_lru_add( atomic_inc(&bp->b_hold); list_add_tail(&bp->b_lru, &btp->bt_lru); btp->bt_lru_nr++; + bp->b_lru_flags &= ~_XBF_LRU_DISPOSE; } spin_unlock(&btp->bt_lru_lock); } @@ -154,7 +155,8 @@ xfs_buf_stale( struct xfs_buftarg *btp = bp->b_target; spin_lock(&btp->bt_lru_lock); - if (!list_empty(&bp->b_lru)) { + if (!list_empty(&bp->b_lru) && + !(bp->b_lru_flags & _XBF_LRU_DISPOSE)) { list_del_init(&bp->b_lru); btp->bt_lru_nr--; atomic_dec(&bp->b_hold); @@ -1501,6 +1503,7 @@ xfs_buftarg_shrink( */ list_move(&bp->b_lru, &dispose); btp->bt_lru_nr--; + bp->b_lru_flags |= _XBF_LRU_DISPOSE; } spin_unlock(&btp->bt_lru_lock); diff --git a/trunk/fs/xfs/xfs_buf.h b/trunk/fs/xfs/xfs_buf.h index d03b73b9604e..7c0b6a0a1557 100644 --- a/trunk/fs/xfs/xfs_buf.h +++ b/trunk/fs/xfs/xfs_buf.h @@ -38,27 +38,28 @@ typedef enum { XBRW_ZERO = 3, /* Zero target memory */ } xfs_buf_rw_t; -#define XBF_READ (1 << 0) /* buffer intended for reading from device */ -#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */ -#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */ -#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ -#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ -#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ +#define XBF_READ (1 << 0) /* buffer intended for reading from device */ +#define XBF_WRITE (1 << 1) /* buffer intended for writing to device */ +#define XBF_READ_AHEAD (1 << 2) /* asynchronous read-ahead */ +#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */ +#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */ +#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */ /* I/O hints for the BIO layer */ -#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ -#define XBF_FUA (1 << 11)/* force cache write through mode */ -#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */ +#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */ +#define XBF_FUA (1 << 11)/* force cache write through mode */ +#define XBF_FLUSH (1 << 12)/* flush the disk cache before a write */ /* flags used only as arguments to access routines */ -#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */ -#define XBF_UNMAPPED (1 << 17)/* do not map the buffer */ +#define XBF_TRYLOCK (1 << 16)/* lock requested, but do not wait */ +#define XBF_UNMAPPED (1 << 17)/* do not map the buffer */ /* flags used only internally */ -#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */ -#define _XBF_KMEM (1 << 21)/* backed by heap memory */ -#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ -#define _XBF_COMPOUND (1 << 23)/* compound buffer */ +#define _XBF_PAGES (1 << 20)/* backed by refcounted pages */ +#define _XBF_KMEM (1 << 21)/* backed by heap memory */ +#define _XBF_DELWRI_Q (1 << 22)/* buffer on a delwri queue */ +#define _XBF_COMPOUND (1 << 23)/* compound buffer */ +#define _XBF_LRU_DISPOSE (1 << 24)/* buffer being discarded */ typedef unsigned int xfs_buf_flags_t; @@ -72,12 +73,13 @@ typedef unsigned int xfs_buf_flags_t; { XBF_SYNCIO, "SYNCIO" }, \ { XBF_FUA, "FUA" }, \ { XBF_FLUSH, "FLUSH" }, \ - { XBF_TRYLOCK, 
"TRYLOCK" }, /* should never be set */\ + { XBF_TRYLOCK, "TRYLOCK" }, /* should never be set */\ { XBF_UNMAPPED, "UNMAPPED" }, /* ditto */\ { _XBF_PAGES, "PAGES" }, \ { _XBF_KMEM, "KMEM" }, \ { _XBF_DELWRI_Q, "DELWRI_Q" }, \ - { _XBF_COMPOUND, "COMPOUND" } + { _XBF_COMPOUND, "COMPOUND" }, \ + { _XBF_LRU_DISPOSE, "LRU_DISPOSE" } typedef struct xfs_buftarg { dev_t bt_dev; @@ -124,7 +126,12 @@ typedef struct xfs_buf { xfs_buf_flags_t b_flags; /* status flags */ struct semaphore b_sema; /* semaphore for lockables */ + /* + * concurrent access to b_lru and b_lru_flags are protected by + * bt_lru_lock and not by b_sema + */ struct list_head b_lru; /* lru list */ + xfs_buf_flags_t b_lru_flags; /* internal lru status flags */ wait_queue_head_t b_waiters; /* unpin waiters */ struct list_head b_list; struct xfs_perag *b_pag; /* contains rbtree root */ diff --git a/trunk/fs/xfs/xfs_discard.c b/trunk/fs/xfs/xfs_discard.c index f9c3fe304a17..69cf4fcde03e 100644 --- a/trunk/fs/xfs/xfs_discard.c +++ b/trunk/fs/xfs/xfs_discard.c @@ -179,12 +179,14 @@ xfs_ioc_trim( * used by the fstrim application. In the end it really doesn't * matter as trimming blocks is an advisory interface. */ + if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) || + range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp))) + return -XFS_ERROR(EINVAL); + start = BTOBB(range.start); end = start + BTOBBT(range.len) - 1; minlen = BTOBB(max_t(u64, granularity, range.minlen)); - if (XFS_BB_TO_FSB(mp, start) >= mp->m_sb.sb_dblocks) - return -XFS_ERROR(EINVAL); if (end > XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1) end = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)- 1; diff --git a/trunk/fs/xfs/xfs_ialloc.c b/trunk/fs/xfs/xfs_ialloc.c index 21e37b55f7e5..5aceb3f8ecd6 100644 --- a/trunk/fs/xfs/xfs_ialloc.c +++ b/trunk/fs/xfs/xfs_ialloc.c @@ -962,23 +962,22 @@ xfs_dialloc( if (!pag->pagi_freecount && !okalloc) goto nextag; + /* + * Then read in the AGI buffer and recheck with the AGI buffer + * lock held. + */ error = xfs_ialloc_read_agi(mp, tp, agno, &agbp); if (error) goto out_error; - /* - * Once the AGI has been read in we have to recheck - * pagi_freecount with the AGI buffer lock held. - */ if (pag->pagi_freecount) { xfs_perag_put(pag); goto out_alloc; } - if (!okalloc) { - xfs_trans_brelse(tp, agbp); - goto nextag; - } + if (!okalloc) + goto nextag_relse_buffer; + error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced); if (error) { @@ -1007,6 +1006,8 @@ xfs_dialloc( return 0; } +nextag_relse_buffer: + xfs_trans_brelse(tp, agbp); nextag: xfs_perag_put(pag); if (++agno == mp->m_sb.sb_agcount) diff --git a/trunk/fs/xfs/xfs_rtalloc.c b/trunk/fs/xfs/xfs_rtalloc.c index 92d4331cd4f1..ca28a4ba4b54 100644 --- a/trunk/fs/xfs/xfs_rtalloc.c +++ b/trunk/fs/xfs/xfs_rtalloc.c @@ -857,7 +857,7 @@ xfs_rtbuf_get( xfs_buf_t *bp; /* block buffer, result */ xfs_inode_t *ip; /* bitmap or summary inode */ xfs_bmbt_irec_t map; - int nmap; + int nmap = 1; int error; /* error value */ ip = issum ? 
mp->m_rsumip : mp->m_rbmip; diff --git a/trunk/fs/xfs/xfs_super.c b/trunk/fs/xfs/xfs_super.c index bdaf4cb9f4a2..19e2380fb867 100644 --- a/trunk/fs/xfs/xfs_super.c +++ b/trunk/fs/xfs/xfs_super.c @@ -919,6 +919,7 @@ xfs_fs_put_super( struct xfs_mount *mp = XFS_M(sb); xfs_filestream_unmount(mp); + cancel_delayed_work_sync(&mp->m_sync_work); xfs_unmountfs(mp); xfs_syncd_stop(mp); xfs_freesb(mp); diff --git a/trunk/include/acpi/acpixf.h b/trunk/include/acpi/acpixf.h index 2c744c7a5b3d..26a92fc28a59 100644 --- a/trunk/include/acpi/acpixf.h +++ b/trunk/include/acpi/acpixf.h @@ -491,11 +491,11 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * slp_typ_a, u8 * slp_typ_b); acpi_status acpi_enter_sleep_state_prep(u8 sleep_state); -acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state, u8 flags); +acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state); ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status asmlinkage acpi_enter_sleep_state_s4bios(void)) -acpi_status acpi_leave_sleep_state_prep(u8 sleep_state, u8 flags); +acpi_status acpi_leave_sleep_state_prep(u8 sleep_state); acpi_status acpi_leave_sleep_state(u8 sleep_state); diff --git a/trunk/include/acpi/actypes.h b/trunk/include/acpi/actypes.h index 3af87de6a68c..3d00bd5bd7e3 100644 --- a/trunk/include/acpi/actypes.h +++ b/trunk/include/acpi/actypes.h @@ -803,7 +803,7 @@ typedef u8 acpi_adr_space_type; /* Sleep function dispatch */ -typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state, u8 flags); +typedef acpi_status(*ACPI_SLEEP_FUNCTION) (u8 sleep_state); struct acpi_sleep_functions { ACPI_SLEEP_FUNCTION legacy_function; diff --git a/trunk/include/asm-generic/mutex-xchg.h b/trunk/include/asm-generic/mutex-xchg.h index 580a6d35c700..c04e0db8a2d6 100644 --- a/trunk/include/asm-generic/mutex-xchg.h +++ b/trunk/include/asm-generic/mutex-xchg.h @@ -26,7 +26,13 @@ static inline void __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *)) { if (unlikely(atomic_xchg(count, 0) != 1)) - fail_fn(count); + /* + * We failed to acquire the lock, so mark it contended + * to ensure that any waiting tasks are woken up by the + * unlock slow path. 
+ */ + if (likely(atomic_xchg(count, -1) != 1)) + fail_fn(count); } /** @@ -43,7 +49,8 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *)) { if (unlikely(atomic_xchg(count, 0) != 1)) - return fail_fn(count); + if (likely(atomic_xchg(count, -1) != 1)) + return fail_fn(count); return 0; } diff --git a/trunk/include/asm-generic/unistd.h b/trunk/include/asm-generic/unistd.h index 991ef01cd77e..3748ec92dcbc 100644 --- a/trunk/include/asm-generic/unistd.h +++ b/trunk/include/asm-generic/unistd.h @@ -691,9 +691,11 @@ __SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \ #define __NR_process_vm_writev 271 __SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \ compat_sys_process_vm_writev) +#define __NR_kcmp 272 +__SYSCALL(__NR_kcmp, sys_kcmp) #undef __NR_syscalls -#define __NR_syscalls 272 +#define __NR_syscalls 273 /* * All syscalls below here should go away really, diff --git a/trunk/include/drm/drm_crtc.h b/trunk/include/drm/drm_crtc.h index a1a0386e0160..bfacf0d5a225 100644 --- a/trunk/include/drm/drm_crtc.h +++ b/trunk/include/drm/drm_crtc.h @@ -118,7 +118,8 @@ enum drm_mode_status { .hdisplay = (hd), .hsync_start = (hss), .hsync_end = (hse), \ .htotal = (ht), .hskew = (hsk), .vdisplay = (vd), \ .vsync_start = (vss), .vsync_end = (vse), .vtotal = (vt), \ - .vscan = (vs), .flags = (f), .vrefresh = 0 + .vscan = (vs), .flags = (f), .vrefresh = 0, \ + .base.type = DRM_MODE_OBJECT_MODE #define CRTC_INTERLACE_HALVE_V 0x1 /* halve V values for interlacing */ @@ -166,8 +167,6 @@ struct drm_display_mode { int crtc_vsync_start; int crtc_vsync_end; int crtc_vtotal; - int crtc_hadjusted; - int crtc_vadjusted; /* Driver private mode info */ int private_size; diff --git a/trunk/include/drm/drm_fourcc.h b/trunk/include/drm/drm_fourcc.h index bdf0152cbbe9..f4621184a9b4 100644 --- a/trunk/include/drm/drm_fourcc.h +++ b/trunk/include/drm/drm_fourcc.h @@ -107,8 +107,7 @@ #define DRM_FORMAT_NV16 fourcc_code('N', 'V', '1', '6') /* 2x1 subsampled Cr:Cb plane */ #define DRM_FORMAT_NV61 fourcc_code('N', 'V', '6', '1') /* 2x1 subsampled Cb:Cr plane */ -/* 2 non contiguous plane YCbCr */ -#define DRM_FORMAT_NV12M fourcc_code('N', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane */ +/* special NV12 tiled format */ #define DRM_FORMAT_NV12MT fourcc_code('T', 'M', '1', '2') /* 2x2 subsampled Cr:Cb plane 64x32 macroblocks */ /* @@ -131,7 +130,4 @@ #define DRM_FORMAT_YUV444 fourcc_code('Y', 'U', '2', '4') /* non-subsampled Cb (1) and Cr (2) planes */ #define DRM_FORMAT_YVU444 fourcc_code('Y', 'V', '2', '4') /* non-subsampled Cr (1) and Cb (2) planes */ -/* 3 non contiguous plane YCbCr */ -#define DRM_FORMAT_YUV420M fourcc_code('Y', 'M', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes */ - #endif /* DRM_FOURCC_H */ diff --git a/trunk/include/drm/drm_mode.h b/trunk/include/drm/drm_mode.h index 5581980b14f6..3d6301b6ec16 100644 --- a/trunk/include/drm/drm_mode.h +++ b/trunk/include/drm/drm_mode.h @@ -359,8 +359,9 @@ struct drm_mode_mode_cmd { struct drm_mode_modeinfo mode; }; -#define DRM_MODE_CURSOR_BO (1<<0) -#define DRM_MODE_CURSOR_MOVE (1<<1) +#define DRM_MODE_CURSOR_BO 0x01 +#define DRM_MODE_CURSOR_MOVE 0x02 +#define DRM_MODE_CURSOR_FLAGS 0x03 /* * depending on the value in flags different members are used. 
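On the asm-generic/mutex-xchg.h change a few hunks above: the fastpath xchg()es the lock counter to 0, and when that exchange does not return 1 it may have just overwritten a -1, losing the record that other tasks are already waiting. The fix exchanges the counter back to -1 before heading into the slow path; if that second xchg() happens to return 1 (the holder released the lock in the window), the caller now owns the lock and the slow path is skipped. A compact restatement of the fixed fastpath with the counter convention spelled out:

#include <linux/atomic.h>
#include <linux/compiler.h>

/*
 * Counter convention used by asm-generic/mutex-xchg.h:
 *   1   unlocked
 *   0   locked, no waiters
 *  -1   locked, possible waiters (unlock must take the slow path)
 */
static inline void xchg_mutex_lock(atomic_t *count,
				   void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_xchg(count, 0) != 1))
		/*
		 * The lock was not free; the xchg above may have replaced a
		 * -1 with 0, so restore "contended" before sleeping in the
		 * slow path.  If the second xchg reads 1, we got the lock
		 * after all; leaving the counter at -1 merely sends the
		 * eventual unlock through its slow path, which is harmless.
		 */
		if (likely(atomic_xchg(count, -1) != 1))
			fail_fn(count);
}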
diff --git a/trunk/include/drm/drm_pciids.h b/trunk/include/drm/drm_pciids.h index 7ff5c99b1638..c78bb997e2c6 100644 --- a/trunk/include/drm/drm_pciids.h +++ b/trunk/include/drm/drm_pciids.h @@ -213,9 +213,12 @@ {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ + {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \ {0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ diff --git a/trunk/include/drm/radeon_drm.h b/trunk/include/drm/radeon_drm.h index 58056865b8e9..dc3a8cd7db8a 100644 --- a/trunk/include/drm/radeon_drm.h +++ b/trunk/include/drm/radeon_drm.h @@ -964,6 +964,8 @@ struct drm_radeon_cs { #define RADEON_INFO_IB_VM_MAX_SIZE 0x0f /* max pipes - needed for compute shaders */ #define RADEON_INFO_MAX_PIPES 0x10 +/* timestamp for GL_ARB_timer_query (OpenGL), returns the current GPU clock */ +#define RADEON_INFO_TIMESTAMP 0x11 struct drm_radeon_info { uint32_t request; diff --git a/trunk/include/linux/Kbuild b/trunk/include/linux/Kbuild index d9a754474878..fa217607c582 100644 --- a/trunk/include/linux/Kbuild +++ b/trunk/include/linux/Kbuild @@ -391,6 +391,7 @@ header-y += v4l2-dv-timings.h header-y += v4l2-mediabus.h header-y += v4l2-subdev.h header-y += veth.h +header-y += vfio.h header-y += vhost.h header-y += videodev2.h header-y += virtio_9p.h diff --git a/trunk/include/linux/acpi.h b/trunk/include/linux/acpi.h index 3ad510b25283..4f2a76224509 100644 --- a/trunk/include/linux/acpi.h +++ b/trunk/include/linux/acpi.h @@ -96,7 +96,7 @@ void acpi_table_print_madt_entry (struct acpi_subtable_header *madt); void acpi_numa_slit_init (struct acpi_table_slit *slit); void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa); void acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa); -void acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); +int acpi_numa_memory_affinity_init (struct acpi_srat_mem_affinity *ma); void acpi_numa_arch_fixup(void); #ifdef CONFIG_ACPI_HOTPLUG_CPU diff --git a/trunk/include/linux/atmel-ssc.h b/trunk/include/linux/atmel-ssc.h index 06023393fba9..4eb31752e2b7 100644 --- a/trunk/include/linux/atmel-ssc.h +++ b/trunk/include/linux/atmel-ssc.h @@ -3,6 +3,7 @@ #include #include +#include struct ssc_device { struct list_head list; diff --git a/trunk/include/linux/backing-dev.h b/trunk/include/linux/backing-dev.h index c97c6b9cd38e..2a9a9abc9126 100644 --- a/trunk/include/linux/backing-dev.h +++ b/trunk/include/linux/backing-dev.h @@ -124,7 +124,6 @@ void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages, void bdi_start_background_writeback(struct backing_dev_info *bdi); int bdi_writeback_thread(void *data); 
int bdi_has_dirty_io(struct backing_dev_info *bdi); -void bdi_arm_supers_timer(void); void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2); diff --git a/trunk/include/linux/bcma/bcma_driver_chipcommon.h b/trunk/include/linux/bcma/bcma_driver_chipcommon.h index 3c80885fa829..d323a4b4143c 100644 --- a/trunk/include/linux/bcma/bcma_driver_chipcommon.h +++ b/trunk/include/linux/bcma/bcma_driver_chipcommon.h @@ -89,6 +89,12 @@ #define BCMA_CC_CHIPST_4313_OTP_PRESENT 2 #define BCMA_CC_CHIPST_4331_SPROM_PRESENT 2 #define BCMA_CC_CHIPST_4331_OTP_PRESENT 4 +#define BCMA_CC_CHIPST_43228_ILP_DIV_EN 0x00000001 +#define BCMA_CC_CHIPST_43228_OTP_PRESENT 0x00000002 +#define BCMA_CC_CHIPST_43228_SERDES_REFCLK_PADSEL 0x00000004 +#define BCMA_CC_CHIPST_43228_SDIO_MODE 0x00000008 +#define BCMA_CC_CHIPST_43228_SDIO_OTP_PRESENT 0x00000010 +#define BCMA_CC_CHIPST_43228_SDIO_RESET 0x00000020 #define BCMA_CC_CHIPST_4706_PKG_OPTION BIT(0) /* 0: full-featured package 1: low-cost package */ #define BCMA_CC_CHIPST_4706_SFLASH_PRESENT BIT(1) /* 0: parallel, 1: serial flash is present */ #define BCMA_CC_CHIPST_4706_SFLASH_TYPE BIT(2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */ diff --git a/trunk/include/linux/blkdev.h b/trunk/include/linux/blkdev.h index 4e72a9d48232..4a2ab7c85393 100644 --- a/trunk/include/linux/blkdev.h +++ b/trunk/include/linux/blkdev.h @@ -601,7 +601,7 @@ static inline void blk_clear_rl_full(struct request_list *rl, bool sync) * it already be started by driver. */ #define RQ_NOMERGE_FLAGS \ - (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA) + (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_DISCARD) #define rq_mergeable(rq) \ (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ (((rq)->cmd_flags & REQ_DISCARD) || \ @@ -894,6 +894,8 @@ extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); +extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio, + struct scatterlist *sglist); extern void blk_dump_rq_flags(struct request *, char *); extern long nr_blockdev_pages(void); @@ -1139,6 +1141,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector & (lim->discard_granularity - 1); } +static inline int bdev_discard_alignment(struct block_device *bdev) +{ + struct request_queue *q = bdev_get_queue(bdev); + + if (bdev != bdev->bd_contains) + return bdev->bd_part->discard_alignment; + + return q->limits.discard_alignment; +} + static inline unsigned int queue_discard_zeroes_data(struct request_queue *q) { if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1) diff --git a/trunk/include/linux/can.h b/trunk/include/linux/can.h index 018055efc034..e52958d7c2d1 100644 --- a/trunk/include/linux/can.h +++ b/trunk/include/linux/can.h @@ -74,20 +74,21 @@ struct can_frame { /* * defined bits for canfd_frame.flags * - * As the default for CAN FD should be to support the high data rate in the - * payload section of the frame (HDR) and to support up to 64 byte in the - * data section (EDL) the bits are only set in the non-default case. - * Btw. as long as there's no real implementation for CAN FD network driver - * these bits are only preliminary. 
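The blkdev.h hunk above adds a bdev_discard_alignment() helper that returns the partition's own discard alignment when the block_device is a partition and falls back to the queue limit for whole devices. A short sketch of the kind of caller it is meant for, here reporting the value for the device backing a super_block; the wrapper name is made up:

#include <linux/fs.h>
#include <linux/blkdev.h>

/* hypothetical helper: where discard-aligned extents start on the backing device */
static int example_discard_alignment(struct super_block *sb)
{
	/*
	 * For a partition this is bd_part->discard_alignment; for a whole
	 * device it is the request queue's limits.discard_alignment.
	 */
	return bdev_discard_alignment(sb->s_bdev);
}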
+ * The use of struct canfd_frame implies the Extended Data Length (EDL) bit to + * be set in the CAN frame bitstream on the wire. The EDL bit switch turns + * the CAN controllers bitstream processor into the CAN FD mode which creates + * two new options within the CAN FD frame specification: * - * RX: NOHDR/NOEDL - info about received CAN FD frame - * ESI - bit from originating CAN controller - * TX: NOHDR/NOEDL - control per-frame settings if supported by CAN controller - * ESI - bit is set by local CAN controller + * Bit Rate Switch - to indicate a second bitrate is/was used for the payload + * Error State Indicator - represents the error state of the transmitting node + * + * As the CANFD_ESI bit is internally generated by the transmitting CAN + * controller only the CANFD_BRS bit is relevant for real CAN controllers when + * building a CAN FD frame for transmission. Setting the CANFD_ESI bit can make + * sense for virtual CAN interfaces to test applications with echoed frames. */ -#define CANFD_NOHDR 0x01 /* frame without high data rate */ -#define CANFD_NOEDL 0x02 /* frame without extended data length */ -#define CANFD_ESI 0x04 /* error state indicator */ +#define CANFD_BRS 0x01 /* bit rate switch (second bitrate for payload data) */ +#define CANFD_ESI 0x02 /* error state indicator of the transmitting node */ /** * struct canfd_frame - CAN flexible data rate frame structure diff --git a/trunk/include/linux/compaction.h b/trunk/include/linux/compaction.h index 133ddcf83397..ef658147e4e8 100644 --- a/trunk/include/linux/compaction.h +++ b/trunk/include/linux/compaction.h @@ -22,7 +22,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - bool sync); + bool sync, bool *contended); extern int compact_pgdat(pg_data_t *pgdat, int order); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -64,7 +64,7 @@ static inline bool compaction_deferred(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync) + bool sync, bool *contended) { return COMPACT_CONTINUE; } diff --git a/trunk/include/linux/compiler-gcc4.h b/trunk/include/linux/compiler-gcc4.h index 2f4079175afb..934bc34d5f99 100644 --- a/trunk/include/linux/compiler-gcc4.h +++ b/trunk/include/linux/compiler-gcc4.h @@ -49,6 +49,13 @@ #endif #endif +#if __GNUC_MINOR__ >= 6 +/* + * Tell the optimizer that something else uses this function or variable. + */ +#define __visible __attribute__((externally_visible)) +#endif + #if __GNUC_MINOR__ > 0 #define __compiletime_object_size(obj) __builtin_object_size(obj, 0) #endif diff --git a/trunk/include/linux/compiler.h b/trunk/include/linux/compiler.h index 923d093c9cea..f430e4162f41 100644 --- a/trunk/include/linux/compiler.h +++ b/trunk/include/linux/compiler.h @@ -278,6 +278,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define __section(S) __attribute__ ((__section__(#S))) #endif +#ifndef __visible +#define __visible +#endif + /* Are two types/vars the same type (ignoring qualifiers)? 
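With the linux/can.h rework above, a CAN FD frame carries just two flag bits: CANFD_BRS to request the second (data-phase) bitrate, and CANFD_ESI, which is normally generated by the transmitting controller rather than set by applications. A small user-space sketch that builds and sends such a frame; it assumes sock is a CAN_RAW socket already bound to an interface and switched to CAN FD frames (typically via the CAN_RAW_FD_FRAMES socket option), all of which is outside this hunk:

#include <string.h>
#include <unistd.h>
#include <linux/can.h>

static int send_canfd_frame(int sock)
{
	struct canfd_frame cf;
	ssize_t n;

	memset(&cf, 0, sizeof(cf));
	cf.can_id = 0x123;
	cf.len    = 12;			/* CAN FD payloads may be up to 64 bytes */
	cf.flags  = CANFD_BRS;		/* use the faster bitrate for the payload */
	memcpy(cf.data, "hello, canfd", 12);

	n = write(sock, &cf, sizeof(cf));
	return n == (ssize_t)sizeof(cf) ? 0 : -1;
}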
*/ #ifndef __same_type # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) diff --git a/trunk/include/linux/cpuidle.h b/trunk/include/linux/cpuidle.h index 040b13b5c14a..279b1eaa8b73 100644 --- a/trunk/include/linux/cpuidle.h +++ b/trunk/include/linux/cpuidle.h @@ -194,6 +194,10 @@ static inline int cpuidle_play_dead(void) {return -ENODEV; } #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a); +#else +static inline void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a) +{ +} #endif /****************************** diff --git a/trunk/include/linux/dcache.h b/trunk/include/linux/dcache.h index caa34e50537e..59200795482e 100644 --- a/trunk/include/linux/dcache.h +++ b/trunk/include/linux/dcache.h @@ -206,6 +206,8 @@ struct dentry_operations { #define DCACHE_MANAGED_DENTRY \ (DCACHE_MOUNTED|DCACHE_NEED_AUTOMOUNT|DCACHE_MANAGE_TRANSIT) +#define DCACHE_DENTRY_KILLED 0x100000 + extern seqlock_t rename_lock; static inline int dname_external(struct dentry *dentry) diff --git a/trunk/include/linux/efi-bgrt.h b/trunk/include/linux/efi-bgrt.h new file mode 100644 index 000000000000..051b21fedf68 --- /dev/null +++ b/trunk/include/linux/efi-bgrt.h @@ -0,0 +1,21 @@ +#ifndef _LINUX_EFI_BGRT_H +#define _LINUX_EFI_BGRT_H + +#ifdef CONFIG_ACPI_BGRT + +#include + +void efi_bgrt_init(void); + +/* The BGRT data itself; only valid if bgrt_image != NULL. */ +extern void *bgrt_image; +extern size_t bgrt_image_size; +extern struct acpi_table_bgrt *bgrt_tab; + +#else /* !CONFIG_ACPI_BGRT */ + +static inline void efi_bgrt_init(void) {} + +#endif /* !CONFIG_ACPI_BGRT */ + +#endif /* _LINUX_EFI_BGRT_H */ diff --git a/trunk/include/linux/efi.h b/trunk/include/linux/efi.h index 103adc6d7e3a..8670eb1eb8cd 100644 --- a/trunk/include/linux/efi.h +++ b/trunk/include/linux/efi.h @@ -496,6 +496,14 @@ extern void efi_map_pal_code (void); extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg); extern void efi_gettimeofday (struct timespec *ts); extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */ +#ifdef CONFIG_X86 +extern void efi_late_init(void); +extern void efi_free_boot_services(void); +#else +static inline void efi_late_init(void) {} +static inline void efi_free_boot_services(void) {} +#endif +extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr); extern u64 efi_get_iobase (void); extern u32 efi_mem_type (unsigned long phys_addr); extern u64 efi_mem_attributes (unsigned long phys_addr); @@ -503,6 +511,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size); extern int __init efi_uart_console_only (void); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); +extern unsigned long efi_get_time(void); +extern int efi_set_rtc_mmss(unsigned long nowtime); extern void efi_reserve_boot_services(void); extern struct efi_memory_map memmap; diff --git a/trunk/include/linux/fs.h b/trunk/include/linux/fs.h index 38dba16c4176..aa110476a95b 100644 --- a/trunk/include/linux/fs.h +++ b/trunk/include/linux/fs.h @@ -1491,7 +1491,6 @@ struct sb_writers { struct super_block { struct list_head s_list; /* Keep this first */ dev_t s_dev; /* search index; _not_ kdev_t */ - unsigned char s_dirt; unsigned char s_blocksize_bits; unsigned long s_blocksize; loff_t s_maxbytes; /* Max file size */ @@ -1861,7 +1860,6 @@ struct super_operations { int (*drop_inode) (struct 
inode *); void (*evict_inode) (struct inode *); void (*put_super) (struct super_block *); - void (*write_super) (struct super_block *); int (*sync_fs)(struct super_block *sb, int wait); int (*freeze_fs) (struct super_block *); int (*unfreeze_fs) (struct super_block *); @@ -2397,7 +2395,6 @@ extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync); extern int vfs_fsync(struct file *file, int datasync); extern int generic_write_sync(struct file *file, loff_t pos, loff_t count); -extern void sync_supers(void); extern void emergency_sync(void); extern void emergency_remount(void); #ifdef CONFIG_BLOCK diff --git a/trunk/include/linux/ftrace.h b/trunk/include/linux/ftrace.h index 55e6d63d46d0..a52f2f4fe030 100644 --- a/trunk/include/linux/ftrace.h +++ b/trunk/include/linux/ftrace.h @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include @@ -18,6 +19,28 @@ #include +/* + * If the arch supports passing the variable contents of + * function_trace_op as the third parameter back from the + * mcount call, then the arch should define this as 1. + */ +#ifndef ARCH_SUPPORTS_FTRACE_OPS +#define ARCH_SUPPORTS_FTRACE_OPS 0 +#endif + +/* + * If the arch's mcount caller does not support all of ftrace's + * features, then it must call an indirect function that + * does. Or at least does enough to prevent any unwelcomed side effects. + */ +#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \ + !ARCH_SUPPORTS_FTRACE_OPS +# define FTRACE_FORCE_LIST_FUNC 1 +#else +# define FTRACE_FORCE_LIST_FUNC 0 +#endif + + struct module; struct ftrace_hash; @@ -29,7 +52,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); -typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); +struct ftrace_ops; + +typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs); /* * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are @@ -45,12 +71,33 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); * could be controled by following calls: * ftrace_function_local_enable * ftrace_function_local_disable + * SAVE_REGS - The ftrace_ops wants regs saved at each function called + * and passed to the callback. If this flag is set, but the + * architecture does not support passing regs + * (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the + * ftrace_ops will fail to register, unless the next flag + * is set. + * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the + * handler can handle an arch that does not save regs + * (the handler tests if regs == NULL), then it can set + * this flag instead. It will not fail registering the ftrace_ops + * but, the regs field will be NULL if the arch does not support + * passing regs to the handler. + * Note, if this flag is set, the SAVE_REGS flag will automatically + * get set upon registering the ftrace_ops, if the arch supports it. + * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure + * that the call back has its own recursion protection. If it does + * not set this, then the ftrace infrastructure will add recursion + * protection for the caller. 
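The ftrace.h changes above turn ftrace_func_t into a four-argument callback (the registered ftrace_ops and, where the architecture provides them, a pt_regs snapshot are now passed along) and add the SAVE_REGS, SAVE_REGS_IF_SUPPORTED and RECURSION_SAFE flags documented in the new comment. A hedged sketch of a callback written against the new prototype; the probe body is invented for illustration, and a real user would still attach it with register_ftrace_function():

#include <linux/ftrace.h>
#include <linux/kernel.h>

static void example_trace_func(unsigned long ip, unsigned long parent_ip,
			       struct ftrace_ops *op, struct pt_regs *regs)
{
	/*
	 * regs is non-NULL only if this ops asked for register saving and
	 * the architecture supports it; with ..._IF_SUPPORTED the callback
	 * must cope with regs == NULL.
	 */
	if (regs)
		pr_debug("hit %pS called from %pS\n",
			 (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops example_ops = {
	.func	= example_trace_func,
	/* request regs where available; fall back to NULL regs otherwise */
	.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};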
*/ enum { - FTRACE_OPS_FL_ENABLED = 1 << 0, - FTRACE_OPS_FL_GLOBAL = 1 << 1, - FTRACE_OPS_FL_DYNAMIC = 1 << 2, - FTRACE_OPS_FL_CONTROL = 1 << 3, + FTRACE_OPS_FL_ENABLED = 1 << 0, + FTRACE_OPS_FL_GLOBAL = 1 << 1, + FTRACE_OPS_FL_DYNAMIC = 1 << 2, + FTRACE_OPS_FL_CONTROL = 1 << 3, + FTRACE_OPS_FL_SAVE_REGS = 1 << 4, + FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5, + FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6, }; struct ftrace_ops { @@ -163,7 +210,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) return *this_cpu_ptr(ops->disabled); } -extern void ftrace_stub(unsigned long a0, unsigned long a1); +extern void ftrace_stub(unsigned long a0, unsigned long a1, + struct ftrace_ops *op, struct pt_regs *regs); #else /* !CONFIG_FUNCTION_TRACER */ /* @@ -172,6 +220,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1); */ #define register_ftrace_function(ops) ({ 0; }) #define unregister_ftrace_function(ops) ({ 0; }) +static inline int ftrace_nr_registered_ops(void) +{ + return 0; +} static inline void clear_ftrace_function(void) { } static inline void ftrace_kill(void) { } static inline void ftrace_stop(void) { } @@ -227,12 +279,33 @@ extern void unregister_ftrace_function_probe_all(char *glob); extern int ftrace_text_reserved(void *start, void *end); +extern int ftrace_nr_registered_ops(void); + +/* + * The dyn_ftrace record's flags field is split into two parts. + * the first part which is '0-FTRACE_REF_MAX' is a counter of + * the number of callbacks that have registered the function that + * the dyn_ftrace descriptor represents. + * + * The second part is a mask: + * ENABLED - the function is being traced + * REGS - the record wants the function to save regs + * REGS_EN - the function is set up to save regs. + * + * When a new ftrace_ops is registered and wants a function to save + * pt_regs, the rec->flag REGS is set. When the function has been + * set up to save regs, the REG_EN flag is set. Once a function + * starts saving regs it will do so until all ftrace_ops are removed + * from tracing that function. + */ enum { - FTRACE_FL_ENABLED = (1 << 30), + FTRACE_FL_ENABLED = (1UL << 29), + FTRACE_FL_REGS = (1UL << 30), + FTRACE_FL_REGS_EN = (1UL << 31) }; -#define FTRACE_FL_MASK (0x3UL << 30) -#define FTRACE_REF_MAX ((1 << 30) - 1) +#define FTRACE_FL_MASK (0x7UL << 29) +#define FTRACE_REF_MAX ((1UL << 29) - 1) struct dyn_ftrace { union { @@ -244,6 +317,8 @@ struct dyn_ftrace { }; int ftrace_force_update(void); +int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, + int remove, int reset); int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int len, int reset); int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, @@ -263,9 +338,23 @@ enum { FTRACE_STOP_FUNC_RET = (1 << 4), }; +/* + * The FTRACE_UPDATE_* enum is used to pass information back + * from the ftrace_update_record() and ftrace_test_record() + * functions. These are called by the code update routines + * to find out what is to be done for a given function. 
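ftrace_set_filter_ip(), declared in the hunk above, narrows an ftrace_ops to a single call site given its address rather than a name glob, with the remove and reset arguments controlling whether the address is added to, removed from, or replaces the current filter. A sketch of attaching an ops such as the one in the previous example to one function; kallsyms_lookup_name() is used here only to obtain an address and is not part of this patch:

#include <linux/ftrace.h>
#include <linux/kallsyms.h>
#include <linux/errno.h>

static int example_attach(struct ftrace_ops *ops, const char *symbol)
{
	unsigned long ip = kallsyms_lookup_name(symbol);
	int ret;

	if (!ip)
		return -ENOENT;

	/* remove=0, reset=1: make this address the only filtered site */
	ret = ftrace_set_filter_ip(ops, ip, 0, 1);
	if (ret)
		return ret;

	return register_ftrace_function(ops);
}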
+ * + * IGNORE - The function is already what we want it to be + * MAKE_CALL - Start tracing the function + * MODIFY_CALL - Stop saving regs for the function + * MODIFY_CALL_REGS - Start saving regs for the function + * MAKE_NOP - Stop tracing the function + */ enum { FTRACE_UPDATE_IGNORE, FTRACE_UPDATE_MAKE_CALL, + FTRACE_UPDATE_MODIFY_CALL, + FTRACE_UPDATE_MODIFY_CALL_REGS, FTRACE_UPDATE_MAKE_NOP, }; @@ -317,7 +406,9 @@ extern int ftrace_dyn_arch_init(void *data); extern void ftrace_replace_code(int enable); extern int ftrace_update_ftrace_func(ftrace_func_t func); extern void ftrace_caller(void); +extern void ftrace_regs_caller(void); extern void ftrace_call(void); +extern void ftrace_regs_call(void); extern void mcount_call(void); void ftrace_modify_all_code(int command); @@ -325,6 +416,15 @@ void ftrace_modify_all_code(int command); #ifndef FTRACE_ADDR #define FTRACE_ADDR ((unsigned long)ftrace_caller) #endif + +#ifndef FTRACE_REGS_ADDR +#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS +# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) +#else +# define FTRACE_REGS_ADDR FTRACE_ADDR +#endif +#endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER extern void ftrace_graph_caller(void); extern int ftrace_enable_ftrace_graph_caller(void); @@ -380,6 +480,39 @@ extern int ftrace_make_nop(struct module *mod, */ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); +#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS +/** + * ftrace_modify_call - convert from one addr to another (no nop) + * @rec: the mcount call site record + * @old_addr: the address expected to be currently called to + * @addr: the address to change to + * + * This is a very sensitive operation and great care needs + * to be taken by the arch. The operation should carefully + * read the location, check to see if what is read is indeed + * what we expect it to be, and then on success of the compare, + * it should write to the location. + * + * The code segment at @rec->ip should be a caller to @old_addr + * + * Return must be: + * 0 on success + * -EFAULT on error reading the location + * -EINVAL on a failed compare of the contents + * -EPERM on error writing to the location + * Any other value will be considered a failure. 
+ */ +extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr); +#else +/* Should never be called */ +static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, + unsigned long addr) +{ + return -EINVAL; +} +#endif + /* May be defined in arch */ extern int ftrace_arch_read_dyn_info(char *buf, int size); @@ -387,7 +520,7 @@ extern int skip_trace(unsigned long ip); extern void ftrace_disable_daemon(void); extern void ftrace_enable_daemon(void); -#else +#else /* CONFIG_DYNAMIC_FTRACE */ static inline int skip_trace(unsigned long ip) { return 0; } static inline int ftrace_force_update(void) { return 0; } static inline void ftrace_disable_daemon(void) { } @@ -405,6 +538,10 @@ static inline int ftrace_text_reserved(void *start, void *end) { return 0; } +static inline unsigned long ftrace_location(unsigned long ip) +{ + return 0; +} /* * Again users of functions that have ftrace_ops may not @@ -413,6 +550,7 @@ static inline int ftrace_text_reserved(void *start, void *end) */ #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) #define ftrace_set_early_filter(ops, buf, enable) do { } while (0) +#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; }) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_free_filter(ops) do { } while (0) diff --git a/trunk/include/linux/ftrace_event.h b/trunk/include/linux/ftrace_event.h index af961d6f7ab1..642928cf57b4 100644 --- a/trunk/include/linux/ftrace_event.h +++ b/trunk/include/linux/ftrace_event.h @@ -306,9 +306,10 @@ extern void *perf_trace_buf_prepare(int size, unsigned short type, static inline void perf_trace_buf_submit(void *raw_data, int size, int rctx, u64 addr, - u64 count, struct pt_regs *regs, void *head) + u64 count, struct pt_regs *regs, void *head, + struct task_struct *task) { - perf_tp_event(addr, count, raw_data, size, regs, head, rctx); + perf_tp_event(addr, count, raw_data, size, regs, head, rctx, task); } #endif diff --git a/trunk/include/linux/fuse.h b/trunk/include/linux/fuse.h index 9303348965fb..d8c713e148e3 100644 --- a/trunk/include/linux/fuse.h +++ b/trunk/include/linux/fuse.h @@ -57,6 +57,9 @@ * * 7.19 * - add FUSE_FALLOCATE + * + * 7.20 + * - add FUSE_AUTO_INVAL_DATA */ #ifndef _LINUX_FUSE_H @@ -88,7 +91,7 @@ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 19 +#define FUSE_KERNEL_MINOR_VERSION 20 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 @@ -163,10 +166,19 @@ struct fuse_file_lock { /** * INIT request/reply flags * + * FUSE_ASYNC_READ: asynchronous read requests * FUSE_POSIX_LOCKS: remote locking for POSIX file locks + * FUSE_FILE_OPS: kernel sends file handle for fstat, etc... (not yet supported) + * FUSE_ATOMIC_O_TRUNC: handles the O_TRUNC open flag in the filesystem * FUSE_EXPORT_SUPPORT: filesystem handles lookups of "." and ".." 
+ * FUSE_BIG_WRITES: filesystem can handle write size larger than 4kB * FUSE_DONT_MASK: don't apply umask to file mode on create operations + * FUSE_SPLICE_WRITE: kernel supports splice write on the device + * FUSE_SPLICE_MOVE: kernel supports splice move on the device + * FUSE_SPLICE_READ: kernel supports splice read on the device * FUSE_FLOCK_LOCKS: remote locking for BSD style file locks + * FUSE_HAS_IOCTL_DIR: kernel supports ioctl on directories + * FUSE_AUTO_INVAL_DATA: automatically invalidate cached pages */ #define FUSE_ASYNC_READ (1 << 0) #define FUSE_POSIX_LOCKS (1 << 1) @@ -175,7 +187,12 @@ struct fuse_file_lock { #define FUSE_EXPORT_SUPPORT (1 << 4) #define FUSE_BIG_WRITES (1 << 5) #define FUSE_DONT_MASK (1 << 6) +#define FUSE_SPLICE_WRITE (1 << 7) +#define FUSE_SPLICE_MOVE (1 << 8) +#define FUSE_SPLICE_READ (1 << 9) #define FUSE_FLOCK_LOCKS (1 << 10) +#define FUSE_HAS_IOCTL_DIR (1 << 11) +#define FUSE_AUTO_INVAL_DATA (1 << 12) /** * CUSE INIT request/reply flags diff --git a/trunk/include/linux/hardirq.h b/trunk/include/linux/hardirq.h index bb7f30971858..cab3da3d0949 100644 --- a/trunk/include/linux/hardirq.h +++ b/trunk/include/linux/hardirq.h @@ -22,7 +22,7 @@ * * - bits 16-25 are the hardirq count (max # of nested hardirqs: 1024) * - bit 26 is the NMI_MASK - * - bit 28 is the PREEMPT_ACTIVE flag + * - bit 27 is the PREEMPT_ACTIVE flag * * PREEMPT_MASK: 0x000000ff * SOFTIRQ_MASK: 0x0000ff00 @@ -132,11 +132,11 @@ extern void synchronize_irq(unsigned int irq); struct task_struct; #if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) -static inline void account_system_vtime(struct task_struct *tsk) +static inline void vtime_account(struct task_struct *tsk) { } #else -extern void account_system_vtime(struct task_struct *tsk); +extern void vtime_account(struct task_struct *tsk); #endif #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) @@ -162,7 +162,7 @@ extern void rcu_nmi_exit(void); */ #define __irq_enter() \ do { \ - account_system_vtime(current); \ + vtime_account(current); \ add_preempt_count(HARDIRQ_OFFSET); \ trace_hardirq_enter(); \ } while (0) @@ -178,7 +178,7 @@ extern void irq_enter(void); #define __irq_exit() \ do { \ trace_hardirq_exit(); \ - account_system_vtime(current); \ + vtime_account(current); \ sub_preempt_count(HARDIRQ_OFFSET); \ } while (0) diff --git a/trunk/include/linux/hid.h b/trunk/include/linux/hid.h index 42970de1b40c..7e1f37db7582 100644 --- a/trunk/include/linux/hid.h +++ b/trunk/include/linux/hid.h @@ -414,7 +414,7 @@ struct hid_field { __u16 dpad; /* dpad input code */ }; -#define HID_MAX_FIELDS 128 +#define HID_MAX_FIELDS 256 struct hid_report { struct list_head list; @@ -626,6 +626,7 @@ struct hid_usage_id { * @report_fixup: called before report descriptor parsing (NULL means nop) * @input_mapping: invoked on input registering before mapping an usage * @input_mapped: invoked on input registering after mapping an usage + * @input_configured: invoked just before the device is registered * @feature_mapping: invoked on feature registering * @suspend: invoked on suspend (NULL means nop) * @resume: invoked on resume if device was not reset (NULL means nop) @@ -670,6 +671,8 @@ struct hid_driver { int (*input_mapped)(struct hid_device *hdev, struct hid_input *hidinput, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max); + void (*input_configured)(struct hid_device *hdev, + struct hid_input *hidinput); void (*feature_mapping)(struct hid_device *hdev, struct hid_field *field, 
struct hid_usage *usage); diff --git a/trunk/include/linux/i2c-pnx.h b/trunk/include/linux/i2c-pnx.h index 1bc74afe7a35..49ed17fdf055 100644 --- a/trunk/include/linux/i2c-pnx.h +++ b/trunk/include/linux/i2c-pnx.h @@ -22,6 +22,7 @@ struct i2c_pnx_mif { struct timer_list timer; /* Timeout */ u8 * buf; /* Data buffer */ int len; /* Length of data buffer */ + int order; /* RX Bytes to order via TX */ }; struct i2c_pnx_algo_data { diff --git a/trunk/include/linux/if_team.h b/trunk/include/linux/if_team.h index 6960fc1841a7..aa2e167e1ef4 100644 --- a/trunk/include/linux/if_team.h +++ b/trunk/include/linux/if_team.h @@ -96,21 +96,6 @@ static inline void team_netpoll_send_skb(struct team_port *port, } #endif -static inline int team_dev_queue_xmit(struct team *team, struct team_port *port, - struct sk_buff *skb) -{ - BUILD_BUG_ON(sizeof(skb->queue_mapping) != - sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); - skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); - - skb->dev = port->dev; - if (unlikely(netpoll_tx_running(port->dev))) { - team_netpoll_send_skb(port, skb); - return 0; - } - return dev_queue_xmit(skb); -} - struct team_mode_ops { int (*init)(struct team *team); void (*exit)(struct team *team); @@ -200,6 +185,21 @@ struct team { long mode_priv[TEAM_MODE_PRIV_LONGS]; }; +static inline int team_dev_queue_xmit(struct team *team, struct team_port *port, + struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(skb->queue_mapping) != + sizeof(qdisc_skb_cb(skb)->slave_dev_queue_mapping)); + skb_set_queue_mapping(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping); + + skb->dev = port->dev; + if (unlikely(netpoll_tx_running(team->dev))) { + team_netpoll_send_skb(port, skb); + return 0; + } + return dev_queue_xmit(skb); +} + static inline struct hlist_head *team_port_index_hash(struct team *team, int port_index) { diff --git a/trunk/include/linux/iio/frequency/adf4350.h b/trunk/include/linux/iio/frequency/adf4350.h index b76b4a87065e..be91f344d5fc 100644 --- a/trunk/include/linux/iio/frequency/adf4350.h +++ b/trunk/include/linux/iio/frequency/adf4350.h @@ -87,6 +87,8 @@ #define ADF4350_MAX_BANDSEL_CLK 125000 /* Hz */ #define ADF4350_MAX_FREQ_REFIN 250000000 /* Hz */ #define ADF4350_MAX_MODULUS 4095 +#define ADF4350_MAX_R_CNT 1023 + /** * struct adf4350_platform_data - platform specific information diff --git a/trunk/include/linux/input.h b/trunk/include/linux/input.h index 725dcd0f63a4..ba4874302939 100644 --- a/trunk/include/linux/input.h +++ b/trunk/include/linux/input.h @@ -1168,6 +1168,18 @@ struct ff_effect { #include #include +/** + * struct input_value - input value representation + * @type: type of value (EV_KEY, EV_ABS, etc) + * @code: the value code + * @value: the value + */ +struct input_value { + __u16 type; + __u16 code; + __s32 value; +}; + /** * struct input_dev - represents an input device * @name: name of the device @@ -1203,11 +1215,7 @@ struct ff_effect { * software autorepeat * @timer: timer for software autorepeat * @rep: current values for autorepeat parameters (delay, rate) - * @mt: pointer to array of struct input_mt_slot holding current values - * of tracked contacts - * @mtsize: number of MT slots the device uses - * @slot: MT slot currently being transmitted - * @trkid: stores MT tracking ID for the current contact + * @mt: pointer to multitouch state * @absinfo: array of &struct input_absinfo elements holding information * about absolute axes (current value, min, max, flat, fuzz, * resolution) @@ -1244,7 +1252,6 @@ struct ff_effect { * last user closes 
the device * @going_away: marks devices that are in a middle of unregistering and * causes input_open_device*() fail with -ENODEV. - * @sync: set to %true when there were no new events since last EV_SYN * @dev: driver model's view of this device * @h_list: list of input handles associated with the device. When * accessing the list dev->mutex must be held @@ -1287,10 +1294,7 @@ struct input_dev { int rep[REP_CNT]; - struct input_mt_slot *mt; - int mtsize; - int slot; - int trkid; + struct input_mt *mt; struct input_absinfo *absinfo; @@ -1312,12 +1316,14 @@ struct input_dev { unsigned int users; bool going_away; - bool sync; - struct device dev; struct list_head h_list; struct list_head node; + + unsigned int num_vals; + unsigned int max_vals; + struct input_value *vals; }; #define to_input_dev(d) container_of(d, struct input_dev, dev) @@ -1378,6 +1384,9 @@ struct input_handle; * @event: event handler. This method is being called by input core with * interrupts disabled and dev->event_lock spinlock held and so * it may not sleep + * @events: event sequence handler. This method is being called by + * input core with interrupts disabled and dev->event_lock + * spinlock held and so it may not sleep * @filter: similar to @event; separates normal event handlers from * "filters". * @match: called after comparing device's id with handler's id_table @@ -1414,6 +1423,8 @@ struct input_handler { void *private; void (*event)(struct input_handle *handle, unsigned int type, unsigned int code, int value); + void (*events)(struct input_handle *handle, + const struct input_value *vals, unsigned int count); bool (*filter)(struct input_handle *handle, unsigned int type, unsigned int code, int value); bool (*match)(struct input_handler *handler, struct input_dev *dev); int (*connect)(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); diff --git a/trunk/include/linux/input/eeti_ts.h b/trunk/include/linux/input/eeti_ts.h index f875b316249d..16625d799b6f 100644 --- a/trunk/include/linux/input/eeti_ts.h +++ b/trunk/include/linux/input/eeti_ts.h @@ -2,6 +2,7 @@ #define LINUX_INPUT_EETI_TS_H struct eeti_ts_platform_data { + int irq_gpio; unsigned int irq_active_high; }; diff --git a/trunk/include/linux/input/mt.h b/trunk/include/linux/input/mt.h index f86737586e19..cc5cca774bab 100644 --- a/trunk/include/linux/input/mt.h +++ b/trunk/include/linux/input/mt.h @@ -15,12 +15,41 @@ #define TRKID_MAX 0xffff +#define INPUT_MT_POINTER 0x0001 /* pointer device, e.g. trackpad */ +#define INPUT_MT_DIRECT 0x0002 /* direct device, e.g. 
touchscreen */ +#define INPUT_MT_DROP_UNUSED 0x0004 /* drop contacts not seen in frame */ +#define INPUT_MT_TRACK 0x0008 /* use in-kernel tracking */ + /** * struct input_mt_slot - represents the state of an input MT slot * @abs: holds current values of ABS_MT axes for this slot + * @frame: last frame at which input_mt_report_slot_state() was called + * @key: optional driver designation of this slot */ struct input_mt_slot { int abs[ABS_MT_LAST - ABS_MT_FIRST + 1]; + unsigned int frame; + unsigned int key; +}; + +/** + * struct input_mt - state of tracked contacts + * @trkid: stores MT tracking ID for the next contact + * @num_slots: number of MT slots the device uses + * @slot: MT slot currently being transmitted + * @flags: input_mt operation flags + * @frame: increases every time input_mt_sync_frame() is called + * @red: reduced cost matrix for in-kernel tracking + * @slots: array of slots holding current values of tracked contacts + */ +struct input_mt { + int trkid; + int num_slots; + int slot; + unsigned int flags; + unsigned int frame; + int *red; + struct input_mt_slot slots[]; }; static inline void input_mt_set_value(struct input_mt_slot *slot, @@ -35,12 +64,18 @@ static inline int input_mt_get_value(const struct input_mt_slot *slot, return slot->abs[code - ABS_MT_FIRST]; } -int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots); +static inline bool input_mt_is_active(const struct input_mt_slot *slot) +{ + return input_mt_get_value(slot, ABS_MT_TRACKING_ID) >= 0; +} + +int input_mt_init_slots(struct input_dev *dev, unsigned int num_slots, + unsigned int flags); void input_mt_destroy_slots(struct input_dev *dev); -static inline int input_mt_new_trkid(struct input_dev *dev) +static inline int input_mt_new_trkid(struct input_mt *mt) { - return dev->trkid++ & TRKID_MAX; + return mt->trkid++ & TRKID_MAX; } static inline void input_mt_slot(struct input_dev *dev, int slot) @@ -64,4 +99,20 @@ void input_mt_report_slot_state(struct input_dev *dev, void input_mt_report_finger_count(struct input_dev *dev, int count); void input_mt_report_pointer_emulation(struct input_dev *dev, bool use_count); +void input_mt_sync_frame(struct input_dev *dev); + +/** + * struct input_mt_pos - contact position + * @x: horizontal coordinate + * @y: vertical coordinate + */ +struct input_mt_pos { + s16 x, y; +}; + +int input_mt_assign_slots(struct input_dev *dev, int *slots, + const struct input_mt_pos *pos, int num_pos); + +int input_mt_get_slot_by_key(struct input_dev *dev, int key); + #endif diff --git a/trunk/include/linux/interrupt.h b/trunk/include/linux/interrupt.h index c5f856a040b9..5e4e6170f43a 100644 --- a/trunk/include/linux/interrupt.h +++ b/trunk/include/linux/interrupt.h @@ -430,6 +430,8 @@ enum NR_SOFTIRQS }; +#define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ)) + /* map softirq index to softirq name. update 'softirq_to_name' in * kernel/softirq.c when adding a new softirq. 
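The input/mt.h changes above gather the slot state into a new struct input_mt, give input_mt_init_slots() a flags argument (INPUT_MT_POINTER, INPUT_MT_DIRECT, INPUT_MT_DROP_UNUSED, INPUT_MT_TRACK) and add input_mt_sync_frame() for drivers that report whole frames. A condensed sketch of how a touchscreen driver would sit on the new API; the slot count, axis ranges and the way contacts are decoded are placeholders:

#include <linux/input.h>
#include <linux/input/mt.h>

#define EXAMPLE_MAX_CONTACTS	10

static int example_setup(struct input_dev *dev)
{
	input_set_abs_params(dev, ABS_MT_POSITION_X, 0, 4095, 0, 0);
	input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, 4095, 0, 0);

	/* note the new flags argument */
	return input_mt_init_slots(dev, EXAMPLE_MAX_CONTACTS,
				   INPUT_MT_DIRECT | INPUT_MT_DROP_UNUSED);
}

static void example_report_contact(struct input_dev *dev,
				   int slot, bool active, int x, int y)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_FINGER, active);
	if (active) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
}

static void example_end_of_frame(struct input_dev *dev)
{
	/* with INPUT_MT_DROP_UNUSED, releases contacts not seen this frame */
	input_mt_sync_frame(dev);
	input_sync(dev);
}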
*/ diff --git a/trunk/include/linux/iommu.h b/trunk/include/linux/iommu.h index 54d6d690073c..f3b99e1c1042 100644 --- a/trunk/include/linux/iommu.h +++ b/trunk/include/linux/iommu.h @@ -20,6 +20,7 @@ #define __LINUX_IOMMU_H #include +#include #define IOMMU_READ (1) #define IOMMU_WRITE (2) @@ -30,6 +31,7 @@ struct iommu_group; struct bus_type; struct device; struct iommu_domain; +struct notifier_block; /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 @@ -254,72 +256,78 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain, { } -int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group) +static inline int iommu_attach_group(struct iommu_domain *domain, + struct iommu_group *group) { return -ENODEV; } -void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group) +static inline void iommu_detach_group(struct iommu_domain *domain, + struct iommu_group *group) { } -struct iommu_group *iommu_group_alloc(void) +static inline struct iommu_group *iommu_group_alloc(void) { return ERR_PTR(-ENODEV); } -void *iommu_group_get_iommudata(struct iommu_group *group) +static inline void *iommu_group_get_iommudata(struct iommu_group *group) { return NULL; } -void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data, - void (*release)(void *iommu_data)) +static inline void iommu_group_set_iommudata(struct iommu_group *group, + void *iommu_data, + void (*release)(void *iommu_data)) { } -int iommu_group_set_name(struct iommu_group *group, const char *name) +static inline int iommu_group_set_name(struct iommu_group *group, + const char *name) { return -ENODEV; } -int iommu_group_add_device(struct iommu_group *group, struct device *dev) +static inline int iommu_group_add_device(struct iommu_group *group, + struct device *dev) { return -ENODEV; } -void iommu_group_remove_device(struct device *dev) +static inline void iommu_group_remove_device(struct device *dev) { } -int iommu_group_for_each_dev(struct iommu_group *group, void *data, - int (*fn)(struct device *, void *)) +static inline int iommu_group_for_each_dev(struct iommu_group *group, + void *data, + int (*fn)(struct device *, void *)) { return -ENODEV; } -struct iommu_group *iommu_group_get(struct device *dev) +static inline struct iommu_group *iommu_group_get(struct device *dev) { return NULL; } -void iommu_group_put(struct iommu_group *group) +static inline void iommu_group_put(struct iommu_group *group) { } -int iommu_group_register_notifier(struct iommu_group *group, - struct notifier_block *nb) +static inline int iommu_group_register_notifier(struct iommu_group *group, + struct notifier_block *nb) { return -ENODEV; } -int iommu_group_unregister_notifier(struct iommu_group *group, - struct notifier_block *nb) +static inline int iommu_group_unregister_notifier(struct iommu_group *group, + struct notifier_block *nb) { return 0; } -int iommu_group_id(struct iommu_group *group) +static inline int iommu_group_id(struct iommu_group *group) { return -ENODEV; } diff --git a/trunk/include/linux/ipv6.h b/trunk/include/linux/ipv6.h index 379e433e15e0..879db26ec401 100644 --- a/trunk/include/linux/ipv6.h +++ b/trunk/include/linux/ipv6.h @@ -369,6 +369,7 @@ struct ipv6_pinfo { __u8 rcv_tclass; __u32 dst_cookie; + __u32 rx_dst_cookie; struct ipv6_mc_socklist __rcu *ipv6_mc_list; struct ipv6_ac_socklist *ipv6_ac_list; diff --git a/trunk/include/linux/irq.h b/trunk/include/linux/irq.h index 553fb66da130..216b0ba109d7 100644 --- a/trunk/include/linux/irq.h +++ b/trunk/include/linux/irq.h @@ 
-349,6 +349,7 @@ enum { IRQCHIP_MASK_ON_SUSPEND = (1 << 2), IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), IRQCHIP_SKIP_SET_WAKE = (1 << 4), + IRQCHIP_ONESHOT_SAFE = (1 << 5), }; /* This include will go away once we isolated irq_desc usage to core code */ diff --git a/trunk/include/linux/irqdesc.h b/trunk/include/linux/irqdesc.h index 9a323d12de1c..0ba014c55056 100644 --- a/trunk/include/linux/irqdesc.h +++ b/trunk/include/linux/irqdesc.h @@ -10,12 +10,10 @@ struct irq_affinity_notify; struct proc_dir_entry; -struct timer_rand_state; struct module; /** * struct irq_desc - interrupt descriptor * @irq_data: per irq and chip data passed down to chip functions - * @timer_rand_state: pointer to timer rand state struct * @kstat_irqs: irq stats per cpu * @handle_irq: highlevel irq-events handler * @preflow_handler: handler called before the flow handler (currently used by sparc) diff --git a/trunk/include/linux/jbd2.h b/trunk/include/linux/jbd2.h index f334c7fab967..3efc43f3f162 100644 --- a/trunk/include/linux/jbd2.h +++ b/trunk/include/linux/jbd2.h @@ -1125,6 +1125,7 @@ extern int jbd2_journal_destroy (journal_t *); extern int jbd2_journal_recover (journal_t *journal); extern int jbd2_journal_wipe (journal_t *, int); extern int jbd2_journal_skip_recovery (journal_t *); +extern void jbd2_journal_update_sb_errno(journal_t *); extern void jbd2_journal_update_sb_log_tail (journal_t *, tid_t, unsigned long, int); extern void __jbd2_journal_abort_hard (journal_t *); diff --git a/trunk/include/linux/jiffies.h b/trunk/include/linux/jiffies.h index 265e2c3cbd1c..82680541576d 100644 --- a/trunk/include/linux/jiffies.h +++ b/trunk/include/linux/jiffies.h @@ -39,9 +39,6 @@ # error Invalid value of HZ. #endif -/* LATCH is used in the interval timer and ftape setup. */ -#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ - /* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can * improve accuracy by shifting LSH bits, hence calculating: * (NOM << LSH) / DEN @@ -54,18 +51,30 @@ #define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \ + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN)) -/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */ -#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8)) +#ifdef CLOCK_TICK_RATE +/* LATCH is used in the interval timer and ftape setup. */ +# define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */ + +/* + * HZ is the requested value. However the CLOCK_TICK_RATE may not allow + * for exactly HZ. 
So SHIFTED_HZ is high res HZ ("<< 8" is for accuracy) + */ +# define SHIFTED_HZ (SH_DIV(CLOCK_TICK_RATE, LATCH, 8)) +#else +# define SHIFTED_HZ (HZ << 8) +#endif -/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */ -#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8)) +/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */ +#define TICK_NSEC (SH_DIV(1000000UL * 1000, SHIFTED_HZ, 8)) /* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */ #define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ) -/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */ -/* a value TUSEC for TICK_USEC (can be set bij adjtimex) */ -#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8)) +/* + * TICK_USEC_TO_NSEC is the time between ticks in nsec assuming SHIFTED_HZ and + * a value TUSEC for TICK_USEC (can be set bij adjtimex) + */ +#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV(TUSEC * USER_HZ * 1000, SHIFTED_HZ, 8)) /* some arch's have a small-data section that can be accessed register-relative * but that can only take up to, say, 4-byte variables. jiffies being part of diff --git a/trunk/include/linux/kdb.h b/trunk/include/linux/kdb.h index 064725854db8..42d9e863a313 100644 --- a/trunk/include/linux/kdb.h +++ b/trunk/include/linux/kdb.h @@ -75,8 +75,6 @@ extern const char *kdb_diemsg; #define KDB_FLAG_CATASTROPHIC (1 << 1) /* A catastrophic event has occurred */ #define KDB_FLAG_CMD_INTERRUPT (1 << 2) /* Previous command was interrupted */ #define KDB_FLAG_NOIPI (1 << 3) /* Do not send IPIs */ -#define KDB_FLAG_ONLY_DO_DUMP (1 << 4) /* Only do a dump, used when - * kdb is off */ #define KDB_FLAG_NO_CONSOLE (1 << 5) /* No console is available, * kdb is disabled */ #define KDB_FLAG_NO_VT_CONSOLE (1 << 6) /* No VT console is available, do diff --git a/trunk/include/linux/kernel.h b/trunk/include/linux/kernel.h index 604382143bcf..2451f1f7a1d9 100644 --- a/trunk/include/linux/kernel.h +++ b/trunk/include/linux/kernel.h @@ -82,10 +82,18 @@ __x - (__x % (y)); \ } \ ) + +/* + * Divide positive or negative dividend by positive divisor and round + * to closest integer. Result is undefined for negative divisors. + */ #define DIV_ROUND_CLOSEST(x, divisor)( \ { \ - typeof(divisor) __divisor = divisor; \ - (((x) + ((__divisor) / 2)) / (__divisor)); \ + typeof(x) __x = x; \ + typeof(divisor) __d = divisor; \ + (((typeof(x))-1) > 0 || (__x) > 0) ? 
\ + (((__x) + ((__d) / 2)) / (__d)) : \ + (((__x) - ((__d) / 2)) / (__d)); \ } \ ) diff --git a/trunk/include/linux/kernel_stat.h b/trunk/include/linux/kernel_stat.h index 2fbd9053c2df..36d12f0884c3 100644 --- a/trunk/include/linux/kernel_stat.h +++ b/trunk/include/linux/kernel_stat.h @@ -130,4 +130,12 @@ extern void account_process_tick(struct task_struct *, int user); extern void account_steal_ticks(unsigned long ticks); extern void account_idle_ticks(unsigned long ticks); +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +extern void vtime_task_switch(struct task_struct *prev); +extern void vtime_account_system(struct task_struct *tsk); +extern void vtime_account_idle(struct task_struct *tsk); +#else +static inline void vtime_task_switch(struct task_struct *prev) { } +#endif + #endif /* _LINUX_KERNEL_STAT_H */ diff --git a/trunk/include/linux/kobject.h b/trunk/include/linux/kobject.h index fc615a97e2d3..1e57449395b1 100644 --- a/trunk/include/linux/kobject.h +++ b/trunk/include/linux/kobject.h @@ -224,7 +224,7 @@ static inline int kobject_uevent_env(struct kobject *kobj, static inline __printf(2, 3) int add_uevent_var(struct kobj_uevent_env *env, const char *format, ...) -{ return 0; } +{ return -ENOMEM; } static inline int kobject_action_type(const char *buf, size_t count, enum kobject_action *type) diff --git a/trunk/include/linux/kprobes.h b/trunk/include/linux/kprobes.h index b6e1f8c00577..23755ba42abc 100644 --- a/trunk/include/linux/kprobes.h +++ b/trunk/include/linux/kprobes.h @@ -38,6 +38,7 @@ #include #include #include +#include #ifdef CONFIG_KPROBES #include @@ -48,14 +49,26 @@ #define KPROBE_REENTER 0x00000004 #define KPROBE_HIT_SSDONE 0x00000008 +/* + * If function tracer is enabled and the arch supports full + * passing of pt_regs to function tracing, then kprobes can + * optimize on top of function tracing. + */ +#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \ + && defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE) +# define KPROBES_CAN_USE_FTRACE +#endif + /* Attach to insert probes on any functions which should be ignored*/ #define __kprobes __attribute__((__section__(".kprobes.text"))) + #else /* CONFIG_KPROBES */ typedef int kprobe_opcode_t; struct arch_specific_insn { int dummy; }; #define __kprobes + #endif /* CONFIG_KPROBES */ struct kprobe; @@ -128,6 +141,7 @@ struct kprobe { * NOTE: * this flag is only for optimized_kprobe. */ +#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */ /* Has this kprobe gone ? */ static inline int kprobe_gone(struct kprobe *p) @@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p) { return p->flags & KPROBE_FLAG_OPTIMIZED; } + +/* Is this kprobe uses ftrace ? 
*/ +static inline int kprobe_ftrace(struct kprobe *p) +{ + return p->flags & KPROBE_FLAG_FTRACE; +} + /* * Special probe type that uses setjmp-longjmp type tricks to resume * execution at a specified entry with a matching prototype corresponding @@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, #endif #endif /* CONFIG_OPTPROBES */ +#ifdef KPROBES_CAN_USE_FTRACE +extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs); +extern int arch_prepare_kprobe_ftrace(struct kprobe *p); +#endif + /* Get the kprobe at this addr (if any) - called with preemption disabled */ struct kprobe *get_kprobe(void *addr); diff --git a/trunk/include/linux/kref.h b/trunk/include/linux/kref.h index 9c07dcebded7..65af6887872f 100644 --- a/trunk/include/linux/kref.h +++ b/trunk/include/linux/kref.h @@ -18,6 +18,7 @@ #include #include #include +#include struct kref { atomic_t refcount; @@ -93,4 +94,21 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref) { return kref_sub(kref, 1, release); } + +static inline int kref_put_mutex(struct kref *kref, + void (*release)(struct kref *kref), + struct mutex *lock) +{ + WARN_ON(release == NULL); + if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) { + mutex_lock(lock); + if (unlikely(!atomic_dec_and_test(&kref->refcount))) { + mutex_unlock(lock); + return 0; + } + release(kref); + return 1; + } + return 0; +} #endif /* _KREF_H_ */ diff --git a/trunk/include/linux/kthread.h b/trunk/include/linux/kthread.h index 22ccf9dee177..8d816646f766 100644 --- a/trunk/include/linux/kthread.h +++ b/trunk/include/linux/kthread.h @@ -14,6 +14,11 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), kthread_create_on_node(threadfn, data, -1, namefmt, ##arg) +struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), + void *data, + unsigned int cpu, + const char *namefmt); + /** * kthread_run - create and wake a thread. * @threadfn: the function to run until signal_pending(current). 
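A minimal sketch of how a caller might use the kref_put_mutex() helper added above; the foo_obj structure, the list and the lock are hypothetical. The one non-obvious point follows directly from the code shown: the mutex is taken only when the count is about to reach zero and is still held when the release callback runs, so the callback is expected to drop it.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_obj {
	struct kref kref;
	struct list_head node;
};

static DEFINE_MUTEX(foo_lock);		/* protects foo_list */
static LIST_HEAD(foo_list);

static void foo_release(struct kref *kref)
{
	struct foo_obj *obj = container_of(kref, struct foo_obj, kref);

	/* kref_put_mutex() calls this with foo_lock held; drop it here */
	list_del(&obj->node);
	mutex_unlock(&foo_lock);
	kfree(obj);
}

static void foo_put(struct foo_obj *obj)
{
	/* fast path never touches the mutex unless the count hits zero */
	kref_put_mutex(&obj->kref, foo_release, &foo_lock);
}

This avoids taking the lock on every put while still serializing the final teardown against lookups that walk foo_list under foo_lock.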
@@ -34,9 +39,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), void kthread_bind(struct task_struct *k, unsigned int cpu); int kthread_stop(struct task_struct *k); -int kthread_should_stop(void); +bool kthread_should_stop(void); +bool kthread_should_park(void); bool kthread_freezable_should_stop(bool *was_frozen); void *kthread_data(struct task_struct *k); +int kthread_park(struct task_struct *k); +void kthread_unpark(struct task_struct *k); +void kthread_parkme(void); int kthreadd(void *unused); extern struct task_struct *kthreadd_task; diff --git a/trunk/include/linux/ktime.h b/trunk/include/linux/ktime.h index 603bec2913b0..06177ba10a16 100644 --- a/trunk/include/linux/ktime.h +++ b/trunk/include/linux/ktime.h @@ -58,13 +58,6 @@ union ktime { typedef union ktime ktime_t; /* Kill this */ -#define KTIME_MAX ((s64)~((u64)1 << 63)) -#if (BITS_PER_LONG == 64) -# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) -#else -# define KTIME_SEC_MAX LONG_MAX -#endif - /* * ktime_t definitions when using the 64-bit scalar representation: */ diff --git a/trunk/include/linux/kvm_host.h b/trunk/include/linux/kvm_host.h index b70b48b01098..8a59e0abe5fa 100644 --- a/trunk/include/linux/kvm_host.h +++ b/trunk/include/linux/kvm_host.h @@ -685,7 +685,7 @@ static inline int kvm_deassign_device(struct kvm *kvm, static inline void kvm_guest_enter(void) { BUG_ON(preemptible()); - account_system_vtime(current); + vtime_account(current); current->flags |= PF_VCPU; /* KVM does not hold any references to rcu protected data when it * switches CPU into a guest mode. In fact switching to a guest mode @@ -699,7 +699,7 @@ static inline void kvm_guest_enter(void) static inline void kvm_guest_exit(void) { - account_system_vtime(current); + vtime_account(current); current->flags &= ~PF_VCPU; } diff --git a/trunk/include/linux/mISDNhw.h b/trunk/include/linux/mISDNhw.h index d0752eca9b44..9d96d5d4dfed 100644 --- a/trunk/include/linux/mISDNhw.h +++ b/trunk/include/linux/mISDNhw.h @@ -183,7 +183,7 @@ extern int mISDN_initbchannel(struct bchannel *, unsigned short, unsigned short); extern int mISDN_freedchannel(struct dchannel *); extern void mISDN_clear_bchannel(struct bchannel *); -extern int mISDN_freebchannel(struct bchannel *); +extern void mISDN_freebchannel(struct bchannel *); extern int mISDN_ctrl_bchannel(struct bchannel *, struct mISDN_ctrl_req *); extern void queue_ch_frame(struct mISDNchannel *, u_int, int, struct sk_buff *); diff --git a/trunk/include/linux/memory.h b/trunk/include/linux/memory.h index 1ac7f6e405f9..ff9a9f8e0ed9 100644 --- a/trunk/include/linux/memory.h +++ b/trunk/include/linux/memory.h @@ -19,7 +19,7 @@ #include #include -#define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) +#define MIN_MEMORY_BLOCK_SIZE (1UL << SECTION_SIZE_BITS) struct memory_block { unsigned long start_section_nr; diff --git a/trunk/include/linux/mfd/core.h b/trunk/include/linux/mfd/core.h index 3a8435a8058f..cebe97ee98b8 100644 --- a/trunk/include/linux/mfd/core.h +++ b/trunk/include/linux/mfd/core.h @@ -16,6 +16,8 @@ #include +struct irq_domain; + /* * This struct describes the MFD part ("cell"). 
* After registration the copy of this structure will become the platform data @@ -98,7 +100,7 @@ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) extern int mfd_add_devices(struct device *parent, int id, struct mfd_cell *cells, int n_devs, struct resource *mem_base, - int irq_base); + int irq_base, struct irq_domain *irq_domain); extern void mfd_remove_devices(struct device *parent); diff --git a/trunk/include/linux/mfd/ezx-pcap.h b/trunk/include/linux/mfd/ezx-pcap.h index 40c372165f3e..32a1b5cfeba1 100644 --- a/trunk/include/linux/mfd/ezx-pcap.h +++ b/trunk/include/linux/mfd/ezx-pcap.h @@ -16,6 +16,7 @@ struct pcap_subdev { struct pcap_platform_data { unsigned int irq_base; unsigned int config; + int gpio; void (*init) (void *); /* board specific init */ int num_subdevs; struct pcap_subdev *subdevs; diff --git a/trunk/include/linux/mfd/max77686.h b/trunk/include/linux/mfd/max77686.h index 3d7ae4d7fd36..46c0f320ed76 100644 --- a/trunk/include/linux/mfd/max77686.h +++ b/trunk/include/linux/mfd/max77686.h @@ -74,6 +74,7 @@ enum max77686_regulators { struct max77686_regulator_data { int id; struct regulator_init_data *initdata; + struct device_node *of_node; }; enum max77686_opmode { diff --git a/trunk/include/linux/mfd/max8998.h b/trunk/include/linux/mfd/max8998.h index f4f0dfa4698a..6823548d0c0a 100644 --- a/trunk/include/linux/mfd/max8998.h +++ b/trunk/include/linux/mfd/max8998.h @@ -67,7 +67,7 @@ struct max8998_regulator_data { /** * struct max8998_board - packages regulator init data * @regulators: array of defined regulators - * @num_regulators: number of regultors used + * @num_regulators: number of regulators used * @irq_base: base IRQ number for max8998, required for IRQs * @ono: power onoff IRQ number for max8998 * @buck_voltage_lock: Do NOT change the values of the following six diff --git a/trunk/include/linux/mfd/tps65217.h b/trunk/include/linux/mfd/tps65217.h index 12c06870829a..7cd83d826ed8 100644 --- a/trunk/include/linux/mfd/tps65217.h +++ b/trunk/include/linux/mfd/tps65217.h @@ -22,6 +22,9 @@ #include #include +/* TPS chip id list */ +#define TPS65217 0xF0 + /* I2C ID for TPS65217 part */ #define TPS65217_I2C_ID 0x24 @@ -248,13 +251,11 @@ struct tps_info { struct tps65217 { struct device *dev; struct tps65217_board *pdata; + unsigned int id; struct regulator_desc desc[TPS65217_NUM_REGULATOR]; struct regulator_dev *rdev[TPS65217_NUM_REGULATOR]; struct tps_info *info[TPS65217_NUM_REGULATOR]; struct regmap *regmap; - - /* Client devices */ - struct platform_device *regulator_pdev[TPS65217_NUM_REGULATOR]; }; static inline struct tps65217 *dev_to_tps65217(struct device *dev) @@ -262,6 +263,11 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev) return dev_get_drvdata(dev); } +static inline int tps65217_chip_id(struct tps65217 *tps65217) +{ + return tps65217->id; +} + int tps65217_reg_read(struct tps65217 *tps, unsigned int reg, unsigned int *val); int tps65217_reg_write(struct tps65217 *tps, unsigned int reg, diff --git a/trunk/include/linux/mfd/tps6586x.h b/trunk/include/linux/mfd/tps6586x.h index f350fd0ba1df..94514710a03f 100644 --- a/trunk/include/linux/mfd/tps6586x.h +++ b/trunk/include/linux/mfd/tps6586x.h @@ -14,6 +14,7 @@ #define TPS6586X_SLEW_RATE_MASK 0x07 enum { + TPS6586X_ID_SYS, TPS6586X_ID_SM_0, TPS6586X_ID_SM_1, TPS6586X_ID_SM_2, diff --git a/trunk/include/linux/micrel_phy.h b/trunk/include/linux/micrel_phy.h index 61f0905bdc48..de201203bc7c 100644 --- a/trunk/include/linux/micrel_phy.h +++ 
b/trunk/include/linux/micrel_phy.h @@ -1,3 +1,15 @@ +/* + * include/linux/micrel_phy.h + * + * Micrel PHY IDs + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + */ + #ifndef _MICREL_PHY_H #define _MICREL_PHY_H @@ -5,10 +17,11 @@ #define PHY_ID_KSZ9021 0x00221610 #define PHY_ID_KS8737 0x00221720 -#define PHY_ID_KS8041 0x00221510 -#define PHY_ID_KS8051 0x00221550 +#define PHY_ID_KSZ8021 0x00221555 +#define PHY_ID_KSZ8041 0x00221510 +#define PHY_ID_KSZ8051 0x00221550 /* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */ -#define PHY_ID_KS8001 0x0022161A +#define PHY_ID_KSZ8001 0x0022161A /* struct phy_device dev_flags definitions */ #define MICREL_PHY_50MHZ_CLK 0x00000001 diff --git a/trunk/include/linux/mlx4/device.h b/trunk/include/linux/mlx4/device.h index bd6c9fcdf2dd..6e1b0f973a03 100644 --- a/trunk/include/linux/mlx4/device.h +++ b/trunk/include/linux/mlx4/device.h @@ -796,6 +796,19 @@ enum mlx4_net_trans_rule_id { MLX4_NET_TRANS_RULE_NUM, /* should be last */ }; +extern const u16 __sw_id_hw[]; + +static inline int map_hw_to_sw_id(u16 header_id) +{ + + int i; + for (i = 0; i < MLX4_NET_TRANS_RULE_NUM; i++) { + if (header_id == __sw_id_hw[i]) + return i; + } + return -EINVAL; +} + enum mlx4_net_trans_promisc_mode { MLX4_FS_PROMISC_NONE = 0, MLX4_FS_PROMISC_UPLINK, diff --git a/trunk/include/linux/mmc/card.h b/trunk/include/linux/mmc/card.h index 111aca5e97f3..4b27f9f503e4 100644 --- a/trunk/include/linux/mmc/card.h +++ b/trunk/include/linux/mmc/card.h @@ -239,6 +239,7 @@ struct mmc_card { #define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */ #define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */ #define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */ +#define MMC_QUIRK_SEC_ERASE_TRIM_BROKEN (1<<10) /* Skip secure for erase/trim */ /* byte mode */ unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */ #define MMC_NO_POWER_NOTIFICATION 0 diff --git a/trunk/include/linux/mv643xx_eth.h b/trunk/include/linux/mv643xx_eth.h index 51bf8ada6dc0..49258e0ed1c6 100644 --- a/trunk/include/linux/mv643xx_eth.h +++ b/trunk/include/linux/mv643xx_eth.h @@ -15,6 +15,8 @@ #define MV643XX_ETH_SIZE_REG_4 0x2224 #define MV643XX_ETH_BASE_ADDR_ENABLE_REG 0x2290 +#define MV643XX_TX_CSUM_DEFAULT_LIMIT 0 + struct mv643xx_eth_shared_platform_data { struct mbus_dram_target_info *dram; struct platform_device *shared_smi; diff --git a/trunk/include/linux/netdevice.h b/trunk/include/linux/netdevice.h index eb06e58bed0b..59dc05f38247 100644 --- a/trunk/include/linux/netdevice.h +++ b/trunk/include/linux/netdevice.h @@ -953,7 +953,8 @@ struct net_device_ops { #ifdef CONFIG_NET_POLL_CONTROLLER void (*ndo_poll_controller)(struct net_device *dev); int (*ndo_netpoll_setup)(struct net_device *dev, - struct netpoll_info *info); + struct netpoll_info *info, + gfp_t gfp); void (*ndo_netpoll_cleanup)(struct net_device *dev); #endif int (*ndo_set_vf_mac)(struct net_device *dev, @@ -1300,6 +1301,8 @@ struct net_device { /* for setting kernel sock attribute on TCP connection setup */ #define GSO_MAX_SIZE 65536 unsigned int gso_max_size; +#define GSO_MAX_SEGS 65535 + u16 gso_max_segs; #ifdef CONFIG_DCB /* Data Center Bridging netlink ops */ @@ -1519,6 +1522,8 @@ struct packet_type { struct sk_buff **(*gro_receive)(struct sk_buff **head, struct 
sk_buff *skb); int (*gro_complete)(struct sk_buff *skb); + bool (*id_match)(struct packet_type *ptype, + struct sock *sk); void *af_packet_priv; struct list_head list; }; diff --git a/trunk/include/linux/netfilter/nf_conntrack_sip.h b/trunk/include/linux/netfilter/nf_conntrack_sip.h index 0dfc8b7210a3..89f2a627f3f0 100644 --- a/trunk/include/linux/netfilter/nf_conntrack_sip.h +++ b/trunk/include/linux/netfilter/nf_conntrack_sip.h @@ -164,7 +164,7 @@ extern int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr unsigned int dataoff, unsigned int datalen, const char *name, unsigned int *matchoff, unsigned int *matchlen, - union nf_inet_addr *addr); + union nf_inet_addr *addr, bool delim); extern int ct_sip_parse_numerical_param(const struct nf_conn *ct, const char *dptr, unsigned int off, unsigned int datalen, const char *name, diff --git a/trunk/include/linux/netpoll.h b/trunk/include/linux/netpoll.h index 28f5389c924b..66d5379c305e 100644 --- a/trunk/include/linux/netpoll.h +++ b/trunk/include/linux/netpoll.h @@ -23,6 +23,7 @@ struct netpoll { u8 remote_mac[ETH_ALEN]; struct list_head rx; /* rx_np list element */ + struct rcu_head rcu; }; struct netpoll_info { @@ -38,28 +39,40 @@ struct netpoll_info { struct delayed_work tx_work; struct netpoll *netpoll; + struct rcu_head rcu; }; void netpoll_send_udp(struct netpoll *np, const char *msg, int len); void netpoll_print_options(struct netpoll *np); int netpoll_parse_options(struct netpoll *np, char *opt); -int __netpoll_setup(struct netpoll *np, struct net_device *ndev); +int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp); int netpoll_setup(struct netpoll *np); int netpoll_trap(void); void netpoll_set_trap(int trap); void __netpoll_cleanup(struct netpoll *np); +void __netpoll_free_rcu(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); -int __netpoll_rx(struct sk_buff *skb); +int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo); void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, struct net_device *dev); static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) { + unsigned long flags; + local_irq_save(flags); netpoll_send_skb_on_dev(np, skb, np->dev); + local_irq_restore(flags); } #ifdef CONFIG_NETPOLL +static inline bool netpoll_rx_on(struct sk_buff *skb) +{ + struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); + + return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); +} + static inline bool netpoll_rx(struct sk_buff *skb) { struct netpoll_info *npinfo; @@ -67,14 +80,14 @@ static inline bool netpoll_rx(struct sk_buff *skb) bool ret = false; local_irq_save(flags); - npinfo = rcu_dereference_bh(skb->dev->npinfo); - if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) + if (!netpoll_rx_on(skb)) goto out; + npinfo = rcu_dereference_bh(skb->dev->npinfo); spin_lock(&npinfo->rx_lock); /* check rx_flags again with the lock held */ - if (npinfo->rx_flags && __netpoll_rx(skb)) + if (npinfo->rx_flags && __netpoll_rx(skb, npinfo)) ret = true; spin_unlock(&npinfo->rx_lock); @@ -83,13 +96,6 @@ static inline bool netpoll_rx(struct sk_buff *skb) return ret; } -static inline int netpoll_rx_on(struct sk_buff *skb) -{ - struct netpoll_info *npinfo = rcu_dereference_bh(skb->dev->npinfo); - - return npinfo && (!list_empty(&npinfo->rx_np) || npinfo->rx_flags); -} - static inline int netpoll_receive_skb(struct sk_buff *skb) { if (!list_empty(&skb->dev->napi_list)) @@ -119,7 +125,7 @@ static inline void 
netpoll_poll_unlock(void *have) } } -static inline int netpoll_tx_running(struct net_device *dev) +static inline bool netpoll_tx_running(struct net_device *dev) { return irqs_disabled(); } @@ -127,11 +133,11 @@ static inline int netpoll_tx_running(struct net_device *dev) #else static inline bool netpoll_rx(struct sk_buff *skb) { - return 0; + return false; } -static inline int netpoll_rx_on(struct sk_buff *skb) +static inline bool netpoll_rx_on(struct sk_buff *skb) { - return 0; + return false; } static inline int netpoll_receive_skb(struct sk_buff *skb) { @@ -147,9 +153,9 @@ static inline void netpoll_poll_unlock(void *have) static inline void netpoll_netdev_init(struct net_device *dev) { } -static inline int netpoll_tx_running(struct net_device *dev) +static inline bool netpoll_tx_running(struct net_device *dev) { - return 0; + return false; } #endif diff --git a/trunk/include/linux/nfs_fs.h b/trunk/include/linux/nfs_fs.h index 1f8fc7f9bcd8..4b03f56e280e 100644 --- a/trunk/include/linux/nfs_fs.h +++ b/trunk/include/linux/nfs_fs.h @@ -265,11 +265,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) return NFS_SERVER(inode)->nfs_client->rpc_ops; } -static inline __be32 *NFS_COOKIEVERF(const struct inode *inode) -{ - return NFS_I(inode)->cookieverf; -} - static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) { struct nfs_server *nfss = NFS_SERVER(inode); diff --git a/trunk/include/linux/nfs_page.h b/trunk/include/linux/nfs_page.h index 880805774f9f..92ce5783b707 100644 --- a/trunk/include/linux/nfs_page.h +++ b/trunk/include/linux/nfs_page.h @@ -69,6 +69,7 @@ struct nfs_pageio_descriptor { const struct nfs_pgio_completion_ops *pg_completion_ops; struct pnfs_layout_segment *pg_lseg; struct nfs_direct_req *pg_dreq; + void *pg_layout_private; }; #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) diff --git a/trunk/include/linux/nfs_xdr.h b/trunk/include/linux/nfs_xdr.h index 00485e084394..be9cf3c7e79e 100644 --- a/trunk/include/linux/nfs_xdr.h +++ b/trunk/include/linux/nfs_xdr.h @@ -652,7 +652,7 @@ struct nfs_getaclargs { }; /* getxattr ACL interface flags */ -#define NFS4_ACL_LEN_REQUEST 0x0001 /* zero length getxattr buffer */ +#define NFS4_ACL_TRUNC 0x0001 /* ACL was truncated */ struct nfs_getaclres { size_t acl_len; size_t acl_data_offset; @@ -1248,6 +1248,7 @@ struct nfs_pgio_header { void (*release) (struct nfs_pgio_header *hdr); const struct nfs_pgio_completion_ops *completion_ops; struct nfs_direct_req *dreq; + void *layout_private; spinlock_t lock; /* fields protected by lock */ int pnfs_error; diff --git a/trunk/include/linux/nvme.h b/trunk/include/linux/nvme.h index 9490a00529f4..c25cccaa555a 100644 --- a/trunk/include/linux/nvme.h +++ b/trunk/include/linux/nvme.h @@ -35,8 +35,10 @@ struct nvme_bar { __u64 acq; /* Admin CQ Base Address */ }; +#define NVME_CAP_MQES(cap) ((cap) & 0xffff) #define NVME_CAP_TIMEOUT(cap) (((cap) >> 24) & 0xff) #define NVME_CAP_STRIDE(cap) (((cap) >> 32) & 0xf) +#define NVME_CAP_MPSMIN(cap) (((cap) >> 48) & 0xf) enum { NVME_CC_ENABLE = 1 << 0, diff --git a/trunk/include/linux/of.h b/trunk/include/linux/of.h index 5919ee33f2b7..1b1163225f3b 100644 --- a/trunk/include/linux/of.h +++ b/trunk/include/linux/of.h @@ -190,10 +190,17 @@ extern struct device_node *of_get_parent(const struct device_node *node); extern struct device_node *of_get_next_parent(struct device_node *node); extern struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev); +extern struct device_node 
*of_get_next_available_child( + const struct device_node *node, struct device_node *prev); + #define for_each_child_of_node(parent, child) \ for (child = of_get_next_child(parent, NULL); child != NULL; \ child = of_get_next_child(parent, child)) +#define for_each_available_child_of_node(parent, child) \ + for (child = of_get_next_available_child(parent, NULL); child != NULL; \ + child = of_get_next_available_child(parent, child)) + static inline int of_get_child_count(const struct device_node *np) { struct device_node *child; diff --git a/trunk/include/linux/pci_ids.h b/trunk/include/linux/pci_ids.h index fc3526077348..6b4565c440c8 100644 --- a/trunk/include/linux/pci_ids.h +++ b/trunk/include/linux/pci_ids.h @@ -2149,7 +2149,7 @@ #define PCI_DEVICE_ID_TIGON3_5704S 0x16a8 #define PCI_DEVICE_ID_NX2_57800_VF 0x16a9 #define PCI_DEVICE_ID_NX2_5706S 0x16aa -#define PCI_DEVICE_ID_NX2_57840_MF 0x16ab +#define PCI_DEVICE_ID_NX2_57840_MF 0x16a4 #define PCI_DEVICE_ID_NX2_5708S 0x16ac #define PCI_DEVICE_ID_NX2_57840_VF 0x16ad #define PCI_DEVICE_ID_NX2_57810_MF 0x16ae diff --git a/trunk/include/linux/perf_event.h b/trunk/include/linux/perf_event.h index 76c5c8b724a7..599afc4bb67e 100644 --- a/trunk/include/linux/perf_event.h +++ b/trunk/include/linux/perf_event.h @@ -130,8 +130,10 @@ enum perf_event_sample_format { PERF_SAMPLE_STREAM_ID = 1U << 9, PERF_SAMPLE_RAW = 1U << 10, PERF_SAMPLE_BRANCH_STACK = 1U << 11, + PERF_SAMPLE_REGS_USER = 1U << 12, + PERF_SAMPLE_STACK_USER = 1U << 13, - PERF_SAMPLE_MAX = 1U << 12, /* non-ABI */ + PERF_SAMPLE_MAX = 1U << 14, /* non-ABI */ }; /* @@ -162,6 +164,15 @@ enum perf_branch_sample_type { PERF_SAMPLE_BRANCH_KERNEL|\ PERF_SAMPLE_BRANCH_HV) +/* + * Values to determine ABI of the registers dump. + */ +enum perf_sample_regs_abi { + PERF_SAMPLE_REGS_ABI_NONE = 0, + PERF_SAMPLE_REGS_ABI_32 = 1, + PERF_SAMPLE_REGS_ABI_64 = 2, +}; + /* * The format of the data returned by read() on a perf event fd, * as specified by attr.read_format: @@ -194,6 +205,8 @@ enum perf_event_read_format { #define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */ #define PERF_ATTR_SIZE_VER1 72 /* add: config2 */ #define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */ +#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */ + /* add: sample_stack_user */ /* * Hardware event_id to monitor via a performance monitoring event: @@ -255,7 +268,10 @@ struct perf_event_attr { exclude_host : 1, /* don't count in host */ exclude_guest : 1, /* don't count in guest */ - __reserved_1 : 43; + exclude_callchain_kernel : 1, /* exclude kernel callchains */ + exclude_callchain_user : 1, /* exclude user callchains */ + + __reserved_1 : 41; union { __u32 wakeup_events; /* wakeup every n events */ @@ -271,9 +287,25 @@ struct perf_event_attr { __u64 bp_len; __u64 config2; /* extension of config1 */ }; - __u64 branch_sample_type; /* enum branch_sample_type */ + __u64 branch_sample_type; /* enum perf_branch_sample_type */ + + /* + * Defines set of user regs to dump on samples. + * See asm/perf_regs.h for details. + */ + __u64 sample_regs_user; + + /* + * Defines size of the user stack to dump on samples. + */ + __u32 sample_stack_user; + + /* Align to u64. 
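The for_each_available_child_of_node() iterator added above pairs with of_get_next_available_child(); a hedged sketch, with a hypothetical caller, of counting only the children the OF core reports as available (status "okay"/"ok", or no status property):

#include <linux/of.h>

static int foo_count_available_children(struct device_node *parent)
{
	struct device_node *child;
	int n = 0;

	/* disabled children are skipped; node references are handled by the iterator */
	for_each_available_child_of_node(parent, child)
		n++;

	return n;
}

As with for_each_child_of_node(), breaking out of the loop early would leave a reference on child that the caller must drop with of_node_put().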
*/ + __u32 __reserved_2; }; +#define perf_flags(attr) (*(&(attr)->read_format + 1)) + /* * Ioctls that can be done on a perf event fd: */ @@ -548,6 +580,13 @@ enum perf_event_type { * char data[size];}&& PERF_SAMPLE_RAW * * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK + * + * { u64 abi; # enum perf_sample_regs_abi + * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER + * + * { u64 size; + * char data[size]; + * u64 dyn_size; } && PERF_SAMPLE_STACK_USER * }; */ PERF_RECORD_SAMPLE = 9, @@ -609,6 +648,7 @@ struct perf_guest_info_callbacks { #include #include #include +#include #include struct perf_callchain_entry { @@ -654,6 +694,11 @@ struct perf_branch_stack { struct perf_branch_entry entries[0]; }; +struct perf_regs_user { + __u64 abi; + struct pt_regs *regs; +}; + struct task_struct; /* @@ -926,7 +971,7 @@ struct perf_event { struct hw_perf_event hw; struct perf_event_context *ctx; - struct file *filp; + atomic_long_t refcount; /* * These accumulate total time (in nanoseconds) that children @@ -1133,6 +1178,8 @@ struct perf_sample_data { struct perf_callchain_entry *callchain; struct perf_raw_record *raw; struct perf_branch_stack *br_stack; + struct perf_regs_user regs_user; + u64 stack_user_size; }; static inline void perf_sample_data_init(struct perf_sample_data *data, @@ -1142,7 +1189,10 @@ static inline void perf_sample_data_init(struct perf_sample_data *data, data->addr = addr; data->raw = NULL; data->br_stack = NULL; - data->period = period; + data->period = period; + data->regs_user.abi = PERF_SAMPLE_REGS_ABI_NONE; + data->regs_user.regs = NULL; + data->stack_user_size = 0; } extern void perf_output_sample(struct perf_output_handle *handle, @@ -1272,7 +1322,8 @@ static inline bool perf_paranoid_kernel(void) extern void perf_event_init(void); extern void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, struct pt_regs *regs, - struct hlist_head *head, int rctx); + struct hlist_head *head, int rctx, + struct task_struct *task); extern void perf_bp_event(struct perf_event *event, void *data); #ifndef perf_misc_flags @@ -1289,12 +1340,15 @@ static inline bool has_branch_stack(struct perf_event *event) extern int perf_output_begin(struct perf_output_handle *handle, struct perf_event *event, unsigned int size); extern void perf_output_end(struct perf_output_handle *handle); -extern void perf_output_copy(struct perf_output_handle *handle, +extern unsigned int perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len); +extern unsigned int perf_output_skip(struct perf_output_handle *handle, + unsigned int len); extern int perf_swevent_get_recursion_context(void); extern void perf_swevent_put_recursion_context(int rctx); extern void perf_event_enable(struct perf_event *event); extern void perf_event_disable(struct perf_event *event); +extern int __perf_event_disable(void *info); extern void perf_event_task_tick(void); #else static inline void @@ -1333,6 +1387,7 @@ static inline int perf_swevent_get_recursion_context(void) { return -1; } static inline void perf_swevent_put_recursion_context(int rctx) { } static inline void perf_event_enable(struct perf_event *event) { } static inline void perf_event_disable(struct perf_event *event) { } +static inline int __perf_event_disable(void *info) { return -1; } static inline void perf_event_task_tick(void) { } #endif diff --git a/trunk/include/linux/perf_regs.h b/trunk/include/linux/perf_regs.h new file mode 100644 index 000000000000..3c73d5fe18be --- /dev/null +++ 
b/trunk/include/linux/perf_regs.h @@ -0,0 +1,25 @@ +#ifndef _LINUX_PERF_REGS_H +#define _LINUX_PERF_REGS_H + +#ifdef CONFIG_HAVE_PERF_REGS +#include +u64 perf_reg_value(struct pt_regs *regs, int idx); +int perf_reg_validate(u64 mask); +u64 perf_reg_abi(struct task_struct *task); +#else +static inline u64 perf_reg_value(struct pt_regs *regs, int idx) +{ + return 0; +} + +static inline int perf_reg_validate(u64 mask) +{ + return mask ? -ENOSYS : 0; +} + +static inline u64 perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_NONE; +} +#endif /* CONFIG_HAVE_PERF_REGS */ +#endif /* _LINUX_PERF_REGS_H */ diff --git a/trunk/include/linux/pinctrl/consumer.h b/trunk/include/linux/pinctrl/consumer.h index 6dd96fb45482..e9b7f4350844 100644 --- a/trunk/include/linux/pinctrl/consumer.h +++ b/trunk/include/linux/pinctrl/consumer.h @@ -20,6 +20,7 @@ /* This struct is private to the core and should be regarded as a cookie */ struct pinctrl; struct pinctrl_state; +struct device; #ifdef CONFIG_PINCTRL diff --git a/trunk/include/linux/rcupdate.h b/trunk/include/linux/rcupdate.h index 115ead2b5155..7c968e4f929e 100644 --- a/trunk/include/linux/rcupdate.h +++ b/trunk/include/linux/rcupdate.h @@ -191,6 +191,21 @@ extern void rcu_idle_enter(void); extern void rcu_idle_exit(void); extern void rcu_irq_enter(void); extern void rcu_irq_exit(void); + +#ifdef CONFIG_RCU_USER_QS +extern void rcu_user_enter(void); +extern void rcu_user_exit(void); +extern void rcu_user_enter_after_irq(void); +extern void rcu_user_exit_after_irq(void); +extern void rcu_user_hooks_switch(struct task_struct *prev, + struct task_struct *next); +#else +static inline void rcu_user_enter(void) { } +static inline void rcu_user_exit(void) { } +static inline void rcu_user_enter_after_irq(void) { } +static inline void rcu_user_exit_after_irq(void) { } +#endif /* CONFIG_RCU_USER_QS */ + extern void exit_rcu(void); /** @@ -210,14 +225,12 @@ extern void exit_rcu(void); * to nest RCU_NONIDLE() wrappers, but the nesting level is currently * quite limited. If deeper nesting is required, it will be necessary * to adjust DYNTICK_TASK_NESTING_VALUE accordingly. - * - * This macro may be used from process-level code only. */ #define RCU_NONIDLE(a) \ do { \ - rcu_idle_exit(); \ + rcu_irq_enter(); \ do { a; } while (0); \ - rcu_idle_enter(); \ + rcu_irq_exit(); \ } while (0) /* diff --git a/trunk/include/linux/regmap.h b/trunk/include/linux/regmap.h index 7f7e00df3adf..e3bcc3f4dcb8 100644 --- a/trunk/include/linux/regmap.h +++ b/trunk/include/linux/regmap.h @@ -285,6 +285,7 @@ struct regmap_irq { * @ack_base: Base ack address. If zero then the chip is clear on read. * @wake_base: Base address for wake enables. If zero unsupported. * @irq_reg_stride: Stride to use for chips where registers are not contiguous. + * @runtime_pm: Hold a runtime PM lock on the device when accessing it. * * @num_regs: Number of registers in each control bank. * @irqs: Descriptors for individual IRQs. 
Interrupt numbers are @@ -299,6 +300,8 @@ struct regmap_irq_chip { unsigned int ack_base; unsigned int wake_base; unsigned int irq_reg_stride; + unsigned int mask_invert; + bool runtime_pm; int num_regs; diff --git a/trunk/include/linux/regulator/consumer.h b/trunk/include/linux/regulator/consumer.h index da339fd8c755..c43cd3556b1f 100644 --- a/trunk/include/linux/regulator/consumer.h +++ b/trunk/include/linux/regulator/consumer.h @@ -177,6 +177,8 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode); unsigned int regulator_get_mode(struct regulator *regulator); int regulator_set_optimum_mode(struct regulator *regulator, int load_uA); +int regulator_allow_bypass(struct regulator *regulator, bool allow); + /* regulator notifier block */ int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb); @@ -328,6 +330,12 @@ static inline int regulator_set_optimum_mode(struct regulator *regulator, return REGULATOR_MODE_NORMAL; } +static inline int regulator_allow_bypass(struct regulator *regulator, + bool allow) +{ + return 0; +} + static inline int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb) { @@ -352,4 +360,11 @@ static inline void regulator_set_drvdata(struct regulator *regulator, #endif +static inline int regulator_set_voltage_tol(struct regulator *regulator, + int new_uV, int tol_uV) +{ + return regulator_set_voltage(regulator, + new_uV - tol_uV, new_uV + tol_uV); +} + #endif diff --git a/trunk/include/linux/regulator/driver.h b/trunk/include/linux/regulator/driver.h index bac4c871f3bd..7932a3bf21bd 100644 --- a/trunk/include/linux/regulator/driver.h +++ b/trunk/include/linux/regulator/driver.h @@ -32,6 +32,8 @@ enum regulator_status { REGULATOR_STATUS_NORMAL, REGULATOR_STATUS_IDLE, REGULATOR_STATUS_STANDBY, + /* The regulator is enabled but not regulating */ + REGULATOR_STATUS_BYPASS, /* in case that any other status doesn't apply */ REGULATOR_STATUS_UNDEFINED, }; @@ -58,6 +60,7 @@ enum regulator_status { * regulator_desc.n_voltages. Voltages may be reported in any order. * * @set_current_limit: Configure a limit for a current-limited regulator. + * The driver should select the current closest to max_uA. * @get_current_limit: Get the configured limit for a current-limited regulator. * * @set_mode: Set the configured operating mode for the regulator. @@ -67,6 +70,9 @@ enum regulator_status { * @get_optimum_mode: Get the most efficient operating mode for the regulator * when running with the specified parameters. * + * @set_bypass: Set the regulator in bypass mode. + * @get_bypass: Get the regulator bypass mode state. + * * @enable_time: Time taken for the regulator voltage output voltage to * stabilise after being enabled, in microseconds. * @set_ramp_delay: Set the ramp delay for the regulator. 
The driver should @@ -133,6 +139,10 @@ struct regulator_ops { unsigned int (*get_optimum_mode) (struct regulator_dev *, int input_uV, int output_uV, int load_uA); + /* control and report on bypass mode */ + int (*set_bypass)(struct regulator_dev *dev, bool enable); + int (*get_bypass)(struct regulator_dev *dev, bool *enable); + /* the operations below are for configuration of regulator state when * its parent PMIC enters a global STANDBY/HIBERNATE state */ @@ -205,6 +215,8 @@ struct regulator_desc { unsigned int vsel_mask; unsigned int enable_reg; unsigned int enable_mask; + unsigned int bypass_reg; + unsigned int bypass_mask; unsigned int enable_time; }; @@ -221,7 +233,8 @@ struct regulator_desc { * @driver_data: private regulator data * @of_node: OpenFirmware node to parse for device tree bindings (may be * NULL). - * @regmap: regmap to use for core regmap helpers + * @regmap: regmap to use for core regmap helpers if dev_get_regulator() is + * insufficient. * @ena_gpio: GPIO controlling regulator enable. * @ena_gpio_invert: Sense for GPIO enable control. * @ena_gpio_flags: Flags to use when calling gpio_request_one() @@ -253,6 +266,7 @@ struct regulator_dev { int exclusive; u32 use_count; u32 open_count; + u32 bypass_count; /* lists we belong to */ struct list_head list; /* list of all regulators */ @@ -310,6 +324,8 @@ int regulator_disable_regmap(struct regulator_dev *rdev); int regulator_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int old_selector, unsigned int new_selector); +int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable); +int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable); void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); diff --git a/trunk/include/linux/regulator/fan53555.h b/trunk/include/linux/regulator/fan53555.h new file mode 100644 index 000000000000..5c45c85d52ca --- /dev/null +++ b/trunk/include/linux/regulator/fan53555.h @@ -0,0 +1,60 @@ +/* + * fan53555.h - Fairchild Regulator FAN53555 Driver + * + * Copyright (C) 2012 Marvell Technology Ltd. + * Yunfan Zhang + * + * This package is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + */ + +#ifndef __FAN53555_H__ + +/* VSEL ID */ +enum { + FAN53555_VSEL_ID_0 = 0, + FAN53555_VSEL_ID_1, +}; + +/* Transition slew rate limiting from a low to high voltage. 
+ * ----------------------- + * Bin |Slew Rate(mV/uS) + * ------|---------------- + * 000 | 64.00 + * ------|---------------- + * 001 | 32.00 + * ------|---------------- + * 010 | 16.00 + * ------|---------------- + * 011 | 8.00 + * ------|---------------- + * 100 | 4.00 + * ------|---------------- + * 101 | 2.00 + * ------|---------------- + * 110 | 1.00 + * ------|---------------- + * 111 | 0.50 + * ----------------------- + */ +enum { + FAN53555_SLEW_RATE_64MV = 0, + FAN53555_SLEW_RATE_32MV, + FAN53555_SLEW_RATE_16MV, + FAN53555_SLEW_RATE_8MV, + FAN53555_SLEW_RATE_4MV, + FAN53555_SLEW_RATE_2MV, + FAN53555_SLEW_RATE_1MV, + FAN53555_SLEW_RATE_0_5MV, +}; + +struct fan53555_platform_data { + struct regulator_init_data *regulator; + unsigned int slew_rate; + /* Sleep VSEL ID */ + unsigned int sleep_vsel_id; +}; + +#endif /* __FAN53555_H__ */ diff --git a/trunk/include/linux/regulator/machine.h b/trunk/include/linux/regulator/machine.h index 40dd0a394cfa..36adbc82de6a 100644 --- a/trunk/include/linux/regulator/machine.h +++ b/trunk/include/linux/regulator/machine.h @@ -32,6 +32,7 @@ struct regulator; * board/machine. * STATUS: Regulator can be enabled and disabled. * DRMS: Dynamic Regulator Mode Switching is enabled for this regulator. + * BYPASS: Regulator can be put into bypass mode */ #define REGULATOR_CHANGE_VOLTAGE 0x1 @@ -39,6 +40,7 @@ struct regulator; #define REGULATOR_CHANGE_MODE 0x4 #define REGULATOR_CHANGE_STATUS 0x8 #define REGULATOR_CHANGE_DRMS 0x10 +#define REGULATOR_CHANGE_BYPASS 0x20 /** * struct regulator_state - regulator state during low power system states diff --git a/trunk/include/linux/sched.h b/trunk/include/linux/sched.h index c147e7024f11..765dffbb085e 100644 --- a/trunk/include/linux/sched.h +++ b/trunk/include/linux/sched.h @@ -273,11 +273,11 @@ extern void init_idle_bootup_task(struct task_struct *idle); extern int runqueue_is_locked(int cpu); #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ) -extern void select_nohz_load_balancer(int stop_tick); +extern void nohz_balance_enter_idle(int cpu); extern void set_cpu_sd_state_idle(void); extern int get_nohz_timer_target(void); #else -static inline void select_nohz_load_balancer(int stop_tick) { } +static inline void nohz_balance_enter_idle(int cpu) { } static inline void set_cpu_sd_state_idle(void) { } #endif @@ -334,14 +334,6 @@ static inline void lockup_detector_init(void) } #endif -#if defined(CONFIG_LOCKUP_DETECTOR) && defined(CONFIG_SUSPEND) -void lockup_detector_bootcpu_resume(void); -#else -static inline void lockup_detector_bootcpu_resume(void) -{ -} -#endif - #ifdef CONFIG_DETECT_HUNG_TASK extern unsigned int sysctl_hung_task_panic; extern unsigned long sysctl_hung_task_check_count; @@ -454,6 +446,9 @@ extern int get_dumpable(struct mm_struct *mm); #define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ #define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */ +#define MMF_HAS_UPROBES 19 /* has uprobes */ +#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ + #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) struct sighand_struct { @@ -686,11 +681,6 @@ struct signal_struct { * (notably. ptrace) */ }; -/* Context switch must be unlocked if interrupts are to be enabled */ -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW -# define __ARCH_WANT_UNLOCKED_CTXSW -#endif - /* * Bits in flags field of signal_struct. 
*/ @@ -868,7 +858,6 @@ enum cpu_idle_type { #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_PREFER_LOCAL 0x0040 /* Prefer to keep tasks local to this domain */ #define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ @@ -962,7 +951,6 @@ struct sched_domain { unsigned int smt_gain; int flags; /* See SD_* */ int level; - int idle_buddy; /* cpu assigned to select_idle_sibling() */ /* Runtime fields. */ unsigned long last_balance; /* init to jiffies. units in jiffies */ @@ -1894,6 +1882,14 @@ static inline void rcu_copy_process(struct task_struct *p) #endif +static inline void rcu_switch(struct task_struct *prev, + struct task_struct *next) +{ +#ifdef CONFIG_RCU_USER_QS + rcu_user_hooks_switch(prev, next); +#endif +} + static inline void tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags) { diff --git a/trunk/include/linux/screen_info.h b/trunk/include/linux/screen_info.h index 899fbb487c94..fb3c5a8fef3d 100644 --- a/trunk/include/linux/screen_info.h +++ b/trunk/include/linux/screen_info.h @@ -68,6 +68,8 @@ struct screen_info { #define VIDEO_FLAGS_NOCURSOR (1 << 0) /* The video mode has no cursor set */ +#define VIDEO_CAPABILITY_SKIP_QUIRKS (1 << 0) + #ifdef __KERNEL__ extern struct screen_info screen_info; diff --git a/trunk/include/linux/security.h b/trunk/include/linux/security.h index 4e5a73cdbbef..d143b8e01954 100644 --- a/trunk/include/linux/security.h +++ b/trunk/include/linux/security.h @@ -118,6 +118,7 @@ void reset_security_ops(void); extern unsigned long mmap_min_addr; extern unsigned long dac_mmap_min_addr; #else +#define mmap_min_addr 0UL #define dac_mmap_min_addr 0UL #endif @@ -1242,8 +1243,6 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * Check that the @parent process has sufficient permission to trace the * current process before allowing the current process to present itself * to the @parent process for tracing. - * The parent process will still have to undergo the ptrace_access_check - * checks before it is allowed to trace this one. * @parent contains the task_struct structure for debugger process. * Return 0 if permission is granted. * @capget: diff --git a/trunk/include/linux/smpboot.h b/trunk/include/linux/smpboot.h new file mode 100644 index 000000000000..e0106d8581d3 --- /dev/null +++ b/trunk/include/linux/smpboot.h @@ -0,0 +1,43 @@ +#ifndef _LINUX_SMPBOOT_H +#define _LINUX_SMPBOOT_H + +#include + +struct task_struct; +/* Cookie handed to the thread_fn*/ +struct smpboot_thread_data; + +/** + * struct smp_hotplug_thread - CPU hotplug related thread descriptor + * @store: Pointer to per cpu storage for the task pointers + * @list: List head for core management + * @thread_should_run: Check whether the thread should run or not. Called with + * preemption disabled. 
+ * @thread_fn: The associated thread function + * @setup: Optional setup function, called when the thread gets + * operational the first time + * @cleanup: Optional cleanup function, called when the thread + * should stop (module exit) + * @park: Optional park function, called when the thread is + * parked (cpu offline) + * @unpark: Optional unpark function, called when the thread is + * unparked (cpu online) + * @thread_comm: The base name of the thread + */ +struct smp_hotplug_thread { + struct task_struct __percpu **store; + struct list_head list; + int (*thread_should_run)(unsigned int cpu); + void (*thread_fn)(unsigned int cpu); + void (*setup)(unsigned int cpu); + void (*cleanup)(unsigned int cpu, bool online); + void (*park)(unsigned int cpu); + void (*unpark)(unsigned int cpu); + const char *thread_comm; +}; + +int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread); +void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread); +int smpboot_thread_schedule(void); + +#endif diff --git a/trunk/include/linux/string.h b/trunk/include/linux/string.h index ffe0442e18d2..b9178812d9df 100644 --- a/trunk/include/linux/string.h +++ b/trunk/include/linux/string.h @@ -144,8 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; } -#endif extern size_t memweight(const void *ptr, size_t bytes); +#endif /* __KERNEL__ */ #endif /* _LINUX_STRING_H_ */ diff --git a/trunk/include/linux/sunrpc/xprt.h b/trunk/include/linux/sunrpc/xprt.h index cff40aa7db62..bf8c49ff7530 100644 --- a/trunk/include/linux/sunrpc/xprt.h +++ b/trunk/include/linux/sunrpc/xprt.h @@ -114,6 +114,7 @@ struct rpc_xprt_ops { void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize); int (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task); + void (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task); void (*rpcbind)(struct rpc_task *task); void (*set_port)(struct rpc_xprt *xprt, unsigned short port); void (*connect)(struct rpc_task *task); @@ -281,6 +282,8 @@ void xprt_connect(struct rpc_task *task); void xprt_reserve(struct rpc_task *task); int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); +void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task); int xprt_prepare_transmit(struct rpc_task *task); void xprt_transmit(struct rpc_task *task); void xprt_end_transmit(struct rpc_task *task); diff --git a/trunk/include/linux/task_work.h b/trunk/include/linux/task_work.h index fb46b03b1852..ca5a1cf27dae 100644 --- a/trunk/include/linux/task_work.h +++ b/trunk/include/linux/task_work.h @@ -18,8 +18,7 @@ void task_work_run(void); static inline void exit_task_work(struct task_struct *task) { - if (unlikely(task->task_works)) - task_work_run(); + task_work_run(); } #endif /* _LINUX_TASK_WORK_H */ diff --git a/trunk/include/linux/time.h b/trunk/include/linux/time.h index c81c5e40fcb5..b51e664c83e7 100644 --- a/trunk/include/linux/time.h +++ b/trunk/include/linux/time.h @@ -107,11 +107,36 @@ static inline struct timespec timespec_sub(struct timespec lhs, return ts_delta; } +#define KTIME_MAX ((s64)~((u64)1 << 63)) +#if (BITS_PER_LONG == 64) +# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +#else +# define KTIME_SEC_MAX LONG_MAX +#endif + /* * Returns true 
if the timespec is norm, false if denorm: */ -#define timespec_valid(ts) \ - (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) +static inline bool timespec_valid(const struct timespec *ts) +{ + /* Dates before 1970 are bogus */ + if (ts->tv_sec < 0) + return false; + /* Can't have more nanoseconds then a second */ + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return false; + return true; +} + +static inline bool timespec_valid_strict(const struct timespec *ts) +{ + if (!timespec_valid(ts)) + return false; + /* Disallow values that could overflow ktime_t */ + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + return false; + return true; +} extern void read_persistent_clock(struct timespec *ts); extern void read_boot_clock(struct timespec *ts); diff --git a/trunk/include/linux/timer.h b/trunk/include/linux/timer.h index 6abd9138beda..8c5a197e1587 100644 --- a/trunk/include/linux/timer.h +++ b/trunk/include/linux/timer.h @@ -49,147 +49,112 @@ extern struct tvec_base boot_tvec_bases; #endif /* - * Note that all tvec_bases are 2 byte aligned and lower bit of - * base in timer_list is guaranteed to be zero. Use the LSB to - * indicate whether the timer is deferrable. + * Note that all tvec_bases are at least 4 byte aligned and lower two bits + * of base in timer_list is guaranteed to be zero. Use them for flags. * * A deferrable timer will work normally when the system is busy, but * will not cause a CPU to come out of idle just to service it; instead, * the timer will be serviced when the CPU eventually wakes up with a * subsequent non-deferrable timer. + * + * An irqsafe timer is executed with IRQ disabled and it's safe to wait for + * the completion of the running instance from IRQ handlers, for example, + * by calling del_timer_sync(). + * + * Note: The irq disabled callback execution is a special case for + * workqueue locking issues. It's not meant for executing random crap + * with interrupts disabled. Abuse is monitored! 
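A minimal sketch of the intended use of the timespec_valid_strict() helper introduced above; the function name and caller are hypothetical, but the checks mirror the implementation: reject pre-1970 times, out-of-range nanoseconds, and second counts large enough to overflow ktime_t (see KTIME_SEC_MAX, now defined in this header).

#include <linux/errno.h>
#include <linux/time.h>

static int foo_set_clock(const struct timespec *ts)
{
	if (!timespec_valid_strict(ts))
		return -EINVAL;

	/* ... hand the validated time to the timekeeping core / hardware ... */
	return 0;
}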
*/ -#define TBASE_DEFERRABLE_FLAG (0x1) +#define TIMER_DEFERRABLE 0x1LU +#define TIMER_IRQSAFE 0x2LU -#define TIMER_INITIALIZER(_function, _expires, _data) { \ +#define TIMER_FLAG_MASK 0x3LU + +#define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \ .entry = { .prev = TIMER_ENTRY_STATIC }, \ .function = (_function), \ .expires = (_expires), \ .data = (_data), \ - .base = &boot_tvec_bases, \ + .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \ .slack = -1, \ __TIMER_LOCKDEP_MAP_INITIALIZER( \ __FILE__ ":" __stringify(__LINE__)) \ } -#define TBASE_MAKE_DEFERRED(ptr) ((struct tvec_base *) \ - ((unsigned char *)(ptr) + TBASE_DEFERRABLE_FLAG)) +#define TIMER_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), 0) -#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) {\ - .entry = { .prev = TIMER_ENTRY_STATIC }, \ - .function = (_function), \ - .expires = (_expires), \ - .data = (_data), \ - .base = TBASE_MAKE_DEFERRED(&boot_tvec_bases), \ - __TIMER_LOCKDEP_MAP_INITIALIZER( \ - __FILE__ ":" __stringify(__LINE__)) \ - } +#define TIMER_DEFERRED_INITIALIZER(_function, _expires, _data) \ + __TIMER_INITIALIZER((_function), (_expires), (_data), TIMER_DEFERRABLE) #define DEFINE_TIMER(_name, _function, _expires, _data) \ struct timer_list _name = \ TIMER_INITIALIZER(_function, _expires, _data) -void init_timer_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key); -void init_timer_deferrable_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key); +void init_timer_key(struct timer_list *timer, unsigned int flags, + const char *name, struct lock_class_key *key); + +#ifdef CONFIG_DEBUG_OBJECTS_TIMERS +extern void init_timer_on_stack_key(struct timer_list *timer, + unsigned int flags, const char *name, + struct lock_class_key *key); +extern void destroy_timer_on_stack(struct timer_list *timer); +#else +static inline void destroy_timer_on_stack(struct timer_list *timer) { } +static inline void init_timer_on_stack_key(struct timer_list *timer, + unsigned int flags, const char *name, + struct lock_class_key *key) +{ + init_timer_key(timer, flags, name, key); +} +#endif #ifdef CONFIG_LOCKDEP -#define init_timer(timer) \ +#define __init_timer(_timer, _flags) \ do { \ static struct lock_class_key __key; \ - init_timer_key((timer), #timer, &__key); \ + init_timer_key((_timer), (_flags), #_timer, &__key); \ } while (0) -#define init_timer_deferrable(timer) \ +#define __init_timer_on_stack(_timer, _flags) \ do { \ static struct lock_class_key __key; \ - init_timer_deferrable_key((timer), #timer, &__key); \ + init_timer_on_stack_key((_timer), (_flags), #_timer, &__key); \ } while (0) +#else +#define __init_timer(_timer, _flags) \ + init_timer_key((_timer), (_flags), NULL, NULL) +#define __init_timer_on_stack(_timer, _flags) \ + init_timer_on_stack_key((_timer), (_flags), NULL, NULL) +#endif +#define init_timer(timer) \ + __init_timer((timer), 0) +#define init_timer_deferrable(timer) \ + __init_timer((timer), TIMER_DEFERRABLE) #define init_timer_on_stack(timer) \ + __init_timer_on_stack((timer), 0) + +#define __setup_timer(_timer, _fn, _data, _flags) \ do { \ - static struct lock_class_key __key; \ - init_timer_on_stack_key((timer), #timer, &__key); \ + __init_timer((_timer), (_flags)); \ + (_timer)->function = (_fn); \ + (_timer)->data = (_data); \ } while (0) -#define setup_timer(timer, fn, data) \ +#define __setup_timer_on_stack(_timer, _fn, _data, _flags) \ do { \ - static struct 
lock_class_key __key; \ - setup_timer_key((timer), #timer, &__key, (fn), (data));\ + __init_timer_on_stack((_timer), (_flags)); \ + (_timer)->function = (_fn); \ + (_timer)->data = (_data); \ } while (0) +#define setup_timer(timer, fn, data) \ + __setup_timer((timer), (fn), (data), 0) #define setup_timer_on_stack(timer, fn, data) \ - do { \ - static struct lock_class_key __key; \ - setup_timer_on_stack_key((timer), #timer, &__key, \ - (fn), (data)); \ - } while (0) + __setup_timer_on_stack((timer), (fn), (data), 0) #define setup_deferrable_timer_on_stack(timer, fn, data) \ - do { \ - static struct lock_class_key __key; \ - setup_deferrable_timer_on_stack_key((timer), #timer, \ - &__key, (fn), \ - (data)); \ - } while (0) -#else -#define init_timer(timer)\ - init_timer_key((timer), NULL, NULL) -#define init_timer_deferrable(timer)\ - init_timer_deferrable_key((timer), NULL, NULL) -#define init_timer_on_stack(timer)\ - init_timer_on_stack_key((timer), NULL, NULL) -#define setup_timer(timer, fn, data)\ - setup_timer_key((timer), NULL, NULL, (fn), (data)) -#define setup_timer_on_stack(timer, fn, data)\ - setup_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) -#define setup_deferrable_timer_on_stack(timer, fn, data)\ - setup_deferrable_timer_on_stack_key((timer), NULL, NULL, (fn), (data)) -#endif - -#ifdef CONFIG_DEBUG_OBJECTS_TIMERS -extern void init_timer_on_stack_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key); -extern void destroy_timer_on_stack(struct timer_list *timer); -#else -static inline void destroy_timer_on_stack(struct timer_list *timer) { } -static inline void init_timer_on_stack_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key) -{ - init_timer_key(timer, name, key); -} -#endif - -static inline void setup_timer_key(struct timer_list * timer, - const char *name, - struct lock_class_key *key, - void (*function)(unsigned long), - unsigned long data) -{ - timer->function = function; - timer->data = data; - init_timer_key(timer, name, key); -} - -static inline void setup_timer_on_stack_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key, - void (*function)(unsigned long), - unsigned long data) -{ - timer->function = function; - timer->data = data; - init_timer_on_stack_key(timer, name, key); -} - -extern void setup_deferrable_timer_on_stack_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key, - void (*function)(unsigned long), - unsigned long data); + __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE) /** * timer_pending - is a timer pending? diff --git a/trunk/include/linux/timex.h b/trunk/include/linux/timex.h index 99bc88b1fc02..7c5ceb20e03a 100644 --- a/trunk/include/linux/timex.h +++ b/trunk/include/linux/timex.h @@ -232,7 +232,7 @@ struct timex { * estimated error = NTP dispersion. 
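For the include/linux/timer.h rework above, a short usage sketch (not from the patch) of the flag-based helpers: a deferrable timer that re-arms itself periodically but never forces an idle CPU to wake up. The example_* names are made up.

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        static struct timer_list example_timer;

        static void example_poll(unsigned long data)
        {
                /* Deferrable: serviced when the CPU is awake anyway */
                mod_timer(&example_timer, jiffies + HZ);
        }

        static void example_start(void)
        {
                init_timer_deferrable(&example_timer);
                example_timer.function = example_poll;
                example_timer.data = 0;
                mod_timer(&example_timer, jiffies + HZ);
        }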
*/ extern unsigned long tick_usec; /* USER_HZ period (usec) */ -extern unsigned long tick_nsec; /* ACTHZ period (nsec) */ +extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ extern void ntp_init(void); extern void ntp_clear(void); diff --git a/trunk/include/linux/topology.h b/trunk/include/linux/topology.h index e91cd43394df..d3cf0d6e7712 100644 --- a/trunk/include/linux/topology.h +++ b/trunk/include/linux/topology.h @@ -129,7 +129,6 @@ int arch_update_cpu_topology(void); | 1*SD_BALANCE_FORK \ | 0*SD_BALANCE_WAKE \ | 1*SD_WAKE_AFFINE \ - | 0*SD_PREFER_LOCAL \ | 0*SD_SHARE_CPUPOWER \ | 1*SD_SHARE_PKG_RESOURCES \ | 0*SD_SERIALIZE \ @@ -160,10 +159,10 @@ int arch_update_cpu_topology(void); | 1*SD_BALANCE_FORK \ | 0*SD_BALANCE_WAKE \ | 1*SD_WAKE_AFFINE \ - | 0*SD_PREFER_LOCAL \ | 0*SD_SHARE_CPUPOWER \ | 0*SD_SHARE_PKG_RESOURCES \ | 0*SD_SERIALIZE \ + | 1*SD_PREFER_SIBLING \ , \ .last_balance = jiffies, \ .balance_interval = 1, \ diff --git a/trunk/include/linux/tracepoint.h b/trunk/include/linux/tracepoint.h index 802de56c41e8..2f322c38bd4d 100644 --- a/trunk/include/linux/tracepoint.h +++ b/trunk/include/linux/tracepoint.h @@ -136,6 +136,22 @@ static inline void tracepoint_synchronize_unregister(void) postrcu; \ } while (0) +#ifndef MODULE +#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \ + static inline void trace_##name##_rcuidle(proto) \ + { \ + if (static_key_false(&__tracepoint_##name.key)) \ + __DO_TRACE(&__tracepoint_##name, \ + TP_PROTO(data_proto), \ + TP_ARGS(data_args), \ + TP_CONDITION(cond), \ + rcu_idle_exit(), \ + rcu_idle_enter()); \ + } +#else +#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) +#endif + /* * Make sure the alignment of the structure in the __tracepoints section will * not add unwanted padding between the beginning of the section and the @@ -151,16 +167,8 @@ static inline void tracepoint_synchronize_unregister(void) TP_ARGS(data_args), \ TP_CONDITION(cond),,); \ } \ - static inline void trace_##name##_rcuidle(proto) \ - { \ - if (static_key_false(&__tracepoint_##name.key)) \ - __DO_TRACE(&__tracepoint_##name, \ - TP_PROTO(data_proto), \ - TP_ARGS(data_args), \ - TP_CONDITION(cond), \ - rcu_idle_exit(), \ - rcu_idle_enter()); \ - } \ + __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ + PARAMS(cond), PARAMS(data_proto), PARAMS(data_args)) \ static inline int \ register_trace_##name(void (*probe)(data_proto), void *data) \ { \ diff --git a/trunk/include/linux/uprobes.h b/trunk/include/linux/uprobes.h index efe4b3308c74..e6f0331e3d45 100644 --- a/trunk/include/linux/uprobes.h +++ b/trunk/include/linux/uprobes.h @@ -99,25 +99,27 @@ struct xol_area { struct uprobes_state { struct xol_area *xol_area; - atomic_t count; }; + extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); -extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr, bool verify); +extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_mmap(struct vm_area_struct *vma); extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end); +extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm); 
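A purely illustrative sketch of the uprobe_register()/uprobe_unregister() pair declared in the uprobes.h hunk above; it assumes the struct uprobe_consumer handler signature from the same kernel series, which is not shown in this hunk.

        #include <linux/uprobes.h>

        static int example_probe_hit(struct uprobe_consumer *uc, struct pt_regs *regs)
        {
                /* Runs each time the probed instruction at the offset executes */
                return 0;
        }

        static struct uprobe_consumer example_consumer = {
                .handler = example_probe_hit,
        };

        static int example_attach(struct inode *inode, loff_t offset)
        {
                return uprobe_register(inode, offset, &example_consumer);
        }

        static void example_detach(struct inode *inode, loff_t offset)
        {
                uprobe_unregister(inode, offset, &example_consumer);
        }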
extern void uprobe_free_utask(struct task_struct *t); extern void uprobe_copy_process(struct task_struct *t); extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); +extern void __weak arch_uprobe_enable_step(struct arch_uprobe *arch); +extern void __weak arch_uprobe_disable_step(struct arch_uprobe *arch); extern int uprobe_post_sstep_notifier(struct pt_regs *regs); extern int uprobe_pre_sstep_notifier(struct pt_regs *regs); extern void uprobe_notify_resume(struct pt_regs *regs); extern bool uprobe_deny_signal(void); extern bool __weak arch_uprobe_skip_sstep(struct arch_uprobe *aup, struct pt_regs *regs); extern void uprobe_clear_state(struct mm_struct *mm); -extern void uprobe_reset_state(struct mm_struct *mm); #else /* !CONFIG_UPROBES */ struct uprobes_state { }; @@ -138,6 +140,10 @@ static inline void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) { } +static inline void +uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) +{ +} static inline void uprobe_notify_resume(struct pt_regs *regs) { } @@ -158,8 +164,5 @@ static inline void uprobe_copy_process(struct task_struct *t) static inline void uprobe_clear_state(struct mm_struct *mm) { } -static inline void uprobe_reset_state(struct mm_struct *mm) -{ -} #endif /* !CONFIG_UPROBES */ #endif /* _LINUX_UPROBES_H */ diff --git a/trunk/include/linux/writeback.h b/trunk/include/linux/writeback.h index c66fe3332d83..50c3e8fa06a8 100644 --- a/trunk/include/linux/writeback.h +++ b/trunk/include/linux/writeback.h @@ -104,7 +104,6 @@ static inline void wait_on_inode(struct inode *inode) wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); } - /* * mm/page-writeback.c */ diff --git a/trunk/include/linux/xfrm.h b/trunk/include/linux/xfrm.h index 22e61fdf75a2..28e493b5b94c 100644 --- a/trunk/include/linux/xfrm.h +++ b/trunk/include/linux/xfrm.h @@ -84,6 +84,8 @@ struct xfrm_replay_state { __u32 bitmap; }; +#define XFRMA_REPLAY_ESN_MAX 4096 + struct xfrm_replay_state_esn { unsigned int bmp_len; __u32 oseq; diff --git a/trunk/include/net/bluetooth/smp.h b/trunk/include/net/bluetooth/smp.h index ca356a734920..8b27927b2a55 100644 --- a/trunk/include/net/bluetooth/smp.h +++ b/trunk/include/net/bluetooth/smp.h @@ -136,7 +136,7 @@ struct smp_chan { }; /* SMP Commands */ -int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level); +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb); int smp_distribute_keys(struct l2cap_conn *conn, __u8 force); int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); diff --git a/trunk/include/net/cfg80211.h b/trunk/include/net/cfg80211.h index 493fa0c79005..3d254e10ff30 100644 --- a/trunk/include/net/cfg80211.h +++ b/trunk/include/net/cfg80211.h @@ -96,6 +96,7 @@ enum ieee80211_band { * is not permitted. * @IEEE80211_CHAN_NO_HT40MINUS: extension channel below this channel * is not permitted. + * @IEEE80211_CHAN_NO_OFDM: OFDM is not allowed on this channel. 
*/ enum ieee80211_channel_flags { IEEE80211_CHAN_DISABLED = 1<<0, @@ -104,6 +105,7 @@ enum ieee80211_channel_flags { IEEE80211_CHAN_RADAR = 1<<3, IEEE80211_CHAN_NO_HT40PLUS = 1<<4, IEEE80211_CHAN_NO_HT40MINUS = 1<<5, + IEEE80211_CHAN_NO_OFDM = 1<<6, }; #define IEEE80211_CHAN_NO_HT40 \ diff --git a/trunk/include/net/codel.h b/trunk/include/net/codel.h index 550debfc2403..389cf621161d 100644 --- a/trunk/include/net/codel.h +++ b/trunk/include/net/codel.h @@ -305,6 +305,8 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, } } } else if (drop) { + u32 delta; + if (params->ecn && INET_ECN_set_ce(skb)) { stats->ecn_mark++; } else { @@ -320,9 +322,11 @@ static struct sk_buff *codel_dequeue(struct Qdisc *sch, * assume that the drop rate that controlled the queue on the * last cycle is a good starting point to control it now. */ - if (codel_time_before(now - vars->drop_next, + delta = vars->count - vars->lastcount; + if (delta > 1 && + codel_time_before(now - vars->drop_next, 16 * params->interval)) { - vars->count = (vars->count - vars->lastcount) | 1; + vars->count = delta; /* we dont care if rec_inv_sqrt approximation * is not very precise : * Next Newton steps will correct it quadratically. diff --git a/trunk/include/net/dst.h b/trunk/include/net/dst.h index baf597890064..621e3513ef5e 100644 --- a/trunk/include/net/dst.h +++ b/trunk/include/net/dst.h @@ -110,7 +110,7 @@ struct dst_entry { }; extern u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old); -extern const u32 dst_default_metrics[RTAX_MAX]; +extern const u32 dst_default_metrics[]; #define DST_METRICS_READ_ONLY 0x1UL #define __DST_METRICS_PTR(Y) \ diff --git a/trunk/include/net/inet_connection_sock.h b/trunk/include/net/inet_connection_sock.h index 5ee66f517b4f..ba1d3615acbb 100644 --- a/trunk/include/net/inet_connection_sock.h +++ b/trunk/include/net/inet_connection_sock.h @@ -39,6 +39,7 @@ struct inet_connection_sock_af_ops { int (*queue_xmit)(struct sk_buff *skb, struct flowi *fl); void (*send_check)(struct sock *sk, struct sk_buff *skb); int (*rebuild_header)(struct sock *sk); + void (*sk_rx_dst_set)(struct sock *sk, const struct sk_buff *skb); int (*conn_request)(struct sock *sk, struct sk_buff *skb); struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, struct request_sock *req, diff --git a/trunk/include/net/inet_sock.h b/trunk/include/net/inet_sock.h index 83b567fe1941..613cfa401672 100644 --- a/trunk/include/net/inet_sock.h +++ b/trunk/include/net/inet_sock.h @@ -249,13 +249,4 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk) return flags; } -static inline void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) -{ - struct dst_entry *dst = skb_dst(skb); - - dst_hold(dst); - sk->sk_rx_dst = dst; - inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; -} - #endif /* _INET_SOCK_H */ diff --git a/trunk/include/net/ip.h b/trunk/include/net/ip.h index bd5e444a19ce..5a5d84d3d2c6 100644 --- a/trunk/include/net/ip.h +++ b/trunk/include/net/ip.h @@ -120,7 +120,7 @@ extern struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4, struct sk_buff_head *queue, struct inet_cork *cork); -extern int ip_send_skb(struct sk_buff *skb); +extern int ip_send_skb(struct net *net, struct sk_buff *skb); extern int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4); extern void ip_flush_pending_frames(struct sock *sk); extern struct sk_buff *ip_make_skb(struct sock *sk, diff --git a/trunk/include/net/ip6_fib.h b/trunk/include/net/ip6_fib.h index 0fedbd8d747a..9fc7114159e8 100644 
--- a/trunk/include/net/ip6_fib.h +++ b/trunk/include/net/ip6_fib.h @@ -111,9 +111,8 @@ struct rt6_info { struct inet6_dev *rt6i_idev; unsigned long _rt6i_peer; -#ifdef CONFIG_XFRM - u32 rt6i_flow_cache_genid; -#endif + u32 rt6i_genid; + /* more non-fragment space at head required */ unsigned short rt6i_nfheader_len; diff --git a/trunk/include/net/llc.h b/trunk/include/net/llc.h index 226c846cab08..f2d0fc570527 100644 --- a/trunk/include/net/llc.h +++ b/trunk/include/net/llc.h @@ -133,7 +133,7 @@ extern int llc_build_and_send_ui_pkt(struct llc_sap *sap, struct sk_buff *skb, extern void llc_sap_handler(struct llc_sap *sap, struct sk_buff *skb); extern void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb); -extern int llc_station_init(void); +extern void llc_station_init(void); extern void llc_station_exit(void); #ifdef CONFIG_PROC_FS diff --git a/trunk/include/net/net_namespace.h b/trunk/include/net/net_namespace.h index ae1cd6c9ba52..fd87963a0ea5 100644 --- a/trunk/include/net/net_namespace.h +++ b/trunk/include/net/net_namespace.h @@ -102,6 +102,7 @@ struct net { #endif struct netns_ipvs *ipvs; struct sock *diag_nlsk; + atomic_t rt_genid; }; @@ -300,5 +301,14 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header) } #endif +static inline int rt_genid(struct net *net) +{ + return atomic_read(&net->rt_genid); +} + +static inline void rt_genid_bump(struct net *net) +{ + atomic_inc(&net->rt_genid); +} #endif /* __NET_NET_NAMESPACE_H */ diff --git a/trunk/include/net/netfilter/nf_conntrack_ecache.h b/trunk/include/net/netfilter/nf_conntrack_ecache.h index e1ce1048fe5f..4a045cda9c60 100644 --- a/trunk/include/net/netfilter/nf_conntrack_ecache.h +++ b/trunk/include/net/netfilter/nf_conntrack_ecache.h @@ -18,6 +18,7 @@ struct nf_conntrack_ecache { u16 ctmask; /* bitmask of ct events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */ u32 pid; /* netlink pid of destroyer */ + struct timer_list timeout; }; static inline struct nf_conntrack_ecache * diff --git a/trunk/include/net/netns/ipv4.h b/trunk/include/net/netns/ipv4.h index 1474dd65c66f..eb24dbccd81e 100644 --- a/trunk/include/net/netns/ipv4.h +++ b/trunk/include/net/netns/ipv4.h @@ -65,7 +65,6 @@ struct netns_ipv4 { unsigned int sysctl_ping_group_range[2]; long sysctl_tcp_mem[3]; - atomic_t rt_genid; atomic_t dev_addr_genid; #ifdef CONFIG_IP_MROUTE diff --git a/trunk/include/net/route.h b/trunk/include/net/route.h index 776a27f1ab78..da22243d2760 100644 --- a/trunk/include/net/route.h +++ b/trunk/include/net/route.h @@ -108,7 +108,7 @@ extern struct ip_rt_acct __percpu *ip_rt_acct; struct in_device; extern int ip_rt_init(void); -extern void rt_cache_flush(struct net *net, int how); +extern void rt_cache_flush(struct net *net); extern void rt_flush_dev(struct net_device *dev); extern struct rtable *__ip_route_output_key(struct net *, struct flowi4 *flp); extern struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp, diff --git a/trunk/include/net/scm.h b/trunk/include/net/scm.h index 079d7887dac1..7dc0854f0b38 100644 --- a/trunk/include/net/scm.h +++ b/trunk/include/net/scm.h @@ -70,9 +70,11 @@ static __inline__ void scm_destroy(struct scm_cookie *scm) } static __inline__ int scm_send(struct socket *sock, struct msghdr *msg, - struct scm_cookie *scm) + struct scm_cookie *scm, bool forcecreds) { memset(scm, 0, sizeof(*scm)); + if (forcecreds) + scm_set_cred(scm, task_tgid(current), current_cred()); unix_get_peersec_dgram(sock, scm); if (msg->msg_controllen <= 0) 
return 0; diff --git a/trunk/include/net/sock.h b/trunk/include/net/sock.h index b3730239bf18..adb7da20b5a1 100644 --- a/trunk/include/net/sock.h +++ b/trunk/include/net/sock.h @@ -218,6 +218,7 @@ struct cg_proto; * @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK) * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4) * @sk_gso_max_size: Maximum GSO segment size to build + * @sk_gso_max_segs: Maximum number of GSO segments * @sk_lingertime: %SO_LINGER l_linger setting * @sk_backlog: always used with the per-socket spinlock held * @sk_callback_lock: used with the callbacks in the end of this struct @@ -338,6 +339,7 @@ struct sock { netdev_features_t sk_route_nocaps; int sk_gso_type; unsigned int sk_gso_max_size; + u16 sk_gso_max_segs; int sk_rcvlowat; unsigned long sk_lingertime; struct sk_buff_head sk_error_queue; @@ -1330,7 +1332,7 @@ static inline bool sk_wmem_schedule(struct sock *sk, int size) } static inline bool -sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, unsigned int size) +sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size) { if (!sk_has_account(sk)) return true; diff --git a/trunk/include/net/tcp.h b/trunk/include/net/tcp.h index e19124b84cd2..1f000ffe7075 100644 --- a/trunk/include/net/tcp.h +++ b/trunk/include/net/tcp.h @@ -464,6 +464,7 @@ extern int tcp_disconnect(struct sock *sk, int flags); void tcp_connect_init(struct sock *sk); void tcp_finish_connect(struct sock *sk, struct sk_buff *skb); int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size); +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb); /* From syncookies.c */ extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; diff --git a/trunk/include/net/xfrm.h b/trunk/include/net/xfrm.h index d9509eb29b80..639dd1316d37 100644 --- a/trunk/include/net/xfrm.h +++ b/trunk/include/net/xfrm.h @@ -213,6 +213,9 @@ struct xfrm_state { struct xfrm_lifetime_cur curlft; struct tasklet_hrtimer mtimer; + /* used to fix curlft->add_time when changing date */ + long saved_tmo; + /* Last used time */ unsigned long lastused; @@ -238,6 +241,7 @@ static inline struct net *xs_net(struct xfrm_state *x) /* xflags - make enum if more show up */ #define XFRM_TIME_DEFER 1 +#define XFRM_SOFT_EXPIRE 2 enum { XFRM_STATE_VOID, @@ -269,6 +273,9 @@ struct xfrm_replay { int (*check)(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq); + int (*recheck)(struct xfrm_state *x, + struct sk_buff *skb, + __be32 net_seq); void (*notify)(struct xfrm_state *x, int event); int (*overflow)(struct xfrm_state *x, struct sk_buff *skb); }; @@ -288,6 +295,8 @@ struct xfrm_policy_afinfo { struct flowi *fl, int reverse); int (*get_tos)(const struct flowi *fl); + void (*init_dst)(struct net *net, + struct xfrm_dst *dst); int (*init_path)(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len); diff --git a/trunk/include/sound/pcm.h b/trunk/include/sound/pcm.h index c75c0d1a85e2..cdca2ab1e711 100644 --- a/trunk/include/sound/pcm.h +++ b/trunk/include/sound/pcm.h @@ -1075,7 +1075,8 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max) const char *snd_pcm_format_name(snd_pcm_format_t format); /** - * Get a string naming the direction of a stream + * snd_pcm_stream_str - Get a string naming the direction of a stream + * @substream: the pcm substream instance */ static inline const char *snd_pcm_stream_str(struct snd_pcm_substream *substream) { diff --git a/trunk/include/target/target_core_backend.h b/trunk/include/target/target_core_backend.h index f1405d335a96..941c84bf1065 
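Relating to the net/net_namespace.h hunk earlier, which adds a per-namespace route generation counter: a sketch (assumption, not from the patch) of how a cached object can snapshot rt_genid() at creation and treat a later rt_genid_bump() as an invalidation signal, much as the ip6_fib.h hunk does with rt6i_genid. The struct and function names here are hypothetical.

        struct example_cache_entry {
                int genid;              /* rt_genid(net) when the entry was filled */
        };

        static void example_cache_fill(struct example_cache_entry *e, struct net *net)
        {
                e->genid = rt_genid(net);
        }

        static bool example_cache_valid(const struct example_cache_entry *e,
                                        struct net *net)
        {
                /* Any rt_genid_bump(net) since the fill makes the entry stale */
                return e->genid == rt_genid(net);
        }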
100644 --- a/trunk/include/target/target_core_backend.h +++ b/trunk/include/target/target_core_backend.h @@ -23,7 +23,9 @@ struct se_subsystem_api { struct se_device *(*create_virtdevice)(struct se_hba *, struct se_subsystem_dev *, void *); void (*free_device)(void *); - int (*transport_complete)(struct se_cmd *cmd, struct scatterlist *); + void (*transport_complete)(struct se_cmd *cmd, + struct scatterlist *, + unsigned char *); int (*parse_cdb)(struct se_cmd *cmd); ssize_t (*check_configfs_dev_params)(struct se_hba *, diff --git a/trunk/include/target/target_core_base.h b/trunk/include/target/target_core_base.h index 128ce46fa48a..5be89373ceac 100644 --- a/trunk/include/target/target_core_base.h +++ b/trunk/include/target/target_core_base.h @@ -121,6 +121,7 @@ #define SE_INQUIRY_BUF 512 #define SE_MODE_PAGE_BUF 512 +#define SE_SENSE_BUF 96 /* struct se_hba->hba_flags */ enum hba_flags_table { @@ -503,8 +504,6 @@ struct se_cmd { u32 se_ordered_id; /* Total size in bytes associated with command */ u32 data_length; - /* SCSI Presented Data Transfer Length */ - u32 cmd_spdtl; u32 residual_count; u32 orig_fe_lun; /* Persistent Reservation key */ diff --git a/trunk/include/trace/define_trace.h b/trunk/include/trace/define_trace.h index b0b4eb24d592..1905ca8dd399 100644 --- a/trunk/include/trace/define_trace.h +++ b/trunk/include/trace/define_trace.h @@ -1,5 +1,5 @@ /* - * Trace files that want to automate creationg of all tracepoints defined + * Trace files that want to automate creation of all tracepoints defined * in their file should include this file. The following are macros that the * trace file may define: * diff --git a/trunk/include/trace/events/kmem.h b/trunk/include/trace/events/kmem.h index 5f889f16b0c8..08fa27244da7 100644 --- a/trunk/include/trace/events/kmem.h +++ b/trunk/include/trace/events/kmem.h @@ -214,7 +214,7 @@ TRACE_EVENT(mm_page_alloc, TP_printk("page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s", __entry->page, - page_to_pfn(__entry->page), + __entry->page ? page_to_pfn(__entry->page) : 0, __entry->order, __entry->migratetype, show_gfp_flags(__entry->gfp_flags)) @@ -240,7 +240,7 @@ DECLARE_EVENT_CLASS(mm_page, TP_printk("page=%p pfn=%lu order=%u migratetype=%d percpu_refill=%d", __entry->page, - page_to_pfn(__entry->page), + __entry->page ? page_to_pfn(__entry->page) : 0, __entry->order, __entry->migratetype, __entry->order == 0) diff --git a/trunk/include/trace/events/sched.h b/trunk/include/trace/events/sched.h index ea7a2035456d..5a8671e8a67f 100644 --- a/trunk/include/trace/events/sched.h +++ b/trunk/include/trace/events/sched.h @@ -73,6 +73,9 @@ DECLARE_EVENT_CLASS(sched_wakeup_template, __entry->prio = p->prio; __entry->success = success; __entry->target_cpu = task_cpu(p); + ) + TP_perf_assign( + __perf_task(p); ), TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d", @@ -325,6 +328,7 @@ DECLARE_EVENT_CLASS(sched_stat_template, ) TP_perf_assign( __perf_count(delay); + __perf_task(tsk); ), TP_printk("comm=%s pid=%d delay=%Lu [ns]", diff --git a/trunk/include/trace/ftrace.h b/trunk/include/trace/ftrace.h index c6bc2faaf261..a763888a36f9 100644 --- a/trunk/include/trace/ftrace.h +++ b/trunk/include/trace/ftrace.h @@ -712,6 +712,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call #undef __perf_count #define __perf_count(c) __count = (c) +#undef __perf_task +#define __perf_task(t) __task = (t) + #undef TP_perf_assign #define TP_perf_assign(args...) 
args @@ -725,6 +728,7 @@ perf_trace_##call(void *__data, proto) \ struct ftrace_raw_##call *entry; \ struct pt_regs __regs; \ u64 __addr = 0, __count = 1; \ + struct task_struct *__task = NULL; \ struct hlist_head *head; \ int __entry_size; \ int __data_size; \ @@ -752,7 +756,7 @@ perf_trace_##call(void *__data, proto) \ \ head = this_cpu_ptr(event_call->perf_events); \ perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ - __count, &__regs, head); \ + __count, &__regs, head, __task); \ } /* diff --git a/trunk/include/xen/events.h b/trunk/include/xen/events.h index 9c641deb65d2..04399b28e821 100644 --- a/trunk/include/xen/events.h +++ b/trunk/include/xen/events.h @@ -58,8 +58,6 @@ void notify_remote_via_irq(int irq); void xen_irq_resume(void); -void xen_hvm_prepare_kexec(struct shared_info *sip, unsigned long pfn); - /* Clear an irq's pending state, in preparation for polling on it */ void xen_clear_irq_pending(int irq); void xen_set_irq_pending(int irq); diff --git a/trunk/include/xen/grant_table.h b/trunk/include/xen/grant_table.h index 11e27c3af3cb..f19fff8650e9 100644 --- a/trunk/include/xen/grant_table.h +++ b/trunk/include/xen/grant_table.h @@ -187,6 +187,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count); int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops, - struct page **pages, unsigned int count, bool clear_pte); + struct gnttab_map_grant_ref *kunmap_ops, + struct page **pages, unsigned int count); #endif /* __ASM_GNTTAB_H__ */ diff --git a/trunk/init/Kconfig b/trunk/init/Kconfig index af6c7f8ba019..3466a6e017b7 100644 --- a/trunk/init/Kconfig +++ b/trunk/init/Kconfig @@ -267,6 +267,106 @@ config POSIX_MQUEUE_SYSCTL depends on SYSCTL default y +config FHANDLE + bool "open by fhandle syscalls" + select EXPORTFS + help + If you say Y here, a user level program will be able to map + file names to handle and then later use the handle for + different file system operations. This is useful in implementing + userspace file servers, which now track files using handles instead + of names. The handle would remain the same even if file names + get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2) + syscalls. + +config AUDIT + bool "Auditing support" + depends on NET + help + Enable auditing infrastructure that can be used with another + kernel subsystem, such as SELinux (which requires this for + logging of avc messages output). Does not do system-call + auditing without CONFIG_AUDITSYSCALL. + +config AUDITSYSCALL + bool "Enable system-call auditing support" + depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT)) + default y if SECURITY_SELINUX + help + Enable low-overhead system-call auditing infrastructure that + can be used independently or with another kernel subsystem, + such as SELinux. + +config AUDIT_WATCH + def_bool y + depends on AUDITSYSCALL + select FSNOTIFY + +config AUDIT_TREE + def_bool y + depends on AUDITSYSCALL + select FSNOTIFY + +config AUDIT_LOGINUID_IMMUTABLE + bool "Make audit loginuid immutable" + depends on AUDIT + help + The config option toggles if a task setting its loginuid requires + CAP_SYS_AUDITCONTROL or if that task should require no special permissions + but should instead only allow setting its loginuid if it was never + previously set. On systems which use systemd or a similar central + process to restart login services this should be set to true. 
On older + systems in which an admin would typically have to directly stop and + start processes this should be set to false. Setting this to true allows + one to drop potentially dangerous capabilites from the login tasks, + but may not be backwards compatible with older init systems. + +source "kernel/irq/Kconfig" +source "kernel/time/Kconfig" + +menu "CPU/Task time and stats accounting" + +choice + prompt "Cputime accounting" + default TICK_CPU_ACCOUNTING if !PPC64 + default VIRT_CPU_ACCOUNTING if PPC64 + +# Kind of a stub config for the pure tick based cputime accounting +config TICK_CPU_ACCOUNTING + bool "Simple tick based cputime accounting" + depends on !S390 + help + This is the basic tick based cputime accounting that maintains + statistics about user, system and idle time spent on per jiffies + granularity. + + If unsure, say Y. + +config VIRT_CPU_ACCOUNTING + bool "Deterministic task and CPU time accounting" + depends on HAVE_VIRT_CPU_ACCOUNTING + help + Select this option to enable more accurate task and CPU time + accounting. This is done by reading a CPU counter on each + kernel entry and exit and on transitions within the kernel + between system, softirq and hardirq state, so there is a + small performance impact. In the case of s390 or IBM POWER > 5, + this also enables accounting of stolen time on logically-partitioned + systems. + +config IRQ_TIME_ACCOUNTING + bool "Fine granularity task level IRQ time accounting" + depends on HAVE_IRQ_TIME_ACCOUNTING + help + Select this option to enable fine granularity task irq time + accounting. This is done by reading a timestamp on each + transitions between softirq and hardirq state, so there can be a + small performance impact. + + If in doubt, say N here. + +endchoice + config BSD_PROCESS_ACCT bool "BSD Process Accounting" help @@ -292,18 +392,6 @@ config BSD_PROCESS_ACCT_V3 for processing it. A preliminary version of these tools is available at . -config FHANDLE - bool "open by fhandle syscalls" - select EXPORTFS - help - If you say Y here, a user level program will be able to map - file names to handle and then later use the handle for - different file system operations. This is useful in implementing - userspace file servers, which now track files using handles instead - of names. The handle would remain the same even if file names - get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2) - syscalls. - config TASKSTATS bool "Export task/process statistics through netlink (EXPERIMENTAL)" depends on NET @@ -346,50 +434,7 @@ config TASK_IO_ACCOUNTING Say N if unsure. -config AUDIT - bool "Auditing support" - depends on NET - help - Enable auditing infrastructure that can be used with another - kernel subsystem, such as SELinux (which requires this for - logging of avc messages output). Does not do system-call - auditing without CONFIG_AUDITSYSCALL. - -config AUDITSYSCALL - bool "Enable system-call auditing support" - depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || (ARM && AEABI && !OABI_COMPAT)) - default y if SECURITY_SELINUX - help - Enable low-overhead system-call auditing infrastructure that - can be used independently or with another kernel subsystem, - such as SELinux. 
- -config AUDIT_WATCH - def_bool y - depends on AUDITSYSCALL - select FSNOTIFY - -config AUDIT_TREE - def_bool y - depends on AUDITSYSCALL - select FSNOTIFY - -config AUDIT_LOGINUID_IMMUTABLE - bool "Make audit loginuid immutable" - depends on AUDIT - help - The config option toggles if a task setting its loginuid requires - CAP_SYS_AUDITCONTROL or if that task should require no special permissions - but should instead only allow setting its loginuid if it was never - previously set. On systems which use systemd or a similar central - process to restart login services this should be set to true. On older - systems in which an admin would typically have to directly stop and - start processes this should be set to false. Setting this to true allows - one to drop potentially dangerous capabilites from the login tasks, - but may not be backwards compatible with older init systems. - -source "kernel/irq/Kconfig" -source "kernel/time/Kconfig" +endmenu # "CPU/Task time and stats accounting" menu "RCU Subsystem" @@ -441,6 +486,24 @@ config PREEMPT_RCU This option enables preemptible-RCU code that is common between the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations. +config RCU_USER_QS + bool "Consider userspace as in RCU extended quiescent state" + depends on HAVE_RCU_USER_QS && SMP + help + This option sets hooks on kernel / userspace boundaries and + puts RCU in extended quiescent state when the CPU runs in + userspace. It means that when a CPU runs in userspace, it is + excluded from the global RCU state machine and thus doesn't + to keep the timer tick on for RCU. + +config RCU_USER_QS_FORCE + bool "Force userspace extended QS by default" + depends on RCU_USER_QS + help + Set the hooks in user/kernel boundaries by default in order to + test this feature that treats userspace as an extended quiescent + state until we have a real user like a full adaptive nohz option. 
+ config RCU_FANOUT int "Tree-based hierarchical RCU fanout value" range 2 64 if 64BIT diff --git a/trunk/init/main.c b/trunk/init/main.c index e60679de61c3..db34c0ec4711 100644 --- a/trunk/init/main.c +++ b/trunk/init/main.c @@ -461,10 +461,6 @@ static void __init mm_init(void) percpu_init_late(); pgtable_cache_init(); vmalloc_init(); -#ifdef CONFIG_X86 - if (efi_enabled) - efi_enter_virtual_mode(); -#endif } asmlinkage void __init start_kernel(void) @@ -606,6 +602,10 @@ asmlinkage void __init start_kernel(void) calibrate_delay(); pidmap_init(); anon_vma_init(); +#ifdef CONFIG_X86 + if (efi_enabled) + efi_enter_virtual_mode(); +#endif thread_info_cache_init(); cred_init(); fork_init(totalram_pages); @@ -631,6 +631,11 @@ asmlinkage void __init start_kernel(void) acpi_early_init(); /* before LAPIC and SMP init */ sfi_init_late(); + if (efi_enabled) { + efi_late_init(); + efi_free_boot_services(); + } + ftrace_init(); /* Do the rest non-__init'ed, we're now alive */ diff --git a/trunk/ipc/mqueue.c b/trunk/ipc/mqueue.c index f8e54f5b9080..9a08acc9e649 100644 --- a/trunk/ipc/mqueue.c +++ b/trunk/ipc/mqueue.c @@ -726,7 +726,6 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, struct mq_attr *attr) { const struct cred *cred = current_cred(); - struct file *result; int ret; if (attr) { @@ -748,21 +747,11 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, } mode &= ~current_umask(); - ret = mnt_want_write(path->mnt); - if (ret) - return ERR_PTR(ret); ret = vfs_create(dir, path->dentry, mode, true); path->dentry->d_fsdata = NULL; - if (!ret) - result = dentry_open(path, oflag, cred); - else - result = ERR_PTR(ret); - /* - * dentry_open() took a persistent mnt_want_write(), - * so we can now drop this one. - */ - mnt_drop_write(path->mnt); - return result; + if (ret) + return ERR_PTR(ret); + return dentry_open(path, oflag, cred); } /* Opens existing queue */ @@ -788,7 +777,9 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, struct mq_attr attr; int fd, error; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; - struct dentry *root = ipc_ns->mq_mnt->mnt_root; + struct vfsmount *mnt = ipc_ns->mq_mnt; + struct dentry *root = mnt->mnt_root; + int ro; if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) return -EFAULT; @@ -802,6 +793,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, if (fd < 0) goto out_putname; + ro = mnt_want_write(mnt); /* we'll drop it in any case */ error = 0; mutex_lock(&root->d_inode->i_mutex); path.dentry = lookup_one_len(name, root, strlen(name)); @@ -809,7 +801,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, error = PTR_ERR(path.dentry); goto out_putfd; } - path.mnt = mntget(ipc_ns->mq_mnt); + path.mnt = mntget(mnt); if (oflag & O_CREAT) { if (path.dentry->d_inode) { /* entry already exists */ @@ -820,6 +812,10 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, } filp = do_open(&path, oflag); } else { + if (ro) { + error = ro; + goto out; + } filp = do_create(ipc_ns, root->d_inode, &path, oflag, mode, u_attr ? 
&attr : NULL); @@ -845,6 +841,7 @@ SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, fd = error; } mutex_unlock(&root->d_inode->i_mutex); + mnt_drop_write(mnt); out_putname: putname(name); return fd; @@ -857,40 +854,38 @@ SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) struct dentry *dentry; struct inode *inode = NULL; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + struct vfsmount *mnt = ipc_ns->mq_mnt; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); - mutex_lock_nested(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex, - I_MUTEX_PARENT); - dentry = lookup_one_len(name, ipc_ns->mq_mnt->mnt_root, strlen(name)); + err = mnt_want_write(mnt); + if (err) + goto out_name; + mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); + dentry = lookup_one_len(name, mnt->mnt_root, strlen(name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } - if (!dentry->d_inode) { - err = -ENOENT; - goto out_err; - } - inode = dentry->d_inode; - if (inode) + if (!inode) { + err = -ENOENT; + } else { ihold(inode); - err = mnt_want_write(ipc_ns->mq_mnt); - if (err) - goto out_err; - err = vfs_unlink(dentry->d_parent->d_inode, dentry); - mnt_drop_write(ipc_ns->mq_mnt); -out_err: + err = vfs_unlink(dentry->d_parent->d_inode, dentry); + } dput(dentry); out_unlock: - mutex_unlock(&ipc_ns->mq_mnt->mnt_root->d_inode->i_mutex); - putname(name); + mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); if (inode) iput(inode); + mnt_drop_write(mnt); +out_name: + putname(name); return err; } diff --git a/trunk/kernel/Kconfig.locks b/trunk/kernel/Kconfig.locks index 2251882daf53..44511d100eaa 100644 --- a/trunk/kernel/Kconfig.locks +++ b/trunk/kernel/Kconfig.locks @@ -87,6 +87,9 @@ config ARCH_INLINE_WRITE_UNLOCK_IRQ config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE bool +config UNINLINE_SPIN_UNLOCK + bool + # # lock_* functions are inlined when: # - DEBUG_SPINLOCK=n and GENERIC_LOCKBREAK=n and ARCH_INLINE_*LOCK=y @@ -103,100 +106,120 @@ config ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE # - DEBUG_SPINLOCK=n and ARCH_INLINE_*LOCK=y # +if !DEBUG_SPINLOCK + config INLINE_SPIN_TRYLOCK - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK + def_bool y + depends on ARCH_INLINE_SPIN_TRYLOCK config INLINE_SPIN_TRYLOCK_BH - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_TRYLOCK_BH + def_bool y + depends on ARCH_INLINE_SPIN_TRYLOCK_BH config INLINE_SPIN_LOCK - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK config INLINE_SPIN_LOCK_BH - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_SPIN_LOCK_BH + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_BH config INLINE_SPIN_LOCK_IRQ - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_SPIN_LOCK_IRQ + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQ config INLINE_SPIN_LOCK_IRQSAVE - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_SPIN_LOCK_IRQSAVE - -config UNINLINE_SPIN_UNLOCK - bool + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_SPIN_LOCK_IRQSAVE config INLINE_SPIN_UNLOCK_BH - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_SPIN_UNLOCK_BH + def_bool y + depends on ARCH_INLINE_SPIN_UNLOCK_BH config INLINE_SPIN_UNLOCK_IRQ - def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH) + def_bool y + depends on !PREEMPT || ARCH_INLINE_SPIN_UNLOCK_BH config INLINE_SPIN_UNLOCK_IRQRESTORE - def_bool !DEBUG_SPINLOCK && 
ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE + def_bool y + depends on ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE config INLINE_READ_TRYLOCK - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_TRYLOCK + def_bool y + depends on ARCH_INLINE_READ_TRYLOCK config INLINE_READ_LOCK - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK config INLINE_READ_LOCK_BH - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_READ_LOCK_BH + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_BH config INLINE_READ_LOCK_IRQ - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_READ_LOCK_IRQ + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQ config INLINE_READ_LOCK_IRQSAVE - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_READ_LOCK_IRQSAVE + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_READ_LOCK_IRQSAVE config INLINE_READ_UNLOCK - def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK) + def_bool y + depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK config INLINE_READ_UNLOCK_BH - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_BH + def_bool y + depends on ARCH_INLINE_READ_UNLOCK_BH config INLINE_READ_UNLOCK_IRQ - def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_READ_UNLOCK_BH) + def_bool y + depends on !PREEMPT || ARCH_INLINE_READ_UNLOCK_BH config INLINE_READ_UNLOCK_IRQRESTORE - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_READ_UNLOCK_IRQRESTORE + def_bool y + depends on ARCH_INLINE_READ_UNLOCK_IRQRESTORE config INLINE_WRITE_TRYLOCK - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_TRYLOCK + def_bool y + depends on ARCH_INLINE_WRITE_TRYLOCK config INLINE_WRITE_LOCK - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK config INLINE_WRITE_LOCK_BH - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_WRITE_LOCK_BH + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_BH config INLINE_WRITE_LOCK_IRQ - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_WRITE_LOCK_IRQ + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQ config INLINE_WRITE_LOCK_IRQSAVE - def_bool !DEBUG_SPINLOCK && !GENERIC_LOCKBREAK && \ - ARCH_INLINE_WRITE_LOCK_IRQSAVE + def_bool y + depends on !GENERIC_LOCKBREAK && ARCH_INLINE_WRITE_LOCK_IRQSAVE config INLINE_WRITE_UNLOCK - def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK) + def_bool y + depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK config INLINE_WRITE_UNLOCK_BH - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_BH + def_bool y + depends on ARCH_INLINE_WRITE_UNLOCK_BH config INLINE_WRITE_UNLOCK_IRQ - def_bool !DEBUG_SPINLOCK && (!PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH) + def_bool y + depends on !PREEMPT || ARCH_INLINE_WRITE_UNLOCK_BH config INLINE_WRITE_UNLOCK_IRQRESTORE - def_bool !DEBUG_SPINLOCK && ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + def_bool y + depends on ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE + +endif config MUTEX_SPIN_ON_OWNER - def_bool SMP && !DEBUG_MUTEXES + def_bool y + depends on SMP && !DEBUG_MUTEXES diff --git a/trunk/kernel/Makefile b/trunk/kernel/Makefile index c0cc67ad764c..5404911eaee9 100644 --- a/trunk/kernel/Makefile +++ b/trunk/kernel/Makefile @@ -10,7 +10,7 @@ obj-y = fork.o exec_domain.o panic.o printk.o \ kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ 
notifier.o ksysfs.o cred.o \ - async.o range.o groups.o lglock.o + async.o range.o groups.o lglock.o smpboot.o ifdef CONFIG_FUNCTION_TRACER # Do not trace debug files and internal ftrace files @@ -46,7 +46,6 @@ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SMP) += smpboot.o ifneq ($(CONFIG_SMP),y) obj-y += up.o endif @@ -98,7 +97,7 @@ obj-$(CONFIG_COMPAT_BINFMT_ELF) += elfcore.o obj-$(CONFIG_BINFMT_ELF_FDPIC) += elfcore.o obj-$(CONFIG_FUNCTION_TRACER) += trace/ obj-$(CONFIG_TRACING) += trace/ -obj-$(CONFIG_X86_DS) += trace/ +obj-$(CONFIG_TRACE_CLOCK) += trace/ obj-$(CONFIG_RING_BUFFER) += trace/ obj-$(CONFIG_TRACEPOINTS) += trace/ obj-$(CONFIG_IRQ_WORK) += irq_work.o diff --git a/trunk/kernel/audit_tree.c b/trunk/kernel/audit_tree.c index 3a5ca582ba1e..ed206fd88cca 100644 --- a/trunk/kernel/audit_tree.c +++ b/trunk/kernel/audit_tree.c @@ -250,7 +250,6 @@ static void untag_chunk(struct node *p) spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); - fsnotify_put_mark(entry); goto out; } @@ -259,7 +258,7 @@ static void untag_chunk(struct node *p) fsnotify_duplicate_mark(&new->mark, entry); if (fsnotify_add_mark(&new->mark, new->mark.group, new->mark.i.inode, NULL, 1)) { - free_chunk(new); + fsnotify_put_mark(&new->mark); goto Fallback; } @@ -293,7 +292,7 @@ static void untag_chunk(struct node *p) spin_unlock(&hash_lock); spin_unlock(&entry->lock); fsnotify_destroy_mark(entry); - fsnotify_put_mark(entry); + fsnotify_put_mark(&new->mark); /* drop initial reference */ goto out; Fallback: @@ -322,7 +321,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) entry = &chunk->mark; if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) { - free_chunk(chunk); + fsnotify_put_mark(entry); return -ENOSPC; } @@ -347,6 +346,7 @@ static int create_chunk(struct inode *inode, struct audit_tree *tree) insert_hash(chunk); spin_unlock(&hash_lock); spin_unlock(&entry->lock); + fsnotify_put_mark(entry); /* drop initial reference */ return 0; } @@ -396,7 +396,7 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) fsnotify_duplicate_mark(chunk_entry, old_entry); if (fsnotify_add_mark(chunk_entry, chunk_entry->group, chunk_entry->i.inode, NULL, 1)) { spin_unlock(&old_entry->lock); - free_chunk(chunk); + fsnotify_put_mark(chunk_entry); fsnotify_put_mark(old_entry); return -ENOSPC; } @@ -444,8 +444,8 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree) spin_unlock(&chunk_entry->lock); spin_unlock(&old_entry->lock); fsnotify_destroy_mark(old_entry); + fsnotify_put_mark(chunk_entry); /* drop initial reference */ fsnotify_put_mark(old_entry); /* pair to fsnotify_find mark_entry */ - fsnotify_put_mark(old_entry); /* and kill it */ return 0; } @@ -916,7 +916,12 @@ static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark); evict_chunk(chunk); - fsnotify_put_mark(entry); + + /* + * We are guaranteed to have at least one reference to the mark from + * either the inode or the caller of fsnotify_destroy_mark(). 
+ */ + BUG_ON(atomic_read(&entry->refcnt) < 1); } static bool audit_tree_send_event(struct fsnotify_group *group, struct inode *inode, diff --git a/trunk/kernel/cpu.c b/trunk/kernel/cpu.c index 14d32588cccd..f560598807c1 100644 --- a/trunk/kernel/cpu.c +++ b/trunk/kernel/cpu.c @@ -280,12 +280,13 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) __func__, cpu); goto out_release; } + smpboot_park_threads(cpu); err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu)); if (err) { /* CPU didn't die: tell everyone. Can't complain. */ + smpboot_unpark_threads(cpu); cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu); - goto out_release; } BUG_ON(cpu_online(cpu)); @@ -354,6 +355,10 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) goto out; } + ret = smpboot_create_threads(cpu); + if (ret) + goto out; + ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls); if (ret) { nr_calls--; @@ -368,6 +373,9 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) goto out_notify; BUG_ON(!cpu_online(cpu)); + /* Wake the per cpu threads */ + smpboot_unpark_threads(cpu); + /* Now call notifier in preparation. */ cpu_notify(CPU_ONLINE | mod, hcpu); @@ -439,14 +447,6 @@ EXPORT_SYMBOL_GPL(cpu_up); #ifdef CONFIG_PM_SLEEP_SMP static cpumask_var_t frozen_cpus; -void __weak arch_disable_nonboot_cpus_begin(void) -{ -} - -void __weak arch_disable_nonboot_cpus_end(void) -{ -} - int disable_nonboot_cpus(void) { int cpu, first_cpu, error = 0; @@ -458,7 +458,6 @@ int disable_nonboot_cpus(void) * with the userspace trying to use the CPU hotplug at the same time */ cpumask_clear(frozen_cpus); - arch_disable_nonboot_cpus_begin(); printk("Disabling non-boot CPUs ...\n"); for_each_online_cpu(cpu) { @@ -474,8 +473,6 @@ int disable_nonboot_cpus(void) } } - arch_disable_nonboot_cpus_end(); - if (!error) { BUG_ON(num_online_cpus() > 1); /* Make sure the CPUs won't be enabled by someone else */ diff --git a/trunk/kernel/debug/kdb/kdb_debugger.c b/trunk/kernel/debug/kdb/kdb_debugger.c index 8b68ce78ff17..be7b33b73d30 100644 --- a/trunk/kernel/debug/kdb/kdb_debugger.c +++ b/trunk/kernel/debug/kdb/kdb_debugger.c @@ -12,6 +12,7 @@ #include #include #include +#include #include "kdb_private.h" #include "../debug_core.h" @@ -52,6 +53,9 @@ int kdb_stub(struct kgdb_state *ks) if (atomic_read(&kgdb_setting_breakpoint)) reason = KDB_REASON_KEYBOARD; + if (in_nmi()) + reason = KDB_REASON_NMI; + for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) { if ((bp->bp_enabled) && (bp->bp_addr == addr)) { reason = KDB_REASON_BREAK; diff --git a/trunk/kernel/debug/kdb/kdb_io.c b/trunk/kernel/debug/kdb/kdb_io.c index bb9520f0f6ff..0a69d2adc4f3 100644 --- a/trunk/kernel/debug/kdb/kdb_io.c +++ b/trunk/kernel/debug/kdb/kdb_io.c @@ -715,9 +715,6 @@ int vkdb_printf(const char *fmt, va_list ap) /* check for having reached the LINES number of printed lines */ if (kdb_nextline == linecount) { char buf1[16] = ""; -#if defined(CONFIG_SMP) - char buf2[32]; -#endif /* Watch out for recursion here. Any routine that calls * kdb_printf will come back through here. 
And kdb_read @@ -732,14 +729,6 @@ int vkdb_printf(const char *fmt, va_list ap) if (moreprompt == NULL) moreprompt = "more> "; -#if defined(CONFIG_SMP) - if (strchr(moreprompt, '%')) { - sprintf(buf2, moreprompt, get_cpu()); - put_cpu(); - moreprompt = buf2; - } -#endif - kdb_input_flush(); c = console_drivers; diff --git a/trunk/kernel/debug/kdb/kdb_main.c b/trunk/kernel/debug/kdb/kdb_main.c index 1f91413edb87..31df1706b9a9 100644 --- a/trunk/kernel/debug/kdb/kdb_main.c +++ b/trunk/kernel/debug/kdb/kdb_main.c @@ -139,11 +139,10 @@ static const int __nkdb_err = sizeof(kdbmsgs) / sizeof(kdbmsg_t); static char *__env[] = { #if defined(CONFIG_SMP) "PROMPT=[%d]kdb> ", - "MOREPROMPT=[%d]more> ", #else "PROMPT=kdb> ", - "MOREPROMPT=more> ", #endif + "MOREPROMPT=more> ", "RADIX=16", "MDCOUNT=8", /* lines of md output */ KDB_PLATFORM_ENV, @@ -1236,18 +1235,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, *cmdbuf = '\0'; *(cmd_hist[cmd_head]) = '\0'; - if (KDB_FLAG(ONLY_DO_DUMP)) { - /* kdb is off but a catastrophic error requires a dump. - * Take the dump and reboot. - * Turn on logging so the kdb output appears in the log - * buffer in the dump. - */ - const char *setargs[] = { "set", "LOGGING", "1" }; - kdb_set(2, setargs); - kdb_reboot(0, NULL); - /*NOTREACHED*/ - } - do_full_getstr: #if defined(CONFIG_SMP) snprintf(kdb_prompt_str, CMD_BUFLEN, kdbgetenv("PROMPT"), diff --git a/trunk/kernel/events/callchain.c b/trunk/kernel/events/callchain.c index 6581a040f399..c77206184b8b 100644 --- a/trunk/kernel/events/callchain.c +++ b/trunk/kernel/events/callchain.c @@ -153,11 +153,17 @@ put_callchain_entry(int rctx) put_recursion_context(__get_cpu_var(callchain_recursion), rctx); } -struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +struct perf_callchain_entry * +perf_callchain(struct perf_event *event, struct pt_regs *regs) { int rctx; struct perf_callchain_entry *entry; + int kernel = !event->attr.exclude_callchain_kernel; + int user = !event->attr.exclude_callchain_user; + + if (!kernel && !user) + return NULL; entry = get_callchain_entry(&rctx); if (rctx == -1) @@ -168,18 +174,29 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) entry->nr = 0; - if (!user_mode(regs)) { + if (kernel && !user_mode(regs)) { perf_callchain_store(entry, PERF_CONTEXT_KERNEL); perf_callchain_kernel(entry, regs); - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; } - if (regs) { - perf_callchain_store(entry, PERF_CONTEXT_USER); - perf_callchain_user(entry, regs); + if (user) { + if (!user_mode(regs)) { + if (current->mm) + regs = task_pt_regs(current); + else + regs = NULL; + } + + if (regs) { + /* + * Disallow cross-task user callchains. 
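Relating to the perf_callchain() change above: a user-space style sketch (an assumption, not part of the patch) of requesting user-only call chains through the new exclude_callchain_kernel attribute bit that the kernel side now honors.

        #include <linux/perf_event.h>
        #include <string.h>

        static void example_attr_user_callchains(struct perf_event_attr *attr)
        {
                memset(attr, 0, sizeof(*attr));
                attr->size = sizeof(*attr);
                attr->sample_type = PERF_SAMPLE_CALLCHAIN;
                attr->exclude_callchain_kernel = 1;     /* keep kernel frames out */
        }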
+ */ + if (event->ctx->task && event->ctx->task != current) + goto exit_put; + + perf_callchain_store(entry, PERF_CONTEXT_USER); + perf_callchain_user(entry, regs); + } } exit_put: diff --git a/trunk/kernel/events/core.c b/trunk/kernel/events/core.c index f1cf0edeb39a..7b9df353ba1b 100644 --- a/trunk/kernel/events/core.c +++ b/trunk/kernel/events/core.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "internal.h" @@ -1253,7 +1254,7 @@ static void perf_remove_from_context(struct perf_event *event) /* * Cross CPU call to disable a performance event */ -static int __perf_event_disable(void *info) +int __perf_event_disable(void *info) { struct perf_event *event = info; struct perf_event_context *ctx = event->ctx; @@ -2935,12 +2936,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel); /* * Called when the last reference to the file is gone. */ -static int perf_release(struct inode *inode, struct file *file) +static void put_event(struct perf_event *event) { - struct perf_event *event = file->private_data; struct task_struct *owner; - file->private_data = NULL; + if (!atomic_long_dec_and_test(&event->refcount)) + return; rcu_read_lock(); owner = ACCESS_ONCE(event->owner); @@ -2975,7 +2976,13 @@ static int perf_release(struct inode *inode, struct file *file) put_task_struct(owner); } - return perf_event_release_kernel(event); + perf_event_release_kernel(event); +} + +static int perf_release(struct inode *inode, struct file *file) +{ + put_event(file->private_data); + return 0; } u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) @@ -3227,7 +3234,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) static const struct file_operations perf_fops; -static struct perf_event *perf_fget_light(int fd, int *fput_needed) +static struct file *perf_fget_light(int fd, int *fput_needed) { struct file *file; @@ -3241,7 +3248,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed) return ERR_PTR(-EBADF); } - return file->private_data; + return file; } static int perf_event_set_output(struct perf_event *event, @@ -3273,19 +3280,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) case PERF_EVENT_IOC_SET_OUTPUT: { + struct file *output_file = NULL; struct perf_event *output_event = NULL; int fput_needed = 0; int ret; if (arg != -1) { - output_event = perf_fget_light(arg, &fput_needed); - if (IS_ERR(output_event)) - return PTR_ERR(output_event); + output_file = perf_fget_light(arg, &fput_needed); + if (IS_ERR(output_file)) + return PTR_ERR(output_file); + output_event = output_file->private_data; } ret = perf_event_set_output(event, output_event); if (output_event) - fput_light(output_event->filp, fput_needed); + fput_light(output_file, fput_needed); return ret; } @@ -3756,6 +3765,132 @@ int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs) } EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks); +static void +perf_output_sample_regs(struct perf_output_handle *handle, + struct pt_regs *regs, u64 mask) +{ + int bit; + + for_each_set_bit(bit, (const unsigned long *) &mask, + sizeof(mask) * BITS_PER_BYTE) { + u64 val; + + val = perf_reg_value(regs, bit); + perf_output_put(handle, val); + } +} + +static void perf_sample_regs_user(struct perf_regs_user *regs_user, + struct pt_regs *regs) +{ + if (!user_mode(regs)) { + if (current->mm) + regs = task_pt_regs(current); + else + regs = NULL; + } + + if (regs) { + regs_user->regs = regs; + regs_user->abi = perf_reg_abi(current); + } 
+} + +/* + * Get remaining task size from user stack pointer. + * + * It'd be better to take stack vma map and limit this more + * precisly, but there's no way to get it safely under interrupt, + * so using TASK_SIZE as limit. + */ +static u64 perf_ustack_task_size(struct pt_regs *regs) +{ + unsigned long addr = perf_user_stack_pointer(regs); + + if (!addr || addr >= TASK_SIZE) + return 0; + + return TASK_SIZE - addr; +} + +static u16 +perf_sample_ustack_size(u16 stack_size, u16 header_size, + struct pt_regs *regs) +{ + u64 task_size; + + /* No regs, no stack pointer, no dump. */ + if (!regs) + return 0; + + /* + * Check if we fit in with the requested stack size into the: + * - TASK_SIZE + * If we don't, we limit the size to the TASK_SIZE. + * + * - remaining sample size + * If we don't, we customize the stack size to + * fit in to the remaining sample size. + */ + + task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs)); + stack_size = min(stack_size, (u16) task_size); + + /* Current header size plus static size and dynamic size. */ + header_size += 2 * sizeof(u64); + + /* Do we fit in with the current stack dump size? */ + if ((u16) (header_size + stack_size) < header_size) { + /* + * If we overflow the maximum size for the sample, + * we customize the stack dump size to fit in. + */ + stack_size = USHRT_MAX - header_size - sizeof(u64); + stack_size = round_up(stack_size, sizeof(u64)); + } + + return stack_size; +} + +static void +perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size, + struct pt_regs *regs) +{ + /* Case of a kernel thread, nothing to dump */ + if (!regs) { + u64 size = 0; + perf_output_put(handle, size); + } else { + unsigned long sp; + unsigned int rem; + u64 dyn_size; + + /* + * We dump: + * static size + * - the size requested by user or the best one we can fit + * in to the sample max size + * data + * - user stack dump data + * dynamic size + * - the actual dumped size + */ + + /* Static size. */ + perf_output_put(handle, dump_size); + + /* Data. */ + sp = perf_user_stack_pointer(regs); + rem = __output_copy_user(handle, (void *) sp, dump_size); + dyn_size = dump_size - rem; + + perf_output_skip(handle, rem); + + /* Dynamic size. */ + perf_output_put(handle, dyn_size); + } +} + static void __perf_event_header__init_id(struct perf_event_header *header, struct perf_sample_data *data, struct perf_event *event) @@ -4016,6 +4151,28 @@ void perf_output_sample(struct perf_output_handle *handle, perf_output_put(handle, nr); } } + + if (sample_type & PERF_SAMPLE_REGS_USER) { + u64 abi = data->regs_user.abi; + + /* + * If there are no regs to dump, notice it through + * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE). 
+ */ + perf_output_put(handle, abi); + + if (abi) { + u64 mask = event->attr.sample_regs_user; + perf_output_sample_regs(handle, + data->regs_user.regs, + mask); + } + } + + if (sample_type & PERF_SAMPLE_STACK_USER) + perf_output_sample_ustack(handle, + data->stack_user_size, + data->regs_user.regs); } void perf_prepare_sample(struct perf_event_header *header, @@ -4039,7 +4196,7 @@ void perf_prepare_sample(struct perf_event_header *header, if (sample_type & PERF_SAMPLE_CALLCHAIN) { int size = 1; - data->callchain = perf_callchain(regs); + data->callchain = perf_callchain(event, regs); if (data->callchain) size += data->callchain->nr; @@ -4067,6 +4224,49 @@ void perf_prepare_sample(struct perf_event_header *header, } header->size += size; } + + if (sample_type & PERF_SAMPLE_REGS_USER) { + /* regs dump ABI info */ + int size = sizeof(u64); + + perf_sample_regs_user(&data->regs_user, regs); + + if (data->regs_user.regs) { + u64 mask = event->attr.sample_regs_user; + size += hweight64(mask) * sizeof(u64); + } + + header->size += size; + } + + if (sample_type & PERF_SAMPLE_STACK_USER) { + /* + * Either we need PERF_SAMPLE_STACK_USER bit to be allways + * processed as the last one or have additional check added + * in case new sample type is added, because we could eat + * up the rest of the sample size. + */ + struct perf_regs_user *uregs = &data->regs_user; + u16 stack_size = event->attr.sample_stack_user; + u16 size = sizeof(u64); + + if (!uregs->abi) + perf_sample_regs_user(uregs, regs); + + stack_size = perf_sample_ustack_size(stack_size, header->size, + uregs->regs); + + /* + * If there is something to dump, add space for the dump + * itself and for the field that tells the dynamic size, + * which is how many have been actually dumped. + */ + if (stack_size) + size += sizeof(u64) + stack_size; + + data->stack_user_size = stack_size; + header->size += size; + } } static void perf_event_output(struct perf_event *event, @@ -5209,7 +5409,8 @@ static int perf_tp_event_match(struct perf_event *event, } void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, - struct pt_regs *regs, struct hlist_head *head, int rctx) + struct pt_regs *regs, struct hlist_head *head, int rctx, + struct task_struct *task) { struct perf_sample_data data; struct perf_event *event; @@ -5228,6 +5429,31 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, perf_swevent_event(event, count, &data, regs); } + /* + * If we got specified a target task, also iterate its context and + * deliver this event there too. 
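For readers of the two output hunks above, the layout a consumer sees in the raw sample is: one u64 ABI word (zero meaning PERF_SAMPLE_REGS_ABI_NONE), one u64 per bit set in attr.sample_regs_user, then a u64 static dump size, that many bytes of user stack, and a trailing u64 with the bytes actually copied. A hedged userspace sketch of walking those two trailers; the buffer built in main() is fabricated test data, not a real ring-buffer record:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Popcount of the register mask requested in attr.sample_regs_user. */
static int mask_weight(uint64_t mask)
{
    int n = 0;

    while (mask) {
        n += mask & 1;
        mask >>= 1;
    }
    return n;
}

static uint64_t read_u64(const unsigned char **p)
{
    uint64_t v;

    memcpy(&v, *p, sizeof(v));
    *p += sizeof(v);
    return v;
}

/*
 * Walk the PERF_SAMPLE_REGS_USER and PERF_SAMPLE_STACK_USER trailers of
 * one sample record, in the order perf_output_sample() emits them.
 */
static void parse_user_regs_and_stack(const unsigned char *p, uint64_t regs_mask)
{
    uint64_t abi = read_u64(&p);            /* 0 == PERF_SAMPLE_REGS_ABI_NONE */

    if (abi) {
        int i, nr = mask_weight(regs_mask);

        for (i = 0; i < nr; i++)
            printf("reg[%d] = %#llx\n", i,
                   (unsigned long long)read_u64(&p));
    }

    uint64_t size = read_u64(&p);           /* static dump size       */
    p += size;                              /* the dumped stack bytes */
    uint64_t dyn_size = read_u64(&p);       /* bytes actually copied  */

    printf("stack: %llu of %llu bytes were really dumped\n",
           (unsigned long long)dyn_size, (unsigned long long)size);
}

int main(void)
{
    unsigned char buf[64];
    unsigned char *q = buf;
    uint64_t v;

    v = 1;  memcpy(q, &v, 8); q += 8;       /* abi != ABI_NONE   */
    v = 11; memcpy(q, &v, 8); q += 8;       /* reg 0             */
    v = 22; memcpy(q, &v, 8); q += 8;       /* reg 1             */
    v = 16; memcpy(q, &v, 8); q += 8;       /* static dump size  */
    memset(q, 0, 16); q += 16;              /* stack bytes       */
    v = 8;  memcpy(q, &v, 8); q += 8;       /* dynamic size      */

    parse_user_regs_and_stack(buf, 0x3);    /* two registers requested */
    return 0;
}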
+ */ + if (task && task != current) { + struct perf_event_context *ctx; + struct trace_entry *entry = record; + + rcu_read_lock(); + ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); + if (!ctx) + goto unlock; + + list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { + if (event->attr.type != PERF_TYPE_TRACEPOINT) + continue; + if (event->attr.config != entry->type) + continue; + if (perf_tp_event_match(event, &data, regs)) + perf_swevent_event(event, count, &data, regs); + } +unlock: + rcu_read_unlock(); + } + perf_swevent_put_recursion_context(rctx); } EXPORT_SYMBOL_GPL(perf_tp_event); @@ -5924,6 +6150,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, mutex_init(&event->mmap_mutex); + atomic_long_set(&event->refcount, 1); event->cpu = cpu; event->attr = *attr; event->group_leader = group_leader; @@ -6116,6 +6343,28 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, attr->branch_sample_type = mask; } } + + if (attr->sample_type & PERF_SAMPLE_REGS_USER) { + ret = perf_reg_validate(attr->sample_regs_user); + if (ret) + return ret; + } + + if (attr->sample_type & PERF_SAMPLE_STACK_USER) { + if (!arch_perf_have_user_stack_dump()) + return -ENOSYS; + + /* + * We have __u32 type for the size, but so far + * we can only use __u16 as maximum due to the + * __u16 sample size limit. + */ + if (attr->sample_stack_user >= USHRT_MAX) + ret = -EINVAL; + else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) + ret = -EINVAL; + } + out: return ret; @@ -6234,12 +6483,12 @@ SYSCALL_DEFINE5(perf_event_open, return event_fd; if (group_fd != -1) { - group_leader = perf_fget_light(group_fd, &fput_needed); - if (IS_ERR(group_leader)) { - err = PTR_ERR(group_leader); + group_file = perf_fget_light(group_fd, &fput_needed); + if (IS_ERR(group_file)) { + err = PTR_ERR(group_file); goto err_fd; } - group_file = group_leader->filp; + group_leader = group_file->private_data; if (flags & PERF_FLAG_FD_OUTPUT) output_event = group_leader; if (flags & PERF_FLAG_FD_NO_GROUP) @@ -6376,7 +6625,6 @@ SYSCALL_DEFINE5(perf_event_open, put_ctx(gctx); } - event->filp = event_file; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); @@ -6470,7 +6718,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu, goto err_free; } - event->filp = NULL; WARN_ON_ONCE(ctx->parent_ctx); mutex_lock(&ctx->mutex); perf_install_in_context(ctx, event, cpu); @@ -6552,7 +6799,7 @@ static void sync_child_event(struct perf_event *child_event, * Release the parent event, if this was the last * reference to it. */ - fput(parent_event->filp); + put_event(parent_event); } static void @@ -6628,9 +6875,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn) * * __perf_event_exit_task() * sync_child_event() - * fput(parent_event->filp) - * perf_release() - * mutex_lock(&ctx->mutex) + * put_event() + * mutex_lock(&ctx->mutex) * * But since its the parent context it won't be the same instance. 
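The perf_copy_attr() checks above define the user-visible contract for the two new sample types: sample_regs_user must pass perf_reg_validate() and sample_stack_user must stay below USHRT_MAX and be 8-byte aligned. A hedged userspace sketch of requesting both from perf_event_open(); it assumes headers from a kernel that already carries these fields, and SAMPLE_REGS_MASK is a placeholder to be replaced with real bits from asm/perf_regs.h for the target architecture:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Placeholder: pick real bits from <asm/perf_regs.h> for your architecture. */
#define SAMPLE_REGS_MASK  0x3fULL

int main(void)
{
    struct perf_event_attr attr;
    int fd;

    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;
    attr.config = PERF_COUNT_HW_CPU_CYCLES;
    attr.sample_period = 100000;
    attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER |
                       PERF_SAMPLE_STACK_USER;
    attr.sample_regs_user = SAMPLE_REGS_MASK;   /* which user regs to dump     */
    attr.sample_stack_user = 8192;              /* < USHRT_MAX, 8-byte aligned */
    attr.exclude_kernel = 1;

    fd = syscall(__NR_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */,
                 -1 /* no group */, 0 /* flags */);
    if (fd < 0)
        perror("perf_event_open");
    else
        close(fd);
    return 0;
}

Reading the records back also requires mmap()ing the event's ring buffer, which is left out of this sketch.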
*/ @@ -6698,7 +6944,7 @@ static void perf_free_event(struct perf_event *event, list_del_init(&event->child_list); mutex_unlock(&parent->child_mutex); - fput(parent->filp); + put_event(parent); perf_group_detach(event); list_del_event(event, ctx); @@ -6778,6 +7024,12 @@ inherit_event(struct perf_event *parent_event, NULL, NULL); if (IS_ERR(child_event)) return child_event; + + if (!atomic_long_inc_not_zero(&parent_event->refcount)) { + free_event(child_event); + return NULL; + } + get_ctx(child_ctx); /* @@ -6818,14 +7070,6 @@ inherit_event(struct perf_event *parent_event, add_event_to_ctx(child_event, child_ctx); raw_spin_unlock_irqrestore(&child_ctx->lock, flags); - /* - * Get a reference to the parent filp - we will fput it - * when the child event exits. This is safe to do because - * we are in the parent and we know that the filp still - * exists and has a nonzero count: - */ - atomic_long_inc(&parent_event->filp->f_count); - /* * Link this into the parent event's child list */ diff --git a/trunk/kernel/events/hw_breakpoint.c b/trunk/kernel/events/hw_breakpoint.c index bb38c4d3ee12..9a7b487c6fe2 100644 --- a/trunk/kernel/events/hw_breakpoint.c +++ b/trunk/kernel/events/hw_breakpoint.c @@ -453,7 +453,16 @@ int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *att int old_type = bp->attr.bp_type; int err = 0; - perf_event_disable(bp); + /* + * modify_user_hw_breakpoint can be invoked with IRQs disabled and hence it + * will not be possible to raise IPIs that invoke __perf_event_disable. + * So call the function directly after making sure we are targeting the + * current task. + */ + if (irqs_disabled() && bp->ctx && bp->ctx->task == current) + __perf_event_disable(bp); + else + perf_event_disable(bp); bp->attr.bp_addr = attr->bp_addr; bp->attr.bp_type = attr->bp_type; diff --git a/trunk/kernel/events/internal.h b/trunk/kernel/events/internal.h index b0b107f90afc..d56a64c99a8b 100644 --- a/trunk/kernel/events/internal.h +++ b/trunk/kernel/events/internal.h @@ -2,6 +2,7 @@ #define _KERNEL_EVENTS_INTERNAL_H #include +#include /* Buffer handling */ @@ -76,32 +77,56 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb) return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); } -static inline void -__output_copy(struct perf_output_handle *handle, - const void *buf, unsigned int len) +#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \ +static inline unsigned int \ +func_name(struct perf_output_handle *handle, \ + const void *buf, unsigned int len) \ +{ \ + unsigned long size, written; \ + \ + do { \ + size = min_t(unsigned long, handle->size, len); \ + \ + written = memcpy_func(handle->addr, buf, size); \ + \ + len -= written; \ + handle->addr += written; \ + buf += written; \ + handle->size -= written; \ + if (!handle->size) { \ + struct ring_buffer *rb = handle->rb; \ + \ + handle->page++; \ + handle->page &= rb->nr_pages - 1; \ + handle->addr = rb->data_pages[handle->page]; \ + handle->size = PAGE_SIZE << page_order(rb); \ + } \ + } while (len && written == size); \ + \ + return len; \ +} + +static inline int memcpy_common(void *dst, const void *src, size_t n) { - do { - unsigned long size = min_t(unsigned long, handle->size, len); - - memcpy(handle->addr, buf, size); - - len -= size; - handle->addr += size; - buf += size; - handle->size -= size; - if (!handle->size) { - struct ring_buffer *rb = handle->rb; - - handle->page++; - handle->page &= rb->nr_pages - 1; - handle->addr = rb->data_pages[handle->page]; - handle->size = PAGE_SIZE << page_order(rb); - 
} - } while (len); + memcpy(dst, src, n); + return n; } +DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) + +#define MEMCPY_SKIP(dst, src, n) (n) + +DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP) + +#ifndef arch_perf_out_copy_user +#define arch_perf_out_copy_user __copy_from_user_inatomic +#endif + +DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user) + /* Callchain handling */ -extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); +extern struct perf_callchain_entry * +perf_callchain(struct perf_event *event, struct pt_regs *regs); extern int get_callchain_buffers(void); extern void put_callchain_buffers(void); @@ -133,4 +158,20 @@ static inline void put_recursion_context(int *recursion, int rctx) recursion[rctx]--; } +#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP +static inline bool arch_perf_have_user_stack_dump(void) +{ + return true; +} + +#define perf_user_stack_pointer(regs) user_stack_pointer(regs) +#else +static inline bool arch_perf_have_user_stack_dump(void) +{ + return false; +} + +#define perf_user_stack_pointer(regs) 0 +#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */ + #endif /* _KERNEL_EVENTS_INTERNAL_H */ diff --git a/trunk/kernel/events/ring_buffer.c b/trunk/kernel/events/ring_buffer.c index 6ddaba43fb7a..23cb34ff3973 100644 --- a/trunk/kernel/events/ring_buffer.c +++ b/trunk/kernel/events/ring_buffer.c @@ -182,10 +182,16 @@ int perf_output_begin(struct perf_output_handle *handle, return -ENOSPC; } -void perf_output_copy(struct perf_output_handle *handle, +unsigned int perf_output_copy(struct perf_output_handle *handle, const void *buf, unsigned int len) { - __output_copy(handle, buf, len); + return __output_copy(handle, buf, len); +} + +unsigned int perf_output_skip(struct perf_output_handle *handle, + unsigned int len) +{ + return __output_skip(handle, NULL, len); } void perf_output_end(struct perf_output_handle *handle) diff --git a/trunk/kernel/events/uprobes.c b/trunk/kernel/events/uprobes.c index c08a22d02f72..912ef48d28ab 100644 --- a/trunk/kernel/events/uprobes.c +++ b/trunk/kernel/events/uprobes.c @@ -280,12 +280,10 @@ static int read_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_ if (ret <= 0) return ret; - lock_page(page); vaddr_new = kmap_atomic(page); vaddr &= ~PAGE_MASK; memcpy(opcode, vaddr_new + vaddr, UPROBE_SWBP_INSN_SIZE); kunmap_atomic(vaddr_new); - unlock_page(page); put_page(page); @@ -334,7 +332,7 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned */ result = is_swbp_at_addr(mm, vaddr); if (result == 1) - return -EEXIST; + return 0; if (result) return result; @@ -347,24 +345,22 @@ int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned * @mm: the probed process address space. * @auprobe: arch specific probepoint information. * @vaddr: the virtual address to insert the opcode. - * @verify: if true, verify existance of breakpoint instruction. * * For mm @mm, restore the original opcode (opcode) at @vaddr. * Return 0 (success) or a negative errno. 
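DEFINE_OUTPUT_COPY above stamps out three variants of one chunked-copy loop by swapping the inner step: a plain memcpy, a skip that only advances the handle, and a copy-from-user step. A userspace sketch of the same idea over a flat buffer carved into fixed-size chunks; every name here is illustrative and the chunking merely stands in for the ring buffer's page walk:

#include <stdio.h>
#include <string.h>

#define CHUNK 8

struct out_handle {
    char *buf;        /* backing store, a multiple of CHUNK bytes */
    unsigned int pos; /* write position                           */
    unsigned int len; /* total capacity                           */
};

/*
 * Generate a copy routine whose inner step is pluggable; copy_step must
 * return the number of bytes it consumed (cf. DEFINE_OUTPUT_COPY above).
 */
#define DEFINE_COPY_FN(name, copy_step)                                    \
static unsigned int name(struct out_handle *h, const void *buf,           \
                         unsigned int len)                                 \
{                                                                          \
    while (len && h->pos < h->len) {                                       \
        unsigned int room = CHUNK - (h->pos % CHUNK);                      \
        unsigned int size = len < room ? len : room;                       \
        unsigned int done = copy_step(h->buf + h->pos, buf, size);         \
                                                                           \
        len -= done;                                                       \
        h->pos += done;                                                    \
        buf = (const char *)buf + done;                                    \
        if (done != size)                                                  \
            break;                                                         \
    }                                                                      \
    return len; /* bytes we could not place */                             \
}

static unsigned int step_memcpy(void *dst, const void *src, unsigned int n)
{
    memcpy(dst, src, n);
    return n;
}

/* Advance without writing anything, like __output_skip(). */
#define step_skip(dst, src, n) (n)

DEFINE_COPY_FN(output_copy, step_memcpy)
DEFINE_COPY_FN(output_skip, step_skip)

int main(void)
{
    char backing[32] = { 0 };
    struct out_handle h = { backing, 0, sizeof(backing) };

    output_copy(&h, "hello", 5);
    output_skip(&h, backing, 3);    /* leave a 3-byte hole, src is ignored */
    output_copy(&h, "world", 5);
    printf("%.5s...%.5s (pos=%u)\n", backing, backing + 8, h.pos);
    return 0;
}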
*/ int __weak -set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, bool verify) +set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr) { - if (verify) { - int result; + int result; + + result = is_swbp_at_addr(mm, vaddr); + if (!result) + return -EINVAL; - result = is_swbp_at_addr(mm, vaddr); - if (!result) - return -EINVAL; + if (result != 1) + return result; - if (result != 1) - return result; - } return write_opcode(auprobe, mm, vaddr, *(uprobe_opcode_t *)auprobe->insn); } @@ -415,11 +411,10 @@ static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset) static struct uprobe *find_uprobe(struct inode *inode, loff_t offset) { struct uprobe *uprobe; - unsigned long flags; - spin_lock_irqsave(&uprobes_treelock, flags); + spin_lock(&uprobes_treelock); uprobe = __find_uprobe(inode, offset); - spin_unlock_irqrestore(&uprobes_treelock, flags); + spin_unlock(&uprobes_treelock); return uprobe; } @@ -466,12 +461,11 @@ static struct uprobe *__insert_uprobe(struct uprobe *uprobe) */ static struct uprobe *insert_uprobe(struct uprobe *uprobe) { - unsigned long flags; struct uprobe *u; - spin_lock_irqsave(&uprobes_treelock, flags); + spin_lock(&uprobes_treelock); u = __insert_uprobe(uprobe); - spin_unlock_irqrestore(&uprobes_treelock, flags); + spin_unlock(&uprobes_treelock); /* For now assume that the instruction need not be single-stepped */ uprobe->flags |= UPROBE_SKIP_SSTEP; @@ -649,6 +643,7 @@ static int install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long vaddr) { + bool first_uprobe; int ret; /* @@ -659,7 +654,7 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, * Hence behave as if probe already existed. */ if (!uprobe->consumers) - return -EEXIST; + return 0; if (!(uprobe->flags & UPROBE_COPY_INSN)) { ret = copy_insn(uprobe, vma->vm_file); @@ -681,17 +676,18 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, } /* - * Ideally, should be updating the probe count after the breakpoint - * has been successfully inserted. However a thread could hit the - * breakpoint we just inserted even before the probe count is - * incremented. If this is the first breakpoint placed, breakpoint - * notifier might ignore uprobes and pass the trap to the thread. - * Hence increment before and decrement on failure. + * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(), + * the task can hit this breakpoint right after __replace_page(). 
*/ - atomic_inc(&mm->uprobes_state.count); + first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags); + if (first_uprobe) + set_bit(MMF_HAS_UPROBES, &mm->flags); + ret = set_swbp(&uprobe->arch, mm, vaddr); - if (ret) - atomic_dec(&mm->uprobes_state.count); + if (!ret) + clear_bit(MMF_RECALC_UPROBES, &mm->flags); + else if (first_uprobe) + clear_bit(MMF_HAS_UPROBES, &mm->flags); return ret; } @@ -699,8 +695,12 @@ install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, static void remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr) { - if (!set_orig_insn(&uprobe->arch, mm, vaddr, true)) - atomic_dec(&mm->uprobes_state.count); + /* can happen if uprobe_register() fails */ + if (!test_bit(MMF_HAS_UPROBES, &mm->flags)) + return; + + set_bit(MMF_RECALC_UPROBES, &mm->flags); + set_orig_insn(&uprobe->arch, mm, vaddr); } /* @@ -710,11 +710,9 @@ remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vad */ static void delete_uprobe(struct uprobe *uprobe) { - unsigned long flags; - - spin_lock_irqsave(&uprobes_treelock, flags); + spin_lock(&uprobes_treelock); rb_erase(&uprobe->rb_node, &uprobes_tree); - spin_unlock_irqrestore(&uprobes_treelock, flags); + spin_unlock(&uprobes_treelock); iput(uprobe->inode); put_uprobe(uprobe); atomic_dec(&uprobe_events); @@ -831,17 +829,11 @@ static int register_for_each_vma(struct uprobe *uprobe, bool is_register) vaddr_to_offset(vma, info->vaddr) != uprobe->offset) goto unlock; - if (is_register) { + if (is_register) err = install_breakpoint(uprobe, mm, vma, info->vaddr); - /* - * We can race against uprobe_mmap(), see the - * comment near uprobe_hash(). - */ - if (err == -EEXIST) - err = 0; - } else { + else remove_breakpoint(uprobe, mm, info->vaddr); - } + unlock: up_write(&mm->mmap_sem); free: @@ -908,7 +900,8 @@ int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer * } mutex_unlock(uprobes_hash(inode)); - put_uprobe(uprobe); + if (uprobe) + put_uprobe(uprobe); return ret; } @@ -978,7 +971,6 @@ static void build_probe_list(struct inode *inode, struct list_head *head) { loff_t min, max; - unsigned long flags; struct rb_node *n, *t; struct uprobe *u; @@ -986,7 +978,7 @@ static void build_probe_list(struct inode *inode, min = vaddr_to_offset(vma, start); max = min + (end - start) - 1; - spin_lock_irqsave(&uprobes_treelock, flags); + spin_lock(&uprobes_treelock); n = find_node_in_range(inode, min, max); if (n) { for (t = n; t; t = rb_prev(t)) { @@ -1004,27 +996,20 @@ static void build_probe_list(struct inode *inode, atomic_inc(&u->ref); } } - spin_unlock_irqrestore(&uprobes_treelock, flags); + spin_unlock(&uprobes_treelock); } /* - * Called from mmap_region. - * called with mm->mmap_sem acquired. + * Called from mmap_region/vma_adjust with mm->mmap_sem acquired. * - * Return -ve no if we fail to insert probes and we cannot - * bail-out. - * Return 0 otherwise. i.e: - * - * - successful insertion of probes - * - (or) no possible probes to be inserted. - * - (or) insertion of probes failed but we can bail-out. + * Currently we ignore all errors and always return 0, the callers + * can't handle the failure anyway. 
*/ int uprobe_mmap(struct vm_area_struct *vma) { struct list_head tmp_list; struct uprobe *uprobe, *u; struct inode *inode; - int ret, count; if (!atomic_read(&uprobe_events) || !valid_vma(vma, true)) return 0; @@ -1036,44 +1021,35 @@ int uprobe_mmap(struct vm_area_struct *vma) mutex_lock(uprobes_mmap_hash(inode)); build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list); - ret = 0; - count = 0; - list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { - if (!ret) { + if (!fatal_signal_pending(current)) { unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); - - ret = install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); - /* - * We can race against uprobe_register(), see the - * comment near uprobe_hash(). - */ - if (ret == -EEXIST) { - ret = 0; - - if (!is_swbp_at_addr(vma->vm_mm, vaddr)) - continue; - - /* - * Unable to insert a breakpoint, but - * breakpoint lies underneath. Increment the - * probe count. - */ - atomic_inc(&vma->vm_mm->uprobes_state.count); - } - - if (!ret) - count++; + install_breakpoint(uprobe, vma->vm_mm, vma, vaddr); } put_uprobe(uprobe); } - mutex_unlock(uprobes_mmap_hash(inode)); - if (ret) - atomic_sub(count, &vma->vm_mm->uprobes_state.count); + return 0; +} - return ret; +static bool +vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end) +{ + loff_t min, max; + struct inode *inode; + struct rb_node *n; + + inode = vma->vm_file->f_mapping->host; + + min = vaddr_to_offset(vma, start); + max = min + (end - start) - 1; + + spin_lock(&uprobes_treelock); + n = find_node_in_range(inode, min, max); + spin_unlock(&uprobes_treelock); + + return !!n; } /* @@ -1081,37 +1057,18 @@ int uprobe_mmap(struct vm_area_struct *vma) */ void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - struct list_head tmp_list; - struct uprobe *uprobe, *u; - struct inode *inode; - if (!atomic_read(&uprobe_events) || !valid_vma(vma, false)) return; if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */ return; - if (!atomic_read(&vma->vm_mm->uprobes_state.count)) - return; - - inode = vma->vm_file->f_mapping->host; - if (!inode) + if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) || + test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags)) return; - mutex_lock(uprobes_mmap_hash(inode)); - build_probe_list(inode, vma, start, end, &tmp_list); - - list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) { - unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset); - /* - * An unregister could have removed the probe before - * unmap. So check before we decrement the count. - */ - if (is_swbp_at_addr(vma->vm_mm, vaddr) == 1) - atomic_dec(&vma->vm_mm->uprobes_state.count); - put_uprobe(uprobe); - } - mutex_unlock(uprobes_mmap_hash(inode)); + if (vma_has_uprobes(vma, start, end)) + set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags); } /* Slot allocation for XOL */ @@ -1213,13 +1170,15 @@ void uprobe_clear_state(struct mm_struct *mm) kfree(area); } -/* - * uprobe_reset_state - Free the area allocated for slots. 
- */ -void uprobe_reset_state(struct mm_struct *mm) +void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm) { - mm->uprobes_state.xol_area = NULL; - atomic_set(&mm->uprobes_state.count, 0); + newmm->uprobes_state.xol_area = NULL; + + if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) { + set_bit(MMF_HAS_UPROBES, &newmm->flags); + /* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */ + set_bit(MMF_RECALC_UPROBES, &newmm->flags); + } } /* @@ -1437,6 +1396,25 @@ static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs) return false; } +static void mmf_recalc_uprobes(struct mm_struct *mm) +{ + struct vm_area_struct *vma; + + for (vma = mm->mmap; vma; vma = vma->vm_next) { + if (!valid_vma(vma, false)) + continue; + /* + * This is not strictly accurate, we can race with + * uprobe_unregister() and see the already removed + * uprobe if delete_uprobe() was not yet called. + */ + if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end)) + return; + } + + clear_bit(MMF_HAS_UPROBES, &mm->flags); +} + static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) { struct mm_struct *mm = current->mm; @@ -1458,11 +1436,24 @@ static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp) } else { *is_swbp = -EFAULT; } + + if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags)) + mmf_recalc_uprobes(mm); up_read(&mm->mmap_sem); return uprobe; } +void __weak arch_uprobe_enable_step(struct arch_uprobe *arch) +{ + user_enable_single_step(current); +} + +void __weak arch_uprobe_disable_step(struct arch_uprobe *arch) +{ + user_disable_single_step(current); +} + /* * Run handler and ask thread to singlestep. * Ensure all non-fatal signals cannot interrupt thread while it singlesteps. @@ -1509,7 +1500,7 @@ static void handle_swbp(struct pt_regs *regs) utask->state = UTASK_SSTEP; if (!pre_ssout(uprobe, regs, bp_vaddr)) { - user_enable_single_step(current); + arch_uprobe_enable_step(&uprobe->arch); return; } @@ -1518,17 +1509,15 @@ static void handle_swbp(struct pt_regs *regs) utask->active_uprobe = NULL; utask->state = UTASK_RUNNING; } - if (uprobe) { - if (!(uprobe->flags & UPROBE_SKIP_SSTEP)) + if (!(uprobe->flags & UPROBE_SKIP_SSTEP)) - /* - * cannot singlestep; cannot skip instruction; - * re-execute the instruction. - */ - instruction_pointer_set(regs, bp_vaddr); + /* + * cannot singlestep; cannot skip instruction; + * re-execute the instruction. 
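The uprobes rework in the last few hunks trades the per-mm breakpoint counter for two bits: MMF_HAS_UPROBES, a conservative hint set before the first breakpoint becomes visible, and MMF_RECALC_UPROBES, set whenever an unmap or a failed insert may have made that hint stale so it can be recomputed lazily on the next trap (mmf_recalc_uprobes() above). A small userspace sketch of that hint-plus-lazy-recalculation pattern; the flag names and the region counter are stand-ins for the real mm state:

#include <stdbool.h>
#include <stdio.h>

#define HAS_PROBES    (1u << 0)  /* conservative hint: may contain probes */
#define RECALC_PROBES (1u << 1)  /* hint may be stale, recheck lazily     */

struct fake_mm {
    unsigned int flags;
    int nr_probed_regions;   /* stand-in for walking the VMA list */
};

static void install_probe(struct fake_mm *mm)
{
    /* Publish the hint before the probe becomes visible. */
    mm->flags |= HAS_PROBES;
    mm->nr_probed_regions++;
}

static void unmap_probed_region(struct fake_mm *mm)
{
    mm->nr_probed_regions--;
    /* Don't clear HAS_PROBES here; just mark it for recalculation. */
    mm->flags |= RECALC_PROBES;
}

/* Called on the slow path (a breakpoint trap), cf. mmf_recalc_uprobes(). */
static bool mm_may_have_probes(struct fake_mm *mm)
{
    if (mm->flags & RECALC_PROBES) {
        mm->flags &= ~RECALC_PROBES;
        if (mm->nr_probed_regions == 0)
            mm->flags &= ~HAS_PROBES;
    }
    return mm->flags & HAS_PROBES;
}

int main(void)
{
    struct fake_mm mm = { 0, 0 };

    install_probe(&mm);
    printf("after install: %d\n", mm_may_have_probes(&mm));   /* 1 */
    unmap_probed_region(&mm);
    printf("after unmap:   %d\n", mm_may_have_probes(&mm));   /* 0 */
    return 0;
}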
+ */ + instruction_pointer_set(regs, bp_vaddr); - put_uprobe(uprobe); - } + put_uprobe(uprobe); } /* @@ -1547,10 +1536,10 @@ static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs) else WARN_ON_ONCE(1); + arch_uprobe_disable_step(&uprobe->arch); put_uprobe(uprobe); utask->active_uprobe = NULL; utask->state = UTASK_RUNNING; - user_disable_single_step(current); xol_free_insn_slot(current); spin_lock_irq(¤t->sighand->siglock); @@ -1589,8 +1578,7 @@ int uprobe_pre_sstep_notifier(struct pt_regs *regs) { struct uprobe_task *utask; - if (!current->mm || !atomic_read(¤t->mm->uprobes_state.count)) - /* task is currently not uprobed */ + if (!current->mm || !test_bit(MMF_HAS_UPROBES, ¤t->mm->flags)) return 0; utask = current->utask; diff --git a/trunk/kernel/fork.c b/trunk/kernel/fork.c index 3bd2280d79f6..5a0e74d89a5a 100644 --- a/trunk/kernel/fork.c +++ b/trunk/kernel/fork.c @@ -353,6 +353,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) down_write(&oldmm->mmap_sem); flush_cache_dup_mm(oldmm); + uprobe_dup_mmap(oldmm, mm); /* * Not linked in yet - no deadlock potential: */ @@ -454,9 +455,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) if (retval) goto out; - - if (file && uprobe_mmap(tmp)) - goto out; } /* a new mm has just been created */ arch_dup_mmap(oldmm, mm); @@ -839,8 +837,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk) #ifdef CONFIG_TRANSPARENT_HUGEPAGE mm->pmd_huge_pte = NULL; #endif - uprobe_reset_state(mm); - if (!mm_init(mm, tsk)) goto fail_nomem; @@ -1280,11 +1276,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, #endif #ifdef CONFIG_TRACE_IRQFLAGS p->irq_events = 0; -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - p->hardirqs_enabled = 1; -#else p->hardirqs_enabled = 0; -#endif p->hardirq_enable_ip = 0; p->hardirq_enable_event = 0; p->hardirq_disable_ip = _THIS_IP_; diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c index e2b0fb9a0b3b..3717e7b306e0 100644 --- a/trunk/kernel/futex.c +++ b/trunk/kernel/futex.c @@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, * @uaddr2: the pi futex we will take prior to returning to user-space * * The caller will wait on uaddr and will be requeued by futex_requeue() to - * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and - * complete the acquisition of the rt_mutex prior to returning to userspace. - * This ensures the rt_mutex maintains an owner when it has waiters; without - * one, the pi logic wouldn't know which task to boost/deboost, if there was a - * need to. + * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake + * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to + * userspace. This ensures the rt_mutex maintains an owner when it has waiters; + * without one, the pi logic would not know which task to boost/deboost, if + * there was a need to. * * We call schedule in futex_wait_queue_me() when we enqueue and return there * via the following: @@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, struct futex_q q = futex_q_init; int res, ret; + if (uaddr == uaddr2) + return -EINVAL; + if (!bitset) return -EINVAL; @@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * signal. futex_unlock_pi() will not destroy the lock_ptr nor * the pi_state. 
*/ - WARN_ON(!&q.pi_state); + WARN_ON(!q.pi_state); pi_mutex = &q.pi_state->pi_mutex; ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1); debug_rt_mutex_free_waiter(&rt_waiter); @@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * fault, unlock the rt_mutex and return the fault to userspace. */ if (ret == -EFAULT) { - if (rt_mutex_owner(pi_mutex) == current) + if (pi_mutex && rt_mutex_owner(pi_mutex) == current) rt_mutex_unlock(pi_mutex); } else if (ret == -EINTR) { /* diff --git a/trunk/kernel/irq/chip.c b/trunk/kernel/irq/chip.c index eebd6d5cfb44..57d86d07221e 100644 --- a/trunk/kernel/irq/chip.c +++ b/trunk/kernel/irq/chip.c @@ -671,6 +671,7 @@ irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip, irq_set_chip(irq, chip); __irq_set_handler(irq, handle, 0, name); } +EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name); void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set) { diff --git a/trunk/kernel/irq/dummychip.c b/trunk/kernel/irq/dummychip.c index b5fcd96c7102..988dc58e8847 100644 --- a/trunk/kernel/irq/dummychip.c +++ b/trunk/kernel/irq/dummychip.c @@ -6,6 +6,7 @@ */ #include #include +#include #include "internals.h" @@ -57,3 +58,4 @@ struct irq_chip dummy_irq_chip = { .irq_mask = noop, .irq_unmask = noop, }; +EXPORT_SYMBOL_GPL(dummy_irq_chip); diff --git a/trunk/kernel/irq/manage.c b/trunk/kernel/irq/manage.c index 0a8e8f059627..4c69326aa773 100644 --- a/trunk/kernel/irq/manage.c +++ b/trunk/kernel/irq/manage.c @@ -943,6 +943,18 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) goto out_thread; } + /* + * Drivers are often written to work w/o knowledge about the + * underlying irq chip implementation, so a request for a + * threaded irq without a primary hard irq context handler + * requires the ONESHOT flag to be set. Some irq chips like + * MSI based interrupts are per se one shot safe. Check the + * chip flags, so we can avoid the unmask dance at the end of + * the threaded handler for those. + */ + if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) + new->flags &= ~IRQF_ONESHOT; + /* * The following block of code has to be executed atomically */ @@ -1017,7 +1029,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) */ new->thread_mask = 1 << ffz(thread_mask); - } else if (new->handler == irq_default_primary_handler) { + } else if (new->handler == irq_default_primary_handler && + !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { /* * The interrupt was requested with handler = NULL, so * we use the default primary handler for it. 
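The __setup_irq() change above concerns drivers that pass a NULL primary handler to request_threaded_irq(): they normally must set IRQF_ONESHOT so the line stays masked until the thread has run, and chips flagged IRQCHIP_ONESHOT_SAFE (MSI-style, one-shot by nature) now have that requirement and the final unmask dance dropped for them. A hedged driver-side sketch of the request being guarded; this is kernel code, not standalone, and the device structure and handler are made up:

#include <linux/interrupt.h>

/* Hypothetical device context and thread handler. */
struct my_dev {
    int irq;
};

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
    /* ... sleepable work for the device ... */
    return IRQ_HANDLED;
}

static int my_request_irq(struct my_dev *dev)
{
    /*
     * No primary handler: the core keeps the line masked until the
     * thread completes, so IRQF_ONESHOT is mandatory here unless the
     * irq chip advertises IRQCHIP_ONESHOT_SAFE.
     */
    return request_threaded_irq(dev->irq, NULL, my_thread_fn,
                                IRQF_ONESHOT, "my-dev", dev);
}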
But it diff --git a/trunk/kernel/kprobes.c b/trunk/kernel/kprobes.c index c62b8546cc90..098f396aa409 100644 --- a/trunk/kernel/kprobes.c +++ b/trunk/kernel/kprobes.c @@ -561,9 +561,9 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) { LIST_HEAD(free_list); + mutex_lock(&kprobe_mutex); /* Lock modules while optimizing kprobes */ mutex_lock(&module_mutex); - mutex_lock(&kprobe_mutex); /* * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed) @@ -586,8 +586,8 @@ static __kprobes void kprobe_optimizer(struct work_struct *work) /* Step 4: Free cleaned kprobes after quiesence period */ do_free_cleaned_kprobes(&free_list); - mutex_unlock(&kprobe_mutex); mutex_unlock(&module_mutex); + mutex_unlock(&kprobe_mutex); /* Step 5: Kick optimizer again if needed */ if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list)) @@ -759,20 +759,32 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p) struct kprobe *ap; struct optimized_kprobe *op; + /* Impossible to optimize ftrace-based kprobe */ + if (kprobe_ftrace(p)) + return; + + /* For preparing optimization, jump_label_text_reserved() is called */ + jump_label_lock(); + mutex_lock(&text_mutex); + ap = alloc_aggr_kprobe(p); if (!ap) - return; + goto out; op = container_of(ap, struct optimized_kprobe, kp); if (!arch_prepared_optinsn(&op->optinsn)) { /* If failed to setup optimizing, fallback to kprobe */ arch_remove_optimized_kprobe(op); kfree(op); - return; + goto out; } init_aggr_kprobe(ap, p); - optimize_kprobe(ap); + optimize_kprobe(ap); /* This just kicks optimizer thread */ + +out: + mutex_unlock(&text_mutex); + jump_label_unlock(); } #ifdef CONFIG_SYSCTL @@ -907,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p) } #endif /* CONFIG_OPTPROBES */ +#ifdef KPROBES_CAN_USE_FTRACE +static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { + .func = kprobe_ftrace_handler, + .flags = FTRACE_OPS_FL_SAVE_REGS, +}; +static int kprobe_ftrace_enabled; + +/* Must ensure p->addr is really on ftrace */ +static int __kprobes prepare_kprobe(struct kprobe *p) +{ + if (!kprobe_ftrace(p)) + return arch_prepare_kprobe(p); + + return arch_prepare_kprobe_ftrace(p); +} + +/* Caller must lock kprobe_mutex */ +static void __kprobes arm_kprobe_ftrace(struct kprobe *p) +{ + int ret; + + ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, + (unsigned long)p->addr, 0, 0); + WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret); + kprobe_ftrace_enabled++; + if (kprobe_ftrace_enabled == 1) { + ret = register_ftrace_function(&kprobe_ftrace_ops); + WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); + } +} + +/* Caller must lock kprobe_mutex */ +static void __kprobes disarm_kprobe_ftrace(struct kprobe *p) +{ + int ret; + + kprobe_ftrace_enabled--; + if (kprobe_ftrace_enabled == 0) { + ret = unregister_ftrace_function(&kprobe_ftrace_ops); + WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret); + } + ret = ftrace_set_filter_ip(&kprobe_ftrace_ops, + (unsigned long)p->addr, 1, 0); + WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret); +} +#else /* !KPROBES_CAN_USE_FTRACE */ +#define prepare_kprobe(p) arch_prepare_kprobe(p) +#define arm_kprobe_ftrace(p) do {} while (0) +#define disarm_kprobe_ftrace(p) do {} while (0) +#endif + /* Arm a kprobe with text_mutex */ static void __kprobes arm_kprobe(struct kprobe *kp) { + if (unlikely(kprobe_ftrace(kp))) { + arm_kprobe_ftrace(kp); + return; + } /* * Here, since __arm_kprobe() doesn't use stop_machine(), * this 
doesn't cause deadlock on text_mutex. So, we don't @@ -921,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp) } /* Disarm a kprobe with text_mutex */ -static void __kprobes disarm_kprobe(struct kprobe *kp) +static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt) { + if (unlikely(kprobe_ftrace(kp))) { + disarm_kprobe_ftrace(kp); + return; + } /* Ditto */ mutex_lock(&text_mutex); - __disarm_kprobe(kp, true); + __disarm_kprobe(kp, reopt); mutex_unlock(&text_mutex); } @@ -1144,12 +1215,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p) if (p->post_handler && !ap->post_handler) ap->post_handler = aggr_post_handler; - if (kprobe_disabled(ap) && !kprobe_disabled(p)) { - ap->flags &= ~KPROBE_FLAG_DISABLED; - if (!kprobes_all_disarmed) - /* Arm the breakpoint again. */ - __arm_kprobe(ap); - } return 0; } @@ -1189,11 +1254,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, int ret = 0; struct kprobe *ap = orig_p; + /* For preparing optimization, jump_label_text_reserved() is called */ + jump_label_lock(); + /* + * Get online CPUs to avoid text_mutex deadlock.with stop machine, + * which is invoked by unoptimize_kprobe() in add_new_kprobe() + */ + get_online_cpus(); + mutex_lock(&text_mutex); + if (!kprobe_aggrprobe(orig_p)) { /* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */ ap = alloc_aggr_kprobe(orig_p); - if (!ap) - return -ENOMEM; + if (!ap) { + ret = -ENOMEM; + goto out; + } init_aggr_kprobe(ap, orig_p); } else if (kprobe_unused(ap)) /* This probe is going to die. Rescue it */ @@ -1213,7 +1289,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, * free aggr_probe. It will be used next time, or * freed by unregister_kprobe. */ - return ret; + goto out; /* Prepare optimized instructions if possible. */ prepare_optimized_kprobe(ap); @@ -1228,7 +1304,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p, /* Copy ap's insn slot to p */ copy_kprobe(ap, p); - return add_new_kprobe(ap, p); + ret = add_new_kprobe(ap, p); + +out: + mutex_unlock(&text_mutex); + put_online_cpus(); + jump_label_unlock(); + + if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) { + ap->flags &= ~KPROBE_FLAG_DISABLED; + if (!kprobes_all_disarmed) + /* Arm the breakpoint again. */ + arm_kprobe(ap); + } + return ret; } static int __kprobes in_kprobes_functions(unsigned long addr) @@ -1313,71 +1402,96 @@ static inline int check_kprobe_rereg(struct kprobe *p) return ret; } -int __kprobes register_kprobe(struct kprobe *p) +static __kprobes int check_kprobe_address_safe(struct kprobe *p, + struct module **probed_mod) { int ret = 0; - struct kprobe *old_p; - struct module *probed_mod; - kprobe_opcode_t *addr; - - addr = kprobe_addr(p); - if (IS_ERR(addr)) - return PTR_ERR(addr); - p->addr = addr; + unsigned long ftrace_addr; - ret = check_kprobe_rereg(p); - if (ret) - return ret; + /* + * If the address is located on a ftrace nop, set the + * breakpoint to the following instruction. 
+ */ + ftrace_addr = ftrace_location((unsigned long)p->addr); + if (ftrace_addr) { +#ifdef KPROBES_CAN_USE_FTRACE + /* Given address is not on the instruction boundary */ + if ((unsigned long)p->addr != ftrace_addr) + return -EILSEQ; + p->flags |= KPROBE_FLAG_FTRACE; +#else /* !KPROBES_CAN_USE_FTRACE */ + return -EINVAL; +#endif + } jump_label_lock(); preempt_disable(); + + /* Ensure it is not in reserved area nor out of text */ if (!kernel_text_address((unsigned long) p->addr) || in_kprobes_functions((unsigned long) p->addr) || - ftrace_text_reserved(p->addr, p->addr) || jump_label_text_reserved(p->addr, p->addr)) { ret = -EINVAL; - goto cannot_probe; + goto out; } - /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ - p->flags &= KPROBE_FLAG_DISABLED; - - /* - * Check if are we probing a module. - */ - probed_mod = __module_text_address((unsigned long) p->addr); - if (probed_mod) { - /* Return -ENOENT if fail. */ - ret = -ENOENT; + /* Check if are we probing a module */ + *probed_mod = __module_text_address((unsigned long) p->addr); + if (*probed_mod) { /* * We must hold a refcount of the probed module while updating * its code to prohibit unexpected unloading. */ - if (unlikely(!try_module_get(probed_mod))) - goto cannot_probe; + if (unlikely(!try_module_get(*probed_mod))) { + ret = -ENOENT; + goto out; + } /* * If the module freed .init.text, we couldn't insert * kprobes in there. */ - if (within_module_init((unsigned long)p->addr, probed_mod) && - probed_mod->state != MODULE_STATE_COMING) { - module_put(probed_mod); - goto cannot_probe; + if (within_module_init((unsigned long)p->addr, *probed_mod) && + (*probed_mod)->state != MODULE_STATE_COMING) { + module_put(*probed_mod); + *probed_mod = NULL; + ret = -ENOENT; } - /* ret will be updated by following code */ } +out: preempt_enable(); jump_label_unlock(); + return ret; +} + +int __kprobes register_kprobe(struct kprobe *p) +{ + int ret; + struct kprobe *old_p; + struct module *probed_mod; + kprobe_opcode_t *addr; + + /* Adjust probe address from symbol */ + addr = kprobe_addr(p); + if (IS_ERR(addr)) + return PTR_ERR(addr); + p->addr = addr; + + ret = check_kprobe_rereg(p); + if (ret) + return ret; + + /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */ + p->flags &= KPROBE_FLAG_DISABLED; p->nmissed = 0; INIT_LIST_HEAD(&p->list); - mutex_lock(&kprobe_mutex); - jump_label_lock(); /* needed to call jump_label_text_reserved() */ + ret = check_kprobe_address_safe(p, &probed_mod); + if (ret) + return ret; - get_online_cpus(); /* For avoiding text_mutex deadlock. 
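With check_kprobe_address_safe() above, a probe that lands on an ftrace call site is tagged KPROBE_FLAG_FTRACE and armed through ftrace_set_filter_ip() rather than a breakpoint, while the module-facing API stays the same. A hedged sketch of that unchanged caller side; kernel module code, with the probed symbol and handler body chosen only for illustration:

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>

static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
    pr_info("hit %s\n", p->symbol_name);
    return 0;
}

/* Function entries are typically ftrace sites, so this may end up as a
 * KPROBE_FLAG_FTRACE probe on kernels with KPROBES_CAN_USE_FTRACE. */
static struct kprobe my_kp = {
    .symbol_name = "do_sys_open",
    .pre_handler = my_pre_handler,
};

static int __init my_init(void)
{
    return register_kprobe(&my_kp);
}

static void __exit my_exit(void)
{
    unregister_kprobe(&my_kp);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");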
*/ - mutex_lock(&text_mutex); + mutex_lock(&kprobe_mutex); old_p = get_kprobe(p->addr); if (old_p) { @@ -1386,7 +1500,9 @@ int __kprobes register_kprobe(struct kprobe *p) goto out; } - ret = arch_prepare_kprobe(p); + mutex_lock(&text_mutex); /* Avoiding text modification */ + ret = prepare_kprobe(p); + mutex_unlock(&text_mutex); if (ret) goto out; @@ -1395,26 +1511,18 @@ int __kprobes register_kprobe(struct kprobe *p) &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]); if (!kprobes_all_disarmed && !kprobe_disabled(p)) - __arm_kprobe(p); + arm_kprobe(p); /* Try to optimize kprobe */ try_to_optimize_kprobe(p); out: - mutex_unlock(&text_mutex); - put_online_cpus(); - jump_label_unlock(); mutex_unlock(&kprobe_mutex); if (probed_mod) module_put(probed_mod); return ret; - -cannot_probe: - preempt_enable(); - jump_label_unlock(); - return ret; } EXPORT_SYMBOL_GPL(register_kprobe); @@ -1451,7 +1559,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p) /* Try to disarm and disable this/parent probe */ if (p == orig_p || aggr_kprobe_disabled(orig_p)) { - disarm_kprobe(orig_p); + disarm_kprobe(orig_p, true); orig_p->flags |= KPROBE_FLAG_DISABLED; } } @@ -2049,10 +2157,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p, if (!pp) pp = p; - seq_printf(pi, "%s%s%s\n", + seq_printf(pi, "%s%s%s%s\n", (kprobe_gone(p) ? "[GONE]" : ""), ((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""), - (kprobe_optimized(pp) ? "[OPTIMIZED]" : "")); + (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""), + (kprobe_ftrace(pp) ? "[FTRACE]" : "")); } static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos) @@ -2131,14 +2240,12 @@ static void __kprobes arm_all_kprobes(void) goto already_enabled; /* Arming kprobes doesn't optimize kprobe itself */ - mutex_lock(&text_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry_rcu(p, node, head, hlist) if (!kprobe_disabled(p)) - __arm_kprobe(p); + arm_kprobe(p); } - mutex_unlock(&text_mutex); kprobes_all_disarmed = false; printk(KERN_INFO "Kprobes globally enabled\n"); @@ -2166,15 +2273,13 @@ static void __kprobes disarm_all_kprobes(void) kprobes_all_disarmed = true; printk(KERN_INFO "Kprobes globally disabled\n"); - mutex_lock(&text_mutex); for (i = 0; i < KPROBE_TABLE_SIZE; i++) { head = &kprobe_table[i]; hlist_for_each_entry_rcu(p, node, head, hlist) { if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p)) - __disarm_kprobe(p, false); + disarm_kprobe(p, false); } } - mutex_unlock(&text_mutex); mutex_unlock(&kprobe_mutex); /* Wait for disarming all kprobes by optimizer */ diff --git a/trunk/kernel/kthread.c b/trunk/kernel/kthread.c index b579af57ea10..146a6fa96825 100644 --- a/trunk/kernel/kthread.c +++ b/trunk/kernel/kthread.c @@ -37,11 +37,20 @@ struct kthread_create_info }; struct kthread { - int should_stop; + unsigned long flags; + unsigned int cpu; void *data; + struct completion parked; struct completion exited; }; +enum KTHREAD_BITS { + KTHREAD_IS_PER_CPU = 0, + KTHREAD_SHOULD_STOP, + KTHREAD_SHOULD_PARK, + KTHREAD_IS_PARKED, +}; + #define to_kthread(tsk) \ container_of((tsk)->vfork_done, struct kthread, exited) @@ -52,12 +61,28 @@ struct kthread { * and this will return true. You should then return, and your return * value will be passed through to kthread_stop(). 
*/ -int kthread_should_stop(void) +bool kthread_should_stop(void) { - return to_kthread(current)->should_stop; + return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags); } EXPORT_SYMBOL(kthread_should_stop); +/** + * kthread_should_park - should this kthread park now? + * + * When someone calls kthread_park() on your kthread, it will be woken + * and this will return true. You should then do the necessary + * cleanup and call kthread_parkme() + * + * Similar to kthread_should_stop(), but this keeps the thread alive + * and in a park position. kthread_unpark() "restarts" the thread and + * calls the thread function again. + */ +bool kthread_should_park(void) +{ + return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); +} + /** * kthread_freezable_should_stop - should this freezable kthread return now? * @was_frozen: optional out parameter, indicates whether %current was frozen @@ -96,6 +121,24 @@ void *kthread_data(struct task_struct *task) return to_kthread(task)->data; } +static void __kthread_parkme(struct kthread *self) +{ + __set_current_state(TASK_INTERRUPTIBLE); + while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) { + if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags)) + complete(&self->parked); + schedule(); + __set_current_state(TASK_INTERRUPTIBLE); + } + clear_bit(KTHREAD_IS_PARKED, &self->flags); + __set_current_state(TASK_RUNNING); +} + +void kthread_parkme(void) +{ + __kthread_parkme(to_kthread(current)); +} + static int kthread(void *_create) { /* Copy data: it's on kthread's stack */ @@ -105,9 +148,10 @@ static int kthread(void *_create) struct kthread self; int ret; - self.should_stop = 0; + self.flags = 0; self.data = data; init_completion(&self.exited); + init_completion(&self.parked); current->vfork_done = &self.exited; /* OK, tell user we're spawned, wait for stop or wakeup */ @@ -117,9 +161,11 @@ static int kthread(void *_create) schedule(); ret = -EINTR; - if (!self.should_stop) - ret = threadfn(data); + if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) { + __kthread_parkme(&self); + ret = threadfn(data); + } /* we can't just return, we must preserve "self" on stack */ do_exit(ret); } @@ -172,8 +218,7 @@ static void create_kthread(struct kthread_create_info *create) * Returns a task_struct or ERR_PTR(-ENOMEM). */ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), - void *data, - int node, + void *data, int node, const char namefmt[], ...) { @@ -210,6 +255,13 @@ struct task_struct *kthread_create_on_node(int (*threadfn)(void *data), } EXPORT_SYMBOL(kthread_create_on_node); +static void __kthread_bind(struct task_struct *p, unsigned int cpu) +{ + /* It's safe because the task is inactive. */ + do_set_cpus_allowed(p, cpumask_of(cpu)); + p->flags |= PF_THREAD_BOUND; +} + /** * kthread_bind - bind a just-created kthread to a cpu. * @p: thread created by kthread_create(). @@ -226,13 +278,111 @@ void kthread_bind(struct task_struct *p, unsigned int cpu) WARN_ON(1); return; } - - /* It's safe because the task is inactive. */ - do_set_cpus_allowed(p, cpumask_of(cpu)); - p->flags |= PF_THREAD_BOUND; + __kthread_bind(p, cpu); } EXPORT_SYMBOL(kthread_bind); +/** + * kthread_create_on_cpu - Create a cpu bound kthread + * @threadfn: the function to run until signal_pending(current). + * @data: data ptr for @threadfn. + * @cpu: The cpu on which the thread should be bound, + * @namefmt: printf-style name for the thread. Format is restricted + * to "name.*%u". Code fills in cpu number. 
+ * + * Description: This helper function creates and names a kernel thread + * The thread will be woken and put into park mode. + */ +struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data), + void *data, unsigned int cpu, + const char *namefmt) +{ + struct task_struct *p; + + p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt, + cpu); + if (IS_ERR(p)) + return p; + set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags); + to_kthread(p)->cpu = cpu; + /* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */ + kthread_park(p); + return p; +} + +static struct kthread *task_get_live_kthread(struct task_struct *k) +{ + struct kthread *kthread; + + get_task_struct(k); + kthread = to_kthread(k); + /* It might have exited */ + barrier(); + if (k->vfork_done != NULL) + return kthread; + return NULL; +} + +/** + * kthread_unpark - unpark a thread created by kthread_create(). + * @k: thread created by kthread_create(). + * + * Sets kthread_should_park() for @k to return false, wakes it, and + * waits for it to return. If the thread is marked percpu then its + * bound to the cpu again. + */ +void kthread_unpark(struct task_struct *k) +{ + struct kthread *kthread = task_get_live_kthread(k); + + if (kthread) { + clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + /* + * We clear the IS_PARKED bit here as we don't wait + * until the task has left the park code. So if we'd + * park before that happens we'd see the IS_PARKED bit + * which might be about to be cleared. + */ + if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) { + if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags)) + __kthread_bind(k, kthread->cpu); + wake_up_process(k); + } + } + put_task_struct(k); +} + +/** + * kthread_park - park a thread created by kthread_create(). + * @k: thread created by kthread_create(). + * + * Sets kthread_should_park() for @k to return true, wakes it, and + * waits for it to return. This can also be called after kthread_create() + * instead of calling wake_up_process(): the thread will park without + * calling threadfn(). + * + * Returns 0 if the thread is parked, -ENOSYS if the thread exited. + * If called by the kthread itself just the park bit is set. + */ +int kthread_park(struct task_struct *k) +{ + struct kthread *kthread = task_get_live_kthread(k); + int ret = -ENOSYS; + + if (kthread) { + if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) { + set_bit(KTHREAD_SHOULD_PARK, &kthread->flags); + if (k != current) { + wake_up_process(k); + wait_for_completion(&kthread->parked); + } + } + ret = 0; + } + put_task_struct(k); + return ret; +} + /** * kthread_stop - stop a thread created by kthread_create(). * @k: thread created by kthread_create(). 
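The park machinery above lets a per-cpu kthread be taken off its CPU (for example across hotplug) without destroying it: the thread polls kthread_should_park() and calls kthread_parkme(), the controller uses kthread_park()/kthread_unpark(), and kthread_create_on_cpu() hands the thread back already parked. A hedged sketch of both sides; kernel code, with the work loop and names invented for illustration:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static int my_percpu_thread(void *data)
{
    while (!kthread_should_stop()) {
        if (kthread_should_park()) {
            /* Sleeps here until kthread_unpark() or kthread_stop(). */
            kthread_parkme();
            continue;
        }
        /* ... do one chunk of per-cpu work ... */
        msleep_interruptible(100);
    }
    return 0;
}

/* Creation side: the thread comes back parked; unpark it when ready. */
static struct task_struct *start_on_cpu(unsigned int cpu)
{
    struct task_struct *t;

    t = kthread_create_on_cpu(my_percpu_thread, NULL, cpu, "mythread/%u");
    if (!IS_ERR(t))
        kthread_unpark(t);
    return t;
}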
@@ -250,16 +400,13 @@ EXPORT_SYMBOL(kthread_bind); */ int kthread_stop(struct task_struct *k) { - struct kthread *kthread; + struct kthread *kthread = task_get_live_kthread(k); int ret; trace_sched_kthread_stop(k); - get_task_struct(k); - - kthread = to_kthread(k); - barrier(); /* it might have exited */ - if (k->vfork_done != NULL) { - kthread->should_stop = 1; + if (kthread) { + set_bit(KTHREAD_SHOULD_STOP, &kthread->flags); + clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags); wake_up_process(k); wait_for_completion(&kthread->exited); } diff --git a/trunk/kernel/lockdep.c b/trunk/kernel/lockdep.c index ea9ee4518c35..7981e5b2350d 100644 --- a/trunk/kernel/lockdep.c +++ b/trunk/kernel/lockdep.c @@ -2998,6 +2998,42 @@ EXPORT_SYMBOL_GPL(lockdep_init_map); struct lock_class_key __lockdep_no_validate__; +static int +print_lock_nested_lock_not_held(struct task_struct *curr, + struct held_lock *hlock, + unsigned long ip) +{ + if (!debug_locks_off()) + return 0; + if (debug_locks_silent) + return 0; + + printk("\n"); + printk("==================================\n"); + printk("[ BUG: Nested lock was not taken ]\n"); + print_kernel_ident(); + printk("----------------------------------\n"); + + printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); + print_lock(hlock); + + printk("\nbut this task is not holding:\n"); + printk("%s\n", hlock->nest_lock->name); + + printk("\nstack backtrace:\n"); + dump_stack(); + + printk("\nother info that might help us debug this:\n"); + lockdep_print_held_locks(curr); + + printk("\nstack backtrace:\n"); + dump_stack(); + + return 0; +} + +static int __lock_is_held(struct lockdep_map *lock); + /* * This gets called for every mutex_lock*()/spin_lock*() operation. * We maintain the dependency maps and validate the locking attempt: @@ -3139,6 +3175,9 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, } chain_key = iterate_chain_key(chain_key, id); + if (nest_lock && !__lock_is_held(nest_lock)) + return print_lock_nested_lock_not_held(curr, hlock, ip); + if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) return 0; diff --git a/trunk/kernel/pid_namespace.c b/trunk/kernel/pid_namespace.c index b3c7fd554250..6144bab8fd8e 100644 --- a/trunk/kernel/pid_namespace.c +++ b/trunk/kernel/pid_namespace.c @@ -232,15 +232,19 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write, */ tmp.data = ¤t->nsproxy->pid_ns->last_pid; - return proc_dointvec(&tmp, write, buffer, lenp, ppos); + return proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos); } +extern int pid_max; +static int zero = 0; static struct ctl_table pid_ns_ctl_table[] = { { .procname = "ns_last_pid", .maxlen = sizeof(int), .mode = 0666, /* permissions are checked in the handler */ .proc_handler = pid_ns_ctl_handler, + .extra1 = &zero, + .extra2 = &pid_max, }, { } }; diff --git a/trunk/kernel/power/suspend.c b/trunk/kernel/power/suspend.c index 1da39ea248fd..c8b7446b27df 100644 --- a/trunk/kernel/power/suspend.c +++ b/trunk/kernel/power/suspend.c @@ -178,9 +178,6 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) arch_suspend_enable_irqs(); BUG_ON(irqs_disabled()); - /* Kick the lockup detector */ - lockup_detector_bootcpu_resume(); - Enable_cpus: enable_nonboot_cpus(); diff --git a/trunk/kernel/printk.c b/trunk/kernel/printk.c index 6a76ab9d4476..66a2ea37b576 100644 --- a/trunk/kernel/printk.c +++ b/trunk/kernel/printk.c @@ -1034,6 +1034,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) struct log *msg = log_from_idx(idx); 
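print_lock_nested_lock_not_held() above makes lockdep report a bug when a lock is acquired with a nest_lock annotation while that nest lock is not actually held. The annotation is what allows taking many locks of the same class under one outer lock, as in the hedged sketch below; the structures are illustrative and the unlock path is elided:

#include <linux/list.h>
#include <linux/mutex.h>

struct child {
    struct mutex lock;
    struct list_head node;
};

struct parent {
    struct mutex lock;
    struct list_head children;
};

static void lock_all_children(struct parent *p)
{
    struct child *c;

    mutex_lock(&p->lock);
    list_for_each_entry(c, &p->children, node) {
        /*
         * Same lock class taken many times: legal only under p->lock,
         * which is exactly what the nest_lock annotation claims. The
         * new lockdep check fires if p->lock is not held here.
         */
        mutex_lock_nest_lock(&c->lock, &p->lock);
    }
    /* ... unlock the children and p->lock on the way out ... */
}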
len += msg_print_text(msg, prev, true, NULL, 0); + prev = msg->flags; idx = log_next(idx); seq++; } @@ -1046,6 +1047,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) struct log *msg = log_from_idx(idx); len -= msg_print_text(msg, prev, true, NULL, 0); + prev = msg->flags; idx = log_next(idx); seq++; } diff --git a/trunk/kernel/rcupdate.c b/trunk/kernel/rcupdate.c index 4e6a61b15e86..29ca1c6da594 100644 --- a/trunk/kernel/rcupdate.c +++ b/trunk/kernel/rcupdate.c @@ -45,6 +45,7 @@ #include #include #include +#include #define CREATE_TRACE_POINTS #include @@ -81,6 +82,9 @@ void __rcu_read_unlock(void) } else { barrier(); /* critical section before exit code. */ t->rcu_read_lock_nesting = INT_MIN; +#ifdef CONFIG_PROVE_RCU_DELAY + udelay(10); /* Make preemption more probable. */ +#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */ barrier(); /* assign before ->rcu_read_unlock_special load */ if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special))) rcu_read_unlock_special(t); diff --git a/trunk/kernel/rcutiny.c b/trunk/kernel/rcutiny.c index 547b1fe5b052..e4c6a598d6f7 100644 --- a/trunk/kernel/rcutiny.c +++ b/trunk/kernel/rcutiny.c @@ -56,25 +56,28 @@ static void __call_rcu(struct rcu_head *head, static long long rcu_dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; /* Common code for rcu_idle_enter() and rcu_irq_exit(), see kernel/rcutree.c. */ -static void rcu_idle_enter_common(long long oldval) +static void rcu_idle_enter_common(long long newval) { - if (rcu_dynticks_nesting) { + if (newval) { RCU_TRACE(trace_rcu_dyntick("--=", - oldval, rcu_dynticks_nesting)); + rcu_dynticks_nesting, newval)); + rcu_dynticks_nesting = newval; return; } - RCU_TRACE(trace_rcu_dyntick("Start", oldval, rcu_dynticks_nesting)); + RCU_TRACE(trace_rcu_dyntick("Start", rcu_dynticks_nesting, newval)); if (!is_idle_task(current)) { struct task_struct *idle = idle_task(smp_processor_id()); RCU_TRACE(trace_rcu_dyntick("Error on entry: not idle task", - oldval, rcu_dynticks_nesting)); + rcu_dynticks_nesting, newval)); ftrace_dump(DUMP_ALL); WARN_ONCE(1, "Current pid: %d comm: %s / Idle pid: %d comm: %s", current->pid, current->comm, idle->pid, idle->comm); /* must be idle task! */ } rcu_sched_qs(0); /* implies rcu_bh_qsctr_inc(0) */ + barrier(); + rcu_dynticks_nesting = newval; } /* @@ -84,17 +87,16 @@ static void rcu_idle_enter_common(long long oldval) void rcu_idle_enter(void) { unsigned long flags; - long long oldval; + long long newval; local_irq_save(flags); - oldval = rcu_dynticks_nesting; WARN_ON_ONCE((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == 0); if ((rcu_dynticks_nesting & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) - rcu_dynticks_nesting = 0; + newval = 0; else - rcu_dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; - rcu_idle_enter_common(oldval); + newval = rcu_dynticks_nesting - DYNTICK_TASK_NEST_VALUE; + rcu_idle_enter_common(newval); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(rcu_idle_enter); @@ -105,15 +107,15 @@ EXPORT_SYMBOL_GPL(rcu_idle_enter); void rcu_irq_exit(void) { unsigned long flags; - long long oldval; + long long newval; local_irq_save(flags); - oldval = rcu_dynticks_nesting; - rcu_dynticks_nesting--; - WARN_ON_ONCE(rcu_dynticks_nesting < 0); - rcu_idle_enter_common(oldval); + newval = rcu_dynticks_nesting - 1; + WARN_ON_ONCE(newval < 0); + rcu_idle_enter_common(newval); local_irq_restore(flags); } +EXPORT_SYMBOL_GPL(rcu_irq_exit); /* Common code for rcu_idle_exit() and rcu_irq_enter(), see kernel/rcutree.c. 
*/ static void rcu_idle_exit_common(long long oldval) @@ -171,6 +173,7 @@ void rcu_irq_enter(void) rcu_idle_exit_common(oldval); local_irq_restore(flags); } +EXPORT_SYMBOL_GPL(rcu_irq_enter); #ifdef CONFIG_DEBUG_LOCK_ALLOC diff --git a/trunk/kernel/rcutiny_plugin.h b/trunk/kernel/rcutiny_plugin.h index 918fd1e8509c..3d0190282204 100644 --- a/trunk/kernel/rcutiny_plugin.h +++ b/trunk/kernel/rcutiny_plugin.h @@ -278,7 +278,7 @@ static int rcu_boost(void) rcu_preempt_ctrlblk.exp_tasks == NULL) return 0; /* Nothing to boost. */ - raw_local_irq_save(flags); + local_irq_save(flags); /* * Recheck with irqs disabled: all tasks in need of boosting @@ -287,7 +287,7 @@ static int rcu_boost(void) */ if (rcu_preempt_ctrlblk.boost_tasks == NULL && rcu_preempt_ctrlblk.exp_tasks == NULL) { - raw_local_irq_restore(flags); + local_irq_restore(flags); return 0; } @@ -317,7 +317,7 @@ static int rcu_boost(void) t = container_of(tb, struct task_struct, rcu_node_entry); rt_mutex_init_proxy_locked(&mtx, t); t->rcu_boost_mutex = &mtx; - raw_local_irq_restore(flags); + local_irq_restore(flags); rt_mutex_lock(&mtx); rt_mutex_unlock(&mtx); /* Keep lockdep happy. */ @@ -991,9 +991,9 @@ static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n) { unsigned long flags; - raw_local_irq_save(flags); + local_irq_save(flags); rcp->qlen -= n; - raw_local_irq_restore(flags); + local_irq_restore(flags); } /* diff --git a/trunk/kernel/rcutorture.c b/trunk/kernel/rcutorture.c index 25b15033c61f..aaa7b9f3532a 100644 --- a/trunk/kernel/rcutorture.c +++ b/trunk/kernel/rcutorture.c @@ -53,10 +53,11 @@ MODULE_AUTHOR("Paul E. McKenney and Josh Triplett can_boost, - test_boost_interval, test_boost_duration, shutdown_secs, - onoff_interval, onoff_holdoff); + pr_alert("%s" TORTURE_FLAG + "--- %s: nreaders=%d nfakewriters=%d " + "stat_interval=%d verbose=%d test_no_idle_hz=%d " + "shuffle_interval=%d stutter=%d irqreader=%d " + "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d " + "test_boost=%d/%d test_boost_interval=%d " + "test_boost_duration=%d shutdown_secs=%d " + "onoff_interval=%d onoff_holdoff=%d\n", + torture_type, tag, nrealreaders, nfakewriters, + stat_interval, verbose, test_no_idle_hz, shuffle_interval, + stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter, + test_boost, cur_ops->can_boost, + test_boost_interval, test_boost_duration, shutdown_secs, + onoff_interval, onoff_holdoff); } static struct notifier_block rcutorture_shutdown_nb = { @@ -1460,9 +1469,9 @@ rcu_torture_shutdown(void *arg) !kthread_should_stop()) { delta = shutdown_time - jiffies_snap; if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_shutdown task: %lu jiffies remaining\n", - torture_type, delta); + pr_alert("%s" TORTURE_FLAG + "rcu_torture_shutdown task: %lu jiffies remaining\n", + torture_type, delta); schedule_timeout_interruptible(delta); jiffies_snap = ACCESS_ONCE(jiffies); } @@ -1490,8 +1499,10 @@ static int __cpuinit rcu_torture_onoff(void *arg) { int cpu; + unsigned long delta; int maxcpu = -1; DEFINE_RCU_RANDOM(rand); + unsigned long starttime; VERBOSE_PRINTK_STRING("rcu_torture_onoff task started"); for_each_online_cpu(cpu) @@ -1506,29 +1517,51 @@ rcu_torture_onoff(void *arg) cpu = (rcu_random(&rand) >> 4) % (maxcpu + 1); if (cpu_online(cpu) && cpu_is_hotpluggable(cpu)) { if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: offlining %d\n", - torture_type, cpu); + pr_alert("%s" TORTURE_FLAG + "rcu_torture_onoff task: offlining %d\n", + torture_type, cpu); + starttime = jiffies; n_offline_attempts++; 
if (cpu_down(cpu) == 0) { if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: offlined %d\n", - torture_type, cpu); + pr_alert("%s" TORTURE_FLAG + "rcu_torture_onoff task: offlined %d\n", + torture_type, cpu); n_offline_successes++; + delta = jiffies - starttime; + sum_offline += delta; + if (min_offline < 0) { + min_offline = delta; + max_offline = delta; + } + if (min_offline > delta) + min_offline = delta; + if (max_offline < delta) + max_offline = delta; } } else if (cpu_is_hotpluggable(cpu)) { if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: onlining %d\n", - torture_type, cpu); + pr_alert("%s" TORTURE_FLAG + "rcu_torture_onoff task: onlining %d\n", + torture_type, cpu); + starttime = jiffies; n_online_attempts++; if (cpu_up(cpu) == 0) { if (verbose) - printk(KERN_ALERT "%s" TORTURE_FLAG - "rcu_torture_onoff task: onlined %d\n", - torture_type, cpu); + pr_alert("%s" TORTURE_FLAG + "rcu_torture_onoff task: onlined %d\n", + torture_type, cpu); n_online_successes++; + delta = jiffies - starttime; + sum_online += delta; + if (min_online < 0) { + min_online = delta; + max_online = delta; + } + if (min_online > delta) + min_online = delta; + if (max_online < delta) + max_online = delta; } } schedule_timeout_interruptible(onoff_interval * HZ); @@ -1593,14 +1626,14 @@ static int __cpuinit rcu_torture_stall(void *args) if (!kthread_should_stop()) { stop_at = get_seconds() + stall_cpu; /* RCU CPU stall is expected behavior in following code. */ - printk(KERN_ALERT "rcu_torture_stall start.\n"); + pr_alert("rcu_torture_stall start.\n"); rcu_read_lock(); preempt_disable(); while (ULONG_CMP_LT(get_seconds(), stop_at)) continue; /* Induce RCU CPU stall warning. */ preempt_enable(); rcu_read_unlock(); - printk(KERN_ALERT "rcu_torture_stall end.\n"); + pr_alert("rcu_torture_stall end.\n"); } rcutorture_shutdown_absorb("rcu_torture_stall"); while (!kthread_should_stop()) @@ -1716,12 +1749,12 @@ static int rcu_torture_barrier_init(void) if (n_barrier_cbs == 0) return 0; if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) { - printk(KERN_ALERT "%s" TORTURE_FLAG - " Call or barrier ops missing for %s,\n", - torture_type, cur_ops->name); - printk(KERN_ALERT "%s" TORTURE_FLAG - " RCU barrier testing omitted from run.\n", - torture_type); + pr_alert("%s" TORTURE_FLAG + " Call or barrier ops missing for %s,\n", + torture_type, cur_ops->name); + pr_alert("%s" TORTURE_FLAG + " RCU barrier testing omitted from run.\n", + torture_type); return 0; } atomic_set(&barrier_cbs_count, 0); @@ -1814,7 +1847,7 @@ rcu_torture_cleanup(void) mutex_lock(&fullstop_mutex); rcutorture_record_test_transition(); if (fullstop == FULLSTOP_SHUTDOWN) { - printk(KERN_WARNING /* but going down anyway, so... */ + pr_warn(/* but going down anyway, so... 
*/ "Concurrent 'rmmod rcutorture' and shutdown illegal!\n"); mutex_unlock(&fullstop_mutex); schedule_timeout_uninterruptible(10); @@ -1938,17 +1971,17 @@ rcu_torture_init(void) break; } if (i == ARRAY_SIZE(torture_ops)) { - printk(KERN_ALERT "rcu-torture: invalid torture type: \"%s\"\n", - torture_type); - printk(KERN_ALERT "rcu-torture types:"); + pr_alert("rcu-torture: invalid torture type: \"%s\"\n", + torture_type); + pr_alert("rcu-torture types:"); for (i = 0; i < ARRAY_SIZE(torture_ops); i++) - printk(KERN_ALERT " %s", torture_ops[i]->name); - printk(KERN_ALERT "\n"); + pr_alert(" %s", torture_ops[i]->name); + pr_alert("\n"); mutex_unlock(&fullstop_mutex); return -EINVAL; } if (cur_ops->fqs == NULL && fqs_duration != 0) { - printk(KERN_ALERT "rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); + pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n"); fqs_duration = 0; } if (cur_ops->init) @@ -1996,14 +2029,15 @@ rcu_torture_init(void) /* Start up the kthreads. */ VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task"); - writer_task = kthread_run(rcu_torture_writer, NULL, - "rcu_torture_writer"); + writer_task = kthread_create(rcu_torture_writer, NULL, + "rcu_torture_writer"); if (IS_ERR(writer_task)) { firsterr = PTR_ERR(writer_task); VERBOSE_PRINTK_ERRSTRING("Failed to create writer"); writer_task = NULL; goto unwind; } + wake_up_process(writer_task); fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]), GFP_KERNEL); if (fakewriter_tasks == NULL) { @@ -2118,14 +2152,15 @@ rcu_torture_init(void) } if (shutdown_secs > 0) { shutdown_time = jiffies + shutdown_secs * HZ; - shutdown_task = kthread_run(rcu_torture_shutdown, NULL, - "rcu_torture_shutdown"); + shutdown_task = kthread_create(rcu_torture_shutdown, NULL, + "rcu_torture_shutdown"); if (IS_ERR(shutdown_task)) { firsterr = PTR_ERR(shutdown_task); VERBOSE_PRINTK_ERRSTRING("Failed to create shutdown"); shutdown_task = NULL; goto unwind; } + wake_up_process(shutdown_task); } i = rcu_torture_onoff_init(); if (i != 0) { diff --git a/trunk/kernel/rcutree.c b/trunk/kernel/rcutree.c index f280e542e3e9..4fb2376ddf06 100644 --- a/trunk/kernel/rcutree.c +++ b/trunk/kernel/rcutree.c @@ -52,6 +52,7 @@ #include #include #include +#include #include "rcutree.h" #include @@ -61,6 +62,7 @@ /* Data structures. */ static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; +static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS]; #define RCU_STATE_INITIALIZER(sname, cr) { \ .level = { &sname##_state.node[0] }, \ @@ -72,7 +74,6 @@ static struct lock_class_key rcu_node_class[RCU_NUM_LVLS]; .orphan_nxttail = &sname##_state.orphan_nxtlist, \ .orphan_donetail = &sname##_state.orphan_donelist, \ .barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \ - .fqslock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.fqslock), \ .name = #sname, \ } @@ -88,7 +89,7 @@ LIST_HEAD(rcu_struct_flavors); /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; -module_param(rcu_fanout_leaf, int, 0); +module_param(rcu_fanout_leaf, int, 0444); int rcu_num_lvls __read_mostly = RCU_NUM_LVLS; static int num_rcu_lvl[] = { /* Number of rcu_nodes at specified level. 
*/ NUM_RCU_LVL_0, @@ -133,13 +134,12 @@ static int rcu_scheduler_fully_active __read_mostly; */ static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); -DEFINE_PER_CPU(int, rcu_cpu_kthread_cpu); DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DEFINE_PER_CPU(char, rcu_cpu_has_work); #endif /* #ifdef CONFIG_RCU_BOOST */ -static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); +static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); static void invoke_rcu_core(void); static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp); @@ -175,8 +175,6 @@ void rcu_sched_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_sched_data, cpu); - rdp->passed_quiesce_gpnum = rdp->gpnum; - barrier(); if (rdp->passed_quiesce == 0) trace_rcu_grace_period("rcu_sched", rdp->gpnum, "cpuqs"); rdp->passed_quiesce = 1; @@ -186,8 +184,6 @@ void rcu_bh_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); - rdp->passed_quiesce_gpnum = rdp->gpnum; - barrier(); if (rdp->passed_quiesce == 0) trace_rcu_grace_period("rcu_bh", rdp->gpnum, "cpuqs"); rdp->passed_quiesce = 1; @@ -210,15 +206,18 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch); DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE, .dynticks = ATOMIC_INIT(1), +#if defined(CONFIG_RCU_USER_QS) && !defined(CONFIG_RCU_USER_QS_FORCE) + .ignore_user_qs = true, +#endif }; static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */ static int qhimark = 10000; /* If this many pending, ignore blimit. */ static int qlowmark = 100; /* Once only this many pending, use blimit. */ -module_param(blimit, int, 0); -module_param(qhimark, int, 0); -module_param(qlowmark, int, 0); +module_param(blimit, int, 0444); +module_param(qhimark, int, 0444); +module_param(qlowmark, int, 0444); int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. 
*/ int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; @@ -226,7 +225,14 @@ int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT; module_param(rcu_cpu_stall_suppress, int, 0644); module_param(rcu_cpu_stall_timeout, int, 0644); -static void force_quiescent_state(struct rcu_state *rsp, int relaxed); +static ulong jiffies_till_first_fqs = RCU_JIFFIES_TILL_FORCE_QS; +static ulong jiffies_till_next_fqs = RCU_JIFFIES_TILL_FORCE_QS; + +module_param(jiffies_till_first_fqs, ulong, 0644); +module_param(jiffies_till_next_fqs, ulong, 0644); + +static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)); +static void force_quiescent_state(struct rcu_state *rsp); static int rcu_pending(int cpu); /* @@ -252,7 +258,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); */ void rcu_bh_force_quiescent_state(void) { - force_quiescent_state(&rcu_bh_state, 0); + force_quiescent_state(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); @@ -286,7 +292,7 @@ EXPORT_SYMBOL_GPL(rcutorture_record_progress); */ void rcu_sched_force_quiescent_state(void) { - force_quiescent_state(&rcu_sched_state, 0); + force_quiescent_state(&rcu_sched_state); } EXPORT_SYMBOL_GPL(rcu_sched_force_quiescent_state); @@ -305,7 +311,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp) static int cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp) { - return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp); + return *rdp->nxttail[RCU_DONE_TAIL + + ACCESS_ONCE(rsp->completed) != rdp->completed] && + !rcu_gp_in_progress(rsp); } /* @@ -317,45 +325,17 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp) } /* - * If the specified CPU is offline, tell the caller that it is in - * a quiescent state. Otherwise, whack it with a reschedule IPI. - * Grace periods can end up waiting on an offline CPU when that - * CPU is in the process of coming online -- it will be added to the - * rcu_node bitmasks before it actually makes it online. The same thing - * can happen while a CPU is in the process of coming online. Because this - * race is quite rare, we check for it after detecting that the grace - * period has been delayed rather than checking each and every CPU - * each and every time we start a new grace period. - */ -static int rcu_implicit_offline_qs(struct rcu_data *rdp) -{ - /* - * If the CPU is offline for more than a jiffy, it is in a quiescent - * state. We can trust its state not to change because interrupts - * are disabled. The reason for the jiffy's worth of slack is to - * handle CPUs initializing on the way up and finding their way - * to the idle loop on the way down. - */ - if (cpu_is_offline(rdp->cpu) && - ULONG_CMP_LT(rdp->rsp->gp_start + 2, jiffies)) { - trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl"); - rdp->offline_fqs++; - return 1; - } - return 0; -} - -/* - * rcu_idle_enter_common - inform RCU that current CPU is moving towards idle + * rcu_eqs_enter_common - current CPU is moving towards extended quiescent state * * If the new value of the ->dynticks_nesting counter now is zero, * we really have entered idle, and must do the appropriate accounting. * The caller must have disabled interrupts. 
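(Aside: the module_param() churn above is only about the third argument, which is the sysfs mode of /sys/module/<module>/parameters/<name>: 0 hides the parameter, 0444 exposes it read-only, and 0644 lets root retune it at run time, as with the new jiffies_till_first_fqs/jiffies_till_next_fqs knobs. A tiny illustrative fragment with invented parameter names.)

#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_ro_param = 10;		/* visible in sysfs, but read-only */
module_param(demo_ro_param, int, 0444);

static ulong demo_rw_param = 3;		/* root may rewrite it at run time */
module_param(demo_rw_param, ulong, 0644);

static int demo_hidden_param;		/* perm 0: no sysfs entry at all */
module_param(demo_hidden_param, int, 0);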
*/ -static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) +static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval, + bool user) { trace_rcu_dyntick("Start", oldval, 0); - if (!is_idle_task(current)) { + if (!user && !is_idle_task(current)) { struct task_struct *idle = idle_task(smp_processor_id()); trace_rcu_dyntick("Error on entry: not idle task", oldval, 0); @@ -372,7 +352,7 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1); /* - * The idle task is not permitted to enter the idle loop while + * It is illegal to enter an extended quiescent state while * in an RCU read-side critical section. */ rcu_lockdep_assert(!lock_is_held(&rcu_lock_map), @@ -383,6 +363,25 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) "Illegal idle entry in RCU-sched read-side critical section."); } +/* + * Enter an RCU extended quiescent state, which can be either the + * idle loop or adaptive-tickless usermode execution. + */ +static void rcu_eqs_enter(bool user) +{ + long long oldval; + struct rcu_dynticks *rdtp; + + rdtp = &__get_cpu_var(rcu_dynticks); + oldval = rdtp->dynticks_nesting; + WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); + if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) + rdtp->dynticks_nesting = 0; + else + rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; + rcu_eqs_enter_common(rdtp, oldval, user); +} + /** * rcu_idle_enter - inform RCU that current CPU is entering idle * @@ -398,21 +397,70 @@ static void rcu_idle_enter_common(struct rcu_dynticks *rdtp, long long oldval) void rcu_idle_enter(void) { unsigned long flags; - long long oldval; + + local_irq_save(flags); + rcu_eqs_enter(false); + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(rcu_idle_enter); + +#ifdef CONFIG_RCU_USER_QS +/** + * rcu_user_enter - inform RCU that we are resuming userspace. + * + * Enter RCU idle mode right before resuming userspace. No use of RCU + * is permitted between this call and rcu_user_exit(). This way the + * CPU doesn't need to maintain the tick for RCU maintenance purposes + * when the CPU runs in userspace. + */ +void rcu_user_enter(void) +{ + unsigned long flags; struct rcu_dynticks *rdtp; + /* + * Some contexts may involve an exception occuring in an irq, + * leading to that nesting: + * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() + * This would mess up the dyntick_nesting count though. And rcu_irq_*() + * helpers are enough to protect RCU uses inside the exception. So + * just return immediately if we detect we are in an IRQ. + */ + if (in_interrupt()) + return; + + WARN_ON_ONCE(!current->mm); + local_irq_save(flags); rdtp = &__get_cpu_var(rcu_dynticks); - oldval = rdtp->dynticks_nesting; - WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0); - if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) - rdtp->dynticks_nesting = 0; - else - rdtp->dynticks_nesting -= DYNTICK_TASK_NEST_VALUE; - rcu_idle_enter_common(rdtp, oldval); + if (!rdtp->ignore_user_qs && !rdtp->in_user) { + rdtp->in_user = true; + rcu_eqs_enter(true); + } local_irq_restore(flags); } -EXPORT_SYMBOL_GPL(rcu_idle_enter); + +/** + * rcu_user_enter_after_irq - inform RCU that we are going to resume userspace + * after the current irq returns. + * + * This is similar to rcu_user_enter() but in the context of a non-nesting + * irq. After this call, RCU enters into idle mode when the interrupt + * returns. 
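(Aside: the dynticks_nesting arithmetic in rcu_eqs_enter()/rcu_irq_exit() above is easier to see as plain reference counting: the counter drops to zero when the CPU enters an extended quiescent state (idle, or userspace under CONFIG_RCU_USER_QS) and climbs back above zero whenever RCU may be used again. A deliberately over-simplified user-space toy that ignores DYNTICK_TASK_NEST_MASK, the atomic ->dynticks counter and all the warnings; every name here is invented.)

#include <assert.h>
#include <stdio.h>

/* 0 means "in an extended quiescent state"; above 0 means RCU is usable. */
static long toy_nesting = 1;		/* start out running normal kernel code */

static void toy_eqs_enter(void) { assert(toy_nesting > 0); toy_nesting--; }
static void toy_eqs_exit(void)  { toy_nesting++; }
static int  toy_in_eqs(void)    { return toy_nesting == 0; }

int main(void)
{
	toy_eqs_enter();			/* like rcu_idle_enter()/rcu_user_enter() */
	printf("idle, in EQS: %d\n", toy_in_eqs());	/* 1 */

	toy_eqs_exit();				/* like rcu_irq_enter(): irq while idle */
	printf("in irq, in EQS: %d\n", toy_in_eqs());	/* 0: RCU usable again */
	toy_eqs_enter();			/* like rcu_irq_exit(): back to idle */

	toy_eqs_exit();				/* like rcu_idle_exit()/rcu_user_exit() */
	printf("running, in EQS: %d\n", toy_in_eqs());	/* 0 */
	return 0;
}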
+ */ +void rcu_user_enter_after_irq(void) +{ + unsigned long flags; + struct rcu_dynticks *rdtp; + + local_irq_save(flags); + rdtp = &__get_cpu_var(rcu_dynticks); + /* Ensure this irq is interrupting a non-idle RCU state. */ + WARN_ON_ONCE(!(rdtp->dynticks_nesting & DYNTICK_TASK_MASK)); + rdtp->dynticks_nesting = 1; + local_irq_restore(flags); +} +#endif /* CONFIG_RCU_USER_QS */ /** * rcu_irq_exit - inform RCU that current CPU is exiting irq towards idle @@ -444,18 +492,19 @@ void rcu_irq_exit(void) if (rdtp->dynticks_nesting) trace_rcu_dyntick("--=", oldval, rdtp->dynticks_nesting); else - rcu_idle_enter_common(rdtp, oldval); + rcu_eqs_enter_common(rdtp, oldval, true); local_irq_restore(flags); } /* - * rcu_idle_exit_common - inform RCU that current CPU is moving away from idle + * rcu_eqs_exit_common - current CPU moving away from extended quiescent state * * If the new value of the ->dynticks_nesting counter was previously zero, * we really have exited idle, and must do the appropriate accounting. * The caller must have disabled interrupts. */ -static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) +static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval, + int user) { smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */ atomic_inc(&rdtp->dynticks); @@ -464,7 +513,7 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1)); rcu_cleanup_after_idle(smp_processor_id()); trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting); - if (!is_idle_task(current)) { + if (!user && !is_idle_task(current)) { struct task_struct *idle = idle_task(smp_processor_id()); trace_rcu_dyntick("Error on exit: not idle task", @@ -476,6 +525,25 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) } } +/* + * Exit an RCU extended quiescent state, which can be either the + * idle loop or adaptive-tickless usermode execution. + */ +static void rcu_eqs_exit(bool user) +{ + struct rcu_dynticks *rdtp; + long long oldval; + + rdtp = &__get_cpu_var(rcu_dynticks); + oldval = rdtp->dynticks_nesting; + WARN_ON_ONCE(oldval < 0); + if (oldval & DYNTICK_TASK_NEST_MASK) + rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE; + else + rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; + rcu_eqs_exit_common(rdtp, oldval, user); +} + /** * rcu_idle_exit - inform RCU that current CPU is leaving idle * @@ -488,23 +556,69 @@ static void rcu_idle_exit_common(struct rcu_dynticks *rdtp, long long oldval) * now starting. */ void rcu_idle_exit(void) +{ + unsigned long flags; + + local_irq_save(flags); + rcu_eqs_exit(false); + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(rcu_idle_exit); + +#ifdef CONFIG_RCU_USER_QS +/** + * rcu_user_exit - inform RCU that we are exiting userspace. + * + * Exit RCU idle mode while entering the kernel because it can + * run a RCU read side critical section anytime. + */ +void rcu_user_exit(void) { unsigned long flags; struct rcu_dynticks *rdtp; - long long oldval; + + /* + * Some contexts may involve an exception occuring in an irq, + * leading to that nesting: + * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit() + * This would mess up the dyntick_nesting count though. And rcu_irq_*() + * helpers are enough to protect RCU uses inside the exception. So + * just return immediately if we detect we are in an IRQ. 
+ */ + if (in_interrupt()) + return; local_irq_save(flags); rdtp = &__get_cpu_var(rcu_dynticks); - oldval = rdtp->dynticks_nesting; - WARN_ON_ONCE(oldval < 0); - if (oldval & DYNTICK_TASK_NEST_MASK) - rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE; - else - rdtp->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; - rcu_idle_exit_common(rdtp, oldval); + if (rdtp->in_user) { + rdtp->in_user = false; + rcu_eqs_exit(true); + } local_irq_restore(flags); } -EXPORT_SYMBOL_GPL(rcu_idle_exit); + +/** + * rcu_user_exit_after_irq - inform RCU that we won't resume to userspace + * idle mode after the current non-nesting irq returns. + * + * This is similar to rcu_user_exit() but in the context of an irq. + * This is called when the irq has interrupted a userspace RCU idle mode + * context. When the current non-nesting interrupt returns after this call, + * the CPU won't restore the RCU idle mode. + */ +void rcu_user_exit_after_irq(void) +{ + unsigned long flags; + struct rcu_dynticks *rdtp; + + local_irq_save(flags); + rdtp = &__get_cpu_var(rcu_dynticks); + /* Ensure we are interrupting an RCU idle mode. */ + WARN_ON_ONCE(rdtp->dynticks_nesting & DYNTICK_TASK_NEST_MASK); + rdtp->dynticks_nesting += DYNTICK_TASK_EXIT_IDLE; + local_irq_restore(flags); +} +#endif /* CONFIG_RCU_USER_QS */ /** * rcu_irq_enter - inform RCU that current CPU is entering irq away from idle @@ -539,7 +653,7 @@ void rcu_irq_enter(void) if (oldval) trace_rcu_dyntick("++=", oldval, rdtp->dynticks_nesting); else - rcu_idle_exit_common(rdtp, oldval); + rcu_eqs_exit_common(rdtp, oldval, true); local_irq_restore(flags); } @@ -603,6 +717,21 @@ int rcu_is_cpu_idle(void) } EXPORT_SYMBOL(rcu_is_cpu_idle); +#ifdef CONFIG_RCU_USER_QS +void rcu_user_hooks_switch(struct task_struct *prev, + struct task_struct *next) +{ + struct rcu_dynticks *rdtp; + + /* Interrupts are disabled in context switch */ + rdtp = &__get_cpu_var(rcu_dynticks); + if (!rdtp->ignore_user_qs) { + clear_tsk_thread_flag(prev, TIF_NOHZ); + set_tsk_thread_flag(next, TIF_NOHZ); + } +} +#endif /* #ifdef CONFIG_RCU_USER_QS */ + #if defined(CONFIG_PROVE_RCU) && defined(CONFIG_HOTPLUG_CPU) /* @@ -673,7 +802,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) * Return true if the specified CPU has passed through a quiescent * state by virtue of being in or having passed through an dynticks * idle state since the last call to dyntick_save_progress_counter() - * for this same CPU. + * for this same CPU, or by virtue of having been offline. */ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) { @@ -697,8 +826,26 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) return 1; } - /* Go check for the CPU being offline. */ - return rcu_implicit_offline_qs(rdp); + /* + * Check for the CPU being offline, but only if the grace period + * is old enough. We don't need to worry about the CPU changing + * state: If we see it offline even once, it has been through a + * quiescent state. + * + * The reason for insisting that the grace period be at least + * one jiffy old is that CPUs that are not quite online and that + * have just gone offline can still execute RCU read-side critical + * sections. + */ + if (ULONG_CMP_GE(rdp->rsp->gp_start + 2, jiffies)) + return 0; /* Grace period is not old enough. 
*/ + barrier(); + if (cpu_is_offline(rdp->cpu)) { + trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, "ofl"); + rdp->offline_fqs++; + return 1; + } + return 0; } static int jiffies_till_stall_check(void) @@ -755,14 +902,15 @@ static void print_other_cpu_stall(struct rcu_state *rsp) rcu_for_each_leaf_node(rsp, rnp) { raw_spin_lock_irqsave(&rnp->lock, flags); ndetected += rcu_print_task_stall(rnp); + if (rnp->qsmask != 0) { + for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) + if (rnp->qsmask & (1UL << cpu)) { + print_cpu_stall_info(rsp, + rnp->grplo + cpu); + ndetected++; + } + } raw_spin_unlock_irqrestore(&rnp->lock, flags); - if (rnp->qsmask == 0) - continue; - for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++) - if (rnp->qsmask & (1UL << cpu)) { - print_cpu_stall_info(rsp, rnp->grplo + cpu); - ndetected++; - } } /* @@ -782,11 +930,11 @@ static void print_other_cpu_stall(struct rcu_state *rsp) else if (!trigger_all_cpu_backtrace()) dump_stack(); - /* If so configured, complain about tasks blocking the grace period. */ + /* Complain about tasks blocking the grace period. */ rcu_print_detail_task_stall(rsp); - force_quiescent_state(rsp, 0); /* Kick them all. */ + force_quiescent_state(rsp); /* Kick them all. */ } static void print_cpu_stall(struct rcu_state *rsp) @@ -827,7 +975,8 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp) j = ACCESS_ONCE(jiffies); js = ACCESS_ONCE(rsp->jiffies_stall); rnp = rdp->mynode; - if ((ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) { + if (rcu_gp_in_progress(rsp) && + (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask) && ULONG_CMP_GE(j, js)) { /* We haven't checked in, so go dump stack. */ print_cpu_stall(rsp); @@ -889,12 +1038,8 @@ static void __note_new_gpnum(struct rcu_state *rsp, struct rcu_node *rnp, struct */ rdp->gpnum = rnp->gpnum; trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpustart"); - if (rnp->qsmask & rdp->grpmask) { - rdp->qs_pending = 1; - rdp->passed_quiesce = 0; - } else { - rdp->qs_pending = 0; - } + rdp->passed_quiesce = 0; + rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask); zero_cpu_stall_ticks(rdp); } } @@ -974,10 +1119,13 @@ __rcu_process_gp_end(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat * our behalf. Catch up with this state to avoid noting * spurious new grace periods. If another grace period * has started, then rnp->gpnum will have advanced, so - * we will detect this later on. + * we will detect this later on. Of course, any quiescent + * states we found for the old GP are now invalid. */ - if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) + if (ULONG_CMP_LT(rdp->gpnum, rdp->completed)) { rdp->gpnum = rdp->completed; + rdp->passed_quiesce = 0; + } /* * If RCU does not need a quiescent state from this CPU, @@ -1021,97 +1169,56 @@ rcu_start_gp_per_cpu(struct rcu_state *rsp, struct rcu_node *rnp, struct rcu_dat /* Prior grace period ended, so advance callbacks for current CPU. */ __rcu_process_gp_end(rsp, rnp, rdp); - /* - * Because this CPU just now started the new grace period, we know - * that all of its callbacks will be covered by this upcoming grace - * period, even the ones that were registered arbitrarily recently. - * Therefore, advance all outstanding callbacks to RCU_WAIT_TAIL. - * - * Other CPUs cannot be sure exactly when the grace period started. - * Therefore, their recently registered callbacks must pass through - * an additional RCU_NEXT_READY stage, so that they will be handled - * by the next RCU grace period. 
- */ - rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; - rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL]; - /* Set state so that this CPU will detect the next quiescent state. */ __note_new_gpnum(rsp, rnp, rdp); } /* - * Start a new RCU grace period if warranted, re-initializing the hierarchy - * in preparation for detecting the next grace period. The caller must hold - * the root node's ->lock, which is released before return. Hard irqs must - * be disabled. - * - * Note that it is legal for a dying CPU (which is marked as offline) to - * invoke this function. This can happen when the dying CPU reports its - * quiescent state. + * Initialize a new grace period. */ -static void -rcu_start_gp(struct rcu_state *rsp, unsigned long flags) - __releases(rcu_get_root(rsp)->lock) +static int rcu_gp_init(struct rcu_state *rsp) { - struct rcu_data *rdp = this_cpu_ptr(rsp->rda); + struct rcu_data *rdp; struct rcu_node *rnp = rcu_get_root(rsp); - if (!rcu_scheduler_fully_active || - !cpu_needs_another_gp(rsp, rdp)) { - /* - * Either the scheduler hasn't yet spawned the first - * non-idle task or this CPU does not need another - * grace period. Either way, don't start a new grace - * period. - */ - raw_spin_unlock_irqrestore(&rnp->lock, flags); - return; - } + raw_spin_lock_irq(&rnp->lock); + rsp->gp_flags = 0; /* Clear all flags: New grace period. */ - if (rsp->fqs_active) { - /* - * This CPU needs a grace period, but force_quiescent_state() - * is running. Tell it to start one on this CPU's behalf. - */ - rsp->fqs_need_gp = 1; - raw_spin_unlock_irqrestore(&rnp->lock, flags); - return; + if (rcu_gp_in_progress(rsp)) { + /* Grace period already in progress, don't start another. */ + raw_spin_unlock_irq(&rnp->lock); + return 0; } /* Advance to a new grace period and initialize state. */ rsp->gpnum++; trace_rcu_grace_period(rsp->name, rsp->gpnum, "start"); - WARN_ON_ONCE(rsp->fqs_state == RCU_GP_INIT); - rsp->fqs_state = RCU_GP_INIT; /* Hold off force_quiescent_state. */ - rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; record_gp_stall_check_time(rsp); - raw_spin_unlock(&rnp->lock); /* leave irqs disabled. */ + raw_spin_unlock_irq(&rnp->lock); /* Exclude any concurrent CPU-hotplug operations. */ - raw_spin_lock(&rsp->onofflock); /* irqs already disabled. */ + get_online_cpus(); /* * Set the quiescent-state-needed bits in all the rcu_node - * structures for all currently online CPUs in breadth-first - * order, starting from the root rcu_node structure. This - * operation relies on the layout of the hierarchy within the - * rsp->node[] array. Note that other CPUs will access only - * the leaves of the hierarchy, which still indicate that no + * structures for all currently online CPUs in breadth-first order, + * starting from the root rcu_node structure, relying on the layout + * of the tree within the rsp->node[] array. Note that other CPUs + * will access only the leaves of the hierarchy, thus seeing that no * grace period is in progress, at least until the corresponding * leaf node has been initialized. In addition, we have excluded * CPU-hotplug operations. * - * Note that the grace period cannot complete until we finish - * the initialization process, as there will be at least one - * qsmask bit set in the root node until that time, namely the - * one corresponding to this CPU, due to the fact that we have - * irqs disabled. + * The grace period cannot complete until the initialization + * process finishes, because this kthread handles both. 
*/ rcu_for_each_node_breadth_first(rsp, rnp) { - raw_spin_lock(&rnp->lock); /* irqs already disabled. */ + raw_spin_lock_irq(&rnp->lock); + rdp = this_cpu_ptr(rsp->rda); rcu_preempt_check_blocked_tasks(rnp); rnp->qsmask = rnp->qsmaskinit; rnp->gpnum = rsp->gpnum; + WARN_ON_ONCE(rnp->completed != rsp->completed); rnp->completed = rsp->completed; if (rnp == rdp->mynode) rcu_start_gp_per_cpu(rsp, rnp, rdp); @@ -1119,37 +1226,54 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags) trace_rcu_grace_period_init(rsp->name, rnp->gpnum, rnp->level, rnp->grplo, rnp->grphi, rnp->qsmask); - raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ + raw_spin_unlock_irq(&rnp->lock); +#ifdef CONFIG_PROVE_RCU_DELAY + if ((random32() % (rcu_num_nodes * 8)) == 0) + schedule_timeout_uninterruptible(2); +#endif /* #ifdef CONFIG_PROVE_RCU_DELAY */ + cond_resched(); } - rnp = rcu_get_root(rsp); - raw_spin_lock(&rnp->lock); /* irqs already disabled. */ - rsp->fqs_state = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */ - raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ - raw_spin_unlock_irqrestore(&rsp->onofflock, flags); + put_online_cpus(); + return 1; } /* - * Report a full set of quiescent states to the specified rcu_state - * data structure. This involves cleaning up after the prior grace - * period and letting rcu_start_gp() start up the next grace period - * if one is needed. Note that the caller must hold rnp->lock, as - * required by rcu_start_gp(), which will release it. + * Do one round of quiescent-state forcing. */ -static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) - __releases(rcu_get_root(rsp)->lock) +int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) { - unsigned long gp_duration; + int fqs_state = fqs_state_in; struct rcu_node *rnp = rcu_get_root(rsp); - struct rcu_data *rdp = this_cpu_ptr(rsp->rda); - WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + rsp->n_force_qs++; + if (fqs_state == RCU_SAVE_DYNTICK) { + /* Collect dyntick-idle snapshots. */ + force_qs_rnp(rsp, dyntick_save_progress_counter); + fqs_state = RCU_FORCE_QS; + } else { + /* Handle dyntick-idle and offline CPUs. */ + force_qs_rnp(rsp, rcu_implicit_dynticks_qs); + } + /* Clear flag to prevent immediate re-entry. */ + if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { + raw_spin_lock_irq(&rnp->lock); + rsp->gp_flags &= ~RCU_GP_FLAG_FQS; + raw_spin_unlock_irq(&rnp->lock); + } + return fqs_state; +} - /* - * Ensure that all grace-period and pre-grace-period activity - * is seen before the assignment to rsp->completed. - */ - smp_mb(); /* See above block comment. */ +/* + * Clean up after the old grace period. + */ +static void rcu_gp_cleanup(struct rcu_state *rsp) +{ + unsigned long gp_duration; + struct rcu_data *rdp; + struct rcu_node *rnp = rcu_get_root(rsp); + + raw_spin_lock_irq(&rnp->lock); gp_duration = jiffies - rsp->gp_start; if (gp_duration > rsp->gp_max) rsp->gp_max = gp_duration; @@ -1161,35 +1285,149 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) * they can do to advance the grace period. It is therefore * safe for us to drop the lock in order to mark the grace * period as completed in all of the rcu_node structures. - * - * But if this CPU needs another grace period, it will take - * care of this while initializing the next grace period. - * We use RCU_WAIT_TAIL instead of the usual RCU_DONE_TAIL - * because the callbacks have not yet been advanced: Those - * callbacks are waiting on the grace period that just now - * completed. 
*/ - if (*rdp->nxttail[RCU_WAIT_TAIL] == NULL) { - raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ + raw_spin_unlock_irq(&rnp->lock); - /* - * Propagate new ->completed value to rcu_node structures - * so that other CPUs don't have to wait until the start - * of the next grace period to process their callbacks. - */ - rcu_for_each_node_breadth_first(rsp, rnp) { - raw_spin_lock(&rnp->lock); /* irqs already disabled. */ - rnp->completed = rsp->gpnum; - raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ - } - rnp = rcu_get_root(rsp); - raw_spin_lock(&rnp->lock); /* irqs already disabled. */ + /* + * Propagate new ->completed value to rcu_node structures so + * that other CPUs don't have to wait until the start of the next + * grace period to process their callbacks. This also avoids + * some nasty RCU grace-period initialization races by forcing + * the end of the current grace period to be completely recorded in + * all of the rcu_node structures before the beginning of the next + * grace period is recorded in any of the rcu_node structures. + */ + rcu_for_each_node_breadth_first(rsp, rnp) { + raw_spin_lock_irq(&rnp->lock); + rnp->completed = rsp->gpnum; + raw_spin_unlock_irq(&rnp->lock); + cond_resched(); } + rnp = rcu_get_root(rsp); + raw_spin_lock_irq(&rnp->lock); - rsp->completed = rsp->gpnum; /* Declare the grace period complete. */ + rsp->completed = rsp->gpnum; /* Declare grace period done. */ trace_rcu_grace_period(rsp->name, rsp->completed, "end"); rsp->fqs_state = RCU_GP_IDLE; - rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */ + rdp = this_cpu_ptr(rsp->rda); + if (cpu_needs_another_gp(rsp, rdp)) + rsp->gp_flags = 1; + raw_spin_unlock_irq(&rnp->lock); +} + +/* + * Body of kthread that handles grace periods. + */ +static int __noreturn rcu_gp_kthread(void *arg) +{ + int fqs_state; + unsigned long j; + int ret; + struct rcu_state *rsp = arg; + struct rcu_node *rnp = rcu_get_root(rsp); + + for (;;) { + + /* Handle grace-period start. */ + for (;;) { + wait_event_interruptible(rsp->gp_wq, + rsp->gp_flags & + RCU_GP_FLAG_INIT); + if ((rsp->gp_flags & RCU_GP_FLAG_INIT) && + rcu_gp_init(rsp)) + break; + cond_resched(); + flush_signals(current); + } + + /* Handle quiescent-state forcing. */ + fqs_state = RCU_SAVE_DYNTICK; + j = jiffies_till_first_fqs; + if (j > HZ) { + j = HZ; + jiffies_till_first_fqs = HZ; + } + for (;;) { + rsp->jiffies_force_qs = jiffies + j; + ret = wait_event_interruptible_timeout(rsp->gp_wq, + (rsp->gp_flags & RCU_GP_FLAG_FQS) || + (!ACCESS_ONCE(rnp->qsmask) && + !rcu_preempt_blocked_readers_cgp(rnp)), + j); + /* If grace period done, leave loop. */ + if (!ACCESS_ONCE(rnp->qsmask) && + !rcu_preempt_blocked_readers_cgp(rnp)) + break; + /* If time for quiescent-state forcing, do it. */ + if (ret == 0 || (rsp->gp_flags & RCU_GP_FLAG_FQS)) { + fqs_state = rcu_gp_fqs(rsp, fqs_state); + cond_resched(); + } else { + /* Deal with stray signal. */ + cond_resched(); + flush_signals(current); + } + j = jiffies_till_next_fqs; + if (j > HZ) { + j = HZ; + jiffies_till_next_fqs = HZ; + } else if (j < 1) { + j = 1; + jiffies_till_next_fqs = 1; + } + } + + /* Handle grace-period end. */ + rcu_gp_cleanup(rsp); + } +} + +/* + * Start a new RCU grace period if warranted, re-initializing the hierarchy + * in preparation for detecting the next grace period. The caller must hold + * the root node's ->lock, which is released before return. Hard irqs must + * be disabled. 
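(Aside: rcu_gp_kthread() above moves grace-period start, quiescent-state forcing and cleanup into one kthread that sleeps until a gp_flags bit asks for work. A stripped-down sketch of that control flow, with invented demo_* names, a single int flag and none of the real locking; the patch itself protects gp_flags with the root rcu_node ->lock.)

#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/init.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_gp_wq);
static int demo_gp_flags;		/* set by requesters, cleared by the kthread */

static int demo_gp_kthread(void *unused)
{
	for (;;) {
		/* Sleep until somebody asks for a grace period... */
		if (wait_event_interruptible(demo_gp_wq, demo_gp_flags != 0))
			flush_signals(current);	/* ...shrugging off stray signals. */
		if (!demo_gp_flags)
			continue;
		demo_gp_flags = 0;
		/* initialize the GP, force quiescent states, clean up ... */
	}
	return 0;
}

/* Requesters only set the flag and wake the kthread, which is the shape of
 * the new rcu_start_gp() and force_quiescent_state() above.
 */
static void demo_request_gp(void)
{
	demo_gp_flags = 1;
	wake_up(&demo_gp_wq);
}

static int __init demo_gp_init(void)
{
	struct task_struct *t = kthread_run(demo_gp_kthread, NULL, "demo_gp");

	if (IS_ERR(t))
		return PTR_ERR(t);
	demo_request_gp();		/* kick one demonstration "grace period" */
	return 0;
}
early_initcall(demo_gp_init);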
+ * + * Note that it is legal for a dying CPU (which is marked as offline) to + * invoke this function. This can happen when the dying CPU reports its + * quiescent state. + */ +static void +rcu_start_gp(struct rcu_state *rsp, unsigned long flags) + __releases(rcu_get_root(rsp)->lock) +{ + struct rcu_data *rdp = this_cpu_ptr(rsp->rda); + struct rcu_node *rnp = rcu_get_root(rsp); + + if (!rsp->gp_kthread || + !cpu_needs_another_gp(rsp, rdp)) { + /* + * Either we have not yet spawned the grace-period + * task or this CPU does not need another grace period. + * Either way, don't start a new grace period. + */ + raw_spin_unlock_irqrestore(&rnp->lock, flags); + return; + } + + rsp->gp_flags = RCU_GP_FLAG_INIT; + raw_spin_unlock_irqrestore(&rnp->lock, flags); + wake_up(&rsp->gp_wq); +} + +/* + * Report a full set of quiescent states to the specified rcu_state + * data structure. This involves cleaning up after the prior grace + * period and letting rcu_start_gp() start up the next grace period + * if one is needed. Note that the caller must hold rnp->lock, as + * required by rcu_start_gp(), which will release it. + */ +static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags) + __releases(rcu_get_root(rsp)->lock) +{ + WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); + wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ } /* @@ -1258,7 +1496,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp, * based on quiescent states detected in an earlier grace period! */ static void -rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long lastgp) +rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp) { unsigned long flags; unsigned long mask; @@ -1266,7 +1504,8 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp, long las rnp = rdp->mynode; raw_spin_lock_irqsave(&rnp->lock, flags); - if (lastgp != rnp->gpnum || rnp->completed == rnp->gpnum) { + if (rdp->passed_quiesce == 0 || rdp->gpnum != rnp->gpnum || + rnp->completed == rnp->gpnum) { /* * The grace period in which this quiescent state was @@ -1325,7 +1564,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp) * Tell RCU we are done (but rcu_report_qs_rdp() will be the * judge of that). */ - rcu_report_qs_rdp(rdp->cpu, rsp, rdp, rdp->passed_quiesce_gpnum); + rcu_report_qs_rdp(rdp->cpu, rsp, rdp); } #ifdef CONFIG_HOTPLUG_CPU @@ -1390,17 +1629,6 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) int i; struct rcu_data *rdp = __this_cpu_ptr(rsp->rda); - /* - * If there is an rcu_barrier() operation in progress, then - * only the task doing that operation is permitted to adopt - * callbacks. To do otherwise breaks rcu_barrier() and friends - * by causing them to fail to wait for the callbacks in the - * orphanage. - */ - if (rsp->rcu_barrier_in_progress && - rsp->rcu_barrier_in_progress != current) - return; - /* Do the accounting first. */ rdp->qlen_lazy += rsp->qlen_lazy; rdp->qlen += rsp->qlen; @@ -1455,9 +1683,8 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) * The CPU has been completely removed, and some other CPU is reporting * this fact from process context. Do the remainder of the cleanup, * including orphaning the outgoing CPU's RCU callbacks, and also - * adopting them, if there is no _rcu_barrier() instance running. - * There can only be one CPU hotplug operation at a time, so no other - * CPU can be attempting to update rcu_cpu_kthread_task. 
+ * adopting them. There can only be one CPU hotplug operation at a time, + * so no other CPU can be attempting to update rcu_cpu_kthread_task. */ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) { @@ -1468,8 +1695,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) struct rcu_node *rnp = rdp->mynode; /* Outgoing CPU's rdp & rnp. */ /* Adjust any no-longer-needed kthreads. */ - rcu_stop_cpu_kthread(cpu); - rcu_node_kthread_setaffinity(rnp, -1); + rcu_boost_kthread_setaffinity(rnp, -1); /* Remove the dead CPU from the bitmasks in the rcu_node hierarchy. */ @@ -1515,14 +1741,13 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp) WARN_ONCE(rdp->qlen != 0 || rdp->nxtlist != NULL, "rcu_cleanup_dead_cpu: Callbacks on offline CPU %d: qlen=%lu, nxtlist=%p\n", cpu, rdp->qlen, rdp->nxtlist); + init_callback_list(rdp); + /* Disallow further callbacks on this CPU. */ + rdp->nxttail[RCU_NEXT_TAIL] = NULL; } #else /* #ifdef CONFIG_HOTPLUG_CPU */ -static void rcu_adopt_orphan_cbs(struct rcu_state *rsp) -{ -} - static void rcu_cleanup_dying_cpu(struct rcu_state *rsp) { } @@ -1687,6 +1912,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) struct rcu_node *rnp; rcu_for_each_leaf_node(rsp, rnp) { + cond_resched(); mask = 0; raw_spin_lock_irqsave(&rnp->lock, flags); if (!rcu_gp_in_progress(rsp)) { @@ -1723,72 +1949,39 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) * Force quiescent states on reluctant CPUs, and also detect which * CPUs are in dyntick-idle mode. */ -static void force_quiescent_state(struct rcu_state *rsp, int relaxed) +static void force_quiescent_state(struct rcu_state *rsp) { unsigned long flags; - struct rcu_node *rnp = rcu_get_root(rsp); - - trace_rcu_utilization("Start fqs"); - if (!rcu_gp_in_progress(rsp)) { - trace_rcu_utilization("End fqs"); - return; /* No grace period in progress, nothing to force. */ - } - if (!raw_spin_trylock_irqsave(&rsp->fqslock, flags)) { - rsp->n_force_qs_lh++; /* Inexact, can lose counts. Tough! */ - trace_rcu_utilization("End fqs"); - return; /* Someone else is already on the job. */ - } - if (relaxed && ULONG_CMP_GE(rsp->jiffies_force_qs, jiffies)) - goto unlock_fqs_ret; /* no emergency and done recently. */ - rsp->n_force_qs++; - raw_spin_lock(&rnp->lock); /* irqs already disabled */ - rsp->jiffies_force_qs = jiffies + RCU_JIFFIES_TILL_FORCE_QS; - if(!rcu_gp_in_progress(rsp)) { - rsp->n_force_qs_ngp++; - raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ - goto unlock_fqs_ret; /* no GP in progress, time updated. */ - } - rsp->fqs_active = 1; - switch (rsp->fqs_state) { - case RCU_GP_IDLE: - case RCU_GP_INIT: - - break; /* grace period idle or initializing, ignore. */ - - case RCU_SAVE_DYNTICK: - - raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ - - /* Record dyntick-idle state. */ - force_qs_rnp(rsp, dyntick_save_progress_counter); - raw_spin_lock(&rnp->lock); /* irqs already disabled */ - if (rcu_gp_in_progress(rsp)) - rsp->fqs_state = RCU_FORCE_QS; - break; - - case RCU_FORCE_QS: - - /* Check dyntick-idle state, send IPI to laggarts. */ - raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ - force_qs_rnp(rsp, rcu_implicit_dynticks_qs); - - /* Leave state in case more forcing is required. */ - - raw_spin_lock(&rnp->lock); /* irqs already disabled */ - break; + bool ret; + struct rcu_node *rnp; + struct rcu_node *rnp_old = NULL; + + /* Funnel through hierarchy to reduce memory contention. 
*/ + rnp = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode; + for (; rnp != NULL; rnp = rnp->parent) { + ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) || + !raw_spin_trylock(&rnp->fqslock); + if (rnp_old != NULL) + raw_spin_unlock(&rnp_old->fqslock); + if (ret) { + rsp->n_force_qs_lh++; + return; + } + rnp_old = rnp; } - rsp->fqs_active = 0; - if (rsp->fqs_need_gp) { - raw_spin_unlock(&rsp->fqslock); /* irqs remain disabled */ - rsp->fqs_need_gp = 0; - rcu_start_gp(rsp, flags); /* releases rnp->lock */ - trace_rcu_utilization("End fqs"); - return; + /* rnp_old == rcu_get_root(rsp), rnp == NULL. */ + + /* Reached the root of the rcu_node tree, acquire lock. */ + raw_spin_lock_irqsave(&rnp_old->lock, flags); + raw_spin_unlock(&rnp_old->fqslock); + if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { + rsp->n_force_qs_lh++; + raw_spin_unlock_irqrestore(&rnp_old->lock, flags); + return; /* Someone beat us to it. */ } - raw_spin_unlock(&rnp->lock); /* irqs remain disabled */ -unlock_fqs_ret: - raw_spin_unlock_irqrestore(&rsp->fqslock, flags); - trace_rcu_utilization("End fqs"); + rsp->gp_flags |= RCU_GP_FLAG_FQS; + raw_spin_unlock_irqrestore(&rnp_old->lock, flags); + wake_up(&rsp->gp_wq); /* Memory barrier implied by wake_up() path. */ } /* @@ -1804,13 +1997,6 @@ __rcu_process_callbacks(struct rcu_state *rsp) WARN_ON_ONCE(rdp->beenonline == 0); - /* - * If an RCU GP has gone long enough, go check for dyntick - * idle CPUs and, if needed, send resched IPIs. - */ - if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) - force_quiescent_state(rsp, 1); - /* * Advance callbacks in response to end of earlier grace * period that some other CPU ended. @@ -1838,6 +2024,8 @@ static void rcu_process_callbacks(struct softirq_action *unused) { struct rcu_state *rsp; + if (cpu_is_offline(smp_processor_id())) + return; trace_rcu_utilization("Start RCU core"); for_each_rcu_flavor(rsp) __rcu_process_callbacks(rsp); @@ -1909,12 +2097,11 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, rdp->blimit = LONG_MAX; if (rsp->n_force_qs == rdp->n_force_qs_snap && *rdp->nxttail[RCU_DONE_TAIL] != head) - force_quiescent_state(rsp, 0); + force_quiescent_state(rsp); rdp->n_force_qs_snap = rsp->n_force_qs; rdp->qlen_last_fqs_check = rdp->qlen; } - } else if (ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) - force_quiescent_state(rsp, 1); + } } static void @@ -1929,8 +2116,6 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), head->func = func; head->next = NULL; - smp_mb(); /* Ensure RCU update seen before callback registry. */ - /* * Opportunistically note grace-period endings and beginnings. * Note that we might see a beginning right after we see an @@ -1941,6 +2126,12 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), rdp = this_cpu_ptr(rsp->rda); /* Add the callback to our list. */ + if (unlikely(rdp->nxttail[RCU_NEXT_TAIL] == NULL)) { + /* _call_rcu() is illegal on offline CPU; leak the callback. */ + WARN_ON_ONCE(1); + local_irq_restore(flags); + return; + } ACCESS_ONCE(rdp->qlen)++; if (lazy) rdp->qlen_lazy++; @@ -2195,17 +2386,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) /* Is the RCU core waiting for a quiescent state from this CPU? */ if (rcu_scheduler_fully_active && rdp->qs_pending && !rdp->passed_quiesce) { - - /* - * If force_quiescent_state() coming soon and this CPU - * needs a quiescent state, and this is either RCU-sched - * or RCU-bh, force a local reschedule. 
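(Aside: the funnel in the new force_quiescent_state() above is worth a small model: each contender climbs the rcu_node tree with trylocks, gives up as soon as the request is already pending or a level is busy, and only one winner reaches the root and sets RCU_GP_FLAG_FQS. A user-space toy of the same shape, with three fixed levels and C11 atomics standing in for the fqslock spinlocks; all names are invented. Build with cc -std=c11.)

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define NLEVELS 3	/* leaf .. root, like a tiny rcu_node tree */

static atomic_flag funnel[NLEVELS] = {
	ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT, ATOMIC_FLAG_INIT
};
static atomic_bool fqs_requested;	/* stands in for RCU_GP_FLAG_FQS */

/* Returns true if this caller is the one that published the request. */
static bool request_fqs(void)
{
	int held = -1;	/* level whose funnel "lock" we currently hold */

	for (int i = 0; i < NLEVELS; i++) {
		bool give_up = atomic_load(&fqs_requested) ||
			       atomic_flag_test_and_set(&funnel[i]); /* trylock */
		if (held >= 0)
			atomic_flag_clear(&funnel[held]);	/* drop lower level */
		if (give_up)
			return false;	/* already requested, or this level is busy */
		held = i;
	}
	/* Holding the root-level funnel lock: publish the request once. */
	bool first = !atomic_exchange(&fqs_requested, true);
	atomic_flag_clear(&funnel[held]);
	return first;
}

int main(void)
{
	printf("first caller published: %d\n", request_fqs());		/* 1 */
	printf("second caller published: %d\n", request_fqs());	/* 0 */
	return 0;
}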
- */ rdp->n_rp_qs_pending++; - if (!rdp->preemptible && - ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs) - 1, - jiffies)) - set_need_resched(); } else if (rdp->qs_pending && rdp->passed_quiesce) { rdp->n_rp_report_qs++; return 1; @@ -2235,13 +2416,6 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) return 1; } - /* Has an RCU GP gone long enough to send resched IPIs &c? */ - if (rcu_gp_in_progress(rsp) && - ULONG_CMP_LT(ACCESS_ONCE(rsp->jiffies_force_qs), jiffies)) { - rdp->n_rp_need_fqs++; - return 1; - } - /* nothing to do */ rdp->n_rp_need_nothing++; return 0; @@ -2326,13 +2500,10 @@ static void rcu_barrier_func(void *type) static void _rcu_barrier(struct rcu_state *rsp) { int cpu; - unsigned long flags; struct rcu_data *rdp; - struct rcu_data rd; unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done); unsigned long snap_done; - init_rcu_head_on_stack(&rd.barrier_head); _rcu_barrier_trace(rsp, "Begin", -1, snap); /* Take mutex to serialize concurrent rcu_barrier() requests. */ @@ -2372,70 +2543,30 @@ static void _rcu_barrier(struct rcu_state *rsp) /* * Initialize the count to one rather than to zero in order to * avoid a too-soon return to zero in case of a short grace period - * (or preemption of this task). Also flag this task as doing - * an rcu_barrier(). This will prevent anyone else from adopting - * orphaned callbacks, which could cause otherwise failure if a - * CPU went offline and quickly came back online. To see this, - * consider the following sequence of events: - * - * 1. We cause CPU 0 to post an rcu_barrier_callback() callback. - * 2. CPU 1 goes offline, orphaning its callbacks. - * 3. CPU 0 adopts CPU 1's orphaned callbacks. - * 4. CPU 1 comes back online. - * 5. We cause CPU 1 to post an rcu_barrier_callback() callback. - * 6. Both rcu_barrier_callback() callbacks are invoked, awakening - * us -- but before CPU 1's orphaned callbacks are invoked!!! + * (or preemption of this task). Exclude CPU-hotplug operations + * to ensure that no offline CPU has callbacks queued. */ init_completion(&rsp->barrier_completion); atomic_set(&rsp->barrier_cpu_count, 1); - raw_spin_lock_irqsave(&rsp->onofflock, flags); - rsp->rcu_barrier_in_progress = current; - raw_spin_unlock_irqrestore(&rsp->onofflock, flags); + get_online_cpus(); /* - * Force every CPU with callbacks to register a new callback - * that will tell us when all the preceding callbacks have - * been invoked. If an offline CPU has callbacks, wait for - * it to either come back online or to finish orphaning those - * callbacks. + * Force each CPU with callbacks to register a new callback. + * When that callback is invoked, we will know that all of the + * corresponding CPU's preceding callbacks have been invoked. 
*/ - for_each_possible_cpu(cpu) { - preempt_disable(); + for_each_online_cpu(cpu) { rdp = per_cpu_ptr(rsp->rda, cpu); - if (cpu_is_offline(cpu)) { - _rcu_barrier_trace(rsp, "Offline", cpu, - rsp->n_barrier_done); - preempt_enable(); - while (cpu_is_offline(cpu) && ACCESS_ONCE(rdp->qlen)) - schedule_timeout_interruptible(1); - } else if (ACCESS_ONCE(rdp->qlen)) { + if (ACCESS_ONCE(rdp->qlen)) { _rcu_barrier_trace(rsp, "OnlineQ", cpu, rsp->n_barrier_done); smp_call_function_single(cpu, rcu_barrier_func, rsp, 1); - preempt_enable(); } else { _rcu_barrier_trace(rsp, "OnlineNQ", cpu, rsp->n_barrier_done); - preempt_enable(); } } - - /* - * Now that all online CPUs have rcu_barrier_callback() callbacks - * posted, we can adopt all of the orphaned callbacks and place - * an rcu_barrier_callback() callback after them. When that is done, - * we are guaranteed to have an rcu_barrier_callback() callback - * following every callback that could possibly have been - * registered before _rcu_barrier() was called. - */ - raw_spin_lock_irqsave(&rsp->onofflock, flags); - rcu_adopt_orphan_cbs(rsp); - rsp->rcu_barrier_in_progress = NULL; - raw_spin_unlock_irqrestore(&rsp->onofflock, flags); - atomic_inc(&rsp->barrier_cpu_count); - smp_mb__after_atomic_inc(); /* Ensure atomic_inc() before callback. */ - rd.rsp = rsp; - rsp->call(&rd.barrier_head, rcu_barrier_callback); + put_online_cpus(); /* * Now that we have an rcu_barrier_callback() callback on each @@ -2456,8 +2587,6 @@ static void _rcu_barrier(struct rcu_state *rsp) /* Other rcu_barrier() invocations can now safely proceed. */ mutex_unlock(&rsp->barrier_mutex); - - destroy_rcu_head_on_stack(&rd.barrier_head); } /** @@ -2497,6 +2626,9 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp) rdp->dynticks = &per_cpu(rcu_dynticks, cpu); WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE); WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1); +#ifdef CONFIG_RCU_USER_QS + WARN_ON_ONCE(rdp->dynticks->in_user); +#endif rdp->cpu = cpu; rdp->rsp = rsp; raw_spin_unlock_irqrestore(&rnp->lock, flags); @@ -2523,6 +2655,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) rdp->qlen_last_fqs_check = 0; rdp->n_force_qs_snap = rsp->n_force_qs; rdp->blimit = blimit; + init_callback_list(rdp); /* Re-enable callbacks on this CPU. */ rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE; atomic_set(&rdp->dynticks->dynticks, (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1); @@ -2555,7 +2688,6 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) rdp->completed = rnp->completed; rdp->passed_quiesce = 0; rdp->qs_pending = 0; - rdp->passed_quiesce_gpnum = rnp->gpnum - 1; trace_rcu_grace_period(rsp->name, rdp->gpnum, "cpuonl"); } raw_spin_unlock(&rnp->lock); /* irqs already disabled. */ @@ -2594,12 +2726,10 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, break; case CPU_ONLINE: case CPU_DOWN_FAILED: - rcu_node_kthread_setaffinity(rnp, -1); - rcu_cpu_kthread_setrt(cpu, 1); + rcu_boost_kthread_setaffinity(rnp, -1); break; case CPU_DOWN_PREPARE: - rcu_node_kthread_setaffinity(rnp, cpu); - rcu_cpu_kthread_setrt(cpu, 0); + rcu_boost_kthread_setaffinity(rnp, cpu); break; case CPU_DYING: case CPU_DYING_FROZEN: @@ -2626,6 +2756,28 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } +/* + * Spawn the kthread that handles this RCU flavor's grace periods. 
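(Aside: the rewritten _rcu_barrier() above keeps the count-starts-at-one trick called out in its comment: barrier_cpu_count begins at 1 so the completion cannot fire while callbacks are still being posted, and the poster drops that initial reference only after visiting every CPU. A kernel-flavoured schematic of just that counting; the demo_* names are mine, and the real code posts each callback via smp_call_function_single() rather than from one CPU.)

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/rcupdate.h>

static atomic_t demo_barrier_count;
static struct completion demo_barrier_done;

static void demo_barrier_cb(struct rcu_head *rhp)	/* runs after a grace period */
{
	if (atomic_dec_and_test(&demo_barrier_count))
		complete(&demo_barrier_done);
}

static void demo_barrier(struct rcu_head *heads, int n)
{
	int i;

	init_completion(&demo_barrier_done);
	atomic_set(&demo_barrier_count, 1);	/* self-reference, held while posting */
	for (i = 0; i < n; i++) {
		atomic_inc(&demo_barrier_count);
		call_rcu(&heads[i], demo_barrier_cb);
	}
	if (atomic_dec_and_test(&demo_barrier_count))	/* drop the self-reference */
		complete(&demo_barrier_done);
	wait_for_completion(&demo_barrier_done);
}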
+ */ +static int __init rcu_spawn_gp_kthread(void) +{ + unsigned long flags; + struct rcu_node *rnp; + struct rcu_state *rsp; + struct task_struct *t; + + for_each_rcu_flavor(rsp) { + t = kthread_run(rcu_gp_kthread, rsp, rsp->name); + BUG_ON(IS_ERR(t)); + rnp = rcu_get_root(rsp); + raw_spin_lock_irqsave(&rnp->lock, flags); + rsp->gp_kthread = t; + raw_spin_unlock_irqrestore(&rnp->lock, flags); + } + return 0; +} +early_initcall(rcu_spawn_gp_kthread); + /* * This function is invoked towards the end of the scheduler's initialization * process. Before this is called, the idle task might contain @@ -2661,7 +2813,7 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) int cprv; int i; - cprv = NR_CPUS; + cprv = nr_cpu_ids; for (i = rcu_num_lvls - 1; i >= 0; i--) { ccur = rsp->levelcnt[i]; rsp->levelspread[i] = (cprv + ccur - 1) / ccur; @@ -2676,10 +2828,14 @@ static void __init rcu_init_levelspread(struct rcu_state *rsp) static void __init rcu_init_one(struct rcu_state *rsp, struct rcu_data __percpu *rda) { - static char *buf[] = { "rcu_node_level_0", - "rcu_node_level_1", - "rcu_node_level_2", - "rcu_node_level_3" }; /* Match MAX_RCU_LVLS */ + static char *buf[] = { "rcu_node_0", + "rcu_node_1", + "rcu_node_2", + "rcu_node_3" }; /* Match MAX_RCU_LVLS */ + static char *fqs[] = { "rcu_node_fqs_0", + "rcu_node_fqs_1", + "rcu_node_fqs_2", + "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */ int cpustride = 1; int i; int j; @@ -2704,7 +2860,11 @@ static void __init rcu_init_one(struct rcu_state *rsp, raw_spin_lock_init(&rnp->lock); lockdep_set_class_and_name(&rnp->lock, &rcu_node_class[i], buf[i]); - rnp->gpnum = 0; + raw_spin_lock_init(&rnp->fqslock); + lockdep_set_class_and_name(&rnp->fqslock, + &rcu_fqs_class[i], fqs[i]); + rnp->gpnum = rsp->gpnum; + rnp->completed = rsp->completed; rnp->qsmask = 0; rnp->qsmaskinit = 0; rnp->grplo = j * cpustride; @@ -2727,6 +2887,7 @@ static void __init rcu_init_one(struct rcu_state *rsp, } rsp->rda = rda; + init_waitqueue_head(&rsp->gp_wq); rnp = rsp->level[rcu_num_lvls - 1]; for_each_possible_cpu(i) { while (i > rnp->grphi) @@ -2750,7 +2911,8 @@ static void __init rcu_init_geometry(void) int rcu_capacity[MAX_RCU_LVLS + 1]; /* If the compile-time values are accurate, just leave. */ - if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF) + if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF && + nr_cpu_ids == NR_CPUS) return; /* diff --git a/trunk/kernel/rcutree.h b/trunk/kernel/rcutree.h index 4d29169f2124..5faf05d68326 100644 --- a/trunk/kernel/rcutree.h +++ b/trunk/kernel/rcutree.h @@ -102,6 +102,10 @@ struct rcu_dynticks { /* idle-period nonlazy_posted snapshot. */ int tick_nohz_enabled_snap; /* Previously seen value from sysfs. */ #endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ +#ifdef CONFIG_RCU_USER_QS + bool ignore_user_qs; /* Treat userspace as extended QS or not */ + bool in_user; /* Is the CPU in userland from RCU POV? */ +#endif }; /* RCU's kthread states for tracing. */ @@ -196,12 +200,7 @@ struct rcu_node { /* Refused to boost: not sure why, though. */ /* This can happen due to race conditions. */ #endif /* #ifdef CONFIG_RCU_BOOST */ - struct task_struct *node_kthread_task; - /* kthread that takes care of this rcu_node */ - /* structure, for example, awakening the */ - /* per-CPU kthreads as needed. */ - unsigned int node_kthread_status; - /* State of node_kthread_task for tracing. */ + raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp; /* @@ -245,8 +244,6 @@ struct rcu_data { /* in order to detect GP end. 
*/ unsigned long gpnum; /* Highest gp number that this CPU */ /* is aware of having started. */ - unsigned long passed_quiesce_gpnum; - /* gpnum at time of quiescent state. */ bool passed_quiesce; /* User-mode/idle loop etc. */ bool qs_pending; /* Core waits for quiesc state. */ bool beenonline; /* CPU online at least once. */ @@ -312,11 +309,13 @@ struct rcu_data { unsigned long n_rp_cpu_needs_gp; unsigned long n_rp_gp_completed; unsigned long n_rp_gp_started; - unsigned long n_rp_need_fqs; unsigned long n_rp_need_nothing; - /* 6) _rcu_barrier() callback. */ + /* 6) _rcu_barrier() and OOM callbacks. */ struct rcu_head barrier_head; +#ifdef CONFIG_RCU_FAST_NO_HZ + struct rcu_head oom_head; +#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */ int cpu; struct rcu_state *rsp; @@ -375,20 +374,17 @@ struct rcu_state { u8 fqs_state ____cacheline_internodealigned_in_smp; /* Force QS state. */ - u8 fqs_active; /* force_quiescent_state() */ - /* is running. */ - u8 fqs_need_gp; /* A CPU was prevented from */ - /* starting a new grace */ - /* period because */ - /* force_quiescent_state() */ - /* was running. */ u8 boost; /* Subject to priority boost. */ unsigned long gpnum; /* Current gp number. */ unsigned long completed; /* # of last completed gp. */ + struct task_struct *gp_kthread; /* Task for grace periods. */ + wait_queue_head_t gp_wq; /* Where GP task waits. */ + int gp_flags; /* Commands for GP task. */ /* End of fields guarded by root rcu_node's lock. */ - raw_spinlock_t onofflock; /* exclude on/offline and */ + raw_spinlock_t onofflock ____cacheline_internodealigned_in_smp; + /* exclude on/offline and */ /* starting new GP. */ struct rcu_head *orphan_nxtlist; /* Orphaned callbacks that */ /* need a grace period. */ @@ -398,16 +394,11 @@ struct rcu_state { struct rcu_head **orphan_donetail; /* Tail of above. */ long qlen_lazy; /* Number of lazy callbacks. */ long qlen; /* Total number of callbacks. */ - struct task_struct *rcu_barrier_in_progress; - /* Task doing rcu_barrier(), */ - /* or NULL if no barrier. */ struct mutex barrier_mutex; /* Guards barrier fields. */ atomic_t barrier_cpu_count; /* # CPUs waiting on. */ struct completion barrier_completion; /* Wake at barrier end. */ unsigned long n_barrier_done; /* ++ at start and end of */ /* _rcu_barrier(). */ - raw_spinlock_t fqslock; /* Only one task forcing */ - /* quiescent states. */ unsigned long jiffies_force_qs; /* Time at which to invoke */ /* force_quiescent_state(). */ unsigned long n_force_qs; /* Number of calls to */ @@ -426,6 +417,10 @@ struct rcu_state { struct list_head flavors; /* List of RCU flavors. */ }; +/* Values for rcu_state structure's gp_flags field. */ +#define RCU_GP_FLAG_INIT 0x1 /* Need grace-period initialization. */ +#define RCU_GP_FLAG_FQS 0x2 /* Need grace-period quiescent-state forcing. 
*/ + extern struct list_head rcu_struct_flavors; #define for_each_rcu_flavor(rsp) \ list_for_each_entry((rsp), &rcu_struct_flavors, flavors) @@ -468,7 +463,6 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp); #ifdef CONFIG_HOTPLUG_CPU static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags); -static void rcu_stop_cpu_kthread(int cpu); #endif /* #ifdef CONFIG_HOTPLUG_CPU */ static void rcu_print_detail_task_stall(struct rcu_state *rsp); static int rcu_print_task_stall(struct rcu_node *rnp); @@ -491,15 +485,9 @@ static void invoke_rcu_callbacks_kthread(void); static bool rcu_is_callbacks_kthread(void); #ifdef CONFIG_RCU_BOOST static void rcu_preempt_do_callbacks(void); -static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, - cpumask_var_t cm); static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, - struct rcu_node *rnp, - int rnp_index); -static void invoke_rcu_node_kthread(struct rcu_node *rnp); -static void rcu_yield(void (*f)(unsigned long), unsigned long arg); + struct rcu_node *rnp); #endif /* #ifdef CONFIG_RCU_BOOST */ -static void rcu_cpu_kthread_setrt(int cpu, int to_rt); static void __cpuinit rcu_prepare_kthreads(int cpu); static void rcu_prepare_for_idle_init(int cpu); static void rcu_cleanup_after_idle(int cpu); diff --git a/trunk/kernel/rcutree_plugin.h b/trunk/kernel/rcutree_plugin.h index 7f3244c0df01..f92115488187 100644 --- a/trunk/kernel/rcutree_plugin.h +++ b/trunk/kernel/rcutree_plugin.h @@ -25,6 +25,8 @@ */ #include +#include +#include #define RCU_KTHREAD_PRIO 1 @@ -118,7 +120,7 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed); */ void rcu_force_quiescent_state(void) { - force_quiescent_state(&rcu_preempt_state, 0); + force_quiescent_state(&rcu_preempt_state); } EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); @@ -136,8 +138,6 @@ static void rcu_preempt_qs(int cpu) { struct rcu_data *rdp = &per_cpu(rcu_preempt_data, cpu); - rdp->passed_quiesce_gpnum = rdp->gpnum; - barrier(); if (rdp->passed_quiesce == 0) trace_rcu_grace_period("rcu_preempt", rdp->gpnum, "cpuqs"); rdp->passed_quiesce = 1; @@ -422,9 +422,11 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp) unsigned long flags; struct task_struct *t; - if (!rcu_preempt_blocked_readers_cgp(rnp)) - return; raw_spin_lock_irqsave(&rnp->lock, flags); + if (!rcu_preempt_blocked_readers_cgp(rnp)) { + raw_spin_unlock_irqrestore(&rnp->lock, flags); + return; + } t = list_entry(rnp->gp_tasks, struct task_struct, rcu_node_entry); list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) @@ -584,17 +586,23 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ } + rnp->gp_tasks = NULL; + rnp->exp_tasks = NULL; #ifdef CONFIG_RCU_BOOST - /* In case root is being boosted and leaf is not. */ + rnp->boost_tasks = NULL; + /* + * In case root is being boosted and leaf was not. Make sure + * that we boost the tasks blocking the current grace period + * in this case. 
+ */ raw_spin_lock(&rnp_root->lock); /* irqs already disabled */ if (rnp_root->boost_tasks != NULL && - rnp_root->boost_tasks != rnp_root->gp_tasks) + rnp_root->boost_tasks != rnp_root->gp_tasks && + rnp_root->boost_tasks != rnp_root->exp_tasks) rnp_root->boost_tasks = rnp_root->gp_tasks; raw_spin_unlock(&rnp_root->lock); /* irqs still disabled */ #endif /* #ifdef CONFIG_RCU_BOOST */ - rnp->gp_tasks = NULL; - rnp->exp_tasks = NULL; return retval; } @@ -676,7 +684,7 @@ void synchronize_rcu(void) EXPORT_SYMBOL_GPL(synchronize_rcu); static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq); -static long sync_rcu_preempt_exp_count; +static unsigned long sync_rcu_preempt_exp_count; static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex); /* @@ -791,41 +799,55 @@ void synchronize_rcu_expedited(void) unsigned long flags; struct rcu_node *rnp; struct rcu_state *rsp = &rcu_preempt_state; - long snap; + unsigned long snap; int trycount = 0; smp_mb(); /* Caller's modifications seen first by other CPUs. */ snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1; smp_mb(); /* Above access cannot bleed into critical section. */ + /* + * Block CPU-hotplug operations. This means that any CPU-hotplug + * operation that finds an rcu_node structure with tasks in the + * process of being boosted will know that all tasks blocking + * this expedited grace period will already be in the process of + * being boosted. This simplifies the process of moving tasks + * from leaf to root rcu_node structures. + */ + get_online_cpus(); + /* * Acquire lock, falling back to synchronize_rcu() if too many * lock-acquisition failures. Of course, if someone does the * expedited grace period for us, just leave. */ while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) { + if (ULONG_CMP_LT(snap, + ACCESS_ONCE(sync_rcu_preempt_exp_count))) { + put_online_cpus(); + goto mb_ret; /* Others did our work for us. */ + } if (trycount++ < 10) { udelay(trycount * num_online_cpus()); } else { + put_online_cpus(); synchronize_rcu(); return; } - if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) - goto mb_ret; /* Others did our work for us. */ } - if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0) + if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) { + put_online_cpus(); goto unlock_mb_ret; /* Others did our work for us. */ + } /* force all RCU readers onto ->blkd_tasks lists. */ synchronize_sched_expedited(); - raw_spin_lock_irqsave(&rsp->onofflock, flags); - /* Initialize ->expmask for all non-leaf rcu_node structures. */ rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) { - raw_spin_lock(&rnp->lock); /* irqs already disabled. */ + raw_spin_lock_irqsave(&rnp->lock, flags); rnp->expmask = rnp->qsmaskinit; - raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */ + raw_spin_unlock_irqrestore(&rnp->lock, flags); } /* Snapshot current state of ->blkd_tasks lists. */ @@ -834,7 +856,7 @@ void synchronize_rcu_expedited(void) if (NUM_RCU_NODES > 1) sync_rcu_preempt_exp_init(rsp, rcu_get_root(rsp)); - raw_spin_unlock_irqrestore(&rsp->onofflock, flags); + put_online_cpus(); /* Wait for snapshotted ->blkd_tasks lists to drain. 
*/ rnp = rcu_get_root(rsp); @@ -1069,6 +1091,16 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp) #endif /* #else #ifdef CONFIG_RCU_TRACE */ +static void rcu_wake_cond(struct task_struct *t, int status) +{ + /* + * If the thread is yielding, only wake it when this + * is invoked from idle + */ + if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) + wake_up_process(t); +} + /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the @@ -1140,17 +1172,6 @@ static int rcu_boost(struct rcu_node *rnp) ACCESS_ONCE(rnp->boost_tasks) != NULL; } -/* - * Timer handler to initiate waking up of boost kthreads that - * have yielded the CPU due to excessive numbers of tasks to - * boost. We wake up the per-rcu_node kthread, which in turn - * will wake up the booster kthread. - */ -static void rcu_boost_kthread_timer(unsigned long arg) -{ - invoke_rcu_node_kthread((struct rcu_node *)arg); -} - /* * Priority-boosting kthread. One per leaf rcu_node and one for the * root rcu_node. @@ -1174,8 +1195,9 @@ static int rcu_boost_kthread(void *arg) else spincnt = 0; if (spincnt > 10) { + rnp->boost_kthread_status = RCU_KTHREAD_YIELDING; trace_rcu_utilization("End boost kthread@rcu_yield"); - rcu_yield(rcu_boost_kthread_timer, (unsigned long)rnp); + schedule_timeout_interruptible(2); trace_rcu_utilization("Start boost kthread@rcu_yield"); spincnt = 0; } @@ -1191,9 +1213,9 @@ static int rcu_boost_kthread(void *arg) * kthread to start boosting them. If there is an expedited grace * period in progress, it is always time to boost. * - * The caller must hold rnp->lock, which this function releases, - * but irqs remain disabled. The ->boost_kthread_task is immortal, - * so we don't need to worry about it going away. + * The caller must hold rnp->lock, which this function releases. + * The ->boost_kthread_task is immortal, so we don't need to worry + * about it going away. */ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) { @@ -1213,8 +1235,8 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) rnp->boost_tasks = rnp->gp_tasks; raw_spin_unlock_irqrestore(&rnp->lock, flags); t = rnp->boost_kthread_task; - if (t != NULL) - wake_up_process(t); + if (t) + rcu_wake_cond(t, rnp->boost_kthread_status); } else { rcu_initiate_boost_trace(rnp); raw_spin_unlock_irqrestore(&rnp->lock, flags); @@ -1231,8 +1253,10 @@ static void invoke_rcu_callbacks_kthread(void) local_irq_save(flags); __this_cpu_write(rcu_cpu_has_work, 1); if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && - current != __this_cpu_read(rcu_cpu_kthread_task)) - wake_up_process(__this_cpu_read(rcu_cpu_kthread_task)); + current != __this_cpu_read(rcu_cpu_kthread_task)) { + rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), + __this_cpu_read(rcu_cpu_kthread_status)); + } local_irq_restore(flags); } @@ -1245,21 +1269,6 @@ static bool rcu_is_callbacks_kthread(void) return __get_cpu_var(rcu_cpu_kthread_task) == current; } -/* - * Set the affinity of the boost kthread. The CPU-hotplug locks are - * held, so no one should be messing with the existence of the boost - * kthread. 
- */ -static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, - cpumask_var_t cm) -{ - struct task_struct *t; - - t = rnp->boost_kthread_task; - if (t != NULL) - set_cpus_allowed_ptr(rnp->boost_kthread_task, cm); -} - #define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000) /* @@ -1276,15 +1285,19 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) * Returns zero if all is well, a negated errno otherwise. */ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, - struct rcu_node *rnp, - int rnp_index) + struct rcu_node *rnp) { + int rnp_index = rnp - &rsp->node[0]; unsigned long flags; struct sched_param sp; struct task_struct *t; if (&rcu_preempt_state != rsp) return 0; + + if (!rcu_scheduler_fully_active || rnp->qsmaskinit == 0) + return 0; + rsp->boost = 1; if (rnp->boost_kthread_task != NULL) return 0; @@ -1301,25 +1314,6 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, return 0; } -#ifdef CONFIG_HOTPLUG_CPU - -/* - * Stop the RCU's per-CPU kthread when its CPU goes offline,. - */ -static void rcu_stop_cpu_kthread(int cpu) -{ - struct task_struct *t; - - /* Stop the CPU's kthread. */ - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (t != NULL) { - per_cpu(rcu_cpu_kthread_task, cpu) = NULL; - kthread_stop(t); - } -} - -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ - static void rcu_kthread_do_work(void) { rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data)); @@ -1327,112 +1321,22 @@ static void rcu_kthread_do_work(void) rcu_preempt_do_callbacks(); } -/* - * Wake up the specified per-rcu_node-structure kthread. - * Because the per-rcu_node kthreads are immortal, we don't need - * to do anything to keep them alive. - */ -static void invoke_rcu_node_kthread(struct rcu_node *rnp) -{ - struct task_struct *t; - - t = rnp->node_kthread_task; - if (t != NULL) - wake_up_process(t); -} - -/* - * Set the specified CPU's kthread to run RT or not, as specified by - * the to_rt argument. The CPU-hotplug locks are held, so the task - * is not going away. - */ -static void rcu_cpu_kthread_setrt(int cpu, int to_rt) +static void rcu_cpu_kthread_setup(unsigned int cpu) { - int policy; struct sched_param sp; - struct task_struct *t; - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (t == NULL) - return; - if (to_rt) { - policy = SCHED_FIFO; - sp.sched_priority = RCU_KTHREAD_PRIO; - } else { - policy = SCHED_NORMAL; - sp.sched_priority = 0; - } - sched_setscheduler_nocheck(t, policy, &sp); + sp.sched_priority = RCU_KTHREAD_PRIO; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); } -/* - * Timer handler to initiate the waking up of per-CPU kthreads that - * have yielded the CPU due to excess numbers of RCU callbacks. - * We wake up the per-rcu_node kthread, which in turn will wake up - * the booster kthread. - */ -static void rcu_cpu_kthread_timer(unsigned long arg) +static void rcu_cpu_kthread_park(unsigned int cpu) { - struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg); - struct rcu_node *rnp = rdp->mynode; - - atomic_or(rdp->grpmask, &rnp->wakemask); - invoke_rcu_node_kthread(rnp); + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; } -/* - * Drop to non-real-time priority and yield, but only after posting a - * timer that will cause us to regain our real-time priority if we - * remain preempted. Either way, we restore our real-time priority - * before returning. 
- */ -static void rcu_yield(void (*f)(unsigned long), unsigned long arg) +static int rcu_cpu_kthread_should_run(unsigned int cpu) { - struct sched_param sp; - struct timer_list yield_timer; - int prio = current->rt_priority; - - setup_timer_on_stack(&yield_timer, f, arg); - mod_timer(&yield_timer, jiffies + 2); - sp.sched_priority = 0; - sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp); - set_user_nice(current, 19); - schedule(); - set_user_nice(current, 0); - sp.sched_priority = prio; - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); - del_timer(&yield_timer); -} - -/* - * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU. - * This can happen while the corresponding CPU is either coming online - * or going offline. We cannot wait until the CPU is fully online - * before starting the kthread, because the various notifier functions - * can wait for RCU grace periods. So we park rcu_cpu_kthread() until - * the corresponding CPU is online. - * - * Return 1 if the kthread needs to stop, 0 otherwise. - * - * Caller must disable bh. This function can momentarily enable it. - */ -static int rcu_cpu_kthread_should_stop(int cpu) -{ - while (cpu_is_offline(cpu) || - !cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu)) || - smp_processor_id() != cpu) { - if (kthread_should_stop()) - return 1; - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; - per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id(); - local_bh_enable(); - schedule_timeout_uninterruptible(1); - if (!cpumask_equal(¤t->cpus_allowed, cpumask_of(cpu))) - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - local_bh_disable(); - } - per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; - return 0; + return __get_cpu_var(rcu_cpu_has_work); } /* @@ -1440,138 +1344,35 @@ static int rcu_cpu_kthread_should_stop(int cpu) * RCU softirq used in flavors and configurations of RCU that do not * support RCU priority boosting. */ -static int rcu_cpu_kthread(void *arg) +static void rcu_cpu_kthread(unsigned int cpu) { - int cpu = (int)(long)arg; - unsigned long flags; - int spincnt = 0; - unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu); - char work; - char *workp = &per_cpu(rcu_cpu_has_work, cpu); + unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status); + char work, *workp = &__get_cpu_var(rcu_cpu_has_work); + int spincnt; - trace_rcu_utilization("Start CPU kthread@init"); - for (;;) { - *statusp = RCU_KTHREAD_WAITING; - trace_rcu_utilization("End CPU kthread@rcu_wait"); - rcu_wait(*workp != 0 || kthread_should_stop()); + for (spincnt = 0; spincnt < 10; spincnt++) { trace_rcu_utilization("Start CPU kthread@rcu_wait"); local_bh_disable(); - if (rcu_cpu_kthread_should_stop(cpu)) { - local_bh_enable(); - break; - } *statusp = RCU_KTHREAD_RUNNING; - per_cpu(rcu_cpu_kthread_loops, cpu)++; - local_irq_save(flags); + this_cpu_inc(rcu_cpu_kthread_loops); + local_irq_disable(); work = *workp; *workp = 0; - local_irq_restore(flags); + local_irq_enable(); if (work) rcu_kthread_do_work(); local_bh_enable(); - if (*workp != 0) - spincnt++; - else - spincnt = 0; - if (spincnt > 10) { - *statusp = RCU_KTHREAD_YIELDING; - trace_rcu_utilization("End CPU kthread@rcu_yield"); - rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu); - trace_rcu_utilization("Start CPU kthread@rcu_yield"); - spincnt = 0; - } - } - *statusp = RCU_KTHREAD_STOPPED; - trace_rcu_utilization("End CPU kthread@term"); - return 0; -} - -/* - * Spawn a per-CPU kthread, setting up affinity and priority. 
- * Because the CPU hotplug lock is held, no other CPU will be attempting - * to manipulate rcu_cpu_kthread_task. There might be another CPU - * attempting to access it during boot, but the locking in kthread_bind() - * will enforce sufficient ordering. - * - * Please note that we cannot simply refuse to wake up the per-CPU - * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state, - * which can result in softlockup complaints if the task ends up being - * idle for more than a couple of minutes. - * - * However, please note also that we cannot bind the per-CPU kthread to its - * CPU until that CPU is fully online. We also cannot wait until the - * CPU is fully online before we create its per-CPU kthread, as this would - * deadlock the system when CPU notifiers tried waiting for grace - * periods. So we bind the per-CPU kthread to its CPU only if the CPU - * is online. If its CPU is not yet fully online, then the code in - * rcu_cpu_kthread() will wait until it is fully online, and then do - * the binding. - */ -static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu) -{ - struct sched_param sp; - struct task_struct *t; - - if (!rcu_scheduler_fully_active || - per_cpu(rcu_cpu_kthread_task, cpu) != NULL) - return 0; - t = kthread_create_on_node(rcu_cpu_kthread, - (void *)(long)cpu, - cpu_to_node(cpu), - "rcuc/%d", cpu); - if (IS_ERR(t)) - return PTR_ERR(t); - if (cpu_online(cpu)) - kthread_bind(t, cpu); - per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu; - WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL); - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - per_cpu(rcu_cpu_kthread_task, cpu) = t; - wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */ - return 0; -} - -/* - * Per-rcu_node kthread, which is in charge of waking up the per-CPU - * kthreads when needed. We ignore requests to wake up kthreads - * for offline CPUs, which is OK because force_quiescent_state() - * takes care of this case. - */ -static int rcu_node_kthread(void *arg) -{ - int cpu; - unsigned long flags; - unsigned long mask; - struct rcu_node *rnp = (struct rcu_node *)arg; - struct sched_param sp; - struct task_struct *t; - - for (;;) { - rnp->node_kthread_status = RCU_KTHREAD_WAITING; - rcu_wait(atomic_read(&rnp->wakemask) != 0); - rnp->node_kthread_status = RCU_KTHREAD_RUNNING; - raw_spin_lock_irqsave(&rnp->lock, flags); - mask = atomic_xchg(&rnp->wakemask, 0); - rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */ - for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) { - if ((mask & 0x1) == 0) - continue; - preempt_disable(); - t = per_cpu(rcu_cpu_kthread_task, cpu); - if (!cpu_online(cpu) || t == NULL) { - preempt_enable(); - continue; - } - per_cpu(rcu_cpu_has_work, cpu) = 1; - sp.sched_priority = RCU_KTHREAD_PRIO; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - preempt_enable(); + if (*workp == 0) { + trace_rcu_utilization("End CPU kthread@rcu_wait"); + *statusp = RCU_KTHREAD_WAITING; + return; } } - /* NOTREACHED */ - rnp->node_kthread_status = RCU_KTHREAD_STOPPED; - return 0; + *statusp = RCU_KTHREAD_YIELDING; + trace_rcu_utilization("Start CPU kthread@rcu_yield"); + schedule_timeout_interruptible(2); + trace_rcu_utilization("End CPU kthread@rcu_yield"); + *statusp = RCU_KTHREAD_WAITING; } /* @@ -1583,17 +1384,17 @@ static int rcu_node_kthread(void *arg) * no outgoing CPU. If there are no CPUs left in the affinity set, * this function allows the kthread to execute on any CPU. 
*/ -static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) +static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) { + struct task_struct *t = rnp->boost_kthread_task; + unsigned long mask = rnp->qsmaskinit; cpumask_var_t cm; int cpu; - unsigned long mask = rnp->qsmaskinit; - if (rnp->node_kthread_task == NULL) + if (!t) return; - if (!alloc_cpumask_var(&cm, GFP_KERNEL)) + if (!zalloc_cpumask_var(&cm, GFP_KERNEL)) return; - cpumask_clear(cm); for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) if ((mask & 0x1) && cpu != outgoingcpu) cpumask_set_cpu(cpu, cm); @@ -1603,62 +1404,36 @@ static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) cpumask_clear_cpu(cpu, cm); WARN_ON_ONCE(cpumask_weight(cm) == 0); } - set_cpus_allowed_ptr(rnp->node_kthread_task, cm); - rcu_boost_kthread_setaffinity(rnp, cm); + set_cpus_allowed_ptr(t, cm); free_cpumask_var(cm); } -/* - * Spawn a per-rcu_node kthread, setting priority and affinity. - * Called during boot before online/offline can happen, or, if - * during runtime, with the main CPU-hotplug locks held. So only - * one of these can be executing at a time. - */ -static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp, - struct rcu_node *rnp) -{ - unsigned long flags; - int rnp_index = rnp - &rsp->node[0]; - struct sched_param sp; - struct task_struct *t; - - if (!rcu_scheduler_fully_active || - rnp->qsmaskinit == 0) - return 0; - if (rnp->node_kthread_task == NULL) { - t = kthread_create(rcu_node_kthread, (void *)rnp, - "rcun/%d", rnp_index); - if (IS_ERR(t)) - return PTR_ERR(t); - raw_spin_lock_irqsave(&rnp->lock, flags); - rnp->node_kthread_task = t; - raw_spin_unlock_irqrestore(&rnp->lock, flags); - sp.sched_priority = 99; - sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); - wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */ - } - return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index); -} +static struct smp_hotplug_thread rcu_cpu_thread_spec = { + .store = &rcu_cpu_kthread_task, + .thread_should_run = rcu_cpu_kthread_should_run, + .thread_fn = rcu_cpu_kthread, + .thread_comm = "rcuc/%u", + .setup = rcu_cpu_kthread_setup, + .park = rcu_cpu_kthread_park, +}; /* * Spawn all kthreads -- called as soon as the scheduler is running. */ static int __init rcu_spawn_kthreads(void) { - int cpu; struct rcu_node *rnp; + int cpu; rcu_scheduler_fully_active = 1; - for_each_possible_cpu(cpu) { + for_each_possible_cpu(cpu) per_cpu(rcu_cpu_has_work, cpu) = 0; - if (cpu_online(cpu)) - (void)rcu_spawn_one_cpu_kthread(cpu); - } + BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); rnp = rcu_get_root(rcu_state); - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); + (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); if (NUM_RCU_NODES > 1) { rcu_for_each_leaf_node(rcu_state, rnp) - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); + (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); } return 0; } @@ -1670,11 +1445,8 @@ static void __cpuinit rcu_prepare_kthreads(int cpu) struct rcu_node *rnp = rdp->mynode; /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. 
*/ - if (rcu_scheduler_fully_active) { - (void)rcu_spawn_one_cpu_kthread(cpu); - if (rnp->node_kthread_task == NULL) - (void)rcu_spawn_one_node_kthread(rcu_state, rnp); - } + if (rcu_scheduler_fully_active) + (void)rcu_spawn_one_boost_kthread(rcu_state, rnp); } #else /* #ifdef CONFIG_RCU_BOOST */ @@ -1698,19 +1470,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) { } -#ifdef CONFIG_HOTPLUG_CPU - -static void rcu_stop_cpu_kthread(int cpu) -{ -} - -#endif /* #ifdef CONFIG_HOTPLUG_CPU */ - -static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) -{ -} - -static void rcu_cpu_kthread_setrt(int cpu, int to_rt) +static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) { } @@ -1997,6 +1757,26 @@ static void rcu_prepare_for_idle(int cpu) if (!tne) return; + /* Adaptive-tick mode, where usermode execution is idle to RCU. */ + if (!is_idle_task(current)) { + rdtp->dyntick_holdoff = jiffies - 1; + if (rcu_cpu_has_nonlazy_callbacks(cpu)) { + trace_rcu_prep_idle("User dyntick with callbacks"); + rdtp->idle_gp_timer_expires = + round_up(jiffies + RCU_IDLE_GP_DELAY, + RCU_IDLE_GP_DELAY); + } else if (rcu_cpu_has_callbacks(cpu)) { + rdtp->idle_gp_timer_expires = + round_jiffies(jiffies + RCU_IDLE_LAZY_GP_DELAY); + trace_rcu_prep_idle("User dyntick with lazy callbacks"); + } else { + return; + } + tp = &rdtp->idle_gp_timer; + mod_timer_pinned(tp, rdtp->idle_gp_timer_expires); + return; + } + /* * If this is an idle re-entry, for example, due to use of * RCU_NONIDLE() or the new idle-loop tracing API within the idle @@ -2075,16 +1855,16 @@ static void rcu_prepare_for_idle(int cpu) #ifdef CONFIG_TREE_PREEMPT_RCU if (per_cpu(rcu_preempt_data, cpu).nxtlist) { rcu_preempt_qs(cpu); - force_quiescent_state(&rcu_preempt_state, 0); + force_quiescent_state(&rcu_preempt_state); } #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ if (per_cpu(rcu_sched_data, cpu).nxtlist) { rcu_sched_qs(cpu); - force_quiescent_state(&rcu_sched_state, 0); + force_quiescent_state(&rcu_sched_state); } if (per_cpu(rcu_bh_data, cpu).nxtlist) { rcu_bh_qs(cpu); - force_quiescent_state(&rcu_bh_state, 0); + force_quiescent_state(&rcu_bh_state); } /* @@ -2112,6 +1892,88 @@ static void rcu_idle_count_callbacks_posted(void) __this_cpu_add(rcu_dynticks.nonlazy_posted, 1); } +/* + * Data for flushing lazy RCU callbacks at OOM time. + */ +static atomic_t oom_callback_count; +static DECLARE_WAIT_QUEUE_HEAD(oom_callback_wq); + +/* + * RCU OOM callback -- decrement the outstanding count and deliver the + * wake-up if we are the last one. + */ +static void rcu_oom_callback(struct rcu_head *rhp) +{ + if (atomic_dec_and_test(&oom_callback_count)) + wake_up(&oom_callback_wq); +} + +/* + * Post an rcu_oom_notify callback on the current CPU if it has at + * least one lazy callback. This will unnecessarily post callbacks + * to CPUs that already have a non-lazy callback at the end of their + * callback list, but this is an infrequent operation, so accept some + * extra overhead to keep things simple. + */ +static void rcu_oom_notify_cpu(void *unused) +{ + struct rcu_state *rsp; + struct rcu_data *rdp; + + for_each_rcu_flavor(rsp) { + rdp = __this_cpu_ptr(rsp->rda); + if (rdp->qlen_lazy != 0) { + atomic_inc(&oom_callback_count); + rsp->call(&rdp->oom_head, rcu_oom_callback); + } + } +} + +/* + * If low on memory, ensure that each CPU has a non-lazy callback. + * This will wake up CPUs that have only lazy callbacks, in turn + * ensuring that they free up the corresponding memory in a timely manner. 
+ * Because an uncertain amount of memory will be freed in some uncertain + * timeframe, we do not claim to have freed anything. + */ +static int rcu_oom_notify(struct notifier_block *self, + unsigned long notused, void *nfreed) +{ + int cpu; + + /* Wait for callbacks from earlier instance to complete. */ + wait_event(oom_callback_wq, atomic_read(&oom_callback_count) == 0); + + /* + * Prevent premature wakeup: ensure that all increments happen + * before there is a chance of the counter reaching zero. + */ + atomic_set(&oom_callback_count, 1); + + get_online_cpus(); + for_each_online_cpu(cpu) { + smp_call_function_single(cpu, rcu_oom_notify_cpu, NULL, 1); + cond_resched(); + } + put_online_cpus(); + + /* Unconditionally decrement: no need to wake ourselves up. */ + atomic_dec(&oom_callback_count); + + return NOTIFY_OK; +} + +static struct notifier_block rcu_oom_nb = { + .notifier_call = rcu_oom_notify +}; + +static int __init rcu_register_oom_notifier(void) +{ + register_oom_notifier(&rcu_oom_nb); + return 0; +} +early_initcall(rcu_register_oom_notifier); + #endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ #ifdef CONFIG_RCU_CPU_STALL_INFO @@ -2122,11 +1984,15 @@ static void print_cpu_stall_fast_no_hz(char *cp, int cpu) { struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu); struct timer_list *tltp = &rdtp->idle_gp_timer; + char c; - sprintf(cp, "drain=%d %c timer=%lu", - rdtp->dyntick_drain, - rdtp->dyntick_holdoff == jiffies ? 'H' : '.', - timer_pending(tltp) ? tltp->expires - jiffies : -1); + c = rdtp->dyntick_holdoff == jiffies ? 'H' : '.'; + if (timer_pending(tltp)) + sprintf(cp, "drain=%d %c timer=%lu", + rdtp->dyntick_drain, c, tltp->expires - jiffies); + else + sprintf(cp, "drain=%d %c timer not pending", + rdtp->dyntick_drain, c); } #else /* #ifdef CONFIG_RCU_FAST_NO_HZ */ @@ -2194,11 +2060,10 @@ static void zero_cpu_stall_ticks(struct rcu_data *rdp) /* Increment ->ticks_this_gp for all flavors of RCU. */ static void increment_cpu_stall_ticks(void) { - __get_cpu_var(rcu_sched_data).ticks_this_gp++; - __get_cpu_var(rcu_bh_data).ticks_this_gp++; -#ifdef CONFIG_TREE_PREEMPT_RCU - __get_cpu_var(rcu_preempt_data).ticks_this_gp++; -#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ + struct rcu_state *rsp; + + for_each_rcu_flavor(rsp) + __this_cpu_ptr(rsp->rda)->ticks_this_gp++; } #else /* #ifdef CONFIG_RCU_CPU_STALL_INFO */ diff --git a/trunk/kernel/rcutree_trace.c b/trunk/kernel/rcutree_trace.c index abffb486e94e..693513bc50e6 100644 --- a/trunk/kernel/rcutree_trace.c +++ b/trunk/kernel/rcutree_trace.c @@ -51,8 +51,8 @@ static int show_rcubarrier(struct seq_file *m, void *unused) struct rcu_state *rsp; for_each_rcu_flavor(rsp) - seq_printf(m, "%s: %c bcc: %d nbd: %lu\n", - rsp->name, rsp->rcu_barrier_in_progress ? 'B' : '.', + seq_printf(m, "%s: bcc: %d nbd: %lu\n", + rsp->name, atomic_read(&rsp->barrier_cpu_count), rsp->n_barrier_done); return 0; @@ -86,12 +86,11 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) return; - seq_printf(m, "%3d%cc=%lu g=%lu pq=%d pgp=%lu qp=%d", + seq_printf(m, "%3d%cc=%lu g=%lu pq=%d qp=%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? '!' 
: ' ', rdp->completed, rdp->gpnum, - rdp->passed_quiesce, rdp->passed_quiesce_gpnum, - rdp->qs_pending); + rdp->passed_quiesce, rdp->qs_pending); seq_printf(m, " dt=%d/%llx/%d df=%lu", atomic_read(&rdp->dynticks->dynticks), rdp->dynticks->dynticks_nesting, @@ -108,11 +107,10 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) rdp->nxttail[RCU_WAIT_TAIL]], ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]); #ifdef CONFIG_RCU_BOOST - seq_printf(m, " kt=%d/%c/%d ktl=%x", + seq_printf(m, " kt=%d/%c ktl=%x", per_cpu(rcu_cpu_has_work, rdp->cpu), convert_kthread_status(per_cpu(rcu_cpu_kthread_status, rdp->cpu)), - per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff); #endif /* #ifdef CONFIG_RCU_BOOST */ seq_printf(m, " b=%ld", rdp->blimit); @@ -150,12 +148,11 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp) { if (!rdp->beenonline) return; - seq_printf(m, "%d,%s,%lu,%lu,%d,%lu,%d", + seq_printf(m, "%d,%s,%lu,%lu,%d,%d", rdp->cpu, cpu_is_offline(rdp->cpu) ? "\"N\"" : "\"Y\"", rdp->completed, rdp->gpnum, - rdp->passed_quiesce, rdp->passed_quiesce_gpnum, - rdp->qs_pending); + rdp->passed_quiesce, rdp->qs_pending); seq_printf(m, ",%d,%llx,%d,%lu", atomic_read(&rdp->dynticks->dynticks), rdp->dynticks->dynticks_nesting, @@ -186,7 +183,7 @@ static int show_rcudata_csv(struct seq_file *m, void *unused) int cpu; struct rcu_state *rsp; - seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pgp\",\"pq\","); + seq_puts(m, "\"CPU\",\"Online?\",\"c\",\"g\",\"pq\",\"pq\","); seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); seq_puts(m, "\"of\",\"qll\",\"ql\",\"qs\""); #ifdef CONFIG_RCU_BOOST @@ -386,10 +383,9 @@ static void print_one_rcu_pending(struct seq_file *m, struct rcu_data *rdp) rdp->n_rp_report_qs, rdp->n_rp_cb_ready, rdp->n_rp_cpu_needs_gp); - seq_printf(m, "gpc=%ld gps=%ld nf=%ld nn=%ld\n", + seq_printf(m, "gpc=%ld gps=%ld nn=%ld\n", rdp->n_rp_gp_completed, rdp->n_rp_gp_started, - rdp->n_rp_need_fqs, rdp->n_rp_need_nothing); } diff --git a/trunk/kernel/sched/Makefile b/trunk/kernel/sched/Makefile index 173ea52f3af0..f06d249e103b 100644 --- a/trunk/kernel/sched/Makefile +++ b/trunk/kernel/sched/Makefile @@ -11,7 +11,7 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y) CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer endif -obj-y += core.o clock.o idle_task.o fair.o rt.o stop_task.o +obj-y += core.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o obj-$(CONFIG_SMP) += cpupri.o obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o obj-$(CONFIG_SCHEDSTATS) += stats.o diff --git a/trunk/kernel/sched/core.c b/trunk/kernel/sched/core.c index d325c4b2dcbb..c17747236438 100644 --- a/trunk/kernel/sched/core.c +++ b/trunk/kernel/sched/core.c @@ -740,126 +740,6 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags) dequeue_task(rq, p, flags); } -#ifdef CONFIG_IRQ_TIME_ACCOUNTING - -/* - * There are no locks covering percpu hardirq/softirq time. - * They are only modified in account_system_vtime, on corresponding CPU - * with interrupts disabled. So, writes are safe. - * They are read and saved off onto struct rq in update_rq_clock(). - * This may result in other CPU reading this CPU's irq time and can - * race with irq/account_system_vtime on this CPU. We would either get old - * or new value with a side effect of accounting a slice of irq time to wrong - * task when irq is in progress while we read rq->clock. 
That is a worthy - * compromise in place of having locks on each irq in account_system_time. - */ -static DEFINE_PER_CPU(u64, cpu_hardirq_time); -static DEFINE_PER_CPU(u64, cpu_softirq_time); - -static DEFINE_PER_CPU(u64, irq_start_time); -static int sched_clock_irqtime; - -void enable_sched_clock_irqtime(void) -{ - sched_clock_irqtime = 1; -} - -void disable_sched_clock_irqtime(void) -{ - sched_clock_irqtime = 0; -} - -#ifndef CONFIG_64BIT -static DEFINE_PER_CPU(seqcount_t, irq_time_seq); - -static inline void irq_time_write_begin(void) -{ - __this_cpu_inc(irq_time_seq.sequence); - smp_wmb(); -} - -static inline void irq_time_write_end(void) -{ - smp_wmb(); - __this_cpu_inc(irq_time_seq.sequence); -} - -static inline u64 irq_time_read(int cpu) -{ - u64 irq_time; - unsigned seq; - - do { - seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); - irq_time = per_cpu(cpu_softirq_time, cpu) + - per_cpu(cpu_hardirq_time, cpu); - } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); - - return irq_time; -} -#else /* CONFIG_64BIT */ -static inline void irq_time_write_begin(void) -{ -} - -static inline void irq_time_write_end(void) -{ -} - -static inline u64 irq_time_read(int cpu) -{ - return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); -} -#endif /* CONFIG_64BIT */ - -/* - * Called before incrementing preempt_count on {soft,}irq_enter - * and before decrementing preempt_count on {soft,}irq_exit. - */ -void account_system_vtime(struct task_struct *curr) -{ - unsigned long flags; - s64 delta; - int cpu; - - if (!sched_clock_irqtime) - return; - - local_irq_save(flags); - - cpu = smp_processor_id(); - delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); - __this_cpu_add(irq_start_time, delta); - - irq_time_write_begin(); - /* - * We do not account for softirq time from ksoftirqd here. - * We want to continue accounting softirq time to ksoftirqd thread - * in that case, so as not to confuse scheduler with a special task - * that do not consume any time, but still wants to run. 
- */ - if (hardirq_count()) - __this_cpu_add(cpu_hardirq_time, delta); - else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) - __this_cpu_add(cpu_softirq_time, delta); - - irq_time_write_end(); - local_irq_restore(flags); -} -EXPORT_SYMBOL_GPL(account_system_vtime); - -#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ - -#ifdef CONFIG_PARAVIRT -static inline u64 steal_ticks(u64 steal) -{ - if (unlikely(steal > NSEC_PER_SEC)) - return div_u64(steal, TICK_NSEC); - - return __iter_div_u64_rem(steal, TICK_NSEC, &steal); -} -#endif - static void update_rq_clock_task(struct rq *rq, s64 delta) { /* @@ -920,43 +800,6 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) #endif } -#ifdef CONFIG_IRQ_TIME_ACCOUNTING -static int irqtime_account_hi_update(void) -{ - u64 *cpustat = kcpustat_this_cpu->cpustat; - unsigned long flags; - u64 latest_ns; - int ret = 0; - - local_irq_save(flags); - latest_ns = this_cpu_read(cpu_hardirq_time); - if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ]) - ret = 1; - local_irq_restore(flags); - return ret; -} - -static int irqtime_account_si_update(void) -{ - u64 *cpustat = kcpustat_this_cpu->cpustat; - unsigned long flags; - u64 latest_ns; - int ret = 0; - - local_irq_save(flags); - latest_ns = this_cpu_read(cpu_softirq_time); - if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ]) - ret = 1; - local_irq_restore(flags); - return ret; -} - -#else /* CONFIG_IRQ_TIME_ACCOUNTING */ - -#define sched_clock_irqtime (0) - -#endif - void sched_set_stop_task(int cpu, struct task_struct *stop) { struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 }; @@ -1518,25 +1361,6 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu) smp_send_reschedule(cpu); } -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW -static int ttwu_activate_remote(struct task_struct *p, int wake_flags) -{ - struct rq *rq; - int ret = 0; - - rq = __task_rq_lock(p); - if (p->on_cpu) { - ttwu_activate(rq, p, ENQUEUE_WAKEUP); - ttwu_do_wakeup(rq, p, wake_flags); - ret = 1; - } - __task_rq_unlock(rq); - - return ret; - -} -#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ - bool cpus_share_cache(int this_cpu, int that_cpu) { return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); @@ -1597,21 +1421,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * If the owning (remote) cpu is still in the middle of schedule() with * this task as prev, wait until its done referencing the task. */ - while (p->on_cpu) { -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - /* - * In case the architecture enables interrupts in - * context_switch(), we cannot busy wait, since that - * would lead to deadlocks when an interrupt hits and - * tries to wake up @prev. So bail and do a complete - * remote wakeup. - */ - if (ttwu_activate_remote(p, wake_flags)) - goto stat; -#else + while (p->on_cpu) cpu_relax(); -#endif - } /* * Pairs with the smp_wmb() in finish_lock_switch(). 
*/ @@ -1953,14 +1764,9 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev) * Manfred Spraul */ prev_state = prev->state; + vtime_task_switch(prev); finish_arch_switch(prev); -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - local_irq_disable(); -#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ perf_event_task_sched_in(prev, current); -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - local_irq_enable(); -#endif /* __ARCH_WANT_INTERRUPTS_ON_CTXSW */ finish_lock_switch(rq, prev); finish_arch_post_lock_switch(); @@ -2081,6 +1887,7 @@ context_switch(struct rq *rq, struct task_struct *prev, #endif /* Here we just switch the register state and the stack. */ + rcu_switch(prev, next); switch_to(prev, next, prev); barrier(); @@ -2809,398 +2616,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) return ns; } -#ifdef CONFIG_CGROUP_CPUACCT -struct cgroup_subsys cpuacct_subsys; -struct cpuacct root_cpuacct; -#endif - -static inline void task_group_account_field(struct task_struct *p, int index, - u64 tmp) -{ -#ifdef CONFIG_CGROUP_CPUACCT - struct kernel_cpustat *kcpustat; - struct cpuacct *ca; -#endif - /* - * Since all updates are sure to touch the root cgroup, we - * get ourselves ahead and touch it first. If the root cgroup - * is the only cgroup, then nothing else should be necessary. - * - */ - __get_cpu_var(kernel_cpustat).cpustat[index] += tmp; - -#ifdef CONFIG_CGROUP_CPUACCT - if (unlikely(!cpuacct_subsys.active)) - return; - - rcu_read_lock(); - ca = task_ca(p); - while (ca && (ca != &root_cpuacct)) { - kcpustat = this_cpu_ptr(ca->cpustat); - kcpustat->cpustat[index] += tmp; - ca = parent_ca(ca); - } - rcu_read_unlock(); -#endif -} - - -/* - * Account user cpu time to a process. - * @p: the process that the cpu time gets accounted to - * @cputime: the cpu time spent in user space since the last update - * @cputime_scaled: cputime scaled by cpu frequency - */ -void account_user_time(struct task_struct *p, cputime_t cputime, - cputime_t cputime_scaled) -{ - int index; - - /* Add user time to process. */ - p->utime += cputime; - p->utimescaled += cputime_scaled; - account_group_user_time(p, cputime); - - index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; - - /* Add user time to cpustat. */ - task_group_account_field(p, index, (__force u64) cputime); - - /* Account for user time used */ - acct_update_integrals(p); -} - -/* - * Account guest cpu time to a process. - * @p: the process that the cpu time gets accounted to - * @cputime: the cpu time spent in virtual machine since the last update - * @cputime_scaled: cputime scaled by cpu frequency - */ -static void account_guest_time(struct task_struct *p, cputime_t cputime, - cputime_t cputime_scaled) -{ - u64 *cpustat = kcpustat_this_cpu->cpustat; - - /* Add guest time to process. */ - p->utime += cputime; - p->utimescaled += cputime_scaled; - account_group_user_time(p, cputime); - p->gtime += cputime; - - /* Add guest time to cpustat. 
*/ - if (TASK_NICE(p) > 0) { - cpustat[CPUTIME_NICE] += (__force u64) cputime; - cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime; - } else { - cpustat[CPUTIME_USER] += (__force u64) cputime; - cpustat[CPUTIME_GUEST] += (__force u64) cputime; - } -} - -/* - * Account system cpu time to a process and desired cpustat field - * @p: the process that the cpu time gets accounted to - * @cputime: the cpu time spent in kernel space since the last update - * @cputime_scaled: cputime scaled by cpu frequency - * @target_cputime64: pointer to cpustat field that has to be updated - */ -static inline -void __account_system_time(struct task_struct *p, cputime_t cputime, - cputime_t cputime_scaled, int index) -{ - /* Add system time to process. */ - p->stime += cputime; - p->stimescaled += cputime_scaled; - account_group_system_time(p, cputime); - - /* Add system time to cpustat. */ - task_group_account_field(p, index, (__force u64) cputime); - - /* Account for system time used */ - acct_update_integrals(p); -} - -/* - * Account system cpu time to a process. - * @p: the process that the cpu time gets accounted to - * @hardirq_offset: the offset to subtract from hardirq_count() - * @cputime: the cpu time spent in kernel space since the last update - * @cputime_scaled: cputime scaled by cpu frequency - */ -void account_system_time(struct task_struct *p, int hardirq_offset, - cputime_t cputime, cputime_t cputime_scaled) -{ - int index; - - if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) { - account_guest_time(p, cputime, cputime_scaled); - return; - } - - if (hardirq_count() - hardirq_offset) - index = CPUTIME_IRQ; - else if (in_serving_softirq()) - index = CPUTIME_SOFTIRQ; - else - index = CPUTIME_SYSTEM; - - __account_system_time(p, cputime, cputime_scaled, index); -} - -/* - * Account for involuntary wait time. - * @cputime: the cpu time spent in involuntary wait - */ -void account_steal_time(cputime_t cputime) -{ - u64 *cpustat = kcpustat_this_cpu->cpustat; - - cpustat[CPUTIME_STEAL] += (__force u64) cputime; -} - -/* - * Account for idle time. - * @cputime: the cpu time spent in idle wait - */ -void account_idle_time(cputime_t cputime) -{ - u64 *cpustat = kcpustat_this_cpu->cpustat; - struct rq *rq = this_rq(); - - if (atomic_read(&rq->nr_iowait) > 0) - cpustat[CPUTIME_IOWAIT] += (__force u64) cputime; - else - cpustat[CPUTIME_IDLE] += (__force u64) cputime; -} - -static __always_inline bool steal_account_process_tick(void) -{ -#ifdef CONFIG_PARAVIRT - if (static_key_false(¶virt_steal_enabled)) { - u64 steal, st = 0; - - steal = paravirt_steal_clock(smp_processor_id()); - steal -= this_rq()->prev_steal_time; - - st = steal_ticks(steal); - this_rq()->prev_steal_time += st * TICK_NSEC; - - account_steal_time(st); - return st; - } -#endif - return false; -} - -#ifndef CONFIG_VIRT_CPU_ACCOUNTING - -#ifdef CONFIG_IRQ_TIME_ACCOUNTING -/* - * Account a tick to a process and cpustat - * @p: the process that the cpu time gets accounted to - * @user_tick: is the tick from userspace - * @rq: the pointer to rq - * - * Tick demultiplexing follows the order - * - pending hardirq update - * - pending softirq update - * - user_time - * - idle_time - * - system time - * - check for guest_time - * - else account as system_time - * - * Check for hardirq is done both for system and user time as there is - * no timer going off while we are on hardirq and hence we may never get an - * opportunity to update it solely in system time. 
- * p->stime and friends are only updated on system time and not on irq - * softirq as those do not count in task exec_runtime any more. - */ -static void irqtime_account_process_tick(struct task_struct *p, int user_tick, - struct rq *rq) -{ - cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); - u64 *cpustat = kcpustat_this_cpu->cpustat; - - if (steal_account_process_tick()) - return; - - if (irqtime_account_hi_update()) { - cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; - } else if (irqtime_account_si_update()) { - cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; - } else if (this_cpu_ksoftirqd() == p) { - /* - * ksoftirqd time do not get accounted in cpu_softirq_time. - * So, we have to handle it separately here. - * Also, p->stime needs to be updated for ksoftirqd. - */ - __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - CPUTIME_SOFTIRQ); - } else if (user_tick) { - account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); - } else if (p == rq->idle) { - account_idle_time(cputime_one_jiffy); - } else if (p->flags & PF_VCPU) { /* System time or guest time */ - account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); - } else { - __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, - CPUTIME_SYSTEM); - } -} - -static void irqtime_account_idle_ticks(int ticks) -{ - int i; - struct rq *rq = this_rq(); - - for (i = 0; i < ticks; i++) - irqtime_account_process_tick(current, 0, rq); -} -#else /* CONFIG_IRQ_TIME_ACCOUNTING */ -static void irqtime_account_idle_ticks(int ticks) {} -static void irqtime_account_process_tick(struct task_struct *p, int user_tick, - struct rq *rq) {} -#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ - -/* - * Account a single tick of cpu time. - * @p: the process that the cpu time gets accounted to - * @user_tick: indicates if the tick is a user or a system tick - */ -void account_process_tick(struct task_struct *p, int user_tick) -{ - cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); - struct rq *rq = this_rq(); - - if (sched_clock_irqtime) { - irqtime_account_process_tick(p, user_tick, rq); - return; - } - - if (steal_account_process_tick()) - return; - - if (user_tick) - account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); - else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) - account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, - one_jiffy_scaled); - else - account_idle_time(cputime_one_jiffy); -} - -/* - * Account multiple ticks of steal time. - * @p: the process from which the cpu time has been stolen - * @ticks: number of stolen ticks - */ -void account_steal_ticks(unsigned long ticks) -{ - account_steal_time(jiffies_to_cputime(ticks)); -} - -/* - * Account multiple ticks of idle time. 
- * @ticks: number of stolen ticks - */ -void account_idle_ticks(unsigned long ticks) -{ - - if (sched_clock_irqtime) { - irqtime_account_idle_ticks(ticks); - return; - } - - account_idle_time(jiffies_to_cputime(ticks)); -} - -#endif - -/* - * Use precise platform statistics if available: - */ -#ifdef CONFIG_VIRT_CPU_ACCOUNTING -void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) -{ - *ut = p->utime; - *st = p->stime; -} - -void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) -{ - struct task_cputime cputime; - - thread_group_cputime(p, &cputime); - - *ut = cputime.utime; - *st = cputime.stime; -} -#else - -#ifndef nsecs_to_cputime -# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) -#endif - -void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) -{ - cputime_t rtime, utime = p->utime, total = utime + p->stime; - - /* - * Use CFS's precise accounting: - */ - rtime = nsecs_to_cputime(p->se.sum_exec_runtime); - - if (total) { - u64 temp = (__force u64) rtime; - - temp *= (__force u64) utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; - } else - utime = rtime; - - /* - * Compare with previous values, to keep monotonicity: - */ - p->prev_utime = max(p->prev_utime, utime); - p->prev_stime = max(p->prev_stime, rtime - p->prev_utime); - - *ut = p->prev_utime; - *st = p->prev_stime; -} - -/* - * Must be called with siglock held. - */ -void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) -{ - struct signal_struct *sig = p->signal; - struct task_cputime cputime; - cputime_t rtime, utime, total; - - thread_group_cputime(p, &cputime); - - total = cputime.utime + cputime.stime; - rtime = nsecs_to_cputime(cputime.sum_exec_runtime); - - if (total) { - u64 temp = (__force u64) rtime; - - temp *= (__force u64) cputime.utime; - do_div(temp, (__force u32) total); - utime = (__force cputime_t) temp; - } else - utime = rtime; - - sig->prev_utime = max(sig->prev_utime, utime); - sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime); - - *ut = sig->prev_utime; - *st = sig->prev_stime; -} -#endif - /* * This function gets called by the timer code, with HZ frequency. * We call it with interrupts disabled. @@ -3361,6 +2776,40 @@ pick_next_task(struct rq *rq) /* * __schedule() is the main scheduler function. + * + * The main means of driving the scheduler and thus entering this function are: + * + * 1. Explicit blocking: mutex, semaphore, waitqueue, etc. + * + * 2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return + * paths. For example, see arch/x86/entry_64.S. + * + * To drive preemption between tasks, the scheduler sets the flag in timer + * interrupt handler scheduler_tick(). + * + * 3. Wakeups don't really cause entry into schedule(). They add a + * task to the run-queue and that's it. + * + * Now, if the new task added to the run-queue preempts the current + * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets + * called on the nearest possible occasion: + * + * - If the kernel is preemptible (CONFIG_PREEMPT=y): + * + * - in syscall or exception context, at the next outmost + * preempt_enable(). (this might be as soon as the wake_up()'s + * spin_unlock()!) 
+ * + * - in IRQ context, return from interrupt-handler to + * preemptible context + * + * - If the kernel is not preemptible (CONFIG_PREEMPT is not set) + * then at the next: + * + * - cond_resched() call + * - explicit schedule() call + * - return from syscall or exception to user-space + * - return from interrupt-handler to user-space */ static void __sched __schedule(void) { @@ -3462,6 +2911,21 @@ asmlinkage void __sched schedule(void) } EXPORT_SYMBOL(schedule); +#ifdef CONFIG_RCU_USER_QS +asmlinkage void __sched schedule_user(void) +{ + /* + * If we come here after a random call to set_need_resched(), + * or we have been woken up remotely but the IPI has not yet arrived, + * we haven't yet exited the RCU idle mode. Do it here manually until + * we find a better solution. + */ + rcu_user_exit(); + schedule(); + rcu_user_enter(); +} +#endif + /** * schedule_preempt_disabled - called with preemption disabled * @@ -3563,6 +3027,7 @@ asmlinkage void __sched preempt_schedule_irq(void) /* Catch callers which need to be fixed */ BUG_ON(ti->preempt_count || !irqs_disabled()); + rcu_user_exit(); do { add_preempt_count(PREEMPT_ACTIVE); local_irq_enable(); @@ -4340,9 +3805,7 @@ static int __sched_setscheduler(struct task_struct *p, int policy, */ if (unlikely(policy == p->policy && (!rt_policy(policy) || param->sched_priority == p->rt_priority))) { - - __task_rq_unlock(rq); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); + task_rq_unlock(rq, p, &flags); return 0; } @@ -4864,13 +4327,6 @@ bool __sched yield_to(struct task_struct *p, bool preempt) */ if (preempt && rq != p_rq) resched_task(p_rq->curr); - } else { - /* - * We might have set it in task_yield_fair(), but are - * not going to schedule(), so don't want to skip - * the next update. - */ - rq->skip_clock_update = 0; } out: @@ -5300,27 +4756,17 @@ void idle_task_exit(void) } /* - * While a dead CPU has no uninterruptible tasks queued at this point, - * it might still have a nonzero ->nr_uninterruptible counter, because - * for performance reasons the counter is not stricly tracking tasks to - * their home CPUs. So we just add the counter to another CPU's counter, - * to keep the global sum constant after CPU-down: - */ -static void migrate_nr_uninterruptible(struct rq *rq_src) -{ - struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask)); - - rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible; - rq_src->nr_uninterruptible = 0; -} - -/* - * remove the tasks which were accounted by rq from calc_load_tasks. + * Since this CPU is going 'away' for a while, fold any nr_active delta + * we might have. Assumes we're called after migrate_tasks() so that the + * nr_active count is stable. + * + * Also see the comment "Global load-average calculations". 
*/ -static void calc_global_load_remove(struct rq *rq) +static void calc_load_migrate(struct rq *rq) { - atomic_long_sub(rq->calc_load_active, &calc_load_tasks); - rq->calc_load_active = 0; + long delta = calc_load_fold_active(rq); + if (delta) + atomic_long_add(delta, &calc_load_tasks); } /* @@ -5348,9 +4794,6 @@ static void migrate_tasks(unsigned int dead_cpu) */ rq->stop = NULL; - /* Ensure any throttled groups are reachable by pick_next_task */ - unthrottle_offline_cfs_rqs(rq); - for ( ; ; ) { /* * There's this thread running, bail when that's the only @@ -5425,16 +4868,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep) *tablep = NULL; } +static int min_load_idx = 0; +static int max_load_idx = CPU_LOAD_IDX_MAX; + static void set_table_entry(struct ctl_table *entry, const char *procname, void *data, int maxlen, - umode_t mode, proc_handler *proc_handler) + umode_t mode, proc_handler *proc_handler, + bool load_idx) { entry->procname = procname; entry->data = data; entry->maxlen = maxlen; entry->mode = mode; entry->proc_handler = proc_handler; + + if (load_idx) { + entry->extra1 = &min_load_idx; + entry->extra2 = &max_load_idx; + } } static struct ctl_table * @@ -5446,30 +4898,30 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) return NULL; set_table_entry(&table[0], "min_interval", &sd->min_interval, - sizeof(long), 0644, proc_doulongvec_minmax); + sizeof(long), 0644, proc_doulongvec_minmax, false); set_table_entry(&table[1], "max_interval", &sd->max_interval, - sizeof(long), 0644, proc_doulongvec_minmax); + sizeof(long), 0644, proc_doulongvec_minmax, false); set_table_entry(&table[2], "busy_idx", &sd->busy_idx, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, true); set_table_entry(&table[3], "idle_idx", &sd->idle_idx, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, true); set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, true); set_table_entry(&table[5], "wake_idx", &sd->wake_idx, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, true); set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, true); set_table_entry(&table[7], "busy_factor", &sd->busy_factor, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, false); set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, false); set_table_entry(&table[9], "cache_nice_tries", &sd->cache_nice_tries, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, false); set_table_entry(&table[10], "flags", &sd->flags, - sizeof(int), 0644, proc_dointvec_minmax); + sizeof(int), 0644, proc_dointvec_minmax, false); set_table_entry(&table[11], "name", sd->name, - CORENAME_MAX_SIZE, 0444, proc_dostring); + CORENAME_MAX_SIZE, 0444, proc_dostring, false); /* &table[12] is terminator */ return table; @@ -5613,9 +5065,10 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) migrate_tasks(cpu); BUG_ON(rq->nr_running != 1); /* the migration thread */ raw_spin_unlock_irqrestore(&rq->lock, flags); + break; - migrate_nr_uninterruptible(rq); - calc_global_load_remove(rq); + case CPU_DEAD: + calc_load_migrate(rq); break; #endif } @@ -6024,11 +5477,6 @@ static void 
destroy_sched_domains(struct sched_domain *sd, int cpu) * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this * allows us to avoid some pointer chasing select_idle_sibling(). * - * Iterate domains and sched_groups downward, assigning CPUs to be - * select_idle_sibling() hw buddy. Cross-wiring hw makes bouncing - * due to random perturbation self canceling, ie sw buddies pull - * their counterpart to their CPU's hw counterpart. - * * Also keep a unique ID per domain (we use the first cpu number in * the cpumask of the domain), this allows us to quickly tell if * two cpus are in the same cache domain, see cpus_share_cache(). @@ -6042,40 +5490,8 @@ static void update_top_cache_domain(int cpu) int id = cpu; sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES); - if (sd) { - struct sched_domain *tmp = sd; - struct sched_group *sg, *prev; - bool right; - - /* - * Traverse to first CPU in group, and count hops - * to cpu from there, switching direction on each - * hop, never ever pointing the last CPU rightward. - */ - do { - id = cpumask_first(sched_domain_span(tmp)); - prev = sg = tmp->groups; - right = 1; - - while (cpumask_first(sched_group_cpus(sg)) != id) - sg = sg->next; - - while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) { - prev = sg; - sg = sg->next; - right = !right; - } - - /* A CPU went down, never point back to domain start. */ - if (right && cpumask_first(sched_group_cpus(sg->next)) == id) - right = false; - - sg = right ? sg->next : prev; - tmp->idle_buddy = cpumask_first(sched_group_cpus(sg)); - } while ((tmp = tmp->child)); - + if (sd) id = cpumask_first(sched_domain_span(sd)); - } rcu_assign_pointer(per_cpu(sd_llc, cpu), sd); per_cpu(sd_llc_id, cpu) = id; @@ -6584,7 +6000,6 @@ sd_numa_init(struct sched_domain_topology_level *tl, int cpu) | 0*SD_BALANCE_FORK | 0*SD_BALANCE_WAKE | 0*SD_WAKE_AFFINE - | 0*SD_PREFER_LOCAL | 0*SD_SHARE_CPUPOWER | 0*SD_SHARE_PKG_RESOURCES | 1*SD_SERIALIZE @@ -7248,6 +6663,7 @@ int in_sched_functions(unsigned long addr) #ifdef CONFIG_CGROUP_SCHED struct task_group root_task_group; +LIST_HEAD(task_groups); #endif DECLARE_PER_CPU(cpumask_var_t, load_balance_tmpmask); @@ -8381,6 +7797,8 @@ struct cgroup_subsys cpu_cgroup_subsys = { * (balbir@in.ibm.com). 
*/ +struct cpuacct root_cpuacct; + /* create a new cpu accounting group */ static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp) { diff --git a/trunk/kernel/sched/cpupri.c b/trunk/kernel/sched/cpupri.c index d72586fdf660..23aa789c53ee 100644 --- a/trunk/kernel/sched/cpupri.c +++ b/trunk/kernel/sched/cpupri.c @@ -65,8 +65,8 @@ static int convert_prio(int prio) int cpupri_find(struct cpupri *cp, struct task_struct *p, struct cpumask *lowest_mask) { - int idx = 0; - int task_pri = convert_prio(p->prio); + int idx = 0; + int task_pri = convert_prio(p->prio); if (task_pri >= MAX_RT_PRIO) return 0; @@ -137,9 +137,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, */ void cpupri_set(struct cpupri *cp, int cpu, int newpri) { - int *currpri = &cp->cpu_to_pri[cpu]; - int oldpri = *currpri; - int do_mb = 0; + int *currpri = &cp->cpu_to_pri[cpu]; + int oldpri = *currpri; + int do_mb = 0; newpri = convert_prio(newpri); diff --git a/trunk/kernel/sched/cputime.c b/trunk/kernel/sched/cputime.c new file mode 100644 index 000000000000..81b763ba58a6 --- /dev/null +++ b/trunk/kernel/sched/cputime.c @@ -0,0 +1,530 @@ +#include +#include +#include +#include +#include +#include "sched.h" + + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + +/* + * There are no locks covering percpu hardirq/softirq time. + * They are only modified in vtime_account, on corresponding CPU + * with interrupts disabled. So, writes are safe. + * They are read and saved off onto struct rq in update_rq_clock(). + * This may result in other CPU reading this CPU's irq time and can + * race with irq/vtime_account on this CPU. We would either get old + * or new value with a side effect of accounting a slice of irq time to wrong + * task when irq is in progress while we read rq->clock. That is a worthy + * compromise in place of having locks on each irq in account_system_time. + */ +DEFINE_PER_CPU(u64, cpu_hardirq_time); +DEFINE_PER_CPU(u64, cpu_softirq_time); + +static DEFINE_PER_CPU(u64, irq_start_time); +static int sched_clock_irqtime; + +void enable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 1; +} + +void disable_sched_clock_irqtime(void) +{ + sched_clock_irqtime = 0; +} + +#ifndef CONFIG_64BIT +DEFINE_PER_CPU(seqcount_t, irq_time_seq); +#endif /* CONFIG_64BIT */ + +/* + * Called before incrementing preempt_count on {soft,}irq_enter + * and before decrementing preempt_count on {soft,}irq_exit. + */ +void vtime_account(struct task_struct *curr) +{ + unsigned long flags; + s64 delta; + int cpu; + + if (!sched_clock_irqtime) + return; + + local_irq_save(flags); + + cpu = smp_processor_id(); + delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); + __this_cpu_add(irq_start_time, delta); + + irq_time_write_begin(); + /* + * We do not account for softirq time from ksoftirqd here. + * We want to continue accounting softirq time to ksoftirqd thread + * in that case, so as not to confuse scheduler with a special task + * that do not consume any time, but still wants to run. 
+ */ + if (hardirq_count()) + __this_cpu_add(cpu_hardirq_time, delta); + else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) + __this_cpu_add(cpu_softirq_time, delta); + + irq_time_write_end(); + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(vtime_account); + +static int irqtime_account_hi_update(void) +{ + u64 *cpustat = kcpustat_this_cpu->cpustat; + unsigned long flags; + u64 latest_ns; + int ret = 0; + + local_irq_save(flags); + latest_ns = this_cpu_read(cpu_hardirq_time); + if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_IRQ]) + ret = 1; + local_irq_restore(flags); + return ret; +} + +static int irqtime_account_si_update(void) +{ + u64 *cpustat = kcpustat_this_cpu->cpustat; + unsigned long flags; + u64 latest_ns; + int ret = 0; + + local_irq_save(flags); + latest_ns = this_cpu_read(cpu_softirq_time); + if (nsecs_to_cputime64(latest_ns) > cpustat[CPUTIME_SOFTIRQ]) + ret = 1; + local_irq_restore(flags); + return ret; +} + +#else /* CONFIG_IRQ_TIME_ACCOUNTING */ + +#define sched_clock_irqtime (0) + +#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */ + +static inline void task_group_account_field(struct task_struct *p, int index, + u64 tmp) +{ +#ifdef CONFIG_CGROUP_CPUACCT + struct kernel_cpustat *kcpustat; + struct cpuacct *ca; +#endif + /* + * Since all updates are sure to touch the root cgroup, we + * get ourselves ahead and touch it first. If the root cgroup + * is the only cgroup, then nothing else should be necessary. + * + */ + __get_cpu_var(kernel_cpustat).cpustat[index] += tmp; + +#ifdef CONFIG_CGROUP_CPUACCT + if (unlikely(!cpuacct_subsys.active)) + return; + + rcu_read_lock(); + ca = task_ca(p); + while (ca && (ca != &root_cpuacct)) { + kcpustat = this_cpu_ptr(ca->cpustat); + kcpustat->cpustat[index] += tmp; + ca = parent_ca(ca); + } + rcu_read_unlock(); +#endif +} + +/* + * Account user cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @cputime: the cpu time spent in user space since the last update + * @cputime_scaled: cputime scaled by cpu frequency + */ +void account_user_time(struct task_struct *p, cputime_t cputime, + cputime_t cputime_scaled) +{ + int index; + + /* Add user time to process. */ + p->utime += cputime; + p->utimescaled += cputime_scaled; + account_group_user_time(p, cputime); + + index = (TASK_NICE(p) > 0) ? CPUTIME_NICE : CPUTIME_USER; + + /* Add user time to cpustat. */ + task_group_account_field(p, index, (__force u64) cputime); + + /* Account for user time used */ + acct_update_integrals(p); +} + +/* + * Account guest cpu time to a process. + * @p: the process that the cpu time gets accounted to + * @cputime: the cpu time spent in virtual machine since the last update + * @cputime_scaled: cputime scaled by cpu frequency + */ +static void account_guest_time(struct task_struct *p, cputime_t cputime, + cputime_t cputime_scaled) +{ + u64 *cpustat = kcpustat_this_cpu->cpustat; + + /* Add guest time to process. */ + p->utime += cputime; + p->utimescaled += cputime_scaled; + account_group_user_time(p, cputime); + p->gtime += cputime; + + /* Add guest time to cpustat. 
*/
+ if (TASK_NICE(p) > 0) {
+ cpustat[CPUTIME_NICE] += (__force u64) cputime;
+ cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
+ } else {
+ cpustat[CPUTIME_USER] += (__force u64) cputime;
+ cpustat[CPUTIME_GUEST] += (__force u64) cputime;
+ }
+}
+
+/*
+ * Account system cpu time to a process and desired cpustat field
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
+ * @target_cputime64: pointer to cpustat field that has to be updated
+ */
+static inline
+void __account_system_time(struct task_struct *p, cputime_t cputime,
+ cputime_t cputime_scaled, int index)
+{
+ /* Add system time to process. */
+ p->stime += cputime;
+ p->stimescaled += cputime_scaled;
+ account_group_system_time(p, cputime);
+
+ /* Add system time to cpustat. */
+ task_group_account_field(p, index, (__force u64) cputime);
+
+ /* Account for system time used */
+ acct_update_integrals(p);
+}
+
+/*
+ * Account system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
+ */
+void account_system_time(struct task_struct *p, int hardirq_offset,
+ cputime_t cputime, cputime_t cputime_scaled)
+{
+ int index;
+
+ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
+ account_guest_time(p, cputime, cputime_scaled);
+ return;
+ }
+
+ if (hardirq_count() - hardirq_offset)
+ index = CPUTIME_IRQ;
+ else if (in_serving_softirq())
+ index = CPUTIME_SOFTIRQ;
+ else
+ index = CPUTIME_SYSTEM;
+
+ __account_system_time(p, cputime, cputime_scaled, index);
+}
+
+/*
+ * Account for involuntary wait time.
+ * @cputime: the cpu time spent in involuntary wait
+ */
+void account_steal_time(cputime_t cputime)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+
+ cpustat[CPUTIME_STEAL] += (__force u64) cputime;
+}
+
+/*
+ * Account for idle time.
+ * @cputime: the cpu time spent in idle wait
+ */
+void account_idle_time(cputime_t cputime)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+ struct rq *rq = this_rq();
+
+ if (atomic_read(&rq->nr_iowait) > 0)
+ cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
+ else
+ cpustat[CPUTIME_IDLE] += (__force u64) cputime;
+}
+
+static __always_inline bool steal_account_process_tick(void)
+{
+#ifdef CONFIG_PARAVIRT
+ if (static_key_false(&paravirt_steal_enabled)) {
+ u64 steal, st = 0;
+
+ steal = paravirt_steal_clock(smp_processor_id());
+ steal -= this_rq()->prev_steal_time;
+
+ st = steal_ticks(steal);
+ this_rq()->prev_steal_time += st * TICK_NSEC;
+
+ account_steal_time(st);
+ return st;
+ }
+#endif
+ return false;
+}
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+
+#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+/*
+ * Account a tick to a process and cpustat
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: is the tick from userspace
+ * @rq: the pointer to rq
+ *
+ * Tick demultiplexing follows the order
+ * - pending hardirq update
+ * - pending softirq update
+ * - user_time
+ * - idle_time
+ * - system time
+ * - check for guest_time
+ * - else account as system_time
+ *
+ * Check for hardirq is done both for system and user time as there is
+ * no timer going off while we are on hardirq and hence we may never get an
+ * opportunity to update it solely in system time.
+ * p->stime and friends are only updated on system time and not on irq + * softirq as those do not count in task exec_runtime any more. + */ +static void irqtime_account_process_tick(struct task_struct *p, int user_tick, + struct rq *rq) +{ + cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); + u64 *cpustat = kcpustat_this_cpu->cpustat; + + if (steal_account_process_tick()) + return; + + if (irqtime_account_hi_update()) { + cpustat[CPUTIME_IRQ] += (__force u64) cputime_one_jiffy; + } else if (irqtime_account_si_update()) { + cpustat[CPUTIME_SOFTIRQ] += (__force u64) cputime_one_jiffy; + } else if (this_cpu_ksoftirqd() == p) { + /* + * ksoftirqd time do not get accounted in cpu_softirq_time. + * So, we have to handle it separately here. + * Also, p->stime needs to be updated for ksoftirqd. + */ + __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, + CPUTIME_SOFTIRQ); + } else if (user_tick) { + account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); + } else if (p == rq->idle) { + account_idle_time(cputime_one_jiffy); + } else if (p->flags & PF_VCPU) { /* System time or guest time */ + account_guest_time(p, cputime_one_jiffy, one_jiffy_scaled); + } else { + __account_system_time(p, cputime_one_jiffy, one_jiffy_scaled, + CPUTIME_SYSTEM); + } +} + +static void irqtime_account_idle_ticks(int ticks) +{ + int i; + struct rq *rq = this_rq(); + + for (i = 0; i < ticks; i++) + irqtime_account_process_tick(current, 0, rq); +} +#else /* CONFIG_IRQ_TIME_ACCOUNTING */ +static void irqtime_account_idle_ticks(int ticks) {} +static void irqtime_account_process_tick(struct task_struct *p, int user_tick, + struct rq *rq) {} +#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ + +/* + * Account a single tick of cpu time. + * @p: the process that the cpu time gets accounted to + * @user_tick: indicates if the tick is a user or a system tick + */ +void account_process_tick(struct task_struct *p, int user_tick) +{ + cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); + struct rq *rq = this_rq(); + + if (sched_clock_irqtime) { + irqtime_account_process_tick(p, user_tick, rq); + return; + } + + if (steal_account_process_tick()) + return; + + if (user_tick) + account_user_time(p, cputime_one_jiffy, one_jiffy_scaled); + else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) + account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy, + one_jiffy_scaled); + else + account_idle_time(cputime_one_jiffy); +} + +/* + * Account multiple ticks of steal time. + * @p: the process from which the cpu time has been stolen + * @ticks: number of stolen ticks + */ +void account_steal_ticks(unsigned long ticks) +{ + account_steal_time(jiffies_to_cputime(ticks)); +} + +/* + * Account multiple ticks of idle time. 
+ * @ticks: number of stolen ticks + */ +void account_idle_ticks(unsigned long ticks) +{ + + if (sched_clock_irqtime) { + irqtime_account_idle_ticks(ticks); + return; + } + + account_idle_time(jiffies_to_cputime(ticks)); +} + +#endif + +/* + * Use precise platform statistics if available: + */ +#ifdef CONFIG_VIRT_CPU_ACCOUNTING +void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) +{ + *ut = p->utime; + *st = p->stime; +} + +void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) +{ + struct task_cputime cputime; + + thread_group_cputime(p, &cputime); + + *ut = cputime.utime; + *st = cputime.stime; +} + +/* + * Archs that account the whole time spent in the idle task + * (outside irq) as idle time can rely on this and just implement + * vtime_account_system() and vtime_account_idle(). Archs that + * have other meaning of the idle time (s390 only includes the + * time spent by the CPU when it's in low power mode) must override + * vtime_account(). + */ +#ifndef __ARCH_HAS_VTIME_ACCOUNT +void vtime_account(struct task_struct *tsk) +{ + unsigned long flags; + + local_irq_save(flags); + + if (in_interrupt() || !is_idle_task(tsk)) + vtime_account_system(tsk); + else + vtime_account_idle(tsk); + + local_irq_restore(flags); +} +EXPORT_SYMBOL_GPL(vtime_account); +#endif /* __ARCH_HAS_VTIME_ACCOUNT */ + +#else + +#ifndef nsecs_to_cputime +# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) +#endif + +static cputime_t scale_utime(cputime_t utime, cputime_t rtime, cputime_t total) +{ + u64 temp = (__force u64) rtime; + + temp *= (__force u64) utime; + + if (sizeof(cputime_t) == 4) + temp = div_u64(temp, (__force u32) total); + else + temp = div64_u64(temp, (__force u64) total); + + return (__force cputime_t) temp; +} + +void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st) +{ + cputime_t rtime, utime = p->utime, total = utime + p->stime; + + /* + * Use CFS's precise accounting: + */ + rtime = nsecs_to_cputime(p->se.sum_exec_runtime); + + if (total) + utime = scale_utime(utime, rtime, total); + else + utime = rtime; + + /* + * Compare with previous values, to keep monotonicity: + */ + p->prev_utime = max(p->prev_utime, utime); + p->prev_stime = max(p->prev_stime, rtime - p->prev_utime); + + *ut = p->prev_utime; + *st = p->prev_stime; +} + +/* + * Must be called with siglock held. + */ +void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) +{ + struct signal_struct *sig = p->signal; + struct task_cputime cputime; + cputime_t rtime, utime, total; + + thread_group_cputime(p, &cputime); + + total = cputime.utime + cputime.stime; + rtime = nsecs_to_cputime(cputime.sum_exec_runtime); + + if (total) + utime = scale_utime(cputime.utime, rtime, total); + else + utime = rtime; + + sig->prev_utime = max(sig->prev_utime, utime); + sig->prev_stime = max(sig->prev_stime, rtime - sig->prev_utime); + + *ut = sig->prev_utime; + *st = sig->prev_stime; +} +#endif diff --git a/trunk/kernel/sched/fair.c b/trunk/kernel/sched/fair.c index 22321db64952..6b800a14b990 100644 --- a/trunk/kernel/sched/fair.c +++ b/trunk/kernel/sched/fair.c @@ -597,7 +597,7 @@ calc_delta_fair(unsigned long delta, struct sched_entity *se) /* * The idea is to set a period in which each task runs once. * - * When there are too many tasks (sysctl_sched_nr_latency) we have to stretch + * When there are too many tasks (sched_nr_latency) we have to stretch * this period because otherwise the slices get too small. * * p = (nr <= nl) ? 
l : l*nr/nl @@ -2052,7 +2052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) hrtimer_cancel(&cfs_b->slack_timer); } -void unthrottle_offline_cfs_rqs(struct rq *rq) +static void unthrottle_offline_cfs_rqs(struct rq *rq) { struct cfs_rq *cfs_rq; @@ -2106,7 +2106,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) return NULL; } static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} -void unthrottle_offline_cfs_rqs(struct rq *rq) {} +static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {} #endif /* CONFIG_CFS_BANDWIDTH */ @@ -2637,6 +2637,8 @@ static int select_idle_sibling(struct task_struct *p, int target) int cpu = smp_processor_id(); int prev_cpu = task_cpu(p); struct sched_domain *sd; + struct sched_group *sg; + int i; /* * If the task is going to be woken-up on this cpu and if it is @@ -2653,17 +2655,29 @@ static int select_idle_sibling(struct task_struct *p, int target) return prev_cpu; /* - * Otherwise, check assigned siblings to find an elegible idle cpu. + * Otherwise, iterate the domains and find an elegible idle cpu. */ sd = rcu_dereference(per_cpu(sd_llc, target)); - for_each_lower_domain(sd) { - if (!cpumask_test_cpu(sd->idle_buddy, tsk_cpus_allowed(p))) - continue; - if (idle_cpu(sd->idle_buddy)) - return sd->idle_buddy; - } + sg = sd->groups; + do { + if (!cpumask_intersects(sched_group_cpus(sg), + tsk_cpus_allowed(p))) + goto next; + for_each_cpu(i, sched_group_cpus(sg)) { + if (!idle_cpu(i)) + goto next; + } + + target = cpumask_first_and(sched_group_cpus(sg), + tsk_cpus_allowed(p)); + goto done; +next: + sg = sg->next; + } while (sg != sd->groups); + } +done: return target; } @@ -2686,7 +2700,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) int prev_cpu = task_cpu(p); int new_cpu = cpu; int want_affine = 0; - int want_sd = 1; int sync = wake_flags & WF_SYNC; if (p->nr_cpus_allowed == 1) @@ -2703,27 +2716,6 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) if (!(tmp->flags & SD_LOAD_BALANCE)) continue; - /* - * If power savings logic is enabled for a domain, see if we - * are not overloaded, if so, don't balance wider. - */ - if (tmp->flags & (SD_PREFER_LOCAL)) { - unsigned long power = 0; - unsigned long nr_running = 0; - unsigned long capacity; - int i; - - for_each_cpu(i, sched_domain_span(tmp)) { - power += power_of(i); - nr_running += cpu_rq(i)->cfs.nr_running; - } - - capacity = DIV_ROUND_CLOSEST(power, SCHED_POWER_SCALE); - - if (nr_running < capacity) - want_sd = 0; - } - /* * If both cpu and prev_cpu are part of this domain, * cpu is a valid SD_WAKE_AFFINE target. 
@@ -2731,21 +2723,15 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) if (want_affine && (tmp->flags & SD_WAKE_AFFINE) && cpumask_test_cpu(prev_cpu, sched_domain_span(tmp))) { affine_sd = tmp; - want_affine = 0; - } - - if (!want_sd && !want_affine) break; + } - if (!(tmp->flags & sd_flag)) - continue; - - if (want_sd) + if (tmp->flags & sd_flag) sd = tmp; } if (affine_sd) { - if (cpu == prev_cpu || wake_affine(affine_sd, p, sync)) + if (cpu != prev_cpu && wake_affine(affine_sd, p, sync)) prev_cpu = cpu; new_cpu = select_idle_sibling(p, prev_cpu); @@ -3069,6 +3055,9 @@ struct lb_env { int new_dst_cpu; enum cpu_idle_type idle; long imbalance; + /* The set of CPUs under consideration for load-balancing */ + struct cpumask *cpus; + unsigned int flags; unsigned int loop; @@ -3384,6 +3373,14 @@ static int tg_load_down(struct task_group *tg, void *data) static void update_h_load(long cpu) { + struct rq *rq = cpu_rq(cpu); + unsigned long now = jiffies; + + if (rq->h_load_throttle == now) + return; + + rq->h_load_throttle = now; + rcu_read_lock(); walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); rcu_read_unlock(); @@ -3647,14 +3644,12 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) * @group: sched_group whose statistics are to be updated. * @load_idx: Load index of sched_domain of this_cpu for load calc. * @local_group: Does group contain this_cpu. - * @cpus: Set of cpus considered for load balancing. * @balance: Should we balance. * @sgs: variable to hold the statistics for this group. */ static inline void update_sg_lb_stats(struct lb_env *env, struct sched_group *group, int load_idx, - int local_group, const struct cpumask *cpus, - int *balance, struct sg_lb_stats *sgs) + int local_group, int *balance, struct sg_lb_stats *sgs) { unsigned long nr_running, max_nr_running, min_nr_running; unsigned long load, max_cpu_load, min_cpu_load; @@ -3671,7 +3666,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, max_nr_running = 0; min_nr_running = ~0UL; - for_each_cpu_and(i, sched_group_cpus(group), cpus) { + for_each_cpu_and(i, sched_group_cpus(group), env->cpus) { struct rq *rq = cpu_rq(i); nr_running = rq->nr_running; @@ -3795,13 +3790,11 @@ static bool update_sd_pick_busiest(struct lb_env *env, /** * update_sd_lb_stats - Update sched_domain's statistics for load balancing. * @env: The load balancing environment. - * @cpus: Set of cpus considered for load balancing. * @balance: Should we balance. * @sds: variable to hold the statistics for this sched_domain. */ static inline void update_sd_lb_stats(struct lb_env *env, - const struct cpumask *cpus, - int *balance, struct sd_lb_stats *sds) + int *balance, struct sd_lb_stats *sds) { struct sched_domain *child = env->sd->child; struct sched_group *sg = env->sd->groups; @@ -3818,8 +3811,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg)); memset(&sgs, 0, sizeof(sgs)); - update_sg_lb_stats(env, sg, load_idx, local_group, - cpus, balance, &sgs); + update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs); if (local_group && !(*balance)) return; @@ -4055,7 +4047,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * to restore balance. * * @env: The load balancing environment. - * @cpus: The set of CPUs under consideration for load-balancing. * @balance: Pointer to a variable indicating if this_cpu * is the appropriate cpu to perform load balancing at this_level. 
* @@ -4065,7 +4056,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s * put to idle by rebalancing its tasks onto our group. */ static struct sched_group * -find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance) +find_busiest_group(struct lb_env *env, int *balance) { struct sd_lb_stats sds; @@ -4075,7 +4066,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance) * Compute the various statistics relavent for load balancing at * this level. */ - update_sd_lb_stats(env, cpus, balance, &sds); + update_sd_lb_stats(env, balance, &sds); /* * this_cpu is not the appropriate cpu to perform load balancing at @@ -4155,8 +4146,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance) * find_busiest_queue - find the busiest runqueue among the cpus in group. */ static struct rq *find_busiest_queue(struct lb_env *env, - struct sched_group *group, - const struct cpumask *cpus) + struct sched_group *group) { struct rq *busiest = NULL, *rq; unsigned long max_load = 0; @@ -4171,7 +4161,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, if (!capacity) capacity = fix_small_capacity(env->sd, group); - if (!cpumask_test_cpu(i, cpus)) + if (!cpumask_test_cpu(i, env->cpus)) continue; rq = cpu_rq(i); @@ -4252,6 +4242,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, .dst_grpmask = sched_group_cpus(sd->groups), .idle = idle, .loop_break = sched_nr_migrate_break, + .cpus = cpus, }; cpumask_copy(cpus, cpu_active_mask); @@ -4260,7 +4251,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, schedstat_inc(sd, lb_count[idle]); redo: - group = find_busiest_group(&env, cpus, balance); + group = find_busiest_group(&env, balance); if (*balance == 0) goto out_balanced; @@ -4270,13 +4261,13 @@ static int load_balance(int this_cpu, struct rq *this_rq, goto out_balanced; } - busiest = find_busiest_queue(&env, group, cpus); + busiest = find_busiest_queue(&env, group); if (!busiest) { schedstat_inc(sd, lb_nobusyq[idle]); goto out_balanced; } - BUG_ON(busiest == this_rq); + BUG_ON(busiest == env.dst_rq); schedstat_add(sd, lb_imbalance[idle], env.imbalance); @@ -4294,11 +4285,10 @@ static int load_balance(int this_cpu, struct rq *this_rq, env.src_rq = busiest; env.loop_max = min(sysctl_sched_nr_migrate, busiest->nr_running); + update_h_load(env.src_cpu); more_balance: local_irq_save(flags); - double_rq_lock(this_rq, busiest); - if (!env.loop) - update_h_load(env.src_cpu); + double_rq_lock(env.dst_rq, busiest); /* * cur_ld_moved - load moved in current iteration @@ -4306,7 +4296,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, */ cur_ld_moved = move_tasks(&env); ld_moved += cur_ld_moved; - double_rq_unlock(this_rq, busiest); + double_rq_unlock(env.dst_rq, busiest); local_irq_restore(flags); if (env.flags & LBF_NEED_BREAK) { @@ -4342,8 +4332,7 @@ static int load_balance(int this_cpu, struct rq *this_rq, if ((env.flags & LBF_SOME_PINNED) && env.imbalance > 0 && lb_iterations++ < max_lb_iterations) { - this_rq = cpu_rq(env.new_dst_cpu); - env.dst_rq = this_rq; + env.dst_rq = cpu_rq(env.new_dst_cpu); env.dst_cpu = env.new_dst_cpu; env.flags &= ~LBF_SOME_PINNED; env.loop = 0; @@ -4628,7 +4617,7 @@ static void nohz_balancer_kick(int cpu) return; } -static inline void clear_nohz_tick_stopped(int cpu) +static inline void nohz_balance_exit_idle(int cpu) { if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); @@ -4668,28 +4657,23 
@@ void set_cpu_sd_state_idle(void) } /* - * This routine will record that this cpu is going idle with tick stopped. + * This routine will record that the cpu is going idle with tick stopped. * This info will be used in performing idle load balancing in the future. */ -void select_nohz_load_balancer(int stop_tick) +void nohz_balance_enter_idle(int cpu) { - int cpu = smp_processor_id(); - /* * If this cpu is going down, then nothing needs to be done. */ if (!cpu_active(cpu)) return; - if (stop_tick) { - if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) - return; + if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu))) + return; - cpumask_set_cpu(cpu, nohz.idle_cpus_mask); - atomic_inc(&nohz.nr_cpus); - set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); - } - return; + cpumask_set_cpu(cpu, nohz.idle_cpus_mask); + atomic_inc(&nohz.nr_cpus); + set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); } static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, @@ -4697,7 +4681,7 @@ static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, { switch (action & ~CPU_TASKS_FROZEN) { case CPU_DYING: - clear_nohz_tick_stopped(smp_processor_id()); + nohz_balance_exit_idle(smp_processor_id()); return NOTIFY_OK; default: return NOTIFY_DONE; @@ -4819,14 +4803,15 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) if (need_resched()) break; - raw_spin_lock_irq(&this_rq->lock); - update_rq_clock(this_rq); - update_idle_cpu_load(this_rq); - raw_spin_unlock_irq(&this_rq->lock); + rq = cpu_rq(balance_cpu); + + raw_spin_lock_irq(&rq->lock); + update_rq_clock(rq); + update_idle_cpu_load(rq); + raw_spin_unlock_irq(&rq->lock); rebalance_domains(balance_cpu, CPU_IDLE); - rq = cpu_rq(balance_cpu); if (time_after(this_rq->next_balance, rq->next_balance)) this_rq->next_balance = rq->next_balance; } @@ -4857,7 +4842,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu) * busy tick after returning from idle, we will update the busy stats. */ set_cpu_sd_state_busy(); - clear_nohz_tick_stopped(cpu); + nohz_balance_exit_idle(cpu); /* * None are in tickless mode and hence no need for NOHZ idle load @@ -4950,6 +4935,9 @@ static void rq_online_fair(struct rq *rq) static void rq_offline_fair(struct rq *rq) { update_sysctl(); + + /* Ensure any throttled groups are reachable by pick_next_task */ + unthrottle_offline_cfs_rqs(rq); } #endif /* CONFIG_SMP */ diff --git a/trunk/kernel/sched/features.h b/trunk/kernel/sched/features.h index de00a486c5c6..eebefcad7027 100644 --- a/trunk/kernel/sched/features.h +++ b/trunk/kernel/sched/features.h @@ -11,14 +11,6 @@ SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true) */ SCHED_FEAT(START_DEBIT, true) -/* - * Based on load and program behaviour, see if it makes sense to place - * a newly woken task on the same cpu as the task that woke it -- - * improve cache locality. Typically used with SYNC wakeups as - * generated by pipes and the like, see also SYNC_WAKEUPS. 
- */ -SCHED_FEAT(AFFINE_WAKEUPS, true) - /* * Prefer to schedule the task we woke last (assuming it failed * wakeup-preemption), since its likely going to consume data we @@ -42,7 +34,7 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true) /* * Use arch dependent cpu power functions */ -SCHED_FEAT(ARCH_POWER, false) +SCHED_FEAT(ARCH_POWER, true) SCHED_FEAT(HRTICK, false) SCHED_FEAT(DOUBLE_TICK, false) diff --git a/trunk/kernel/sched/rt.c b/trunk/kernel/sched/rt.c index 573e1ca01102..418feb01344e 100644 --- a/trunk/kernel/sched/rt.c +++ b/trunk/kernel/sched/rt.c @@ -691,6 +691,7 @@ static void __disable_runtime(struct rq *rq) * runtime - in which case borrowing doesn't make sense. */ rt_rq->rt_runtime = RUNTIME_INF; + rt_rq->rt_throttled = 0; raw_spin_unlock(&rt_rq->rt_runtime_lock); raw_spin_unlock(&rt_b->rt_runtime_lock); } @@ -788,6 +789,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) const struct cpumask *span; span = sched_rt_period_mask(); +#ifdef CONFIG_RT_GROUP_SCHED + /* + * FIXME: isolated CPUs should really leave the root task group, + * whether they are isolcpus or were isolated via cpusets, lest + * the timer run on a CPU which does not service all runqueues, + * potentially leaving other CPUs indefinitely throttled. If + * isolation is really required, the user will turn the throttle + * off to kill the perturbations it causes anyway. Meanwhile, + * this maintains functionality for boot and/or troubleshooting. + */ + if (rt_b == &root_task_group.rt_bandwidth) + span = cpu_online_mask; +#endif for_each_cpu(i, span) { int enqueue = 0; struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); @@ -1618,11 +1632,6 @@ static int push_rt_task(struct rq *rq) if (!next_task) return 0; -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - if (unlikely(task_running(rq, next_task))) - return 0; -#endif - retry: if (unlikely(next_task == rq->curr)) { WARN_ON(1); diff --git a/trunk/kernel/sched/sched.h b/trunk/kernel/sched/sched.h index c35a1a7dd4d6..7a7db09cfabc 100644 --- a/trunk/kernel/sched/sched.h +++ b/trunk/kernel/sched/sched.h @@ -80,7 +80,7 @@ extern struct mutex sched_domains_mutex; struct cfs_rq; struct rt_rq; -static LIST_HEAD(task_groups); +extern struct list_head task_groups; struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH @@ -374,7 +374,11 @@ struct rq { #ifdef CONFIG_FAIR_GROUP_SCHED /* list of leaf cfs_rq on this cpu: */ struct list_head leaf_cfs_rq_list; -#endif +#ifdef CONFIG_SMP + unsigned long h_load_throttle; +#endif /* CONFIG_SMP */ +#endif /* CONFIG_FAIR_GROUP_SCHED */ + #ifdef CONFIG_RT_GROUP_SCHED struct list_head leaf_rt_rq_list; #endif @@ -733,11 +737,7 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) */ next->on_cpu = 1; #endif -#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW - raw_spin_unlock_irq(&rq->lock); -#else raw_spin_unlock(&rq->lock); -#endif } static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) @@ -751,9 +751,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) smp_wmb(); prev->on_cpu = 0; #endif -#ifndef __ARCH_WANT_INTERRUPTS_ON_CTXSW local_irq_enable(); -#endif } #endif /* __ARCH_WANT_UNLOCKED_CTXSW */ @@ -887,6 +885,9 @@ struct cpuacct { struct kernel_cpustat __percpu *cpustat; }; +extern struct cgroup_subsys cpuacct_subsys; +extern struct cpuacct root_cpuacct; + /* return cpu accounting group corresponding to this container */ static inline struct cpuacct *cgroup_ca(struct cgroup *cgrp) { @@ -913,6 +914,16 @@ extern void cpuacct_charge(struct task_struct *tsk, 
u64 cputime); static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {} #endif +#ifdef CONFIG_PARAVIRT +static inline u64 steal_ticks(u64 steal) +{ + if (unlikely(steal > NSEC_PER_SEC)) + return div_u64(steal, TICK_NSEC); + + return __iter_div_u64_rem(steal, TICK_NSEC, &steal); +} +#endif + static inline void inc_nr_running(struct rq *rq) { rq->nr_running++; @@ -1140,7 +1151,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu); extern void init_cfs_rq(struct cfs_rq *cfs_rq); extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq); -extern void unthrottle_offline_cfs_rqs(struct rq *rq); extern void account_cfs_bandwidth_used(int enabled, int was_enabled); @@ -1153,3 +1163,53 @@ enum rq_nohz_flag_bits { #define nohz_flags(cpu) (&cpu_rq(cpu)->nohz_flags) #endif + +#ifdef CONFIG_IRQ_TIME_ACCOUNTING + +DECLARE_PER_CPU(u64, cpu_hardirq_time); +DECLARE_PER_CPU(u64, cpu_softirq_time); + +#ifndef CONFIG_64BIT +DECLARE_PER_CPU(seqcount_t, irq_time_seq); + +static inline void irq_time_write_begin(void) +{ + __this_cpu_inc(irq_time_seq.sequence); + smp_wmb(); +} + +static inline void irq_time_write_end(void) +{ + smp_wmb(); + __this_cpu_inc(irq_time_seq.sequence); +} + +static inline u64 irq_time_read(int cpu) +{ + u64 irq_time; + unsigned seq; + + do { + seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); + irq_time = per_cpu(cpu_softirq_time, cpu) + + per_cpu(cpu_hardirq_time, cpu); + } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); + + return irq_time; +} +#else /* CONFIG_64BIT */ +static inline void irq_time_write_begin(void) +{ +} + +static inline void irq_time_write_end(void) +{ +} + +static inline u64 irq_time_read(int cpu) +{ + return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); +} +#endif /* CONFIG_64BIT */ +#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ + diff --git a/trunk/kernel/sched/stop_task.c b/trunk/kernel/sched/stop_task.c index 7b386e86fd23..da5eb5bed84a 100644 --- a/trunk/kernel/sched/stop_task.c +++ b/trunk/kernel/sched/stop_task.c @@ -27,8 +27,10 @@ static struct task_struct *pick_next_task_stop(struct rq *rq) { struct task_struct *stop = rq->stop; - if (stop && stop->on_rq) + if (stop && stop->on_rq) { + stop->se.exec_start = rq->clock_task; return stop; + } return NULL; } @@ -52,6 +54,21 @@ static void yield_task_stop(struct rq *rq) static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) { + struct task_struct *curr = rq->curr; + u64 delta_exec; + + delta_exec = rq->clock_task - curr->se.exec_start; + if (unlikely((s64)delta_exec < 0)) + delta_exec = 0; + + schedstat_set(curr->se.statistics.exec_max, + max(curr->se.statistics.exec_max, delta_exec)); + + curr->se.sum_exec_runtime += delta_exec; + account_group_exec_runtime(curr, delta_exec); + + curr->se.exec_start = rq->clock_task; + cpuacct_charge(curr, delta_exec); } static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) @@ -60,6 +77,9 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) static void set_curr_task_stop(struct rq *rq) { + struct task_struct *stop = rq->stop; + + stop->se.exec_start = rq->clock_task; } static void switched_to_stop(struct rq *rq, struct task_struct *p) diff --git a/trunk/kernel/signal.c b/trunk/kernel/signal.c index be4f856d52f8..2c681f11b7d2 100644 --- a/trunk/kernel/signal.c +++ b/trunk/kernel/signal.c @@ -1971,13 +1971,8 @@ static void ptrace_do_notify(int signr, int exit_code, int why) void ptrace_notify(int exit_code) { BUG_ON((exit_code & (0x7f | ~0xffff)) != 
SIGTRAP); - if (unlikely(current->task_works)) { - if (test_and_clear_ti_thread_flag(current_thread_info(), - TIF_NOTIFY_RESUME)) { - smp_mb__after_clear_bit(); - task_work_run(); - } - } + if (unlikely(current->task_works)) + task_work_run(); spin_lock_irq(¤t->sighand->siglock); ptrace_do_notify(SIGTRAP, exit_code, CLD_TRAPPED); @@ -2198,13 +2193,8 @@ int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct signal_struct *signal = current->signal; int signr; - if (unlikely(current->task_works)) { - if (test_and_clear_ti_thread_flag(current_thread_info(), - TIF_NOTIFY_RESUME)) { - smp_mb__after_clear_bit(); - task_work_run(); - } - } + if (unlikely(current->task_works)) + task_work_run(); if (unlikely(uprobe_deny_signal())) return 0; diff --git a/trunk/kernel/smpboot.c b/trunk/kernel/smpboot.c index 98f60c5caa1b..d6c5fc054242 100644 --- a/trunk/kernel/smpboot.c +++ b/trunk/kernel/smpboot.c @@ -1,14 +1,22 @@ /* * Common SMP CPU bringup/teardown functions */ +#include #include #include #include +#include +#include #include +#include #include +#include +#include #include "smpboot.h" +#ifdef CONFIG_SMP + #ifdef CONFIG_GENERIC_SMP_IDLE_THREAD /* * For the hotplug case we keep the task structs around and reuse @@ -65,3 +73,228 @@ void __init idle_threads_init(void) } } #endif + +#endif /* #ifdef CONFIG_SMP */ + +static LIST_HEAD(hotplug_threads); +static DEFINE_MUTEX(smpboot_threads_lock); + +struct smpboot_thread_data { + unsigned int cpu; + unsigned int status; + struct smp_hotplug_thread *ht; +}; + +enum { + HP_THREAD_NONE = 0, + HP_THREAD_ACTIVE, + HP_THREAD_PARKED, +}; + +/** + * smpboot_thread_fn - percpu hotplug thread loop function + * @data: thread data pointer + * + * Checks for thread stop and park conditions. Calls the necessary + * setup, cleanup, park and unpark functions for the registered + * thread. + * + * Returns 1 when the thread should exit, 0 otherwise. 
+ */ +static int smpboot_thread_fn(void *data) +{ + struct smpboot_thread_data *td = data; + struct smp_hotplug_thread *ht = td->ht; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + preempt_disable(); + if (kthread_should_stop()) { + set_current_state(TASK_RUNNING); + preempt_enable(); + if (ht->cleanup) + ht->cleanup(td->cpu, cpu_online(td->cpu)); + kfree(td); + return 0; + } + + if (kthread_should_park()) { + __set_current_state(TASK_RUNNING); + preempt_enable(); + if (ht->park && td->status == HP_THREAD_ACTIVE) { + BUG_ON(td->cpu != smp_processor_id()); + ht->park(td->cpu); + td->status = HP_THREAD_PARKED; + } + kthread_parkme(); + /* We might have been woken for stop */ + continue; + } + + BUG_ON(td->cpu != smp_processor_id()); + + /* Check for state change setup */ + switch (td->status) { + case HP_THREAD_NONE: + preempt_enable(); + if (ht->setup) + ht->setup(td->cpu); + td->status = HP_THREAD_ACTIVE; + preempt_disable(); + break; + case HP_THREAD_PARKED: + preempt_enable(); + if (ht->unpark) + ht->unpark(td->cpu); + td->status = HP_THREAD_ACTIVE; + preempt_disable(); + break; + } + + if (!ht->thread_should_run(td->cpu)) { + preempt_enable(); + schedule(); + } else { + set_current_state(TASK_RUNNING); + preempt_enable(); + ht->thread_fn(td->cpu); + } + } +} + +static int +__smpboot_create_thread(struct smp_hotplug_thread *ht, unsigned int cpu) +{ + struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); + struct smpboot_thread_data *td; + + if (tsk) + return 0; + + td = kzalloc_node(sizeof(*td), GFP_KERNEL, cpu_to_node(cpu)); + if (!td) + return -ENOMEM; + td->cpu = cpu; + td->ht = ht; + + tsk = kthread_create_on_cpu(smpboot_thread_fn, td, cpu, + ht->thread_comm); + if (IS_ERR(tsk)) { + kfree(td); + return PTR_ERR(tsk); + } + + get_task_struct(tsk); + *per_cpu_ptr(ht->store, cpu) = tsk; + return 0; +} + +int smpboot_create_threads(unsigned int cpu) +{ + struct smp_hotplug_thread *cur; + int ret = 0; + + mutex_lock(&smpboot_threads_lock); + list_for_each_entry(cur, &hotplug_threads, list) { + ret = __smpboot_create_thread(cur, cpu); + if (ret) + break; + } + mutex_unlock(&smpboot_threads_lock); + return ret; +} + +static void smpboot_unpark_thread(struct smp_hotplug_thread *ht, unsigned int cpu) +{ + struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); + + kthread_unpark(tsk); +} + +void smpboot_unpark_threads(unsigned int cpu) +{ + struct smp_hotplug_thread *cur; + + mutex_lock(&smpboot_threads_lock); + list_for_each_entry(cur, &hotplug_threads, list) + smpboot_unpark_thread(cur, cpu); + mutex_unlock(&smpboot_threads_lock); +} + +static void smpboot_park_thread(struct smp_hotplug_thread *ht, unsigned int cpu) +{ + struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); + + if (tsk) + kthread_park(tsk); +} + +void smpboot_park_threads(unsigned int cpu) +{ + struct smp_hotplug_thread *cur; + + mutex_lock(&smpboot_threads_lock); + list_for_each_entry_reverse(cur, &hotplug_threads, list) + smpboot_park_thread(cur, cpu); + mutex_unlock(&smpboot_threads_lock); +} + +static void smpboot_destroy_threads(struct smp_hotplug_thread *ht) +{ + unsigned int cpu; + + /* We need to destroy also the parked threads of offline cpus */ + for_each_possible_cpu(cpu) { + struct task_struct *tsk = *per_cpu_ptr(ht->store, cpu); + + if (tsk) { + kthread_stop(tsk); + put_task_struct(tsk); + *per_cpu_ptr(ht->store, cpu) = NULL; + } + } +} + +/** + * smpboot_register_percpu_thread - Register a per_cpu thread related to hotplug + * @plug_thread: Hotplug thread descriptor + * + * Creates and starts the 
threads on all online cpus. + */ +int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread) +{ + unsigned int cpu; + int ret = 0; + + mutex_lock(&smpboot_threads_lock); + for_each_online_cpu(cpu) { + ret = __smpboot_create_thread(plug_thread, cpu); + if (ret) { + smpboot_destroy_threads(plug_thread); + goto out; + } + smpboot_unpark_thread(plug_thread, cpu); + } + list_add(&plug_thread->list, &hotplug_threads); +out: + mutex_unlock(&smpboot_threads_lock); + return ret; +} +EXPORT_SYMBOL_GPL(smpboot_register_percpu_thread); + +/** + * smpboot_unregister_percpu_thread - Unregister a per_cpu thread related to hotplug + * @plug_thread: Hotplug thread descriptor + * + * Stops all threads on all possible cpus. + */ +void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread) +{ + get_online_cpus(); + mutex_lock(&smpboot_threads_lock); + list_del(&plug_thread->list); + smpboot_destroy_threads(plug_thread); + mutex_unlock(&smpboot_threads_lock); + put_online_cpus(); +} +EXPORT_SYMBOL_GPL(smpboot_unregister_percpu_thread); diff --git a/trunk/kernel/smpboot.h b/trunk/kernel/smpboot.h index 6ef9433e1c70..72415a0eb955 100644 --- a/trunk/kernel/smpboot.h +++ b/trunk/kernel/smpboot.h @@ -13,4 +13,8 @@ static inline void idle_thread_set_boot_cpu(void) { } static inline void idle_threads_init(void) { } #endif +int smpboot_create_threads(unsigned int cpu); +void smpboot_park_threads(unsigned int cpu); +void smpboot_unpark_threads(unsigned int cpu); + #endif diff --git a/trunk/kernel/softirq.c b/trunk/kernel/softirq.c index b73e681df09e..cc96bdc0c2c9 100644 --- a/trunk/kernel/softirq.c +++ b/trunk/kernel/softirq.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #define CREATE_TRACE_POINTS @@ -220,7 +221,7 @@ asmlinkage void __do_softirq(void) current->flags &= ~PF_MEMALLOC; pending = local_softirq_pending(); - account_system_vtime(current); + vtime_account(current); __local_bh_disable((unsigned long)__builtin_return_address(0), SOFTIRQ_OFFSET); @@ -271,7 +272,7 @@ asmlinkage void __do_softirq(void) lockdep_softirq_exit(); - account_system_vtime(current); + vtime_account(current); __local_bh_enable(SOFTIRQ_OFFSET); tsk_restore_flags(current, old_flags, PF_MEMALLOC); } @@ -340,7 +341,7 @@ static inline void invoke_softirq(void) */ void irq_exit(void) { - account_system_vtime(current); + vtime_account(current); trace_hardirq_exit(); sub_preempt_count(IRQ_EXIT_OFFSET); if (!in_interrupt() && local_softirq_pending()) @@ -742,49 +743,22 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action); } -static int run_ksoftirqd(void * __bind_cpu) +static int ksoftirqd_should_run(unsigned int cpu) { - set_current_state(TASK_INTERRUPTIBLE); - - while (!kthread_should_stop()) { - preempt_disable(); - if (!local_softirq_pending()) { - schedule_preempt_disabled(); - } - - __set_current_state(TASK_RUNNING); - - while (local_softirq_pending()) { - /* Preempt disable stops cpu going offline. 
- If already offline, we'll be on wrong CPU: - don't process */ - if (cpu_is_offline((long)__bind_cpu)) - goto wait_to_die; - local_irq_disable(); - if (local_softirq_pending()) - __do_softirq(); - local_irq_enable(); - sched_preempt_enable_no_resched(); - cond_resched(); - preempt_disable(); - rcu_note_context_switch((long)__bind_cpu); - } - preempt_enable(); - set_current_state(TASK_INTERRUPTIBLE); - } - __set_current_state(TASK_RUNNING); - return 0; + return local_softirq_pending(); +} -wait_to_die: - preempt_enable(); - /* Wait for kthread_stop */ - set_current_state(TASK_INTERRUPTIBLE); - while (!kthread_should_stop()) { - schedule(); - set_current_state(TASK_INTERRUPTIBLE); +static void run_ksoftirqd(unsigned int cpu) +{ + local_irq_disable(); + if (local_softirq_pending()) { + __do_softirq(); + rcu_note_context_switch(cpu); + local_irq_enable(); + cond_resched(); + return; } - __set_current_state(TASK_RUNNING); - return 0; + local_irq_enable(); } #ifdef CONFIG_HOTPLUG_CPU @@ -850,50 +824,14 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { - int hotcpu = (unsigned long)hcpu; - struct task_struct *p; - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - p = kthread_create_on_node(run_ksoftirqd, - hcpu, - cpu_to_node(hotcpu), - "ksoftirqd/%d", hotcpu); - if (IS_ERR(p)) { - printk("ksoftirqd for %i failed\n", hotcpu); - return notifier_from_errno(PTR_ERR(p)); - } - kthread_bind(p, hotcpu); - per_cpu(ksoftirqd, hotcpu) = p; - break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - wake_up_process(per_cpu(ksoftirqd, hotcpu)); - break; #ifdef CONFIG_HOTPLUG_CPU - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - if (!per_cpu(ksoftirqd, hotcpu)) - break; - /* Unbind so it can run. Fall thru. 
*/ - kthread_bind(per_cpu(ksoftirqd, hotcpu), - cpumask_any(cpu_online_mask)); case CPU_DEAD: - case CPU_DEAD_FROZEN: { - static const struct sched_param param = { - .sched_priority = MAX_RT_PRIO-1 - }; - - p = per_cpu(ksoftirqd, hotcpu); - per_cpu(ksoftirqd, hotcpu) = NULL; - sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); - kthread_stop(p); - takeover_tasklets(hotcpu); + case CPU_DEAD_FROZEN: + takeover_tasklets((unsigned long)hcpu); break; - } #endif /* CONFIG_HOTPLUG_CPU */ - } + } return NOTIFY_OK; } @@ -901,14 +839,19 @@ static struct notifier_block __cpuinitdata cpu_nfb = { .notifier_call = cpu_callback }; +static struct smp_hotplug_thread softirq_threads = { + .store = &ksoftirqd, + .thread_should_run = ksoftirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ksoftirqd/%u", +}; + static __init int spawn_ksoftirqd(void) { - void *cpu = (void *)(long)smp_processor_id(); - int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); - - BUG_ON(err != NOTIFY_OK); - cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); + + BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); + return 0; } early_initcall(spawn_ksoftirqd); diff --git a/trunk/kernel/sysctl.c b/trunk/kernel/sysctl.c index 87174ef59161..81c7b1a1a307 100644 --- a/trunk/kernel/sysctl.c +++ b/trunk/kernel/sysctl.c @@ -307,7 +307,7 @@ static struct ctl_table kern_table[] = { .extra2 = &max_sched_tunable_scaling, }, { - .procname = "sched_migration_cost", + .procname = "sched_migration_cost_ns", .data = &sysctl_sched_migration_cost, .maxlen = sizeof(unsigned int), .mode = 0644, @@ -321,14 +321,14 @@ static struct ctl_table kern_table[] = { .proc_handler = proc_dointvec, }, { - .procname = "sched_time_avg", + .procname = "sched_time_avg_ms", .data = &sysctl_sched_time_avg, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { - .procname = "sched_shares_window", + .procname = "sched_shares_window_ns", .data = &sysctl_sched_shares_window, .maxlen = sizeof(unsigned int), .mode = 0644, diff --git a/trunk/kernel/task_work.c b/trunk/kernel/task_work.c index 91d4e1742a0c..65bd3c92d6f3 100644 --- a/trunk/kernel/task_work.c +++ b/trunk/kernel/task_work.c @@ -2,26 +2,20 @@ #include #include +static struct callback_head work_exited; /* all we need is ->next == NULL */ + int -task_work_add(struct task_struct *task, struct callback_head *twork, bool notify) +task_work_add(struct task_struct *task, struct callback_head *work, bool notify) { - struct callback_head *last, *first; - unsigned long flags; + struct callback_head *head; - /* - * Not inserting the new work if the task has already passed - * exit_task_work() is the responisbility of callers. - */ - raw_spin_lock_irqsave(&task->pi_lock, flags); - last = task->task_works; - first = last ? last->next : twork; - twork->next = first; - if (last) - last->next = twork; - task->task_works = twork; - raw_spin_unlock_irqrestore(&task->pi_lock, flags); + do { + head = ACCESS_ONCE(task->task_works); + if (unlikely(head == &work_exited)) + return -ESRCH; + work->next = head; + } while (cmpxchg(&task->task_works, head, work) != head); - /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). 
*/ if (notify) set_notify_resume(task); return 0; @@ -30,51 +24,69 @@ task_work_add(struct task_struct *task, struct callback_head *twork, bool notify struct callback_head * task_work_cancel(struct task_struct *task, task_work_func_t func) { + struct callback_head **pprev = &task->task_works; + struct callback_head *work = NULL; unsigned long flags; - struct callback_head *last, *res = NULL; - + /* + * If cmpxchg() fails we continue without updating pprev. + * Either we raced with task_work_add() which added the + * new entry before this work, we will find it again. Or + * we raced with task_work_run(), *pprev == NULL/exited. + */ raw_spin_lock_irqsave(&task->pi_lock, flags); - last = task->task_works; - if (last) { - struct callback_head *q = last, *p = q->next; - while (1) { - if (p->func == func) { - q->next = p->next; - if (p == last) - task->task_works = q == p ? NULL : q; - res = p; - break; - } - if (p == last) - break; - q = p; - p = q->next; - } + while ((work = ACCESS_ONCE(*pprev))) { + read_barrier_depends(); + if (work->func != func) + pprev = &work->next; + else if (cmpxchg(pprev, work, work->next) == work) + break; } raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return res; + + return work; } void task_work_run(void) { struct task_struct *task = current; - struct callback_head *p, *q; + struct callback_head *work, *head, *next; + + for (;;) { + /* + * work->func() can do task_work_add(), do not set + * work_exited unless the list is empty. + */ + do { + work = ACCESS_ONCE(task->task_works); + head = !work && (task->flags & PF_EXITING) ? + &work_exited : NULL; + } while (cmpxchg(&task->task_works, work, head) != work); - while (1) { - raw_spin_lock_irq(&task->pi_lock); - p = task->task_works; - task->task_works = NULL; - raw_spin_unlock_irq(&task->pi_lock); + if (!work) + break; + /* + * Synchronize with task_work_cancel(). It can't remove + * the first entry == work, cmpxchg(task_works) should + * fail, but it can play with *work and other entries. + */ + raw_spin_unlock_wait(&task->pi_lock); + smp_mb(); - if (unlikely(!p)) - return; + /* Reverse the list to run the works in fifo order */ + head = NULL; + do { + next = work->next; + work->next = head; + head = work; + work = next; + } while (work); - q = p->next; /* head */ - p->next = NULL; /* cut it */ - while (q) { - p = q->next; - q->func(q); - q = p; - } + work = head; + do { + next = work->next; + work->func(work); + work = next; + cond_resched(); + } while (work); } } diff --git a/trunk/kernel/time/jiffies.c b/trunk/kernel/time/jiffies.c index a470154e0408..46da0537c10b 100644 --- a/trunk/kernel/time/jiffies.c +++ b/trunk/kernel/time/jiffies.c @@ -37,7 +37,7 @@ * requested HZ value. It is also not recommended * for "tick-less" systems. */ -#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) +#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/SHIFTED_HZ)) /* Since jiffies uses a simple NSEC_PER_JIFFY multiplier * conversion, the .shift value could be zero. 
However diff --git a/trunk/kernel/time/ntp.c b/trunk/kernel/time/ntp.c index b7fbadc5c973..24174b4d669b 100644 --- a/trunk/kernel/time/ntp.c +++ b/trunk/kernel/time/ntp.c @@ -28,7 +28,7 @@ DEFINE_SPINLOCK(ntp_lock); /* USER_HZ period (usecs): */ unsigned long tick_usec = TICK_USEC; -/* ACTHZ period (nsecs): */ +/* SHIFTED_HZ period (nsecs): */ unsigned long tick_nsec; static u64 tick_length; diff --git a/trunk/kernel/time/tick-sched.c b/trunk/kernel/time/tick-sched.c index 024540f97f74..f423bdd035c2 100644 --- a/trunk/kernel/time/tick-sched.c +++ b/trunk/kernel/time/tick-sched.c @@ -372,7 +372,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, * the scheduler tick in nohz_restart_sched_tick. */ if (!ts->tick_stopped) { - select_nohz_load_balancer(1); + nohz_balance_enter_idle(cpu); calc_load_enter_idle(); ts->last_tick = hrtimer_get_expires(&ts->sched_timer); @@ -436,7 +436,8 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) if (unlikely(local_softirq_pending() && cpu_online(cpu))) { static int ratelimit; - if (ratelimit < 10) { + if (ratelimit < 10 && + (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", (unsigned int) local_softirq_pending()); ratelimit++; @@ -569,10 +570,10 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now) static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now) { /* Update jiffies first */ - select_nohz_load_balancer(0); tick_do_update_jiffies64(now); update_cpu_load_nohz(); + calc_load_exit_idle(); touch_softlockup_watchdog(); /* * Cancel the scheduled timer and restore the tick diff --git a/trunk/kernel/time/timekeeping.c b/trunk/kernel/time/timekeeping.c index f045cc50832d..d3b91e75cecd 100644 --- a/trunk/kernel/time/timekeeping.c +++ b/trunk/kernel/time/timekeeping.c @@ -65,14 +65,14 @@ struct timekeeper { * used instead. */ struct timespec wall_to_monotonic; - /* time spent in suspend */ - struct timespec total_sleep_time; - /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ - struct timespec raw_time; /* Offset clock monotonic -> clock realtime */ ktime_t offs_real; + /* time spent in suspend */ + struct timespec total_sleep_time; /* Offset clock monotonic -> clock boottime */ ktime_t offs_boot; + /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. 
*/ + struct timespec raw_time; /* Seqlock for all timekeeper values */ seqlock_t lock; }; @@ -108,13 +108,39 @@ static struct timespec tk_xtime(struct timekeeper *tk) static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts) { tk->xtime_sec = ts->tv_sec; - tk->xtime_nsec = ts->tv_nsec << tk->shift; + tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift; } static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) { tk->xtime_sec += ts->tv_sec; - tk->xtime_nsec += ts->tv_nsec << tk->shift; + tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; + tk_normalize_xtime(tk); +} + +static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) +{ + struct timespec tmp; + + /* + * Verify consistency of: offset_real = -wall_to_monotonic + * before modifying anything + */ + set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec, + -tk->wall_to_monotonic.tv_nsec); + WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64); + tk->wall_to_monotonic = wtm; + set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec); + tk->offs_real = timespec_to_ktime(tmp); +} + +static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t) +{ + /* Verify consistency before modifying */ + WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64); + + tk->total_sleep_time = t; + tk->offs_boot = timespec_to_ktime(t); } /** @@ -217,14 +243,6 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) return nsec + arch_gettimeoffset(); } -static void update_rt_offset(struct timekeeper *tk) -{ - struct timespec tmp, *wtm = &tk->wall_to_monotonic; - - set_normalized_timespec(&tmp, -wtm->tv_sec, -wtm->tv_nsec); - tk->offs_real = timespec_to_ktime(tmp); -} - /* must hold write on timekeeper.lock */ static void timekeeping_update(struct timekeeper *tk, bool clearntp) { @@ -234,12 +252,10 @@ static void timekeeping_update(struct timekeeper *tk, bool clearntp) tk->ntp_error = 0; ntp_clear(); } - update_rt_offset(tk); xt = tk_xtime(tk); update_vsyscall(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult); } - /** * timekeeping_forward_now - update clock to the current time * @@ -261,7 +277,7 @@ static void timekeeping_forward_now(struct timekeeper *tk) tk->xtime_nsec += cycle_delta * tk->mult; /* If arch requires, add in gettimeoffset() */ - tk->xtime_nsec += arch_gettimeoffset() << tk->shift; + tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift; tk_normalize_xtime(tk); @@ -277,38 +293,39 @@ static void timekeeping_forward_now(struct timekeeper *tk) */ void getnstimeofday(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; unsigned long seq; s64 nsecs = 0; WARN_ON(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); + ts->tv_sec = tk->xtime_sec; + nsecs = timekeeping_get_ns(tk); - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); + ts->tv_nsec = 0; timespec_add_ns(ts, nsecs); } EXPORT_SYMBOL(getnstimeofday); ktime_t ktime_get(void) { + struct timekeeper *tk = &timekeeper; unsigned int seq; s64 secs, nsecs; WARN_ON(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); - secs = timekeeper.xtime_sec + - timekeeper.wall_to_monotonic.tv_sec; - nsecs = timekeeping_get_ns(&timekeeper) + - timekeeper.wall_to_monotonic.tv_nsec; + seq = read_seqbegin(&tk->lock); + secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; + nsecs = timekeeping_get_ns(tk) + 
tk->wall_to_monotonic.tv_nsec; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); /* * Use ktime_set/ktime_add_ns to create a proper ktime on * 32-bit architectures without CONFIG_KTIME_SCALAR. @@ -327,21 +344,24 @@ EXPORT_SYMBOL_GPL(ktime_get); */ void ktime_get_ts(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; struct timespec tomono; + s64 nsec; unsigned int seq; WARN_ON(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); - tomono = timekeeper.wall_to_monotonic; + seq = read_seqbegin(&tk->lock); + ts->tv_sec = tk->xtime_sec; + nsec = timekeeping_get_ns(tk); + tomono = tk->wall_to_monotonic; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); - set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec, - ts->tv_nsec + tomono.tv_nsec); + ts->tv_sec += tomono.tv_sec; + ts->tv_nsec = 0; + timespec_add_ns(ts, nsec + tomono.tv_nsec); } EXPORT_SYMBOL_GPL(ktime_get_ts); @@ -358,22 +378,23 @@ EXPORT_SYMBOL_GPL(ktime_get_ts); */ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) { + struct timekeeper *tk = &timekeeper; unsigned long seq; s64 nsecs_raw, nsecs_real; WARN_ON_ONCE(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - *ts_raw = timekeeper.raw_time; - ts_real->tv_sec = timekeeper.xtime_sec; + *ts_raw = tk->raw_time; + ts_real->tv_sec = tk->xtime_sec; ts_real->tv_nsec = 0; - nsecs_raw = timekeeping_get_ns_raw(&timekeeper); - nsecs_real = timekeeping_get_ns(&timekeeper); + nsecs_raw = timekeeping_get_ns_raw(tk); + nsecs_real = timekeeping_get_ns(tk); - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); timespec_add_ns(ts_raw, nsecs_raw); timespec_add_ns(ts_real, nsecs_real); @@ -406,28 +427,28 @@ EXPORT_SYMBOL(do_gettimeofday); */ int do_settimeofday(const struct timespec *tv) { + struct timekeeper *tk = &timekeeper; struct timespec ts_delta, xt; unsigned long flags; - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) + if (!timespec_valid_strict(tv)) return -EINVAL; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(tk); - xt = tk_xtime(&timekeeper); + xt = tk_xtime(tk); ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; - timekeeper.wall_to_monotonic = - timespec_sub(timekeeper.wall_to_monotonic, ts_delta); + tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta)); - tk_set_xtime(&timekeeper, tv); + tk_set_xtime(tk, tv); - timekeeping_update(&timekeeper, true); + timekeeping_update(tk, true); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); @@ -436,7 +457,6 @@ int do_settimeofday(const struct timespec *tv) } EXPORT_SYMBOL(do_settimeofday); - /** * timekeeping_inject_offset - Adds or subtracts from the current time. 
* @tv: pointer to the timespec variable containing the offset @@ -445,28 +465,37 @@ EXPORT_SYMBOL(do_settimeofday); */ int timekeeping_inject_offset(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; unsigned long flags; + struct timespec tmp; + int ret = 0; if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) return -EINVAL; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(tk); + /* Make sure the proposed value is valid */ + tmp = timespec_add(tk_xtime(tk), *ts); + if (!timespec_valid_strict(&tmp)) { + ret = -EINVAL; + goto error; + } - tk_xtime_add(&timekeeper, ts); - timekeeper.wall_to_monotonic = - timespec_sub(timekeeper.wall_to_monotonic, *ts); + tk_xtime_add(tk, ts); + tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); - timekeeping_update(&timekeeper, true); +error: /* even if we error out, we forwarded the time, so call update */ + timekeeping_update(tk, true); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); - return 0; + return ret; } EXPORT_SYMBOL(timekeeping_inject_offset); @@ -477,23 +506,24 @@ EXPORT_SYMBOL(timekeeping_inject_offset); */ static int change_clocksource(void *data) { + struct timekeeper *tk = &timekeeper; struct clocksource *new, *old; unsigned long flags; new = (struct clocksource *) data; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(tk); if (!new->enable || new->enable(new) == 0) { - old = timekeeper.clock; - tk_setup_internals(&timekeeper, new); + old = tk->clock; + tk_setup_internals(tk, new); if (old->disable) old->disable(old); } - timekeeping_update(&timekeeper, true); + timekeeping_update(tk, true); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); return 0; } @@ -507,7 +537,9 @@ static int change_clocksource(void *data) */ void timekeeping_notify(struct clocksource *clock) { - if (timekeeper.clock == clock) + struct timekeeper *tk = &timekeeper; + + if (tk->clock == clock) return; stop_machine(change_clocksource, clock, NULL); tick_clock_notify(); @@ -536,35 +568,36 @@ EXPORT_SYMBOL_GPL(ktime_get_real); */ void getrawmonotonic(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; unsigned long seq; s64 nsecs; do { - seq = read_seqbegin(&timekeeper.lock); - nsecs = timekeeping_get_ns_raw(&timekeeper); - *ts = timekeeper.raw_time; + seq = read_seqbegin(&tk->lock); + nsecs = timekeeping_get_ns_raw(tk); + *ts = tk->raw_time; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); timespec_add_ns(ts, nsecs); } EXPORT_SYMBOL(getrawmonotonic); - /** * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres */ int timekeeping_valid_for_hres(void) { + struct timekeeper *tk = &timekeeper; unsigned long seq; int ret; do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; + ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); return ret; } @@ -574,15 +607,16 @@ int timekeeping_valid_for_hres(void) */ u64 timekeeping_max_deferment(void) { + struct timekeeper *tk = &timekeeper; unsigned long seq; u64 ret; do { - seq = 
read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - ret = timekeeper.clock->max_idle_ns; + ret = tk->clock->max_idle_ns; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); return ret; } @@ -622,46 +656,56 @@ void __attribute__((weak)) read_boot_clock(struct timespec *ts) */ void __init timekeeping_init(void) { + struct timekeeper *tk = &timekeeper; struct clocksource *clock; unsigned long flags; - struct timespec now, boot; + struct timespec now, boot, tmp; read_persistent_clock(&now); + if (!timespec_valid_strict(&now)) { + pr_warn("WARNING: Persistent clock returned invalid value!\n" + " Check your CMOS/BIOS settings.\n"); + now.tv_sec = 0; + now.tv_nsec = 0; + } + read_boot_clock(&boot); + if (!timespec_valid_strict(&boot)) { + pr_warn("WARNING: Boot clock returned invalid value!\n" + " Check your CMOS/BIOS settings.\n"); + boot.tv_sec = 0; + boot.tv_nsec = 0; + } - seqlock_init(&timekeeper.lock); + seqlock_init(&tk->lock); ntp_init(); - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); clock = clocksource_default_clock(); if (clock->enable) clock->enable(clock); - tk_setup_internals(&timekeeper, clock); + tk_setup_internals(tk, clock); - tk_set_xtime(&timekeeper, &now); - timekeeper.raw_time.tv_sec = 0; - timekeeper.raw_time.tv_nsec = 0; + tk_set_xtime(tk, &now); + tk->raw_time.tv_sec = 0; + tk->raw_time.tv_nsec = 0; if (boot.tv_sec == 0 && boot.tv_nsec == 0) - boot = tk_xtime(&timekeeper); - - set_normalized_timespec(&timekeeper.wall_to_monotonic, - -boot.tv_sec, -boot.tv_nsec); - update_rt_offset(&timekeeper); - timekeeper.total_sleep_time.tv_sec = 0; - timekeeper.total_sleep_time.tv_nsec = 0; - write_sequnlock_irqrestore(&timekeeper.lock, flags); + boot = tk_xtime(tk); + + set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec); + tk_set_wall_to_mono(tk, tmp); + + tmp.tv_sec = 0; + tmp.tv_nsec = 0; + tk_set_sleep_time(tk, tmp); + + write_sequnlock_irqrestore(&tk->lock, flags); } /* time in seconds when suspend began */ static struct timespec timekeeping_suspend_time; -static void update_sleep_time(struct timespec t) -{ - timekeeper.total_sleep_time = t; - timekeeper.offs_boot = timespec_to_ktime(t); -} - /** * __timekeeping_inject_sleeptime - Internal function to add sleep interval * @delta: pointer to a timespec delta value @@ -672,18 +716,16 @@ static void update_sleep_time(struct timespec t) static void __timekeeping_inject_sleeptime(struct timekeeper *tk, struct timespec *delta) { - if (!timespec_valid(delta)) { + if (!timespec_valid_strict(delta)) { printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid " "sleep delta value!\n"); return; } - tk_xtime_add(tk, delta); - tk->wall_to_monotonic = timespec_sub(tk->wall_to_monotonic, *delta); - update_sleep_time(timespec_add(tk->total_sleep_time, *delta)); + tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta)); + tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta)); } - /** * timekeeping_inject_sleeptime - Adds suspend interval to timeekeeping values * @delta: pointer to a timespec delta value @@ -696,6 +738,7 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, */ void timekeeping_inject_sleeptime(struct timespec *delta) { + struct timekeeper *tk = &timekeeper; unsigned long flags; struct timespec ts; @@ -704,21 +747,20 @@ void timekeeping_inject_sleeptime(struct timespec *delta) if (!(ts.tv_sec == 0 && ts.tv_nsec == 0)) return; - write_seqlock_irqsave(&timekeeper.lock, flags); + 
write_seqlock_irqsave(&tk->lock, flags); - timekeeping_forward_now(&timekeeper); + timekeeping_forward_now(tk); - __timekeeping_inject_sleeptime(&timekeeper, delta); + __timekeeping_inject_sleeptime(tk, delta); - timekeeping_update(&timekeeper, true); + timekeeping_update(tk, true); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); /* signal hrtimers about time change */ clock_was_set(); } - /** * timekeeping_resume - Resumes the generic timekeeping subsystem. * @@ -728,6 +770,7 @@ void timekeeping_inject_sleeptime(struct timespec *delta) */ static void timekeeping_resume(void) { + struct timekeeper *tk = &timekeeper; unsigned long flags; struct timespec ts; @@ -735,18 +778,18 @@ static void timekeeping_resume(void) clocksource_resume(); - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) { ts = timespec_sub(ts, timekeeping_suspend_time); - __timekeeping_inject_sleeptime(&timekeeper, &ts); + __timekeeping_inject_sleeptime(tk, &ts); } /* re-base the last cycle value */ - timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); - timekeeper.ntp_error = 0; + tk->clock->cycle_last = tk->clock->read(tk->clock); + tk->ntp_error = 0; timekeeping_suspended = 0; - timekeeping_update(&timekeeper, false); - write_sequnlock_irqrestore(&timekeeper.lock, flags); + timekeeping_update(tk, false); + write_sequnlock_irqrestore(&tk->lock, flags); touch_softlockup_watchdog(); @@ -758,14 +801,15 @@ static void timekeeping_resume(void) static int timekeeping_suspend(void) { + struct timekeeper *tk = &timekeeper; unsigned long flags; struct timespec delta, delta_delta; static struct timespec old_delta; read_persistent_clock(&timekeeping_suspend_time); - write_seqlock_irqsave(&timekeeper.lock, flags); - timekeeping_forward_now(&timekeeper); + write_seqlock_irqsave(&tk->lock, flags); + timekeeping_forward_now(tk); timekeeping_suspended = 1; /* @@ -774,7 +818,7 @@ static int timekeeping_suspend(void) * try to compensate so the difference in system time * and persistent_clock time stays close to constant. */ - delta = timespec_sub(tk_xtime(&timekeeper), timekeeping_suspend_time); + delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time); delta_delta = timespec_sub(delta, old_delta); if (abs(delta_delta.tv_sec) >= 2) { /* @@ -787,7 +831,7 @@ static int timekeeping_suspend(void) timekeeping_suspend_time = timespec_add(timekeeping_suspend_time, delta_delta); } - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); clocksource_suspend(); @@ -898,27 +942,29 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) * the error. This causes the likely below to be unlikely. * * The proper fix is to avoid rounding up by using - * the high precision timekeeper.xtime_nsec instead of + * the high precision tk->xtime_nsec instead of * xtime.tv_nsec everywhere. Fixing this will take some * time. 
*/ if (likely(error <= interval)) adj = 1; else - adj = timekeeping_bigadjust(tk, error, &interval, - &offset); - } else if (error < -interval) { - /* See comment above, this is just switched for the negative */ - error >>= 2; - if (likely(error >= -interval)) { - adj = -1; - interval = -interval; - offset = -offset; - } else - adj = timekeeping_bigadjust(tk, error, &interval, - &offset); - } else - return; + adj = timekeeping_bigadjust(tk, error, &interval, &offset); + } else { + if (error < -interval) { + /* See comment above, this is just switched for the negative */ + error >>= 2; + if (likely(error >= -interval)) { + adj = -1; + interval = -interval; + offset = -offset; + } else { + adj = timekeeping_bigadjust(tk, error, &interval, &offset); + } + } else { + goto out_adjust; + } + } if (unlikely(tk->clock->maxadj && (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) { @@ -981,6 +1027,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) tk->xtime_nsec -= offset; tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; +out_adjust: /* * It may be possible that when we entered this function, xtime_nsec * was very small. Further, if we're slightly speeding the clocksource @@ -1003,7 +1050,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) } - /** * accumulate_nsecs_to_secs - Accumulates nsecs into secs * @@ -1024,15 +1070,21 @@ static inline void accumulate_nsecs_to_secs(struct timekeeper *tk) /* Figure out if its a leap sec and apply if needed */ leap = second_overflow(tk->xtime_sec); - tk->xtime_sec += leap; - tk->wall_to_monotonic.tv_sec -= leap; - if (leap) - clock_was_set_delayed(); + if (unlikely(leap)) { + struct timespec ts; + + tk->xtime_sec += leap; + + ts.tv_sec = leap; + ts.tv_nsec = 0; + tk_set_wall_to_mono(tk, + timespec_sub(tk->wall_to_monotonic, ts)); + clock_was_set_delayed(); + } } } - /** * logarithmic_accumulation - shifted accumulation of cycles * @@ -1076,7 +1128,6 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, return offset; } - /** * update_wall_time - Uses the current clocksource to increment the wall time * @@ -1084,25 +1135,30 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, static void update_wall_time(void) { struct clocksource *clock; + struct timekeeper *tk = &timekeeper; cycle_t offset; int shift = 0, maxshift; unsigned long flags; s64 remainder; - write_seqlock_irqsave(&timekeeper.lock, flags); + write_seqlock_irqsave(&tk->lock, flags); /* Make sure we're fully resumed: */ if (unlikely(timekeeping_suspended)) goto out; - clock = timekeeper.clock; + clock = tk->clock; #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET - offset = timekeeper.cycle_interval; + offset = tk->cycle_interval; #else offset = (clock->read(clock) - clock->cycle_last) & clock->mask; #endif + /* Check if there's really nothing to do */ + if (offset < tk->cycle_interval) + goto out; + /* * With NO_HZ we may have to accumulate many cycle_intervals * (think "ticks") worth of time at once. To do this efficiently, @@ -1111,19 +1167,19 @@ static void update_wall_time(void) * chunk in one go, and then try to consume the next smaller * doubled multiple. 
*/ - shift = ilog2(offset) - ilog2(timekeeper.cycle_interval); + shift = ilog2(offset) - ilog2(tk->cycle_interval); shift = max(0, shift); /* Bound shift to one less than what overflows tick_length */ maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1; shift = min(shift, maxshift); - while (offset >= timekeeper.cycle_interval) { - offset = logarithmic_accumulation(&timekeeper, offset, shift); - if(offset < timekeeper.cycle_interval<= tk->cycle_interval) { + offset = logarithmic_accumulation(tk, offset, shift); + if (offset < tk->cycle_interval<xtime_nsec & ((1ULL << tk->shift) - 1); + tk->xtime_nsec -= remainder; + tk->xtime_nsec += 1ULL << tk->shift; + tk->ntp_error += remainder << tk->ntp_error_shift; /* * Finally, make sure that after the rounding * xtime_nsec isn't larger than NSEC_PER_SEC */ - accumulate_nsecs_to_secs(&timekeeper); + accumulate_nsecs_to_secs(tk); - timekeeping_update(&timekeeper, false); + timekeeping_update(tk, false); out: - write_sequnlock_irqrestore(&timekeeper.lock, flags); + write_sequnlock_irqrestore(&tk->lock, flags); } @@ -1166,18 +1222,18 @@ static void update_wall_time(void) */ void getboottime(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; struct timespec boottime = { - .tv_sec = timekeeper.wall_to_monotonic.tv_sec + - timekeeper.total_sleep_time.tv_sec, - .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec + - timekeeper.total_sleep_time.tv_nsec + .tv_sec = tk->wall_to_monotonic.tv_sec + + tk->total_sleep_time.tv_sec, + .tv_nsec = tk->wall_to_monotonic.tv_nsec + + tk->total_sleep_time.tv_nsec }; set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); } EXPORT_SYMBOL_GPL(getboottime); - /** * get_monotonic_boottime - Returns monotonic time since boot * @ts: pointer to the timespec to be set @@ -1189,22 +1245,25 @@ EXPORT_SYMBOL_GPL(getboottime); */ void get_monotonic_boottime(struct timespec *ts) { + struct timekeeper *tk = &timekeeper; struct timespec tomono, sleep; + s64 nsec; unsigned int seq; WARN_ON(timekeeping_suspended); do { - seq = read_seqbegin(&timekeeper.lock); - ts->tv_sec = timekeeper.xtime_sec; - ts->tv_nsec = timekeeping_get_ns(&timekeeper); - tomono = timekeeper.wall_to_monotonic; - sleep = timekeeper.total_sleep_time; + seq = read_seqbegin(&tk->lock); + ts->tv_sec = tk->xtime_sec; + nsec = timekeeping_get_ns(tk); + tomono = tk->wall_to_monotonic; + sleep = tk->total_sleep_time; - } while (read_seqretry(&timekeeper.lock, seq)); + } while (read_seqretry(&tk->lock, seq)); - set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec, - ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec); + ts->tv_sec += tomono.tv_sec + sleep.tv_sec; + ts->tv_nsec = 0; + timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec); } EXPORT_SYMBOL_GPL(get_monotonic_boottime); @@ -1231,31 +1290,38 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime); */ void monotonic_to_bootbased(struct timespec *ts) { - *ts = timespec_add(*ts, timekeeper.total_sleep_time); + struct timekeeper *tk = &timekeeper; + + *ts = timespec_add(*ts, tk->total_sleep_time); } EXPORT_SYMBOL_GPL(monotonic_to_bootbased); unsigned long get_seconds(void) { - return timekeeper.xtime_sec; + struct timekeeper *tk = &timekeeper; + + return tk->xtime_sec; } EXPORT_SYMBOL(get_seconds); struct timespec __current_kernel_time(void) { - return tk_xtime(&timekeeper); + struct timekeeper *tk = &timekeeper; + + return tk_xtime(tk); } struct timespec current_kernel_time(void) { + struct timekeeper *tk = &timekeeper; struct timespec now; unsigned long seq; do { - seq = 
read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - now = tk_xtime(&timekeeper); - } while (read_seqretry(&timekeeper.lock, seq)); + now = tk_xtime(tk); + } while (read_seqretry(&tk->lock, seq)); return now; } @@ -1263,15 +1329,16 @@ EXPORT_SYMBOL(current_kernel_time); struct timespec get_monotonic_coarse(void) { + struct timekeeper *tk = &timekeeper; struct timespec now, mono; unsigned long seq; do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - now = tk_xtime(&timekeeper); - mono = timekeeper.wall_to_monotonic; - } while (read_seqretry(&timekeeper.lock, seq)); + now = tk_xtime(tk); + mono = tk->wall_to_monotonic; + } while (read_seqretry(&tk->lock, seq)); set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, now.tv_nsec + mono.tv_nsec); @@ -1300,14 +1367,15 @@ void do_timer(unsigned long ticks) void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, struct timespec *wtom, struct timespec *sleep) { + struct timekeeper *tk = &timekeeper; unsigned long seq; do { - seq = read_seqbegin(&timekeeper.lock); - *xtim = tk_xtime(&timekeeper); - *wtom = timekeeper.wall_to_monotonic; - *sleep = timekeeper.total_sleep_time; - } while (read_seqretry(&timekeeper.lock, seq)); + seq = read_seqbegin(&tk->lock); + *xtim = tk_xtime(tk); + *wtom = tk->wall_to_monotonic; + *sleep = tk->total_sleep_time; + } while (read_seqretry(&tk->lock, seq)); } #ifdef CONFIG_HIGH_RES_TIMERS @@ -1321,19 +1389,20 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, */ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) { + struct timekeeper *tk = &timekeeper; ktime_t now; unsigned int seq; u64 secs, nsecs; do { - seq = read_seqbegin(&timekeeper.lock); + seq = read_seqbegin(&tk->lock); - secs = timekeeper.xtime_sec; - nsecs = timekeeping_get_ns(&timekeeper); + secs = tk->xtime_sec; + nsecs = timekeeping_get_ns(tk); - *offs_real = timekeeper.offs_real; - *offs_boot = timekeeper.offs_boot; - } while (read_seqretry(&timekeeper.lock, seq)); + *offs_real = tk->offs_real; + *offs_boot = tk->offs_boot; + } while (read_seqretry(&tk->lock, seq)); now = ktime_add_ns(ktime_set(secs, 0), nsecs); now = ktime_sub(now, *offs_real); @@ -1346,19 +1415,19 @@ ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot) */ ktime_t ktime_get_monotonic_offset(void) { + struct timekeeper *tk = &timekeeper; unsigned long seq; struct timespec wtom; do { - seq = read_seqbegin(&timekeeper.lock); - wtom = timekeeper.wall_to_monotonic; - } while (read_seqretry(&timekeeper.lock, seq)); + seq = read_seqbegin(&tk->lock); + wtom = tk->wall_to_monotonic; + } while (read_seqretry(&tk->lock, seq)); return timespec_to_ktime(wtom); } EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); - /** * xtime_update() - advances the timekeeping infrastructure * @ticks: number of ticks, that have elapsed since the last call. 
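
A note on the pattern used throughout the timekeeping.c hunks above: every reader is converted to go through a local struct timekeeper pointer, and each of those readers shares the same seqlock read shape, snapshotting the fields it needs inside a read_seqbegin()/read_seqretry() loop and retrying if a writer got in between, so readers never block the writer. The following is a minimal sketch of that read/write pattern only; it is not part of the patch, and the snapshot structure and field names are invented stand-ins for the real struct timekeeper fields:

#include <linux/seqlock.h>

/*
 * Illustrative sketch only -- the struct and fields below are made up;
 * they stand in for the timekeeper fields the real readers snapshot.
 */
struct snapshot {
	seqlock_t	lock;	/* writers take this, readers only sample it */
	long		sec;
	long		nsec;
};

static void snapshot_init(struct snapshot *s)
{
	seqlock_init(&s->lock);
	s->sec = 0;
	s->nsec = 0;
}

static void snapshot_read(struct snapshot *s, long *sec, long *nsec)
{
	unsigned long seq;

	do {
		/* begin a lockless read attempt */
		seq = read_seqbegin(&s->lock);
		*sec = s->sec;
		*nsec = s->nsec;
		/* retry if a writer changed the data while we read it */
	} while (read_seqretry(&s->lock, seq));
}

static void snapshot_write(struct snapshot *s, long sec, long nsec)
{
	unsigned long flags;

	/* writers serialize against each other and bump the sequence count */
	write_seqlock_irqsave(&s->lock, flags);
	s->sec = sec;
	s->nsec = nsec;
	write_sequnlock_irqrestore(&s->lock, flags);
}

Taking the local tk pointer even while the global timekeeper is still the only instance also keeps a later step of passing a timekeeper pointer into these helpers a mechanical change, which is presumably why the conversion introduces it uniformly here.
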
diff --git a/trunk/kernel/timer.c b/trunk/kernel/timer.c index a61c09374eba..d5de1b2292aa 100644 --- a/trunk/kernel/timer.c +++ b/trunk/kernel/timer.c @@ -92,24 +92,25 @@ static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases; /* Functions below help us manage 'deferrable' flag */ static inline unsigned int tbase_get_deferrable(struct tvec_base *base) { - return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG); + return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE); } -static inline struct tvec_base *tbase_get_base(struct tvec_base *base) +static inline unsigned int tbase_get_irqsafe(struct tvec_base *base) { - return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG)); + return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE); } -static inline void timer_set_deferrable(struct timer_list *timer) +static inline struct tvec_base *tbase_get_base(struct tvec_base *base) { - timer->base = TBASE_MAKE_DEFERRED(timer->base); + return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK)); } static inline void timer_set_base(struct timer_list *timer, struct tvec_base *new_base) { - timer->base = (struct tvec_base *)((unsigned long)(new_base) | - tbase_get_deferrable(timer->base)); + unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK; + + timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags); } static unsigned long round_jiffies_common(unsigned long j, int cpu, @@ -563,16 +564,14 @@ static inline void debug_timer_assert_init(struct timer_list *timer) debug_object_assert_init(timer, &timer_debug_descr); } -static void __init_timer(struct timer_list *timer, - const char *name, - struct lock_class_key *key); +static void do_init_timer(struct timer_list *timer, unsigned int flags, + const char *name, struct lock_class_key *key); -void init_timer_on_stack_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key) +void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags, + const char *name, struct lock_class_key *key) { debug_object_init_on_stack(timer, &timer_debug_descr); - __init_timer(timer, name, key); + do_init_timer(timer, flags, name, key); } EXPORT_SYMBOL_GPL(init_timer_on_stack_key); @@ -613,12 +612,13 @@ static inline void debug_assert_init(struct timer_list *timer) debug_timer_assert_init(timer); } -static void __init_timer(struct timer_list *timer, - const char *name, - struct lock_class_key *key) +static void do_init_timer(struct timer_list *timer, unsigned int flags, + const char *name, struct lock_class_key *key) { + struct tvec_base *base = __raw_get_cpu_var(tvec_bases); + timer->entry.next = NULL; - timer->base = __raw_get_cpu_var(tvec_bases); + timer->base = (void *)((unsigned long)base | flags); timer->slack = -1; #ifdef CONFIG_TIMER_STATS timer->start_site = NULL; @@ -628,22 +628,10 @@ static void __init_timer(struct timer_list *timer, lockdep_init_map(&timer->lockdep_map, name, key, 0); } -void setup_deferrable_timer_on_stack_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key, - void (*function)(unsigned long), - unsigned long data) -{ - timer->function = function; - timer->data = data; - init_timer_on_stack_key(timer, name, key); - timer_set_deferrable(timer); -} -EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key); - /** * init_timer_key - initialize a timer * @timer: the timer to be initialized + * @flags: timer flags * @name: name of the timer * @key: lockdep class key of the fake lock used for tracking timer * 
sync lock dependencies @@ -651,24 +639,14 @@ EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key); * init_timer_key() must be done to a timer prior calling *any* of the * other timer functions. */ -void init_timer_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key) +void init_timer_key(struct timer_list *timer, unsigned int flags, + const char *name, struct lock_class_key *key) { debug_init(timer); - __init_timer(timer, name, key); + do_init_timer(timer, flags, name, key); } EXPORT_SYMBOL(init_timer_key); -void init_timer_deferrable_key(struct timer_list *timer, - const char *name, - struct lock_class_key *key) -{ - init_timer_key(timer, name, key); - timer_set_deferrable(timer); -} -EXPORT_SYMBOL(init_timer_deferrable_key); - static inline void detach_timer(struct timer_list *timer, bool clear_pending) { struct list_head *entry = &timer->entry; @@ -686,7 +664,7 @@ detach_expired_timer(struct timer_list *timer, struct tvec_base *base) { detach_timer(timer, true); if (!tbase_get_deferrable(timer->base)) - timer->base->active_timers--; + base->active_timers--; } static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, @@ -697,7 +675,7 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base, detach_timer(timer, clear_pending); if (!tbase_get_deferrable(timer->base)) { - timer->base->active_timers--; + base->active_timers--; if (timer->expires == base->next_timer) base->next_timer = base->timer_jiffies; } @@ -1029,14 +1007,14 @@ EXPORT_SYMBOL(try_to_del_timer_sync); * * Synchronization rules: Callers must prevent restarting of the timer, * otherwise this function is meaningless. It must not be called from - * interrupt contexts. The caller must not hold locks which would prevent - * completion of the timer's handler. The timer's handler must not call - * add_timer_on(). Upon exit the timer is not queued and the handler is - * not running on any CPU. + * interrupt contexts unless the timer is an irqsafe one. The caller must + * not hold locks which would prevent completion of the timer's + * handler. The timer's handler must not call add_timer_on(). Upon exit the + * timer is not queued and the handler is not running on any CPU. * - * Note: You must not hold locks that are held in interrupt context - * while calling this function. Even if the lock has nothing to do - * with the timer in question. Here's why: + * Note: For !irqsafe timers, you must not hold locks that are held in + * interrupt context while calling this function. Even if the lock has + * nothing to do with the timer in question. Here's why: * * CPU0 CPU1 * ---- ---- @@ -1073,7 +1051,7 @@ int del_timer_sync(struct timer_list *timer) * don't use it in hardirq context, because it * could lead to deadlock. 
*/ - WARN_ON(in_irq()); + WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base)); for (;;) { int ret = try_to_del_timer_sync(timer); if (ret >= 0) @@ -1180,19 +1158,27 @@ static inline void __run_timers(struct tvec_base *base) while (!list_empty(head)) { void (*fn)(unsigned long); unsigned long data; + bool irqsafe; timer = list_first_entry(head, struct timer_list,entry); fn = timer->function; data = timer->data; + irqsafe = tbase_get_irqsafe(timer->base); timer_stats_account_timer(timer); base->running_timer = timer; detach_expired_timer(timer, base); - spin_unlock_irq(&base->lock); - call_timer_fn(timer, fn, data); - spin_lock_irq(&base->lock); + if (irqsafe) { + spin_unlock(&base->lock); + call_timer_fn(timer, fn, data); + spin_lock(&base->lock); + } else { + spin_unlock_irq(&base->lock); + call_timer_fn(timer, fn, data); + spin_lock_irq(&base->lock); + } } } base->running_timer = NULL; @@ -1407,13 +1393,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds) #endif -#ifndef __alpha__ - -/* - * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this - * should be moved into arch/i386 instead? - */ - /** * sys_getpid - return the thread group id of the current process * @@ -1469,8 +1448,6 @@ SYSCALL_DEFINE0(getegid) return from_kgid_munged(current_user_ns(), current_egid()); } -#endif - static void process_timeout(unsigned long __data) { wake_up_process((struct task_struct *)__data); @@ -1800,9 +1777,13 @@ static struct notifier_block __cpuinitdata timers_nb = { void __init init_timers(void) { - int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, - (void *)(long)smp_processor_id()); + int err; + + /* ensure there are enough low bits for flags in timer->base pointer */ + BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK); + err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); init_timer_stats(); BUG_ON(err != NOTIFY_OK); diff --git a/trunk/kernel/trace/Kconfig b/trunk/kernel/trace/Kconfig index 8c4c07071cc5..4cea4f41c1d9 100644 --- a/trunk/kernel/trace/Kconfig +++ b/trunk/kernel/trace/Kconfig @@ -49,6 +49,11 @@ config HAVE_SYSCALL_TRACEPOINTS help See Documentation/trace/ftrace-design.txt +config HAVE_FENTRY + bool + help + Arch supports the gcc options -pg with -mfentry + config HAVE_C_RECORDMCOUNT bool help @@ -57,8 +62,12 @@ config HAVE_C_RECORDMCOUNT config TRACER_MAX_TRACE bool +config TRACE_CLOCK + bool + config RING_BUFFER bool + select TRACE_CLOCK config FTRACE_NMI_ENTER bool @@ -109,6 +118,7 @@ config TRACING select NOP_TRACER select BINARY_PRINTF select EVENT_TRACING + select TRACE_CLOCK config GENERIC_TRACER bool diff --git a/trunk/kernel/trace/Makefile b/trunk/kernel/trace/Makefile index b831087c8200..d7e2068e4b71 100644 --- a/trunk/kernel/trace/Makefile +++ b/trunk/kernel/trace/Makefile @@ -5,10 +5,12 @@ ifdef CONFIG_FUNCTION_TRACER ORIG_CFLAGS := $(KBUILD_CFLAGS) KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) +ifdef CONFIG_FTRACE_SELFTEST # selftest needs instrumentation CFLAGS_trace_selftest_dynamic.o = -pg obj-y += trace_selftest_dynamic.o endif +endif # If unlikely tracing is enabled, do not trace these files ifdef CONFIG_TRACING_BRANCHES @@ -17,11 +19,7 @@ endif CFLAGS_trace_events_filter.o := -I$(src) -# -# Make the trace clocks available generally: it's infrastructure -# relied on by ptrace for example: -# -obj-y += trace_clock.o +obj-$(CONFIG_TRACE_CLOCK) += trace_clock.o obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o obj-$(CONFIG_RING_BUFFER) += ring_buffer.o diff --git 
a/trunk/kernel/trace/ftrace.c b/trunk/kernel/trace/ftrace.c index b4f20fba09fc..9dcf15d38380 100644 --- a/trunk/kernel/trace/ftrace.c +++ b/trunk/kernel/trace/ftrace.c @@ -64,12 +64,20 @@ #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL) +static struct ftrace_ops ftrace_list_end __read_mostly = { + .func = ftrace_stub, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, +}; + /* ftrace_enabled is a method to turn ftrace on or off */ int ftrace_enabled __read_mostly; static int last_ftrace_enabled; /* Quick disabling of function tracer. */ -int function_trace_stop; +int function_trace_stop __read_mostly; + +/* Current function tracing op */ +struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end; /* List for set_ftrace_pid's pids. */ LIST_HEAD(ftrace_pids); @@ -86,22 +94,43 @@ static int ftrace_disabled __read_mostly; static DEFINE_MUTEX(ftrace_lock); -static struct ftrace_ops ftrace_list_end __read_mostly = { - .func = ftrace_stub, -}; - static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end; static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; -static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub; -ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub; ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; static struct ftrace_ops global_ops; static struct ftrace_ops control_ops; -static void -ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); +#if ARCH_SUPPORTS_FTRACE_OPS +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs); +#else +/* See comment below, where ftrace_ops_list_func is defined */ +static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip); +#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops) +#endif + +/** + * ftrace_nr_registered_ops - return number of ops registered + * + * Returns the number of ftrace_ops registered and tracing functions + */ +int ftrace_nr_registered_ops(void) +{ + struct ftrace_ops *ops; + int cnt = 0; + + mutex_lock(&ftrace_lock); + + for (ops = ftrace_ops_list; + ops != &ftrace_list_end; ops = ops->next) + cnt++; + + mutex_unlock(&ftrace_lock); + + return cnt; +} /* * Traverse the ftrace_global_list, invoking all entries. The reason that we @@ -112,29 +141,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip); * * Silly Alpha and silly pointer-speculation compiler optimizations! 
*/ -static void ftrace_global_list_func(unsigned long ip, - unsigned long parent_ip) +static void +ftrace_global_list_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs) { - struct ftrace_ops *op; - if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT))) return; trace_recursion_set(TRACE_GLOBAL_BIT); op = rcu_dereference_raw(ftrace_global_list); /*see above*/ while (op != &ftrace_list_end) { - op->func(ip, parent_ip); + op->func(ip, parent_ip, op, regs); op = rcu_dereference_raw(op->next); /*see above*/ }; trace_recursion_clear(TRACE_GLOBAL_BIT); } -static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip) +static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs) { if (!test_tsk_trace_trace(current)) return; - ftrace_pid_function(ip, parent_ip); + ftrace_pid_function(ip, parent_ip, op, regs); } static void set_ftrace_pid_function(ftrace_func_t func) @@ -153,25 +182,9 @@ static void set_ftrace_pid_function(ftrace_func_t func) void clear_ftrace_function(void) { ftrace_trace_function = ftrace_stub; - __ftrace_trace_function = ftrace_stub; - __ftrace_trace_function_delay = ftrace_stub; ftrace_pid_function = ftrace_stub; } -#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST -/* - * For those archs that do not test ftrace_trace_stop in their - * mcount call site, we need to do it from C. - */ -static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) -{ - if (function_trace_stop) - return; - - __ftrace_trace_function(ip, parent_ip); -} -#endif - static void control_ops_disable_all(struct ftrace_ops *ops) { int cpu; @@ -230,28 +243,27 @@ static void update_ftrace_function(void) /* * If we are at the end of the list and this ops is - * not dynamic, then have the mcount trampoline call - * the function directly + * recursion safe and not dynamic and the arch supports passing ops, + * then have the mcount trampoline call the function directly. */ if (ftrace_ops_list == &ftrace_list_end || (ftrace_ops_list->next == &ftrace_list_end && - !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC))) + !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) && + (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) && + !FTRACE_FORCE_LIST_FUNC)) { + /* Set the ftrace_ops that the arch callback uses */ + if (ftrace_ops_list == &global_ops) + function_trace_op = ftrace_global_list; + else + function_trace_op = ftrace_ops_list; func = ftrace_ops_list->func; - else + } else { + /* Just use the default ftrace_ops */ + function_trace_op = &ftrace_list_end; func = ftrace_ops_list_func; + } -#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST ftrace_trace_function = func; -#else -#ifdef CONFIG_DYNAMIC_FTRACE - /* do not update till all functions have been modified */ - __ftrace_trace_function_delay = func; -#else - __ftrace_trace_function = func; -#endif - ftrace_trace_function = - (func == ftrace_stub) ? func : ftrace_test_stop_func; -#endif } static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops) @@ -325,6 +337,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops) if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK) return -EINVAL; +#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS + /* + * If the ftrace_ops specifies SAVE_REGS, then it only can be used + * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set. + * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant. 
+ */ + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS && + !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)) + return -EINVAL; + + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED) + ops->flags |= FTRACE_OPS_FL_SAVE_REGS; +#endif + if (!core_kernel_data((unsigned long)ops)) ops->flags |= FTRACE_OPS_FL_DYNAMIC; @@ -773,7 +799,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) } static void -function_profile_call(unsigned long ip, unsigned long parent_ip) +function_profile_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) { struct ftrace_profile_stat *stat; struct ftrace_profile *rec; @@ -803,7 +830,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip) #ifdef CONFIG_FUNCTION_GRAPH_TRACER static int profile_graph_entry(struct ftrace_graph_ent *trace) { - function_profile_call(trace->func, 0); + function_profile_call(trace->func, 0, NULL, NULL); return 1; } @@ -863,6 +890,7 @@ static void unregister_ftrace_profiler(void) #else static struct ftrace_ops ftrace_profile_ops __read_mostly = { .func = function_profile_call, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static int register_ftrace_profiler(void) @@ -1045,6 +1073,7 @@ static struct ftrace_ops global_ops = { .func = ftrace_stub, .notrace_hash = EMPTY_HASH, .filter_hash = EMPTY_HASH, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static DEFINE_MUTEX(ftrace_regex_lock); @@ -1525,6 +1554,12 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, rec->flags++; if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) return; + /* + * If any ops wants regs saved for this function + * then all ops will get saved regs. + */ + if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) + rec->flags |= FTRACE_FL_REGS; } else { if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) return; @@ -1616,18 +1651,59 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) if (enable && (rec->flags & ~FTRACE_FL_MASK)) flag = FTRACE_FL_ENABLED; + /* + * If enabling and the REGS flag does not match the REGS_EN, then + * do not ignore this record. Set flags to fail the compare against + * ENABLED. + */ + if (flag && + (!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN))) + flag |= FTRACE_FL_REGS; + /* If the state of this record hasn't changed, then do nothing */ if ((rec->flags & FTRACE_FL_ENABLED) == flag) return FTRACE_UPDATE_IGNORE; if (flag) { - if (update) + /* Save off if rec is being enabled (for return value) */ + flag ^= rec->flags & FTRACE_FL_ENABLED; + + if (update) { rec->flags |= FTRACE_FL_ENABLED; - return FTRACE_UPDATE_MAKE_CALL; + if (flag & FTRACE_FL_REGS) { + if (rec->flags & FTRACE_FL_REGS) + rec->flags |= FTRACE_FL_REGS_EN; + else + rec->flags &= ~FTRACE_FL_REGS_EN; + } + } + + /* + * If this record is being updated from a nop, then + * return UPDATE_MAKE_CALL. + * Otherwise, if the EN flag is set, then return + * UPDATE_MODIFY_CALL_REGS to tell the caller to convert + * from the non-save regs, to a save regs function. + * Otherwise, + * return UPDATE_MODIFY_CALL to tell the caller to convert + * from the save regs, to a non-save regs function. 
+ */ + if (flag & FTRACE_FL_ENABLED) + return FTRACE_UPDATE_MAKE_CALL; + else if (rec->flags & FTRACE_FL_REGS_EN) + return FTRACE_UPDATE_MODIFY_CALL_REGS; + else + return FTRACE_UPDATE_MODIFY_CALL; } - if (update) - rec->flags &= ~FTRACE_FL_ENABLED; + if (update) { + /* If there's no more users, clear all flags */ + if (!(rec->flags & ~FTRACE_FL_MASK)) + rec->flags = 0; + else + /* Just disable the record (keep REGS state) */ + rec->flags &= ~FTRACE_FL_ENABLED; + } return FTRACE_UPDATE_MAKE_NOP; } @@ -1662,13 +1738,17 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable) static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable) { + unsigned long ftrace_old_addr; unsigned long ftrace_addr; int ret; - ftrace_addr = (unsigned long)FTRACE_ADDR; - ret = ftrace_update_record(rec, enable); + if (rec->flags & FTRACE_FL_REGS) + ftrace_addr = (unsigned long)FTRACE_REGS_ADDR; + else + ftrace_addr = (unsigned long)FTRACE_ADDR; + switch (ret) { case FTRACE_UPDATE_IGNORE: return 0; @@ -1678,6 +1758,15 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) case FTRACE_UPDATE_MAKE_NOP: return ftrace_make_nop(NULL, rec, ftrace_addr); + + case FTRACE_UPDATE_MODIFY_CALL_REGS: + case FTRACE_UPDATE_MODIFY_CALL: + if (rec->flags & FTRACE_FL_REGS) + ftrace_old_addr = (unsigned long)FTRACE_ADDR; + else + ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR; + + return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr); } return -1; /* unknow ftrace bug */ @@ -1882,16 +1971,6 @@ static void ftrace_run_update_code(int command) */ arch_ftrace_update_code(command); -#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST - /* - * For archs that call ftrace_test_stop_func(), we must - * wait till after we update all the function callers - * before we update the callback. This keeps different - * ops that record different functions from corrupting - * each other. - */ - __ftrace_trace_function = __ftrace_trace_function_delay; -#endif function_trace_stop--; ret = ftrace_arch_code_modify_post_process(); @@ -2441,8 +2520,9 @@ static int t_show(struct seq_file *m, void *v) seq_printf(m, "%ps", (void *)rec->ip); if (iter->flags & FTRACE_ITER_ENABLED) - seq_printf(m, " (%ld)", - rec->flags & ~FTRACE_FL_MASK); + seq_printf(m, " (%ld)%s", + rec->flags & ~FTRACE_FL_MASK, + rec->flags & FTRACE_FL_REGS ? 
" R" : ""); seq_printf(m, "\n"); return 0; @@ -2790,8 +2870,8 @@ static int __init ftrace_mod_cmd_init(void) } device_initcall(ftrace_mod_cmd_init); -static void -function_trace_probe_call(unsigned long ip, unsigned long parent_ip) +static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) { struct ftrace_func_probe *entry; struct hlist_head *hhd; @@ -3162,8 +3242,27 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, } static int -ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, - int reset, int enable) +ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) +{ + struct ftrace_func_entry *entry; + + if (!ftrace_location(ip)) + return -EINVAL; + + if (remove) { + entry = ftrace_lookup_ip(hash, ip); + if (!entry) + return -ENOENT; + free_hash_entry(hash, entry); + return 0; + } + + return add_hash_entry(hash, ip); +} + +static int +ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, + unsigned long ip, int remove, int reset, int enable) { struct ftrace_hash **orig_hash; struct ftrace_hash *hash; @@ -3192,6 +3291,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, ret = -EINVAL; goto out_regex_unlock; } + if (ip) { + ret = ftrace_match_addr(hash, ip, remove); + if (ret < 0) + goto out_regex_unlock; + } mutex_lock(&ftrace_lock); ret = ftrace_hash_move(ops, enable, orig_hash, hash); @@ -3208,6 +3312,37 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, return ret; } +static int +ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove, + int reset, int enable) +{ + return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable); +} + +/** + * ftrace_set_filter_ip - set a function to filter on in ftrace by address + * @ops - the ops to set the filter with + * @ip - the address to add to or remove from the filter. + * @remove - non zero to remove the ip from the filter + * @reset - non zero to reset all filters before applying this filter. + * + * Filters denote which functions should be enabled when tracing is enabled + * If @ip is NULL, it failes to update filter. 
+ */ +int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip, + int remove, int reset) +{ + return ftrace_set_addr(ops, ip, remove, reset, 1); +} +EXPORT_SYMBOL_GPL(ftrace_set_filter_ip); + +static int +ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, + int reset, int enable) +{ + return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable); +} + /** * ftrace_set_filter - set a function to filter on in ftrace * @ops - the ops to set the filter with @@ -3912,6 +4047,7 @@ void __init ftrace_init(void) static struct ftrace_ops global_ops = { .func = ftrace_stub, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static int __init ftrace_nodyn_init(void) @@ -3942,10 +4078,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) #endif /* CONFIG_DYNAMIC_FTRACE */ static void -ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) +ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs) { - struct ftrace_ops *op; - if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) return; @@ -3959,7 +4094,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) while (op != &ftrace_list_end) { if (!ftrace_function_local_disabled(op) && ftrace_ops_test(op, ip)) - op->func(ip, parent_ip); + op->func(ip, parent_ip, op, regs); op = rcu_dereference_raw(op->next); }; @@ -3969,13 +4104,18 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops control_ops = { .func = ftrace_ops_control_func, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; -static void -ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) +static inline void +__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ignored, struct pt_regs *regs) { struct ftrace_ops *op; + if (function_trace_stop) + return; + if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) return; @@ -3988,13 +4128,39 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) op = rcu_dereference_raw(ftrace_ops_list); while (op != &ftrace_list_end) { if (ftrace_ops_test(op, ip)) - op->func(ip, parent_ip); + op->func(ip, parent_ip, op, regs); op = rcu_dereference_raw(op->next); }; preempt_enable_notrace(); trace_recursion_clear(TRACE_INTERNAL_BIT); } +/* + * Some archs only support passing ip and parent_ip. Even though + * the list function ignores the op parameter, we do not want any + * C side effects, where a function is called without the caller + * sending a third parameter. + * Archs are to support both the regs and ftrace_ops at the same time. + * If they support ftrace_ops, it is assumed they support regs. + * If call backs want to use regs, they must either check for regs + * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS. + * Note, ARCH_SUPPORT_SAVE_REGS expects a full regs to be saved. + * An architecture can pass partial regs with ftrace_ops and still + * set the ARCH_SUPPORT_FTARCE_OPS. 
+ */ +#if ARCH_SUPPORTS_FTRACE_OPS +static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *regs) +{ + __ftrace_ops_list_func(ip, parent_ip, NULL, regs); +} +#else +static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) +{ + __ftrace_ops_list_func(ip, parent_ip, NULL, NULL); +} +#endif + static void clear_ftrace_swapper(void) { struct task_struct *p; diff --git a/trunk/kernel/trace/ring_buffer.c b/trunk/kernel/trace/ring_buffer.c index 49491fa7daa2..b32ed0e385a5 100644 --- a/trunk/kernel/trace/ring_buffer.c +++ b/trunk/kernel/trace/ring_buffer.c @@ -2816,7 +2816,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable); * to the buffer after this will fail and return NULL. * * This is different than ring_buffer_record_disable() as - * it works like an on/off switch, where as the disable() verison + * it works like an on/off switch, where as the disable() version * must be paired with a enable(). */ void ring_buffer_record_off(struct ring_buffer *buffer) @@ -2839,7 +2839,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off); * ring_buffer_record_off(). * * This is different than ring_buffer_record_enable() as - * it works like an on/off switch, where as the enable() verison + * it works like an on/off switch, where as the enable() version * must be paired with a disable(). */ void ring_buffer_record_on(struct ring_buffer *buffer) diff --git a/trunk/kernel/trace/trace.c b/trunk/kernel/trace/trace.c index 5c38c81496ce..1ec5c1dab629 100644 --- a/trunk/kernel/trace/trace.c +++ b/trunk/kernel/trace/trace.c @@ -328,7 +328,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME | TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | - TRACE_ITER_IRQ_INFO; + TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS; static int trace_stop_count; static DEFINE_RAW_SPINLOCK(tracing_start_lock); @@ -426,15 +426,15 @@ __setup("trace_buf_size=", set_buf_size); static int __init set_tracing_thresh(char *str) { - unsigned long threshhold; + unsigned long threshold; int ret; if (!str) return 0; - ret = strict_strtoul(str, 0, &threshhold); + ret = strict_strtoul(str, 0, &threshold); if (ret < 0) return 0; - tracing_thresh = threshhold * 1000; + tracing_thresh = threshold * 1000; return 1; } __setup("tracing_thresh=", set_tracing_thresh); @@ -470,6 +470,7 @@ static const char *trace_options[] = { "overwrite", "disable_on_free", "irq-info", + "markers", NULL }; @@ -3886,6 +3887,9 @@ tracing_mark_write(struct file *filp, const char __user *ubuf, if (tracing_disabled) return -EINVAL; + if (!(trace_flags & TRACE_ITER_MARKERS)) + return -EINVAL; + if (cnt > TRACE_BUF_SIZE) cnt = TRACE_BUF_SIZE; diff --git a/trunk/kernel/trace/trace.h b/trunk/kernel/trace/trace.h index 55e1f7f0db12..63a2da0b9a6e 100644 --- a/trunk/kernel/trace/trace.h +++ b/trunk/kernel/trace/trace.h @@ -472,11 +472,11 @@ extern void trace_find_cmdline(int pid, char comm[]); #ifdef CONFIG_DYNAMIC_FTRACE extern unsigned long ftrace_update_tot_cnt; +#endif #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func extern int DYN_FTRACE_TEST_NAME(void); #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 extern int DYN_FTRACE_TEST_NAME2(void); -#endif extern int ring_buffer_expanded; extern bool tracing_selftest_disabled; @@ -680,6 +680,7 @@ enum trace_iterator_flags { TRACE_ITER_OVERWRITE = 0x200000, TRACE_ITER_STOP_ON_FREE = 0x400000, 
TRACE_ITER_IRQ_INFO = 0x800000, + TRACE_ITER_MARKERS = 0x1000000, }; /* diff --git a/trunk/kernel/trace/trace_event_perf.c b/trunk/kernel/trace/trace_event_perf.c index fee3752ae8f6..84b1e045faba 100644 --- a/trunk/kernel/trace/trace_event_perf.c +++ b/trunk/kernel/trace/trace_event_perf.c @@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); #ifdef CONFIG_FUNCTION_TRACER static void -perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip) +perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *pt_regs) { struct ftrace_entry *entry; struct hlist_head *head; @@ -281,7 +282,7 @@ perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip) head = this_cpu_ptr(event_function.perf_events); perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, 0, - 1, ®s, head); + 1, ®s, head, NULL); #undef ENTRY_SIZE } diff --git a/trunk/kernel/trace/trace_events.c b/trunk/kernel/trace/trace_events.c index 29111da1d100..d608d09d08c0 100644 --- a/trunk/kernel/trace/trace_events.c +++ b/trunk/kernel/trace/trace_events.c @@ -1199,6 +1199,31 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events, return 0; } +static void event_remove(struct ftrace_event_call *call) +{ + ftrace_event_enable_disable(call, 0); + if (call->event.funcs) + __unregister_ftrace_event(&call->event); + list_del(&call->list); +} + +static int event_init(struct ftrace_event_call *call) +{ + int ret = 0; + + if (WARN_ON(!call->name)) + return -EINVAL; + + if (call->class->raw_init) { + ret = call->class->raw_init(call); + if (ret < 0 && ret != -ENOSYS) + pr_warn("Could not initialize trace events/%s\n", + call->name); + } + + return ret; +} + static int __trace_add_event_call(struct ftrace_event_call *call, struct module *mod, const struct file_operations *id, @@ -1209,19 +1234,9 @@ __trace_add_event_call(struct ftrace_event_call *call, struct module *mod, struct dentry *d_events; int ret; - /* The linker may leave blanks */ - if (!call->name) - return -EINVAL; - - if (call->class->raw_init) { - ret = call->class->raw_init(call); - if (ret < 0) { - if (ret != -ENOSYS) - pr_warning("Could not initialize trace events/%s\n", - call->name); - return ret; - } - } + ret = event_init(call); + if (ret < 0) + return ret; d_events = event_trace_events_dir(); if (!d_events) @@ -1272,13 +1287,10 @@ static void remove_subsystem_dir(const char *name) */ static void __trace_remove_event_call(struct ftrace_event_call *call) { - ftrace_event_enable_disable(call, 0); - if (call->event.funcs) - __unregister_ftrace_event(&call->event); - debugfs_remove_recursive(call->dir); - list_del(&call->list); + event_remove(call); trace_destroy_fields(call); destroy_preds(call); + debugfs_remove_recursive(call->dir); remove_subsystem_dir(call->class->system); } @@ -1450,15 +1462,43 @@ static __init int setup_trace_event(char *str) } __setup("trace_event=", setup_trace_event); +static __init int event_trace_enable(void) +{ + struct ftrace_event_call **iter, *call; + char *buf = bootup_event_buf; + char *token; + int ret; + + for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) { + + call = *iter; + ret = event_init(call); + if (!ret) + list_add(&call->list, &ftrace_events); + } + + while (true) { + token = strsep(&buf, ","); + + if (!token) + break; + if (!*token) + continue; + + ret = ftrace_set_clr_event(token, 1); + if (ret) + pr_warn("Failed to enable trace event: %s\n", token); + } + return 0; +} + static __init int event_trace_init(void) { - struct 
ftrace_event_call **call; + struct ftrace_event_call *call; struct dentry *d_tracer; struct dentry *entry; struct dentry *d_events; int ret; - char *buf = bootup_event_buf; - char *token; d_tracer = tracing_init_dentry(); if (!d_tracer) @@ -1497,24 +1537,19 @@ static __init int event_trace_init(void) if (trace_define_common_fields()) pr_warning("tracing: Failed to allocate common fields"); - for_each_event(call, __start_ftrace_events, __stop_ftrace_events) { - __trace_add_event_call(*call, NULL, &ftrace_event_id_fops, + /* + * Early initialization already enabled ftrace event. + * Now it's only necessary to create the event directory. + */ + list_for_each_entry(call, &ftrace_events, list) { + + ret = event_create_dir(call, d_events, + &ftrace_event_id_fops, &ftrace_enable_fops, &ftrace_event_filter_fops, &ftrace_event_format_fops); - } - - while (true) { - token = strsep(&buf, ","); - - if (!token) - break; - if (!*token) - continue; - - ret = ftrace_set_clr_event(token, 1); - if (ret) - pr_warning("Failed to enable trace event: %s\n", token); + if (ret < 0) + event_remove(call); } ret = register_module_notifier(&trace_module_nb); @@ -1523,6 +1558,7 @@ static __init int event_trace_init(void) return 0; } +core_initcall(event_trace_enable); fs_initcall(event_trace_init); #ifdef CONFIG_FTRACE_STARTUP_TEST @@ -1646,9 +1682,11 @@ static __init void event_trace_self_tests(void) event_test_stuff(); ret = __ftrace_set_clr_event(NULL, system->name, NULL, 0); - if (WARN_ON_ONCE(ret)) + if (WARN_ON_ONCE(ret)) { pr_warning("error disabling system %s\n", system->name); + continue; + } pr_cont("OK\n"); } @@ -1681,7 +1719,8 @@ static __init void event_trace_self_tests(void) static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); static void -function_test_events_call(unsigned long ip, unsigned long parent_ip) +function_test_events_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) { struct ring_buffer_event *event; struct ring_buffer *buffer; @@ -1720,6 +1759,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __initdata = { .func = function_test_events_call, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static __init void event_trace_self_test_with_function(void) diff --git a/trunk/kernel/trace/trace_events_filter.c b/trunk/kernel/trace/trace_events_filter.c index 431dba8b7542..c154797a7ff7 100644 --- a/trunk/kernel/trace/trace_events_filter.c +++ b/trunk/kernel/trace/trace_events_filter.c @@ -2002,7 +2002,7 @@ static int ftrace_function_set_regexp(struct ftrace_ops *ops, int filter, static int __ftrace_function_set_filter(int filter, char *buf, int len, struct function_filter_data *data) { - int i, re_cnt, ret; + int i, re_cnt, ret = -EINVAL; int *reset; char **re; diff --git a/trunk/kernel/trace/trace_functions.c b/trunk/kernel/trace/trace_functions.c index a426f410c060..483162a9f908 100644 --- a/trunk/kernel/trace/trace_functions.c +++ b/trunk/kernel/trace/trace_functions.c @@ -49,7 +49,8 @@ static void function_trace_start(struct trace_array *tr) } static void -function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) +function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) { struct trace_array *tr = func_trace; struct trace_array_cpu *data; @@ -84,7 +85,9 @@ enum { static struct tracer_flags func_flags; static void -function_trace_call(unsigned long ip, unsigned long parent_ip) +function_trace_call(unsigned 
long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) + { struct trace_array *tr = func_trace; struct trace_array_cpu *data; @@ -121,7 +124,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) } static void -function_stack_trace_call(unsigned long ip, unsigned long parent_ip) +function_stack_trace_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) { struct trace_array *tr = func_trace; struct trace_array_cpu *data; @@ -164,13 +168,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = function_trace_call, - .flags = FTRACE_OPS_FL_GLOBAL, + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, }; static struct ftrace_ops trace_stack_ops __read_mostly = { .func = function_stack_trace_call, - .flags = FTRACE_OPS_FL_GLOBAL, + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, }; static struct tracer_opt func_opts[] = { diff --git a/trunk/kernel/trace/trace_functions_graph.c b/trunk/kernel/trace/trace_functions_graph.c index ce27c8ba8d31..99b4378393d5 100644 --- a/trunk/kernel/trace/trace_functions_graph.c +++ b/trunk/kernel/trace/trace_functions_graph.c @@ -143,7 +143,7 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, return; } -#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST +#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY) /* * The arch may choose to record the frame pointer used * and check it here to make sure that it is what we expect it @@ -154,6 +154,9 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret, * * Currently, x86_32 with optimize for size (-Os) makes the latest * gcc do the above. + * + * Note, -mfentry does not use frame pointers, and this test + * is not needed if CC_USING_FENTRY is set. 
*/ if (unlikely(current->ret_stack[index].fp != frame_pointer)) { ftrace_graph_stop(); diff --git a/trunk/kernel/trace/trace_irqsoff.c b/trunk/kernel/trace/trace_irqsoff.c index 99d20e920368..d98ee8283b29 100644 --- a/trunk/kernel/trace/trace_irqsoff.c +++ b/trunk/kernel/trace/trace_irqsoff.c @@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr, * irqsoff uses its own tracer function to keep the overhead down: */ static void -irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) +irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) { struct trace_array *tr = irqsoff_trace; struct trace_array_cpu *data; @@ -153,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = irqsoff_tracer_call, - .flags = FTRACE_OPS_FL_GLOBAL, + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, }; #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/trunk/kernel/trace/trace_kprobe.c b/trunk/kernel/trace/trace_kprobe.c index b31d3d5699fe..1a2117043bb1 100644 --- a/trunk/kernel/trace/trace_kprobe.c +++ b/trunk/kernel/trace/trace_kprobe.c @@ -1002,7 +1002,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); head = this_cpu_ptr(call->perf_events); - perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); + perf_trace_buf_submit(entry, size, rctx, + entry->ip, 1, regs, head, NULL); } /* Kretprobe profile handler */ @@ -1033,7 +1034,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, store_trace_args(sizeof(*entry), tp, regs, (u8 *)&entry[1], dsize); head = this_cpu_ptr(call->perf_events); - perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head); + perf_trace_buf_submit(entry, size, rctx, + entry->ret_ip, 1, regs, head, NULL); } #endif /* CONFIG_PERF_EVENTS */ diff --git a/trunk/kernel/trace/trace_sched_wakeup.c b/trunk/kernel/trace/trace_sched_wakeup.c index ff791ea48b57..02170c00c413 100644 --- a/trunk/kernel/trace/trace_sched_wakeup.c +++ b/trunk/kernel/trace/trace_sched_wakeup.c @@ -108,7 +108,8 @@ func_prolog_preempt_disable(struct trace_array *tr, * wakeup uses its own tracer function to keep the overhead down: */ static void -wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) +wakeup_tracer_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) { struct trace_array *tr = wakeup_trace; struct trace_array_cpu *data; @@ -129,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = wakeup_tracer_call, - .flags = FTRACE_OPS_FL_GLOBAL, + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, }; #endif /* CONFIG_FUNCTION_TRACER */ diff --git a/trunk/kernel/trace/trace_selftest.c b/trunk/kernel/trace/trace_selftest.c index 288541f977fb..2c00a691a540 100644 --- a/trunk/kernel/trace/trace_selftest.c +++ b/trunk/kernel/trace/trace_selftest.c @@ -103,54 +103,67 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) static int trace_selftest_test_probe1_cnt; static void trace_selftest_test_probe1_func(unsigned long ip, - unsigned long pip) + unsigned long pip, + struct ftrace_ops *op, + struct pt_regs *pt_regs) { trace_selftest_test_probe1_cnt++; } static int trace_selftest_test_probe2_cnt; static void trace_selftest_test_probe2_func(unsigned long ip, - unsigned 
long pip) + unsigned long pip, + struct ftrace_ops *op, + struct pt_regs *pt_regs) { trace_selftest_test_probe2_cnt++; } static int trace_selftest_test_probe3_cnt; static void trace_selftest_test_probe3_func(unsigned long ip, - unsigned long pip) + unsigned long pip, + struct ftrace_ops *op, + struct pt_regs *pt_regs) { trace_selftest_test_probe3_cnt++; } static int trace_selftest_test_global_cnt; static void trace_selftest_test_global_func(unsigned long ip, - unsigned long pip) + unsigned long pip, + struct ftrace_ops *op, + struct pt_regs *pt_regs) { trace_selftest_test_global_cnt++; } static int trace_selftest_test_dyn_cnt; static void trace_selftest_test_dyn_func(unsigned long ip, - unsigned long pip) + unsigned long pip, + struct ftrace_ops *op, + struct pt_regs *pt_regs) { trace_selftest_test_dyn_cnt++; } static struct ftrace_ops test_probe1 = { .func = trace_selftest_test_probe1_func, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static struct ftrace_ops test_probe2 = { .func = trace_selftest_test_probe2_func, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static struct ftrace_ops test_probe3 = { .func = trace_selftest_test_probe3_func, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static struct ftrace_ops test_global = { - .func = trace_selftest_test_global_func, - .flags = FTRACE_OPS_FL_GLOBAL, + .func = trace_selftest_test_global_func, + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE, }; static void print_counts(void) @@ -393,10 +406,253 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, return ret; } + +static int trace_selftest_recursion_cnt; +static void trace_selftest_test_recursion_func(unsigned long ip, + unsigned long pip, + struct ftrace_ops *op, + struct pt_regs *pt_regs) +{ + /* + * This function is registered without the recursion safe flag. + * The ftrace infrastructure should provide the recursion + * protection. If not, this will crash the kernel! + */ + trace_selftest_recursion_cnt++; + DYN_FTRACE_TEST_NAME(); +} + +static void trace_selftest_test_recursion_safe_func(unsigned long ip, + unsigned long pip, + struct ftrace_ops *op, + struct pt_regs *pt_regs) +{ + /* + * We said we would provide our own recursion. By calling + * this function again, we should recurse back into this function + * and count again. But this only happens if the arch supports + * all of ftrace features and nothing else is using the function + * tracing utility. + */ + if (trace_selftest_recursion_cnt++) + return; + DYN_FTRACE_TEST_NAME(); +} + +static struct ftrace_ops test_rec_probe = { + .func = trace_selftest_test_recursion_func, +}; + +static struct ftrace_ops test_recsafe_probe = { + .func = trace_selftest_test_recursion_safe_func, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, +}; + +static int +trace_selftest_function_recursion(void) +{ + int save_ftrace_enabled = ftrace_enabled; + int save_tracer_enabled = tracer_enabled; + char *func_name; + int len; + int ret; + int cnt; + + /* The previous test PASSED */ + pr_cont("PASSED\n"); + pr_info("Testing ftrace recursion: "); + + + /* enable tracing, and record the filter function */ + ftrace_enabled = 1; + tracer_enabled = 1; + + /* Handle PPC64 '.' 
name */ + func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); + len = strlen(func_name); + + ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1); + if (ret) { + pr_cont("*Could not set filter* "); + goto out; + } + + ret = register_ftrace_function(&test_rec_probe); + if (ret) { + pr_cont("*could not register callback* "); + goto out; + } + + DYN_FTRACE_TEST_NAME(); + + unregister_ftrace_function(&test_rec_probe); + + ret = -1; + if (trace_selftest_recursion_cnt != 1) { + pr_cont("*callback not called once (%d)* ", + trace_selftest_recursion_cnt); + goto out; + } + + trace_selftest_recursion_cnt = 1; + + pr_cont("PASSED\n"); + pr_info("Testing ftrace recursion safe: "); + + ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1); + if (ret) { + pr_cont("*Could not set filter* "); + goto out; + } + + ret = register_ftrace_function(&test_recsafe_probe); + if (ret) { + pr_cont("*could not register callback* "); + goto out; + } + + DYN_FTRACE_TEST_NAME(); + + unregister_ftrace_function(&test_recsafe_probe); + + /* + * If arch supports all ftrace features, and no other task + * was on the list, we should be fine. + */ + if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC) + cnt = 2; /* Should have recursed */ + else + cnt = 1; + + ret = -1; + if (trace_selftest_recursion_cnt != cnt) { + pr_cont("*callback not called expected %d times (%d)* ", + cnt, trace_selftest_recursion_cnt); + goto out; + } + + ret = 0; +out: + ftrace_enabled = save_ftrace_enabled; + tracer_enabled = save_tracer_enabled; + + return ret; +} #else # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) +# define trace_selftest_function_recursion() ({ 0; }) #endif /* CONFIG_DYNAMIC_FTRACE */ +static enum { + TRACE_SELFTEST_REGS_START, + TRACE_SELFTEST_REGS_FOUND, + TRACE_SELFTEST_REGS_NOT_FOUND, +} trace_selftest_regs_stat; + +static void trace_selftest_test_regs_func(unsigned long ip, + unsigned long pip, + struct ftrace_ops *op, + struct pt_regs *pt_regs) +{ + if (pt_regs) + trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND; + else + trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND; +} + +static struct ftrace_ops test_regs_probe = { + .func = trace_selftest_test_regs_func, + .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS, +}; + +static int +trace_selftest_function_regs(void) +{ + int save_ftrace_enabled = ftrace_enabled; + int save_tracer_enabled = tracer_enabled; + char *func_name; + int len; + int ret; + int supported = 0; + +#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS + supported = 1; +#endif + + /* The previous test PASSED */ + pr_cont("PASSED\n"); + pr_info("Testing ftrace regs%s: ", + !supported ? "(no arch support)" : ""); + + /* enable tracing, and record the filter function */ + ftrace_enabled = 1; + tracer_enabled = 1; + + /* Handle PPC64 '.' name */ + func_name = "*" __stringify(DYN_FTRACE_TEST_NAME); + len = strlen(func_name); + + ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1); + /* + * If DYNAMIC_FTRACE is not set, then we just trace all functions. + * This test really doesn't care. + */ + if (ret && ret != -ENODEV) { + pr_cont("*Could not set filter* "); + goto out; + } + + ret = register_ftrace_function(&test_regs_probe); + /* + * Now if the arch does not support passing regs, then this should + * have failed. 
+ */ + if (!supported) { + if (!ret) { + pr_cont("*registered save-regs without arch support* "); + goto out; + } + test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED; + ret = register_ftrace_function(&test_regs_probe); + } + if (ret) { + pr_cont("*could not register callback* "); + goto out; + } + + + DYN_FTRACE_TEST_NAME(); + + unregister_ftrace_function(&test_regs_probe); + + ret = -1; + + switch (trace_selftest_regs_stat) { + case TRACE_SELFTEST_REGS_START: + pr_cont("*callback never called* "); + goto out; + + case TRACE_SELFTEST_REGS_FOUND: + if (supported) + break; + pr_cont("*callback received regs without arch support* "); + goto out; + + case TRACE_SELFTEST_REGS_NOT_FOUND: + if (!supported) + break; + pr_cont("*callback received NULL regs* "); + goto out; + } + + ret = 0; +out: + ftrace_enabled = save_ftrace_enabled; + tracer_enabled = save_tracer_enabled; + + return ret; +} + /* * Simple verification test of ftrace function tracer. * Enable ftrace, sleep 1/10 second, and then read the trace @@ -442,7 +698,14 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) ret = trace_selftest_startup_dynamic_tracing(trace, tr, DYN_FTRACE_TEST_NAME); + if (ret) + goto out; + ret = trace_selftest_function_recursion(); + if (ret) + goto out; + + ret = trace_selftest_function_regs(); out: ftrace_enabled = save_ftrace_enabled; tracer_enabled = save_tracer_enabled; @@ -778,6 +1041,8 @@ static int trace_wakeup_test_thread(void *data) set_current_state(TASK_INTERRUPTIBLE); schedule(); + complete(x); + /* we are awake, now wait to disappear */ while (!kthread_should_stop()) { /* @@ -821,24 +1086,21 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) /* reset the max latency */ tracing_max_latency = 0; - /* sleep to let the RT thread sleep too */ - msleep(100); + while (p->on_rq) { + /* + * Sleep to make sure the RT thread is asleep too. + * On virtual machines we can't rely on timings, + * but we want to make sure this test still works. + */ + msleep(100); + } - /* - * Yes this is slightly racy. It is possible that for some - * strange reason that the RT thread we created, did not - * call schedule for 100ms after doing the completion, - * and we do a wakeup on a task that already is awake. - * But that is extremely unlikely, and the worst thing that - * happens in such a case, is that we disable tracing. - * Honestly, if this race does happen something is horrible - * wrong with the system. - */ + init_completion(&isrt); wake_up_process(p); - /* give a little time to let the thread wake up */ - msleep(100); + /* Wait for the task to wake up */ + wait_for_completion(&isrt); /* stop the tracing. 
*/ tracing_stop(); diff --git a/trunk/kernel/trace/trace_stack.c b/trunk/kernel/trace/trace_stack.c index d4545f49242e..0c1b165778e5 100644 --- a/trunk/kernel/trace/trace_stack.c +++ b/trunk/kernel/trace/trace_stack.c @@ -111,7 +111,8 @@ static inline void check_stack(void) } static void -stack_trace_call(unsigned long ip, unsigned long parent_ip) +stack_trace_call(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct pt_regs *pt_regs) { int cpu; @@ -136,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) static struct ftrace_ops trace_ops __read_mostly = { .func = stack_trace_call, + .flags = FTRACE_OPS_FL_RECURSION_SAFE, }; static ssize_t diff --git a/trunk/kernel/trace/trace_syscalls.c b/trunk/kernel/trace/trace_syscalls.c index 96fc73369099..2485a7d09b11 100644 --- a/trunk/kernel/trace/trace_syscalls.c +++ b/trunk/kernel/trace/trace_syscalls.c @@ -487,7 +487,7 @@ int __init init_ftrace_syscalls(void) return 0; } -core_initcall(init_ftrace_syscalls); +early_initcall(init_ftrace_syscalls); #ifdef CONFIG_PERF_EVENTS @@ -506,6 +506,8 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) int size; syscall_nr = syscall_get_nr(current, regs); + if (syscall_nr < 0) + return; if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) return; @@ -532,7 +534,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) (unsigned long *)&rec->args); head = this_cpu_ptr(sys_data->enter_event->perf_events); - perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); + perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); } int perf_sysenter_enable(struct ftrace_event_call *call) @@ -580,6 +582,8 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) int size; syscall_nr = syscall_get_nr(current, regs); + if (syscall_nr < 0) + return; if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) return; @@ -608,7 +612,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) rec->ret = syscall_get_return_value(current, regs); head = this_cpu_ptr(sys_data->exit_event->perf_events); - perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head); + perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL); } int perf_sysexit_enable(struct ftrace_event_call *call) diff --git a/trunk/kernel/trace/trace_uprobe.c b/trunk/kernel/trace/trace_uprobe.c index 2b36ac68549e..03003cd7dd96 100644 --- a/trunk/kernel/trace/trace_uprobe.c +++ b/trunk/kernel/trace/trace_uprobe.c @@ -670,7 +670,7 @@ static void uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); head = this_cpu_ptr(call->perf_events); - perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head); + perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL); out: preempt_enable(); diff --git a/trunk/kernel/watchdog.c b/trunk/kernel/watchdog.c index 69add8a9da68..9d4c8d5a1f53 100644 --- a/trunk/kernel/watchdog.c +++ b/trunk/kernel/watchdog.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -29,16 +30,18 @@ int watchdog_enabled = 1; int __read_mostly watchdog_thresh = 10; +static int __read_mostly watchdog_disabled; static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); static DEFINE_PER_CPU(bool, softlockup_touch_sync); static DEFINE_PER_CPU(bool, soft_watchdog_warn); +static 
DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); +static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt); #ifdef CONFIG_HARDLOCKUP_DETECTOR static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, watchdog_nmi_touch); -static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); #endif @@ -248,13 +251,15 @@ static void watchdog_overflow_callback(struct perf_event *event, __this_cpu_write(hard_watchdog_warn, false); return; } +#endif /* CONFIG_HARDLOCKUP_DETECTOR */ + static void watchdog_interrupt_count(void) { __this_cpu_inc(hrtimer_interrupts); } -#else -static inline void watchdog_interrupt_count(void) { return; } -#endif /* CONFIG_HARDLOCKUP_DETECTOR */ + +static int watchdog_nmi_enable(unsigned int cpu); +static void watchdog_nmi_disable(unsigned int cpu); /* watchdog kicker functions */ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) @@ -327,49 +332,68 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) return HRTIMER_RESTART; } +static void watchdog_set_prio(unsigned int policy, unsigned int prio) +{ + struct sched_param param = { .sched_priority = prio }; -/* - * The watchdog thread - touches the timestamp. - */ -static int watchdog(void *unused) + sched_setscheduler(current, policy, &param); +} + +static void watchdog_enable(unsigned int cpu) { - struct sched_param param = { .sched_priority = 0 }; struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); - /* initialize timestamp */ - __touch_watchdog(); + if (!watchdog_enabled) { + kthread_park(current); + return; + } + + /* Enable the perf event */ + watchdog_nmi_enable(cpu); /* kick off the timer for the hardlockup detector */ + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; + /* done here because hrtimer_start can only pin to smp_processor_id() */ hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()), HRTIMER_MODE_REL_PINNED); - set_current_state(TASK_INTERRUPTIBLE); - /* - * Run briefly (kicked by the hrtimer callback function) once every - * get_sample_period() seconds (4 seconds by default) to reset the - * softlockup timestamp. If this gets delayed for more than - * 2*watchdog_thresh seconds then the debug-printout triggers in - * watchdog_timer_fn(). - */ - while (!kthread_should_stop()) { - __touch_watchdog(); - schedule(); + /* initialize timestamp */ + watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1); + __touch_watchdog(); +} + +static void watchdog_disable(unsigned int cpu) +{ + struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); - if (kthread_should_stop()) - break; + watchdog_set_prio(SCHED_NORMAL, 0); + hrtimer_cancel(hrtimer); + /* disable the perf event */ + watchdog_nmi_disable(cpu); +} - set_current_state(TASK_INTERRUPTIBLE); - } - /* - * Drop the policy/priority elevation during thread exit to avoid a - * scheduling latency spike. - */ - __set_current_state(TASK_RUNNING); - sched_setscheduler(current, SCHED_NORMAL, &param); - return 0; +static int watchdog_should_run(unsigned int cpu) +{ + return __this_cpu_read(hrtimer_interrupts) != + __this_cpu_read(soft_lockup_hrtimer_cnt); } +/* + * The watchdog thread function - touches the timestamp. + * + * It only runs once every get_sample_period() seconds (4 seconds by + * default) to reset the softlockup timestamp.
If this gets delayed + * for more than 2*watchdog_thresh seconds then the debug-printout + * triggers in watchdog_timer_fn(). + */ +static void watchdog(unsigned int cpu) +{ + __this_cpu_write(soft_lockup_hrtimer_cnt, + __this_cpu_read(hrtimer_interrupts)); + __touch_watchdog(); +} #ifdef CONFIG_HARDLOCKUP_DETECTOR /* @@ -379,7 +403,7 @@ static int watchdog(void *unused) */ static unsigned long cpu0_err; -static int watchdog_nmi_enable(int cpu) +static int watchdog_nmi_enable(unsigned int cpu) { struct perf_event_attr *wd_attr; struct perf_event *event = per_cpu(watchdog_ev, cpu); @@ -433,7 +457,7 @@ static int watchdog_nmi_enable(int cpu) return 0; } -static void watchdog_nmi_disable(int cpu) +static void watchdog_nmi_disable(unsigned int cpu) { struct perf_event *event = per_cpu(watchdog_ev, cpu); @@ -447,107 +471,35 @@ static void watchdog_nmi_disable(int cpu) return; } #else -static int watchdog_nmi_enable(int cpu) { return 0; } -static void watchdog_nmi_disable(int cpu) { return; } +static int watchdog_nmi_enable(unsigned int cpu) { return 0; } +static void watchdog_nmi_disable(unsigned int cpu) { return; } #endif /* CONFIG_HARDLOCKUP_DETECTOR */ /* prepare/enable/disable routines */ -static void watchdog_prepare_cpu(int cpu) -{ - struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); - - WARN_ON(per_cpu(softlockup_watchdog, cpu)); - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hrtimer->function = watchdog_timer_fn; -} - -static int watchdog_enable(int cpu) -{ - struct task_struct *p = per_cpu(softlockup_watchdog, cpu); - int err = 0; - - /* enable the perf event */ - err = watchdog_nmi_enable(cpu); - - /* Regardless of err above, fall through and start softlockup */ - - /* create the watchdog thread */ - if (!p) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; - p = kthread_create_on_node(watchdog, NULL, cpu_to_node(cpu), "watchdog/%d", cpu); - if (IS_ERR(p)) { - pr_err("softlockup watchdog for %i failed\n", cpu); - if (!err) { - /* if hardlockup hasn't already set this */ - err = PTR_ERR(p); - /* and disable the perf event */ - watchdog_nmi_disable(cpu); - } - goto out; - } - sched_setscheduler(p, SCHED_FIFO, &param); - kthread_bind(p, cpu); - per_cpu(watchdog_touch_ts, cpu) = 0; - per_cpu(softlockup_watchdog, cpu) = p; - wake_up_process(p); - } - -out: - return err; -} - -static void watchdog_disable(int cpu) -{ - struct task_struct *p = per_cpu(softlockup_watchdog, cpu); - struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); - - /* - * cancel the timer first to stop incrementing the stats - * and waking up the kthread - */ - hrtimer_cancel(hrtimer); - - /* disable the perf event */ - watchdog_nmi_disable(cpu); - - /* stop the watchdog thread */ - if (p) { - per_cpu(softlockup_watchdog, cpu) = NULL; - kthread_stop(p); - } -} - /* sysctl functions */ #ifdef CONFIG_SYSCTL static void watchdog_enable_all_cpus(void) { - int cpu; - - watchdog_enabled = 0; - - for_each_online_cpu(cpu) - if (!watchdog_enable(cpu)) - /* if any cpu succeeds, watchdog is considered - enabled for the system */ - watchdog_enabled = 1; - - if (!watchdog_enabled) - pr_err("failed to be enabled on some cpus\n"); + unsigned int cpu; + if (watchdog_disabled) { + watchdog_disabled = 0; + for_each_online_cpu(cpu) + kthread_unpark(per_cpu(softlockup_watchdog, cpu)); + } } static void watchdog_disable_all_cpus(void) { - int cpu; + unsigned int cpu; - for_each_online_cpu(cpu) - watchdog_disable(cpu); - - /* if all watchdogs are disabled, then they are disabled for the system */ -
watchdog_enabled = 0; + if (!watchdog_disabled) { + watchdog_disabled = 1; + for_each_online_cpu(cpu) + kthread_park(per_cpu(softlockup_watchdog, cpu)); + } } - /* * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh */ @@ -557,90 +509,36 @@ int proc_dowatchdog(struct ctl_table *table, int write, { int ret; + if (watchdog_disabled < 0) + return -ENODEV; + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret || !write) - goto out; + return ret; if (watchdog_enabled && watchdog_thresh) watchdog_enable_all_cpus(); else watchdog_disable_all_cpus(); -out: return ret; } #endif /* CONFIG_SYSCTL */ - -/* - * Create/destroy watchdog threads as CPUs come and go: - */ -static int -cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) -{ - int hotcpu = (unsigned long)hcpu; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - watchdog_prepare_cpu(hotcpu); - break; - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - if (watchdog_enabled) - watchdog_enable(hotcpu); - break; -#ifdef CONFIG_HOTPLUG_CPU - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - watchdog_disable(hotcpu); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - watchdog_disable(hotcpu); - break; -#endif /* CONFIG_HOTPLUG_CPU */ - } - - /* - * hardlockup and softlockup are not important enough - * to block cpu bring up. Just always succeed and - * rely on printk output to flag problems. - */ - return NOTIFY_OK; -} - -static struct notifier_block cpu_nfb = { - .notifier_call = cpu_callback +static struct smp_hotplug_thread watchdog_threads = { + .store = &softlockup_watchdog, + .thread_should_run = watchdog_should_run, + .thread_fn = watchdog, + .thread_comm = "watchdog/%u", + .setup = watchdog_enable, + .park = watchdog_disable, + .unpark = watchdog_enable, }; -#ifdef CONFIG_SUSPEND -/* - * On exit from suspend we force an offline->online transition on the boot CPU - * so that the PMU state that was lost while in suspended state gets set up - * properly for the boot CPU. This information is required for restarting the - * NMI watchdog. - */ -void lockup_detector_bootcpu_resume(void) -{ - void *cpu = (void *)(long)smp_processor_id(); - - cpu_callback(&cpu_nfb, CPU_DEAD_FROZEN, cpu); - cpu_callback(&cpu_nfb, CPU_UP_PREPARE_FROZEN, cpu); - cpu_callback(&cpu_nfb, CPU_ONLINE_FROZEN, cpu); -} -#endif - void __init lockup_detector_init(void) { - void *cpu = (void *)(long)smp_processor_id(); - int err; - - err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); - WARN_ON(notifier_to_errno(err)); - - cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); - register_cpu_notifier(&cpu_nfb); - - return; + if (smpboot_register_percpu_thread(&watchdog_threads)) { + pr_err("Failed to create watchdog threads, disabled\n"); + watchdog_disabled = -ENODEV; + } } diff --git a/trunk/kernel/workqueue.c b/trunk/kernel/workqueue.c index 692d97628a10..3c5a79e2134c 100644 --- a/trunk/kernel/workqueue.c +++ b/trunk/kernel/workqueue.c @@ -66,6 +66,7 @@ enum { /* pool flags */ POOL_MANAGE_WORKERS = 1 << 0, /* need to manage workers */ + POOL_MANAGING_WORKERS = 1 << 1, /* managing workers */ /* worker flags */ WORKER_STARTED = 1 << 0, /* started */ @@ -652,7 +653,7 @@ static bool need_to_manage_workers(struct worker_pool *pool) /* Do we have too many workers and should some go away? 
*/ static bool too_many_workers(struct worker_pool *pool) { - bool managing = mutex_is_locked(&pool->manager_mutex); + bool managing = pool->flags & POOL_MANAGING_WORKERS; int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ int nr_busy = pool->nr_workers - nr_idle; @@ -1326,6 +1327,15 @@ static void idle_worker_rebind(struct worker *worker) /* we did our part, wait for rebind_workers() to finish up */ wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND)); + + /* + * rebind_workers() shouldn't finish until all workers passed the + * above WORKER_REBIND wait. Tell it when done. + */ + spin_lock_irq(&worker->pool->gcwq->lock); + if (!--worker->idle_rebind->cnt) + complete(&worker->idle_rebind->done); + spin_unlock_irq(&worker->pool->gcwq->lock); } /* @@ -1339,8 +1349,16 @@ static void busy_worker_rebind_fn(struct work_struct *work) struct worker *worker = container_of(work, struct worker, rebind_work); struct global_cwq *gcwq = worker->pool->gcwq; - if (worker_maybe_bind_and_lock(worker)) - worker_clr_flags(worker, WORKER_REBIND); + worker_maybe_bind_and_lock(worker); + + /* + * %WORKER_REBIND must be cleared even if the above binding failed; + * otherwise, we may confuse the next CPU_UP cycle or oops / get + * stuck by calling idle_worker_rebind() prematurely. If CPU went + * down again inbetween, %WORKER_UNBOUND would be set, so clearing + * %WORKER_REBIND is always safe. + */ + worker_clr_flags(worker, WORKER_REBIND); spin_unlock_irq(&gcwq->lock); } @@ -1396,12 +1414,15 @@ static void rebind_workers(struct global_cwq *gcwq) /* set REBIND and kick idle ones, we'll wait for these later */ for_each_worker_pool(pool, gcwq) { list_for_each_entry(worker, &pool->idle_list, entry) { + unsigned long worker_flags = worker->flags; + if (worker->flags & WORKER_REBIND) continue; - /* morph UNBOUND to REBIND */ - worker->flags &= ~WORKER_UNBOUND; - worker->flags |= WORKER_REBIND; + /* morph UNBOUND to REBIND atomically */ + worker_flags &= ~WORKER_UNBOUND; + worker_flags |= WORKER_REBIND; + ACCESS_ONCE(worker->flags) = worker_flags; idle_rebind.cnt++; worker->idle_rebind = &idle_rebind; @@ -1419,25 +1440,15 @@ static void rebind_workers(struct global_cwq *gcwq) goto retry; } - /* - * All idle workers are rebound and waiting for %WORKER_REBIND to - * be cleared inside idle_worker_rebind(). Clear and release. - * Clearing %WORKER_REBIND from this foreign context is safe - * because these workers are still guaranteed to be idle. - */ - for_each_worker_pool(pool, gcwq) - list_for_each_entry(worker, &pool->idle_list, entry) - worker->flags &= ~WORKER_REBIND; - - wake_up_all(&gcwq->rebind_hold); - - /* rebind busy workers */ + /* all idle workers are rebound, rebind busy workers */ for_each_busy_worker(worker, i, pos, gcwq) { struct work_struct *rebind_work = &worker->rebind_work; + unsigned long worker_flags = worker->flags; - /* morph UNBOUND to REBIND */ - worker->flags &= ~WORKER_UNBOUND; - worker->flags |= WORKER_REBIND; + /* morph UNBOUND to REBIND atomically */ + worker_flags &= ~WORKER_UNBOUND; + worker_flags |= WORKER_REBIND; + ACCESS_ONCE(worker->flags) = worker_flags; if (test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(rebind_work))) @@ -1449,6 +1460,34 @@ static void rebind_workers(struct global_cwq *gcwq) worker->scheduled.next, work_color_to_flags(WORK_NO_COLOR)); } + + /* + * All idle workers are rebound and waiting for %WORKER_REBIND to + * be cleared inside idle_worker_rebind(). Clear and release. 
+ * Clearing %WORKER_REBIND from this foreign context is safe + * because these workers are still guaranteed to be idle. + * + * We need to make sure all idle workers passed WORKER_REBIND wait + * in idle_worker_rebind() before returning; otherwise, workers can + * get stuck at the wait if hotplug cycle repeats. + */ + idle_rebind.cnt = 1; + INIT_COMPLETION(idle_rebind.done); + + for_each_worker_pool(pool, gcwq) { + list_for_each_entry(worker, &pool->idle_list, entry) { + worker->flags &= ~WORKER_REBIND; + idle_rebind.cnt++; + } + } + + wake_up_all(&gcwq->rebind_hold); + + if (--idle_rebind.cnt) { + spin_unlock_irq(&gcwq->lock); + wait_for_completion(&idle_rebind.done); + spin_lock_irq(&gcwq->lock); + } } static struct worker *alloc_worker(void) @@ -1794,9 +1833,45 @@ static bool manage_workers(struct worker *worker) struct worker_pool *pool = worker->pool; bool ret = false; - if (!mutex_trylock(&pool->manager_mutex)) + if (pool->flags & POOL_MANAGING_WORKERS) return ret; + pool->flags |= POOL_MANAGING_WORKERS; + + /* + * To simplify both worker management and CPU hotplug, hold off + * management while hotplug is in progress. CPU hotplug path can't + * grab %POOL_MANAGING_WORKERS to achieve this because that can + * lead to idle worker depletion (all become busy thinking someone + * else is managing) which in turn can result in deadlock under + * extreme circumstances. Use @pool->manager_mutex to synchronize + * manager against CPU hotplug. + * + * manager_mutex would always be free unless CPU hotplug is in + * progress. trylock first without dropping @gcwq->lock. + */ + if (unlikely(!mutex_trylock(&pool->manager_mutex))) { + spin_unlock_irq(&pool->gcwq->lock); + mutex_lock(&pool->manager_mutex); + /* + * CPU hotplug could have happened while we were waiting + * for manager_mutex. Hotplug itself can't handle us + * because manager isn't either on idle or busy list, and + * @gcwq's state and ours could have deviated. + * + * As hotplug is now excluded via manager_mutex, we can + * simply try to bind. It will succeed or fail depending + * on @gcwq's current state. Try it and adjust + * %WORKER_UNBOUND accordingly. 
+ */ + if (worker_maybe_bind_and_lock(worker)) + worker->flags &= ~WORKER_UNBOUND; + else + worker->flags |= WORKER_UNBOUND; + + ret = true; + } + pool->flags &= ~POOL_MANAGE_WORKERS; /* @@ -1806,6 +1881,7 @@ static bool manage_workers(struct worker *worker) ret |= maybe_destroy_workers(pool); ret |= maybe_create_worker(pool); + pool->flags &= ~POOL_MANAGING_WORKERS; mutex_unlock(&pool->manager_mutex); return ret; } @@ -3500,18 +3576,17 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb, #ifdef CONFIG_SMP struct work_for_cpu { - struct completion completion; + struct work_struct work; long (*fn)(void *); void *arg; long ret; }; -static int do_work_for_cpu(void *_wfc) +static void work_for_cpu_fn(struct work_struct *work) { - struct work_for_cpu *wfc = _wfc; + struct work_for_cpu *wfc = container_of(work, struct work_for_cpu, work); + wfc->ret = wfc->fn(wfc->arg); - complete(&wfc->completion); - return 0; } /** @@ -3526,19 +3601,11 @@ static int do_work_for_cpu(void *_wfc) */ long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg) { - struct task_struct *sub_thread; - struct work_for_cpu wfc = { - .completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion), - .fn = fn, - .arg = arg, - }; + struct work_for_cpu wfc = { .fn = fn, .arg = arg }; - sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu"); - if (IS_ERR(sub_thread)) - return PTR_ERR(sub_thread); - kthread_bind(sub_thread, cpu); - wake_up_process(sub_thread); - wait_for_completion(&wfc.completion); + INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); + schedule_work_on(cpu, &wfc.work); + flush_work(&wfc.work); return wfc.ret; } EXPORT_SYMBOL_GPL(work_on_cpu); diff --git a/trunk/lib/Kconfig.debug b/trunk/lib/Kconfig.debug index 2403a63b5da5..dacbbe4d7a80 100644 --- a/trunk/lib/Kconfig.debug +++ b/trunk/lib/Kconfig.debug @@ -629,6 +629,20 @@ config PROVE_RCU_REPEATEDLY Say N if you are unsure. +config PROVE_RCU_DELAY + bool "RCU debugging: preemptible RCU race provocation" + depends on DEBUG_KERNEL && PREEMPT_RCU + default n + help + There is a class of races that involve an unlikely preemption + of __rcu_read_unlock() just after ->rcu_read_lock_nesting has + been set to INT_MIN. This feature inserts a delay at that + point to increase the probability of these races. + + Say Y to increase probability of preemption of __rcu_read_unlock(). + + Say N if you are unsure. 
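
A usage note on the work_on_cpu() rewrite in the workqueue hunk above: callers see no change, since the function still runs fn(arg) on the requested CPU and hands back its return value; only the mechanism moved from a bound kthread to an on-stack work item queued with schedule_work_on() and waited on with flush_work(). A minimal, hypothetical caller (illustration only, not part of this patch) would look like:

#include <linux/smp.h>
#include <linux/workqueue.h>

/* Hypothetical callback: runs on whichever CPU work_on_cpu() is given. */
static long which_cpu(void *unused)
{
        return raw_smp_processor_id();
}

static long example_query_cpu3(void)
{
        /* Blocks until which_cpu() has executed on CPU 3, then returns its result. */
        return work_on_cpu(3, which_cpu, NULL);
}

Because the work item lives on the caller's stack, work_on_cpu() must not return before flush_work() completes, which the rewrite above guarantees.
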
+ config SPARSE_RCU_POINTER bool "RCU debugging: sparse-based checks for pointer usage" default n diff --git a/trunk/lib/digsig.c b/trunk/lib/digsig.c index 286d558033e2..8c0e62975c88 100644 --- a/trunk/lib/digsig.c +++ b/trunk/lib/digsig.c @@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key, memcpy(out1 + head, p, l); err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len); + if (err) + goto err; - if (!err && len == hlen) - err = memcmp(out2, h, hlen); + if (len != hlen || memcmp(out2, h, hlen)) + err = -EINVAL; err: mpi_free(in); diff --git a/trunk/lib/flex_proportions.c b/trunk/lib/flex_proportions.c index c785554f9523..ebf3bac460b0 100644 --- a/trunk/lib/flex_proportions.c +++ b/trunk/lib/flex_proportions.c @@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p) */ bool fprop_new_period(struct fprop_global *p, int periods) { - u64 events; + s64 events; unsigned long flags; local_irq_save(flags); diff --git a/trunk/mm/backing-dev.c b/trunk/mm/backing-dev.c index 6b4718e2ee34..b41823cc05e6 100644 --- a/trunk/mm/backing-dev.c +++ b/trunk/mm/backing-dev.c @@ -39,12 +39,6 @@ DEFINE_SPINLOCK(bdi_lock); LIST_HEAD(bdi_list); LIST_HEAD(bdi_pending_list); -static struct task_struct *sync_supers_tsk; -static struct timer_list sync_supers_timer; - -static int bdi_sync_supers(void *); -static void sync_supers_timer_fn(unsigned long); - void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2) { if (wb1 < wb2) { @@ -250,12 +244,6 @@ static int __init default_bdi_init(void) { int err; - sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers"); - BUG_ON(IS_ERR(sync_supers_tsk)); - - setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0); - bdi_arm_supers_timer(); - err = bdi_init(&default_backing_dev_info); if (!err) bdi_register(&default_backing_dev_info, NULL, "default"); @@ -270,46 +258,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi) return wb_has_dirty_io(&bdi->wb); } -/* - * kupdated() used to do this. We cannot do it from the bdi_forker_thread() - * or we risk deadlocking on ->s_umount. The longer term solution would be - * to implement sync_supers_bdi() or similar and simply do it from the - * bdi writeback thread individually. - */ -static int bdi_sync_supers(void *unused) -{ - set_user_nice(current, 0); - - while (!kthread_should_stop()) { - set_current_state(TASK_INTERRUPTIBLE); - schedule(); - - /* - * Do this periodically, like kupdated() did before. 
- */ - sync_supers(); - } - - return 0; -} - -void bdi_arm_supers_timer(void) -{ - unsigned long next; - - if (!dirty_writeback_interval) - return; - - next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies; - mod_timer(&sync_supers_timer, round_jiffies_up(next)); -} - -static void sync_supers_timer_fn(unsigned long unused) -{ - wake_up_process(sync_supers_tsk); - bdi_arm_supers_timer(); -} - static void wakeup_timer_fn(unsigned long data) { struct backing_dev_info *bdi = (struct backing_dev_info *)data; diff --git a/trunk/mm/bootmem.c b/trunk/mm/bootmem.c index bcb63ac48cc5..f468185b3b28 100644 --- a/trunk/mm/bootmem.c +++ b/trunk/mm/bootmem.c @@ -419,7 +419,7 @@ int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, } /** - * reserve_bootmem - mark a page range as usable + * reserve_bootmem - mark a page range as reserved * @addr: starting address of the range * @size: size of the range in bytes * @flags: reservation flags (see linux/bootmem.h) diff --git a/trunk/mm/compaction.c b/trunk/mm/compaction.c index e78cb9688421..7fcd3a52e68d 100644 --- a/trunk/mm/compaction.c +++ b/trunk/mm/compaction.c @@ -50,6 +50,47 @@ static inline bool migrate_async_suitable(int migratetype) return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; } +/* + * Compaction requires the taking of some coarse locks that are potentially + * very heavily contended. Check if the process needs to be scheduled or + * if the lock is contended. For async compaction, back out in the event + * if contention is severe. For sync compaction, schedule. + * + * Returns true if the lock is held. + * Returns false if the lock is released and compaction should abort + */ +static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, + bool locked, struct compact_control *cc) +{ + if (need_resched() || spin_is_contended(lock)) { + if (locked) { + spin_unlock_irqrestore(lock, *flags); + locked = false; + } + + /* async aborts if taking too long or contended */ + if (!cc->sync) { + if (cc->contended) + *cc->contended = true; + return false; + } + + cond_resched(); + if (fatal_signal_pending(current)) + return false; + } + + if (!locked) + spin_lock_irqsave(lock, *flags); + return true; +} + +static inline bool compact_trylock_irqsave(spinlock_t *lock, + unsigned long *flags, struct compact_control *cc) +{ + return compact_checklock_irqsave(lock, flags, false, cc); +} + /* * Isolate free pages onto a private freelist. Caller must hold zone->lock. 
* If @strict is true, will abort returning 0 on any invalid PFNs or non-free @@ -173,7 +214,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn) } /* Update the number of anon and file isolated pages in the zone */ -static void acct_isolated(struct zone *zone, struct compact_control *cc) +static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc) { struct page *page; unsigned int count[2] = { 0, }; @@ -181,8 +222,14 @@ static void acct_isolated(struct zone *zone, struct compact_control *cc) list_for_each_entry(page, &cc->migratepages, lru) count[!!page_is_file_cache(page)]++; - __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); - __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); + /* If locked we can use the interrupt unsafe versions */ + if (locked) { + __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); + __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); + } else { + mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]); + mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]); + } } /* Similar to reclaim, but different enough that they don't share logic */ @@ -228,6 +275,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, struct list_head *migratelist = &cc->migratepages; isolate_mode_t mode = 0; struct lruvec *lruvec; + unsigned long flags; + bool locked; /* * Ensure that there are not too many pages isolated from the LRU @@ -247,25 +296,22 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, /* Time to isolate some pages for migration */ cond_resched(); - spin_lock_irq(&zone->lru_lock); + spin_lock_irqsave(&zone->lru_lock, flags); + locked = true; for (; low_pfn < end_pfn; low_pfn++) { struct page *page; - bool locked = true; /* give a chance to irqs before checking need_resched() */ if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) { - spin_unlock_irq(&zone->lru_lock); + spin_unlock_irqrestore(&zone->lru_lock, flags); locked = false; } - if (need_resched() || spin_is_contended(&zone->lru_lock)) { - if (locked) - spin_unlock_irq(&zone->lru_lock); - cond_resched(); - spin_lock_irq(&zone->lru_lock); - if (fatal_signal_pending(current)) - break; - } else if (!locked) - spin_lock_irq(&zone->lru_lock); + + /* Check if it is ok to still hold the lock */ + locked = compact_checklock_irqsave(&zone->lru_lock, &flags, + locked, cc); + if (!locked) + break; /* * migrate_pfn does not necessarily start aligned to a @@ -349,9 +395,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, } } - acct_isolated(zone, cc); + acct_isolated(zone, locked, cc); - spin_unlock_irq(&zone->lru_lock); + if (locked) + spin_unlock_irqrestore(&zone->lru_lock, flags); trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); @@ -383,6 +430,20 @@ static bool suitable_migration_target(struct page *page) return false; } +/* + * Returns the start pfn of the last page block in a zone. This is the starting + * point for full compaction of a zone. Compaction searches for free pages from + * the end of each zone, while isolate_freepages_block scans forward inside each + * page block. + */ +static unsigned long start_free_pfn(struct zone *zone) +{ + unsigned long free_pfn; + free_pfn = zone->zone_start_pfn + zone->spanned_pages; + free_pfn &= ~(pageblock_nr_pages-1); + return free_pfn; +} + /* * Based on information in the current compact_control, find blocks * suitable for isolating free pages from and then isolate them. 
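
To summarize the locking pattern these compaction hunks introduce: a long page scan takes the coarse zone lock once, drops it every few iterations to give irqs a chance, and then either re-takes it (sync compaction) or gives up and reports contention to the caller (async compaction). Below is a condensed, free-standing restatement of that shape, illustrative only: the names scan_with_backoff and nr_items are hypothetical, and the real code uses compact_checklock_irqsave() together with cc->sync and cc->contended as shown above.

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static int scan_with_backoff(spinlock_t *lock, unsigned long nr_items,
                             bool sync, bool *contended)
{
        unsigned long flags;
        unsigned long i;
        bool locked;

        spin_lock_irqsave(lock, flags);
        locked = true;

        for (i = 0; i < nr_items; i++) {
                /* periodically drop the lock to give irqs a chance */
                if (!((i + 1) % 32)) {
                        spin_unlock_irqrestore(lock, flags);
                        locked = false;
                }

                if (!locked) {
                        if (need_resched() || spin_is_contended(lock)) {
                                if (!sync) {
                                        /* async: report contention and bail out */
                                        if (contended)
                                                *contended = true;
                                        return -EAGAIN;
                                }
                                /* sync: yield, then fall through and retake */
                                cond_resched();
                        }
                        spin_lock_irqsave(lock, flags);
                        locked = true;
                }

                /* ... examine item i under the lock ... */
        }

        if (locked)
                spin_unlock_irqrestore(lock, flags);
        return 0;
}
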
@@ -422,17 +483,6 @@ static void isolate_freepages(struct zone *zone, pfn -= pageblock_nr_pages) { unsigned long isolated; - /* - * Skip ahead if another thread is compacting in the area - * simultaneously. If we wrapped around, we can only skip - * ahead if zone->compact_cached_free_pfn also wrapped to - * above our starting point. - */ - if (cc->order > 0 && (!cc->wrapped || - zone->compact_cached_free_pfn > - cc->start_free_pfn)) - pfn = min(pfn, zone->compact_cached_free_pfn); - if (!pfn_valid(pfn)) continue; @@ -458,7 +508,16 @@ static void isolate_freepages(struct zone *zone, * are disabled */ isolated = 0; - spin_lock_irqsave(&zone->lock, flags); + + /* + * The zone lock must be held to isolate freepages. This + * unfortunately this is a very coarse lock and can be + * heavily contended if there are parallel allocations + * or parallel compactions. For async compaction do not + * spin on the lock + */ + if (!compact_trylock_irqsave(&zone->lock, &flags, cc)) + break; if (suitable_migration_target(page)) { end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); isolated = isolate_freepages_block(pfn, end_pfn, @@ -474,7 +533,15 @@ static void isolate_freepages(struct zone *zone, */ if (isolated) { high_pfn = max(high_pfn, pfn); - if (cc->order > 0) + + /* + * If the free scanner has wrapped, update + * compact_cached_free_pfn to point to the highest + * pageblock with free pages. This reduces excessive + * scanning of full pageblocks near the end of the + * zone + */ + if (cc->order > 0 && cc->wrapped) zone->compact_cached_free_pfn = high_pfn; } } @@ -484,6 +551,11 @@ static void isolate_freepages(struct zone *zone, cc->free_pfn = high_pfn; cc->nr_freepages = nr_freepages; + + /* If compact_cached_free_pfn is reset then set it now */ + if (cc->order > 0 && !cc->wrapped && + zone->compact_cached_free_pfn == start_free_pfn(zone)) + zone->compact_cached_free_pfn = high_pfn; } /* @@ -570,20 +642,6 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, return ISOLATE_SUCCESS; } -/* - * Returns the start pfn of the last page block in a zone. This is the starting - * point for full compaction of a zone. Compaction searches for free pages from - * the end of each zone, while isolate_freepages_block scans forward inside each - * page block. 
- */ -static unsigned long start_free_pfn(struct zone *zone) -{ - unsigned long free_pfn; - free_pfn = zone->zone_start_pfn + zone->spanned_pages; - free_pfn &= ~(pageblock_nr_pages-1); - return free_pfn; -} - static int compact_finished(struct zone *zone, struct compact_control *cc) { @@ -771,7 +829,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) static unsigned long compact_zone_order(struct zone *zone, int order, gfp_t gfp_mask, - bool sync) + bool sync, bool *contended) { struct compact_control cc = { .nr_freepages = 0, @@ -780,6 +838,7 @@ static unsigned long compact_zone_order(struct zone *zone, .migratetype = allocflags_to_migratetype(gfp_mask), .zone = zone, .sync = sync, + .contended = contended, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -801,7 +860,7 @@ int sysctl_extfrag_threshold = 500; */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - bool sync) + bool sync, bool *contended) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; @@ -825,7 +884,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; - status = compact_zone_order(zone, order, gfp_mask, sync); + status = compact_zone_order(zone, order, gfp_mask, sync, + contended); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ @@ -861,7 +921,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) if (cc->order > 0) { int ok = zone_watermark_ok(zone, cc->order, low_wmark_pages(zone), 0, 0); - if (ok && cc->order > zone->compact_order_failed) + if (ok && cc->order >= zone->compact_order_failed) zone->compact_order_failed = cc->order + 1; /* Currently async compaction is never deferred. 
*/ else if (!ok && cc->sync) diff --git a/trunk/mm/filemap.c b/trunk/mm/filemap.c index fa5ca304148e..384344575c37 100644 --- a/trunk/mm/filemap.c +++ b/trunk/mm/filemap.c @@ -1412,12 +1412,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov, retval = filemap_write_and_wait_range(mapping, pos, pos + iov_length(iov, nr_segs) - 1); if (!retval) { - struct blk_plug plug; - - blk_start_plug(&plug); retval = mapping->a_ops->direct_IO(READ, iocb, iov, pos, nr_segs); - blk_finish_plug(&plug); } if (retval > 0) { *ppos = pos + retval; @@ -2527,14 +2523,12 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, { struct file *file = iocb->ki_filp; struct inode *inode = file->f_mapping->host; - struct blk_plug plug; ssize_t ret; BUG_ON(iocb->ki_pos != pos); sb_start_write(inode->i_sb); mutex_lock(&inode->i_mutex); - blk_start_plug(&plug); ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); @@ -2545,7 +2539,6 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov, if (err < 0 && ret > 0) ret = err; } - blk_finish_plug(&plug); sb_end_write(inode->i_sb); return ret; } diff --git a/trunk/mm/huge_memory.c b/trunk/mm/huge_memory.c index 57c4b9309015..141dbb695097 100644 --- a/trunk/mm/huge_memory.c +++ b/trunk/mm/huge_memory.c @@ -1811,7 +1811,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, src_page = pte_page(pteval); copy_user_highpage(page, src_page, address, vma); VM_BUG_ON(page_mapcount(src_page) != 1); - VM_BUG_ON(page_count(src_page) != 2); release_pte_page(src_page); /* * ptl mostly unnecessary, but preempt has to diff --git a/trunk/mm/internal.h b/trunk/mm/internal.h index 3314f79d775a..b8c91b342e24 100644 --- a/trunk/mm/internal.h +++ b/trunk/mm/internal.h @@ -130,6 +130,7 @@ struct compact_control { int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; + bool *contended; /* True if a lock was contended */ }; unsigned long diff --git a/trunk/mm/kmemleak.c b/trunk/mm/kmemleak.c index 45eb6217bf38..0de83b4541e9 100644 --- a/trunk/mm/kmemleak.c +++ b/trunk/mm/kmemleak.c @@ -1483,13 +1483,11 @@ static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct kmemleak_object *prev_obj = v; struct kmemleak_object *next_obj = NULL; - struct list_head *n = &prev_obj->object_list; + struct kmemleak_object *obj = prev_obj; ++(*pos); - list_for_each_continue_rcu(n, &object_list) { - struct kmemleak_object *obj = - list_entry(n, struct kmemleak_object, object_list); + list_for_each_entry_continue_rcu(obj, &object_list, object_list) { if (get_object(obj)) { next_obj = obj; break; diff --git a/trunk/mm/memblock.c b/trunk/mm/memblock.c index 4d9393c7edc9..82aa349d2f7a 100644 --- a/trunk/mm/memblock.c +++ b/trunk/mm/memblock.c @@ -246,7 +246,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type, min(new_area_start, memblock.current_limit), new_alloc_size, PAGE_SIZE); - new_array = addr ? __va(addr) : 0; + new_array = addr ? 
__va(addr) : NULL; } if (!addr) { pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", diff --git a/trunk/mm/memory_hotplug.c b/trunk/mm/memory_hotplug.c index 3ad25f9d1fc1..6a5b90d0cfd7 100644 --- a/trunk/mm/memory_hotplug.c +++ b/trunk/mm/memory_hotplug.c @@ -126,9 +126,6 @@ static void register_page_bootmem_info_section(unsigned long start_pfn) struct mem_section *ms; struct page *page, *memmap; - if (!pfn_valid(start_pfn)) - return; - section_nr = pfn_to_section_nr(start_pfn); ms = __nr_to_section(section_nr); @@ -187,9 +184,16 @@ void register_page_bootmem_info_node(struct pglist_data *pgdat) end_pfn = pfn + pgdat->node_spanned_pages; /* register_section info */ - for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) - register_page_bootmem_info_section(pfn); - + for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + /* + * Some platforms can assign the same pfn to multiple nodes - on + * node0 as well as nodeN. To avoid registering a pfn against + * multiple nodes we check that this pfn does not already + * reside in some other node. + */ + if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node)) + register_page_bootmem_info_section(pfn); + } } #endif /* !CONFIG_SPARSEMEM_VMEMMAP */ diff --git a/trunk/mm/mempolicy.c b/trunk/mm/mempolicy.c index bd92431d4c49..4ada3be6e252 100644 --- a/trunk/mm/mempolicy.c +++ b/trunk/mm/mempolicy.c @@ -2562,7 +2562,7 @@ int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol, int no_context) break; default: - BUG(); + return -EINVAL; } l = strlen(policy_modes[mode]); diff --git a/trunk/mm/mmap.c b/trunk/mm/mmap.c index e3e86914f11a..ae18a48e7e4e 100644 --- a/trunk/mm/mmap.c +++ b/trunk/mm/mmap.c @@ -1356,9 +1356,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr, } else if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK)) make_pages_present(addr, addr + len); - if (file && uprobe_mmap(vma)) - /* matching probes but cannot insert */ - goto unmap_and_free_vma; + if (file) + uprobe_mmap(vma); return addr; @@ -2309,7 +2308,7 @@ void exit_mmap(struct mm_struct *mm) } vm_unacct_memory(nr_accounted); - BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); + WARN_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } /* Insert vm structure into process list sorted by address diff --git a/trunk/mm/page-writeback.c b/trunk/mm/page-writeback.c index e5363f34e025..5ad5ce23c1e0 100644 --- a/trunk/mm/page-writeback.c +++ b/trunk/mm/page-writeback.c @@ -1532,7 +1532,6 @@ int dirty_writeback_centisecs_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { proc_dointvec(table, write, buffer, length, ppos); - bdi_arm_supers_timer(); return 0; } diff --git a/trunk/mm/page_alloc.c b/trunk/mm/page_alloc.c index 009ac285fea7..c13ea7538891 100644 --- a/trunk/mm/page_alloc.c +++ b/trunk/mm/page_alloc.c @@ -584,7 +584,7 @@ static inline void __free_one_page(struct page *page, combined_idx = buddy_idx & page_idx; higher_page = page + (combined_idx - page_idx); buddy_idx = __find_buddy_index(combined_idx, order + 1); - higher_buddy = page + (buddy_idx - combined_idx); + higher_buddy = higher_page + (buddy_idx - combined_idx); if (page_is_buddy(higher_page, higher_buddy, order + 1)) { list_add_tail(&page->lru, &zone->free_area[order].free_list[migratetype]); @@ -1928,6 +1928,17 @@ get_page_from_freelist(gfp_t gfp_mask, nodemask_t *nodemask, unsigned int order, zlc_active = 0; goto zonelist_scan; } + + if (page) + /* + * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was + * necessary to 
allocate the page. The expectation is + * that the caller is taking steps that will free more + * memory. The caller should avoid the page being used + * for !PFMEMALLOC purposes. + */ + page->pfmemalloc = !!(alloc_flags & ALLOC_NO_WATERMARKS); + return page; } @@ -2091,7 +2102,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, bool sync_migration, - bool *deferred_compaction, + bool *contended_compaction, bool *deferred_compaction, unsigned long *did_some_progress) { struct page *page; @@ -2106,7 +2117,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, current->flags |= PF_MEMALLOC; *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, - nodemask, sync_migration); + nodemask, sync_migration, + contended_compaction); current->flags &= ~PF_MEMALLOC; if (*did_some_progress != COMPACT_SKIPPED) { @@ -2152,7 +2164,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int migratetype, bool sync_migration, - bool *deferred_compaction, + bool *contended_compaction, bool *deferred_compaction, unsigned long *did_some_progress) { return NULL; @@ -2325,6 +2337,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, unsigned long did_some_progress; bool sync_migration = false; bool deferred_compaction = false; + bool contended_compaction = false; /* * In the slowpath, we sanity check order to avoid ever trying to @@ -2389,14 +2402,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, zonelist, high_zoneidx, nodemask, preferred_zone, migratetype); if (page) { - /* - * page->pfmemalloc is set when ALLOC_NO_WATERMARKS was - * necessary to allocate the page. The expectation is - * that the caller is taking steps that will free more - * memory. The caller should avoid the page being used - * for !PFMEMALLOC purposes. - */ - page->pfmemalloc = true; goto got_pg; } } @@ -2422,6 +2427,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, nodemask, alloc_flags, preferred_zone, migratetype, sync_migration, + &contended_compaction, &deferred_compaction, &did_some_progress); if (page) @@ -2431,10 +2437,11 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, /* * If compaction is deferred for high-order allocations, it is because * sync compaction recently failed. In this is the case and the caller - * has requested the system not be heavily disrupted, fail the - * allocation now instead of entering direct reclaim + * requested a movable allocation that does not heavily disrupt the + * system then fail the allocation instead of entering direct reclaim. 
 	 */
-	if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+	if ((deferred_compaction || contended_compaction) &&
+						(gfp_mask & __GFP_NO_KSWAPD))
 		goto nopage;
 
 	/* Try direct reclaim and then allocating */
@@ -2505,6 +2512,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 					nodemask,
 					alloc_flags, preferred_zone,
 					migratetype, sync_migration,
+					&contended_compaction,
 					&deferred_compaction,
 					&did_some_progress);
 	if (page)
@@ -2569,8 +2577,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 		page = __alloc_pages_slowpath(gfp_mask, order,
 				zonelist, high_zoneidx, nodemask,
 				preferred_zone, migratetype);
-	else
-		page->pfmemalloc = false;
 
 	trace_mm_page_alloc(page, order, gfp_mask, migratetype);
diff --git a/trunk/mm/slab.c b/trunk/mm/slab.c
index f8b0d539b482..c6854759bcf1 100644
--- a/trunk/mm/slab.c
+++ b/trunk/mm/slab.c
@@ -983,7 +983,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 	}
 
 	/* The caller cannot use PFMEMALLOC objects, find another one */
-	for (i = 1; i < ac->avail; i++) {
+	for (i = 0; i < ac->avail; i++) {
 		/* If a !PFMEMALLOC object is found, swap them */
 		if (!is_obj_pfmemalloc(ac->entry[i])) {
 			objp = ac->entry[i];
@@ -1000,7 +1000,7 @@ static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
 		l3 = cachep->nodelists[numa_mem_id()];
 		if (!list_empty(&l3->slabs_free) && force_refill) {
 			struct slab *slabp = virt_to_slab(objp);
-			ClearPageSlabPfmemalloc(virt_to_page(slabp->s_mem));
+			ClearPageSlabPfmemalloc(virt_to_head_page(slabp->s_mem));
 			clear_obj_pfmemalloc(&objp);
 			recheck_pfmemalloc_active(cachep, ac);
 			return objp;
@@ -1032,7 +1032,7 @@ static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
 {
 	if (unlikely(pfmemalloc_active)) {
 		/* Some pfmemalloc slabs exist, check if this is one */
-		struct page *page = virt_to_page(objp);
+		struct page *page = virt_to_head_page(objp);
 		if (PageSlabPfmemalloc(page))
 			set_obj_pfmemalloc(&objp);
 	}
@@ -3260,6 +3260,7 @@ static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
 
 		/* cache_grow can reenable interrupts, then ac could change. */
 		ac = cpu_cache_get(cachep);
+		node = numa_mem_id();
 
 		/* no objects in sight? abort */
 		if (!x && (ac->avail == 0 || force_refill))
diff --git a/trunk/mm/slub.c b/trunk/mm/slub.c
index 8f78e2577031..2fdd96f9e998 100644
--- a/trunk/mm/slub.c
+++ b/trunk/mm/slub.c
@@ -1524,12 +1524,13 @@ static inline void *acquire_slab(struct kmem_cache *s,
 }
 
 static int put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
+static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
 
 /*
  * Try to allocate a partial slab from a specific node.
*/ -static void *get_partial_node(struct kmem_cache *s, - struct kmem_cache_node *n, struct kmem_cache_cpu *c) +static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, + struct kmem_cache_cpu *c, gfp_t flags) { struct page *page, *page2; void *object = NULL; @@ -1545,9 +1546,13 @@ static void *get_partial_node(struct kmem_cache *s, spin_lock(&n->list_lock); list_for_each_entry_safe(page, page2, &n->partial, lru) { - void *t = acquire_slab(s, n, page, object == NULL); + void *t; int available; + if (!pfmemalloc_match(page, flags)) + continue; + + t = acquire_slab(s, n, page, object == NULL); if (!t) break; @@ -1614,7 +1619,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags, if (n && cpuset_zone_allowed_hardwall(zone, flags) && n->nr_partial > s->min_partial) { - object = get_partial_node(s, n, c); + object = get_partial_node(s, n, c, flags); if (object) { /* * Return the object even if @@ -1643,7 +1648,7 @@ static void *get_partial(struct kmem_cache *s, gfp_t flags, int node, void *object; int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node; - object = get_partial_node(s, get_node(s, searchnode), c); + object = get_partial_node(s, get_node(s, searchnode), c, flags); if (object || node != NUMA_NO_NODE) return object; diff --git a/trunk/mm/vmscan.c b/trunk/mm/vmscan.c index 8d01243d9560..99b434b674c0 100644 --- a/trunk/mm/vmscan.c +++ b/trunk/mm/vmscan.c @@ -3102,6 +3102,7 @@ int kswapd_run(int nid) /* failure at boot is fatal */ BUG_ON(system_state == SYSTEM_BOOTING); printk("Failed to start kswapd on node %d\n",nid); + pgdat->kswapd = NULL; ret = -1; } return ret; diff --git a/trunk/net/8021q/vlan_dev.c b/trunk/net/8021q/vlan_dev.c index 73a2a83ee2da..402442402af7 100644 --- a/trunk/net/8021q/vlan_dev.c +++ b/trunk/net/8021q/vlan_dev.c @@ -137,9 +137,21 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, return rc; } +static inline netdev_tx_t vlan_netpoll_send_skb(struct vlan_dev_priv *vlan, struct sk_buff *skb) +{ +#ifdef CONFIG_NET_POLL_CONTROLLER + if (vlan->netpoll) + netpoll_send_skb(vlan->netpoll, skb); +#else + BUG(); +#endif + return NETDEV_TX_OK; +} + static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); unsigned int len; int ret; @@ -150,29 +162,30 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... 
*/ if (veth->h_vlan_proto != htons(ETH_P_8021Q) || - vlan_dev_priv(dev)->flags & VLAN_FLAG_REORDER_HDR) { + vlan->flags & VLAN_FLAG_REORDER_HDR) { u16 vlan_tci; - vlan_tci = vlan_dev_priv(dev)->vlan_id; + vlan_tci = vlan->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); skb = __vlan_hwaccel_put_tag(skb, vlan_tci); } - skb->dev = vlan_dev_priv(dev)->real_dev; + skb->dev = vlan->real_dev; len = skb->len; - if (netpoll_tx_running(dev)) - return skb->dev->netdev_ops->ndo_start_xmit(skb, skb->dev); + if (unlikely(netpoll_tx_running(dev))) + return vlan_netpoll_send_skb(vlan, skb); + ret = dev_queue_xmit(skb); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *stats; - stats = this_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats); + stats = this_cpu_ptr(vlan->vlan_pcpu_stats); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; u64_stats_update_end(&stats->syncp); } else { - this_cpu_inc(vlan_dev_priv(dev)->vlan_pcpu_stats->tx_dropped); + this_cpu_inc(vlan->vlan_pcpu_stats->tx_dropped); } return ret; @@ -669,25 +682,26 @@ static void vlan_dev_poll_controller(struct net_device *dev) return; } -static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo) +static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *npinfo, + gfp_t gfp) { - struct vlan_dev_priv *info = vlan_dev_priv(dev); - struct net_device *real_dev = info->real_dev; + struct vlan_dev_priv *vlan = vlan_dev_priv(dev); + struct net_device *real_dev = vlan->real_dev; struct netpoll *netpoll; int err = 0; - netpoll = kzalloc(sizeof(*netpoll), GFP_KERNEL); + netpoll = kzalloc(sizeof(*netpoll), gfp); err = -ENOMEM; if (!netpoll) goto out; - err = __netpoll_setup(netpoll, real_dev); + err = __netpoll_setup(netpoll, real_dev, gfp); if (err) { kfree(netpoll); goto out; } - info->netpoll = netpoll; + vlan->netpoll = netpoll; out: return err; @@ -695,19 +709,15 @@ static int vlan_dev_netpoll_setup(struct net_device *dev, struct netpoll_info *n static void vlan_dev_netpoll_cleanup(struct net_device *dev) { - struct vlan_dev_priv *info = vlan_dev_priv(dev); - struct netpoll *netpoll = info->netpoll; + struct vlan_dev_priv *vlan= vlan_dev_priv(dev); + struct netpoll *netpoll = vlan->netpoll; if (!netpoll) return; - info->netpoll = NULL; - - /* Wait for transmitting packets to finish before freeing. */ - synchronize_rcu_bh(); + vlan->netpoll = NULL; - __netpoll_cleanup(netpoll); - kfree(netpoll); + __netpoll_free_rcu(netpoll); } #endif /* CONFIG_NET_POLL_CONTROLLER */ diff --git a/trunk/net/8021q/vlanproc.c b/trunk/net/8021q/vlanproc.c index c718fd3664b6..4de77ea5fa37 100644 --- a/trunk/net/8021q/vlanproc.c +++ b/trunk/net/8021q/vlanproc.c @@ -105,7 +105,7 @@ static const struct file_operations vlandev_fops = { }; /* - * Proc filesystem derectory entries. + * Proc filesystem directory entries. 
*/ /* Strings */ diff --git a/trunk/net/atm/common.c b/trunk/net/atm/common.c index b4b44dbed645..0c0ad930a632 100644 --- a/trunk/net/atm/common.c +++ b/trunk/net/atm/common.c @@ -812,6 +812,7 @@ int vcc_getsockopt(struct socket *sock, int level, int optname, if (!vcc->dev || !test_bit(ATM_VF_ADDR, &vcc->flags)) return -ENOTCONN; + memset(&pvc, 0, sizeof(pvc)); pvc.sap_family = AF_ATMPVC; pvc.sap_addr.itf = vcc->dev->number; pvc.sap_addr.vpi = vcc->vpi; diff --git a/trunk/net/atm/pvc.c b/trunk/net/atm/pvc.c index 3a734919c36c..ae0324021407 100644 --- a/trunk/net/atm/pvc.c +++ b/trunk/net/atm/pvc.c @@ -95,6 +95,7 @@ static int pvc_getname(struct socket *sock, struct sockaddr *sockaddr, return -ENOTCONN; *sockaddr_len = sizeof(struct sockaddr_atmpvc); addr = (struct sockaddr_atmpvc *)sockaddr; + memset(addr, 0, sizeof(*addr)); addr->sap_family = AF_ATMPVC; addr->sap_addr.itf = vcc->dev->number; addr->sap_addr.vpi = vcc->vpi; diff --git a/trunk/net/batman-adv/bat_iv_ogm.c b/trunk/net/batman-adv/bat_iv_ogm.c index e877af8bdd1e..469daabd90c7 100644 --- a/trunk/net/batman-adv/bat_iv_ogm.c +++ b/trunk/net/batman-adv/bat_iv_ogm.c @@ -642,7 +642,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, struct batadv_neigh_node *router = NULL; struct batadv_orig_node *orig_node_tmp; struct hlist_node *node; - uint8_t bcast_own_sum_orig, bcast_own_sum_neigh; + int if_num; + uint8_t sum_orig, sum_neigh; uint8_t *neigh_addr; batadv_dbg(BATADV_DBG_BATMAN, bat_priv, @@ -727,17 +728,17 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv, if (router && (neigh_node->tq_avg == router->tq_avg)) { orig_node_tmp = router->orig_node; spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); - bcast_own_sum_orig = - orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + if_num = router->if_incoming->if_num; + sum_orig = orig_node_tmp->bcast_own_sum[if_num]; spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); orig_node_tmp = neigh_node->orig_node; spin_lock_bh(&orig_node_tmp->ogm_cnt_lock); - bcast_own_sum_neigh = - orig_node_tmp->bcast_own_sum[if_incoming->if_num]; + if_num = neigh_node->if_incoming->if_num; + sum_neigh = orig_node_tmp->bcast_own_sum[if_num]; spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock); - if (bcast_own_sum_orig >= bcast_own_sum_neigh) + if (sum_orig >= sum_neigh) goto update_tt; } diff --git a/trunk/net/batman-adv/bitarray.h b/trunk/net/batman-adv/bitarray.h index a081ce1c0514..cebaae7e148b 100644 --- a/trunk/net/batman-adv/bitarray.h +++ b/trunk/net/batman-adv/bitarray.h @@ -20,8 +20,8 @@ #ifndef _NET_BATMAN_ADV_BITARRAY_H_ #define _NET_BATMAN_ADV_BITARRAY_H_ -/* returns true if the corresponding bit in the given seq_bits indicates true - * and curr_seqno is within range of last_seqno +/* Returns 1 if the corresponding bit in the given seq_bits indicates true + * and curr_seqno is within range of last_seqno. Otherwise returns 0. 
*/ static inline int batadv_test_bit(const unsigned long *seq_bits, uint32_t last_seqno, uint32_t curr_seqno) @@ -32,7 +32,7 @@ static inline int batadv_test_bit(const unsigned long *seq_bits, if (diff < 0 || diff >= BATADV_TQ_LOCAL_WINDOW_SIZE) return 0; else - return test_bit(diff, seq_bits); + return test_bit(diff, seq_bits) != 0; } /* turn corresponding bit on, so we can remember that we got the packet */ diff --git a/trunk/net/batman-adv/gateway_client.c b/trunk/net/batman-adv/gateway_client.c index b421cc49d2cd..fc866f2e4528 100644 --- a/trunk/net/batman-adv/gateway_client.c +++ b/trunk/net/batman-adv/gateway_client.c @@ -200,11 +200,11 @@ void batadv_gw_election(struct batadv_priv *bat_priv) if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_CLIENT) goto out; - if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect)) - goto out; - curr_gw = batadv_gw_get_selected_gw_node(bat_priv); + if (!batadv_atomic_dec_not_zero(&bat_priv->gw_reselect) && curr_gw) + goto out; + next_gw = batadv_gw_get_best_gw_node(bat_priv); if (curr_gw == next_gw) diff --git a/trunk/net/batman-adv/soft-interface.c b/trunk/net/batman-adv/soft-interface.c index 109ea2aae96c..21c53577c8d6 100644 --- a/trunk/net/batman-adv/soft-interface.c +++ b/trunk/net/batman-adv/soft-interface.c @@ -100,18 +100,21 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p) { struct batadv_priv *bat_priv = netdev_priv(dev); struct sockaddr *addr = p; + uint8_t old_addr[ETH_ALEN]; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; + memcpy(old_addr, dev->dev_addr, ETH_ALEN); + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + /* only modify transtable if it has been initialized before */ if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) { - batadv_tt_local_remove(bat_priv, dev->dev_addr, + batadv_tt_local_remove(bat_priv, old_addr, "mac address changed", false); batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX); } - memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); dev->addr_assign_type &= ~NET_ADDR_RANDOM; return 0; } diff --git a/trunk/net/batman-adv/translation-table.c b/trunk/net/batman-adv/translation-table.c index a438f4b582fc..99dd8f75b3ff 100644 --- a/trunk/net/batman-adv/translation-table.c +++ b/trunk/net/batman-adv/translation-table.c @@ -197,6 +197,7 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv, del: list_del(&entry->list); kfree(entry); + kfree(tt_change_node); event_removed = true; goto unlock; } diff --git a/trunk/net/bluetooth/bnep/sock.c b/trunk/net/bluetooth/bnep/sock.c index 5e5f5b410e0b..1eaacf10d19d 100644 --- a/trunk/net/bluetooth/bnep/sock.c +++ b/trunk/net/bluetooth/bnep/sock.c @@ -58,7 +58,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long switch (cmd) { case BNEPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@ -84,7 +84,7 @@ static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long case BNEPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; diff --git a/trunk/net/bluetooth/cmtp/sock.c b/trunk/net/bluetooth/cmtp/sock.c index 311668d14571..32dc83dcb6b2 100644 --- a/trunk/net/bluetooth/cmtp/sock.c +++ b/trunk/net/bluetooth/cmtp/sock.c @@ -72,7 +72,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long switch (cmd) { case CMTPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + 
return -EPERM; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@ -97,7 +97,7 @@ static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long case CMTPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; diff --git a/trunk/net/bluetooth/hci_conn.c b/trunk/net/bluetooth/hci_conn.c index 5ad7da217474..3c094e78dde9 100644 --- a/trunk/net/bluetooth/hci_conn.c +++ b/trunk/net/bluetooth/hci_conn.c @@ -29,6 +29,7 @@ #include #include #include +#include static void hci_le_connect(struct hci_conn *conn) { @@ -619,6 +620,9 @@ int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) { BT_DBG("hcon %p", conn); + if (conn->type == LE_LINK) + return smp_conn_security(conn, sec_level); + /* For sdp we don't need the link key. */ if (sec_level == BT_SECURITY_SDP) return 1; diff --git a/trunk/net/bluetooth/hci_core.c b/trunk/net/bluetooth/hci_core.c index d4de5db18d5a..0b997c8f9655 100644 --- a/trunk/net/bluetooth/hci_core.c +++ b/trunk/net/bluetooth/hci_core.c @@ -734,6 +734,8 @@ static int hci_dev_do_close(struct hci_dev *hdev) cancel_work_sync(&hdev->le_scan); + cancel_delayed_work(&hdev->power_off); + hci_req_cancel(hdev, ENODEV); hci_req_lock(hdev); diff --git a/trunk/net/bluetooth/hci_event.c b/trunk/net/bluetooth/hci_event.c index 41ff978a33f9..715d7e33fba0 100644 --- a/trunk/net/bluetooth/hci_event.c +++ b/trunk/net/bluetooth/hci_event.c @@ -1365,6 +1365,9 @@ static bool hci_resolve_next_name(struct hci_dev *hdev) return false; e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); + if (!e) + return false; + if (hci_resolve_name(hdev, e) == 0) { e->name_state = NAME_PENDING; return true; @@ -1393,12 +1396,20 @@ static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, return; e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); - if (e) { + /* If the device was not found in a list of found devices names of which + * are pending. 
there is no need to continue resolving a next name as it + * will be done upon receiving another Remote Name Request Complete + * Event */ + if (!e) + return; + + list_del(&e->list); + if (name) { e->name_state = NAME_KNOWN; - list_del(&e->list); - if (name) - mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, - e->data.rssi, name, name_len); + mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, + e->data.rssi, name, name_len); + } else { + e->name_state = NAME_NOT_KNOWN; } if (hci_resolve_next_name(hdev)) @@ -1762,7 +1773,12 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) if (conn->type == ACL_LINK) { conn->state = BT_CONFIG; hci_conn_hold(conn); - conn->disc_timeout = HCI_DISCONN_TIMEOUT; + + if (!conn->out && !hci_conn_ssp_enabled(conn) && + !hci_find_link_key(hdev, &ev->bdaddr)) + conn->disc_timeout = HCI_PAIRING_TIMEOUT; + else + conn->disc_timeout = HCI_DISCONN_TIMEOUT; } else conn->state = BT_CONNECTED; diff --git a/trunk/net/bluetooth/hci_sock.c b/trunk/net/bluetooth/hci_sock.c index a7f04de03d79..d5ace1eda3ed 100644 --- a/trunk/net/bluetooth/hci_sock.c +++ b/trunk/net/bluetooth/hci_sock.c @@ -490,7 +490,7 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, switch (cmd) { case HCISETRAW: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) return -EPERM; @@ -510,12 +510,12 @@ static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, case HCIBLOCKADDR: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_sock_blacklist_add(hdev, (void __user *) arg); case HCIUNBLOCKADDR: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_sock_blacklist_del(hdev, (void __user *) arg); default: @@ -546,22 +546,22 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, case HCIDEVUP: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_open(arg); case HCIDEVDOWN: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_close(arg); case HCIDEVRESET: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_reset(arg); case HCIDEVRESTAT: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_reset_stat(arg); case HCISETSCAN: @@ -573,7 +573,7 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, case HCISETACLMTU: case HCISETSCOMTU: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; return hci_dev_cmd(cmd, argp); case HCIINQUIRY: @@ -694,6 +694,7 @@ static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, *addr_len = sizeof(*haddr); haddr->hci_family = AF_BLUETOOTH; haddr->hci_dev = hdev->id; + haddr->hci_channel= 0; release_sock(sk); return 0; @@ -1009,6 +1010,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname, { struct hci_filter *f = &hci_pi(sk)->filter; + memset(&uf, 0, sizeof(uf)); uf.type_mask = f->type_mask; uf.opcode = f->opcode; uf.event_mask[0] = *((u32 *) f->event_mask + 0); diff --git a/trunk/net/bluetooth/hidp/sock.c b/trunk/net/bluetooth/hidp/sock.c index 18b3f6892a36..b24fb3bd8625 100644 --- a/trunk/net/bluetooth/hidp/sock.c +++ b/trunk/net/bluetooth/hidp/sock.c @@ -56,7 +56,7 @@ static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long switch (cmd) { case HIDPCONNADD: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; if (copy_from_user(&ca, argp, sizeof(ca))) return -EFAULT; @@ -91,7 +91,7 @@ static int hidp_sock_ioctl(struct 
socket *sock, unsigned int cmd, unsigned long case HIDPCONNDEL: if (!capable(CAP_NET_ADMIN)) - return -EACCES; + return -EPERM; if (copy_from_user(&cd, argp, sizeof(cd))) return -EFAULT; diff --git a/trunk/net/bluetooth/l2cap_core.c b/trunk/net/bluetooth/l2cap_core.c index a8964db04bfb..38c00f142203 100644 --- a/trunk/net/bluetooth/l2cap_core.c +++ b/trunk/net/bluetooth/l2cap_core.c @@ -1008,7 +1008,7 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c if (!conn) return; - if (chan->mode == L2CAP_MODE_ERTM) { + if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) { __clear_retrans_timer(chan); __clear_monitor_timer(chan); __clear_ack_timer(chan); @@ -1181,6 +1181,7 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) sk = chan->sk; hci_conn_hold(conn->hcon); + conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; bacpy(&bt_sk(sk)->src, conn->src); bacpy(&bt_sk(sk)->dst, conn->dst); @@ -1198,14 +1199,15 @@ static void l2cap_le_conn_ready(struct l2cap_conn *conn) static void l2cap_conn_ready(struct l2cap_conn *conn) { struct l2cap_chan *chan; + struct hci_conn *hcon = conn->hcon; BT_DBG("conn %p", conn); - if (!conn->hcon->out && conn->hcon->type == LE_LINK) + if (!hcon->out && hcon->type == LE_LINK) l2cap_le_conn_ready(conn); - if (conn->hcon->out && conn->hcon->type == LE_LINK) - smp_conn_security(conn, conn->hcon->pending_sec_level); + if (hcon->out && hcon->type == LE_LINK) + smp_conn_security(hcon, hcon->pending_sec_level); mutex_lock(&conn->chan_lock); @@ -1218,8 +1220,8 @@ static void l2cap_conn_ready(struct l2cap_conn *conn) continue; } - if (conn->hcon->type == LE_LINK) { - if (smp_conn_security(conn, chan->sec_level)) + if (hcon->type == LE_LINK) { + if (smp_conn_security(hcon, chan->sec_level)) l2cap_chan_ready(chan); } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { diff --git a/trunk/net/bluetooth/l2cap_sock.c b/trunk/net/bluetooth/l2cap_sock.c index a4bb27e8427e..34bbe1c5e389 100644 --- a/trunk/net/bluetooth/l2cap_sock.c +++ b/trunk/net/bluetooth/l2cap_sock.c @@ -245,6 +245,7 @@ static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *l BT_DBG("sock %p, sk %p", sock, sk); + memset(la, 0, sizeof(struct sockaddr_l2)); addr->sa_family = AF_BLUETOOTH; *len = sizeof(struct sockaddr_l2); @@ -615,7 +616,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, ch break; } - if (smp_conn_security(conn, sec.level)) + if (smp_conn_security(conn->hcon, sec.level)) break; sk->sk_state = BT_CONFIG; chan->state = BT_CONFIG; @@ -1174,7 +1175,7 @@ static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int p chan = l2cap_chan_create(); if (!chan) { - l2cap_sock_kill(sk); + sk_free(sk); return NULL; } diff --git a/trunk/net/bluetooth/mgmt.c b/trunk/net/bluetooth/mgmt.c index ad6613d17ca6..eba022de3c20 100644 --- a/trunk/net/bluetooth/mgmt.c +++ b/trunk/net/bluetooth/mgmt.c @@ -2875,6 +2875,22 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered) if (scan) hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); + if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) { + u8 ssp = 1; + + hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp); + } + + if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) { + struct hci_cp_write_le_host_supported cp; + + cp.le = 1; + cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR); + + hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp); + } + update_class(hdev); update_name(hdev, hdev->dev_name); update_eir(hdev); diff --git 
a/trunk/net/bluetooth/rfcomm/sock.c b/trunk/net/bluetooth/rfcomm/sock.c index 7e1e59645c05..1a17850d093c 100644 --- a/trunk/net/bluetooth/rfcomm/sock.c +++ b/trunk/net/bluetooth/rfcomm/sock.c @@ -528,6 +528,7 @@ static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int * BT_DBG("sock %p, sk %p", sock, sk); + memset(sa, 0, sizeof(*sa)); sa->rc_family = AF_BLUETOOTH; sa->rc_channel = rfcomm_pi(sk)->channel; if (peer) @@ -822,6 +823,7 @@ static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, c } sec.level = rfcomm_pi(sk)->sec_level; + sec.key_size = 0; len = min_t(unsigned int, len, sizeof(sec)); if (copy_to_user(optval, (char *) &sec, len)) diff --git a/trunk/net/bluetooth/rfcomm/tty.c b/trunk/net/bluetooth/rfcomm/tty.c index cb960773c002..56f182393c4c 100644 --- a/trunk/net/bluetooth/rfcomm/tty.c +++ b/trunk/net/bluetooth/rfcomm/tty.c @@ -456,7 +456,7 @@ static int rfcomm_get_dev_list(void __user *arg) size = sizeof(*dl) + dev_num * sizeof(*di); - dl = kmalloc(size, GFP_KERNEL); + dl = kzalloc(size, GFP_KERNEL); if (!dl) return -ENOMEM; diff --git a/trunk/net/bluetooth/sco.c b/trunk/net/bluetooth/sco.c index 40bbe25dcff7..3589e21edb09 100644 --- a/trunk/net/bluetooth/sco.c +++ b/trunk/net/bluetooth/sco.c @@ -131,6 +131,15 @@ static int sco_conn_del(struct hci_conn *hcon, int err) sco_sock_clear_timer(sk); sco_chan_del(sk, err); bh_unlock_sock(sk); + + sco_conn_lock(conn); + conn->sk = NULL; + sco_pi(sk)->conn = NULL; + sco_conn_unlock(conn); + + if (conn->hcon) + hci_conn_put(conn->hcon); + sco_sock_kill(sk); } @@ -821,16 +830,6 @@ static void sco_chan_del(struct sock *sk, int err) BT_DBG("sk %p, conn %p, err %d", sk, conn, err); - if (conn) { - sco_conn_lock(conn); - conn->sk = NULL; - sco_pi(sk)->conn = NULL; - sco_conn_unlock(conn); - - if (conn->hcon) - hci_conn_put(conn->hcon); - } - sk->sk_state = BT_CLOSED; sk->sk_err = err; sk->sk_state_change(sk); diff --git a/trunk/net/bluetooth/smp.c b/trunk/net/bluetooth/smp.c index 16ef0dc85a0a..8c225ef349cd 100644 --- a/trunk/net/bluetooth/smp.c +++ b/trunk/net/bluetooth/smp.c @@ -267,10 +267,10 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason, u8 send) mgmt_auth_failed(conn->hcon->hdev, conn->dst, hcon->type, hcon->dst_type, reason); - if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) { - cancel_delayed_work_sync(&conn->security_timer); + cancel_delayed_work_sync(&conn->security_timer); + + if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) smp_chan_destroy(conn); - } } #define JUST_WORKS 0x00 @@ -579,8 +579,11 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) if (!test_and_set_bit(HCI_CONN_LE_SMP_PEND, &conn->hcon->flags)) smp = smp_chan_create(conn); + else + smp = conn->smp_chan; - smp = conn->smp_chan; + if (!smp) + return SMP_UNSPECIFIED; smp->preq[0] = SMP_CMD_PAIRING_REQ; memcpy(&smp->preq[1], req, sizeof(*req)); @@ -757,9 +760,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) return 0; } -int smp_conn_security(struct l2cap_conn *conn, __u8 sec_level) +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) { - struct hci_conn *hcon = conn->hcon; + struct l2cap_conn *conn = hcon->l2cap_data; struct smp_chan *smp = conn->smp_chan; __u8 authreq; diff --git a/trunk/net/bridge/br_device.c b/trunk/net/bridge/br_device.c index 333484537600..070e8a68cfc6 100644 --- a/trunk/net/bridge/br_device.c +++ b/trunk/net/bridge/br_device.c @@ -31,9 +31,11 @@ netdev_tx_t br_dev_xmit(struct sk_buff 
*skb, struct net_device *dev) struct net_bridge_mdb_entry *mdst; struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats); + rcu_read_lock(); #ifdef CONFIG_BRIDGE_NETFILTER if (skb->nf_bridge && (skb->nf_bridge->mask & BRNF_BRIDGED_DNAT)) { br_nf_pre_routing_finish_bridge_slow(skb); + rcu_read_unlock(); return NETDEV_TX_OK; } #endif @@ -48,7 +50,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); - rcu_read_lock(); if (is_broadcast_ether_addr(dest)) br_flood_deliver(br, skb); else if (is_multicast_ether_addr(dest)) { @@ -206,24 +207,23 @@ static void br_poll_controller(struct net_device *br_dev) static void br_netpoll_cleanup(struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); - struct net_bridge_port *p, *n; + struct net_bridge_port *p; - list_for_each_entry_safe(p, n, &br->port_list, list) { + list_for_each_entry(p, &br->port_list, list) br_netpoll_disable(p); - } } -static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) +static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni, + gfp_t gfp) { struct net_bridge *br = netdev_priv(dev); - struct net_bridge_port *p, *n; + struct net_bridge_port *p; int err = 0; - list_for_each_entry_safe(p, n, &br->port_list, list) { + list_for_each_entry(p, &br->port_list, list) { if (!p->dev) continue; - - err = br_netpoll_enable(p); + err = br_netpoll_enable(p, gfp); if (err) goto fail; } @@ -236,17 +236,17 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni) goto out; } -int br_netpoll_enable(struct net_bridge_port *p) +int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) { struct netpoll *np; int err = 0; - np = kzalloc(sizeof(*p->np), GFP_KERNEL); + np = kzalloc(sizeof(*p->np), gfp); err = -ENOMEM; if (!np) goto out; - err = __netpoll_setup(np, p->dev); + err = __netpoll_setup(np, p->dev, gfp); if (err) { kfree(np); goto out; @@ -267,11 +267,7 @@ void br_netpoll_disable(struct net_bridge_port *p) p->np = NULL; - /* Wait for transmitting packets to finish before freeing. */ - synchronize_rcu_bh(); - - __netpoll_cleanup(np); - kfree(np); + __netpoll_free_rcu(np); } #endif diff --git a/trunk/net/bridge/br_forward.c b/trunk/net/bridge/br_forward.c index e9466d412707..02015a505d2a 100644 --- a/trunk/net/bridge/br_forward.c +++ b/trunk/net/bridge/br_forward.c @@ -65,7 +65,7 @@ static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb) { skb->dev = to->dev; - if (unlikely(netpoll_tx_running(to->dev))) { + if (unlikely(netpoll_tx_running(to->br->dev))) { if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) kfree_skb(skb); else { diff --git a/trunk/net/bridge/br_if.c b/trunk/net/bridge/br_if.c index e1144e1617be..1c8fdc3558cd 100644 --- a/trunk/net/bridge/br_if.c +++ b/trunk/net/bridge/br_if.c @@ -361,7 +361,7 @@ int br_add_if(struct net_bridge *br, struct net_device *dev) if (err) goto err2; - if (br_netpoll_info(br) && ((err = br_netpoll_enable(p)))) + if (br_netpoll_info(br) && ((err = br_netpoll_enable(p, GFP_KERNEL)))) goto err3; err = netdev_set_master(dev, br->dev); @@ -427,6 +427,10 @@ int br_del_if(struct net_bridge *br, struct net_device *dev) if (!p || p->br != br) return -EINVAL; + /* Since more than one interface can be attached to a bridge, + * there still maybe an alternate path for netconsole to use; + * therefore there is no reason for a NETDEV_RELEASE event. 
+ */ del_nbp(p); spin_lock_bh(&br->lock); diff --git a/trunk/net/bridge/br_private.h b/trunk/net/bridge/br_private.h index a768b2408edf..f507d2af9646 100644 --- a/trunk/net/bridge/br_private.h +++ b/trunk/net/bridge/br_private.h @@ -316,7 +316,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p, netpoll_send_skb(np, skb); } -extern int br_netpoll_enable(struct net_bridge_port *p); +extern int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp); extern void br_netpoll_disable(struct net_bridge_port *p); #else static inline struct netpoll_info *br_netpoll_info(struct net_bridge *br) @@ -329,7 +329,7 @@ static inline void br_netpoll_send_skb(const struct net_bridge_port *p, { } -static inline int br_netpoll_enable(struct net_bridge_port *p) +static inline int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp) { return 0; } diff --git a/trunk/net/bridge/netfilter/ebt_log.c b/trunk/net/bridge/netfilter/ebt_log.c index f88ee537fb2b..92de5e5f9db2 100644 --- a/trunk/net/bridge/netfilter/ebt_log.c +++ b/trunk/net/bridge/netfilter/ebt_log.c @@ -80,7 +80,7 @@ ebt_log_packet(u_int8_t pf, unsigned int hooknum, unsigned int bitmask; spin_lock_bh(&ebt_log_lock); - printk("<%c>%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x", + printk(KERN_SOH "%c%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x", '0' + loginfo->u.log.level, prefix, in ? in->name : "", out ? out->name : "", eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest, diff --git a/trunk/net/caif/cfsrvl.c b/trunk/net/caif/cfsrvl.c index dd485f6128e8..ba217e90765e 100644 --- a/trunk/net/caif/cfsrvl.c +++ b/trunk/net/caif/cfsrvl.c @@ -211,9 +211,10 @@ void caif_client_register_refcnt(struct cflayer *adapt_layer, void (*put)(struct cflayer *lyr)) { struct cfsrvl *service; - service = container_of(adapt_layer->dn, struct cfsrvl, layer); - WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL); + if (WARN_ON(adapt_layer == NULL || adapt_layer->dn == NULL)) + return; + service = container_of(adapt_layer->dn, struct cfsrvl, layer); service->hold = hold; service->put = put; } diff --git a/trunk/net/caif/chnl_net.c b/trunk/net/caif/chnl_net.c index 69771c04ba8f..e597733affb8 100644 --- a/trunk/net/caif/chnl_net.c +++ b/trunk/net/caif/chnl_net.c @@ -94,6 +94,10 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt) /* check the version of IP */ ip_version = skb_header_pointer(skb, 0, 1, &buf); + if (!ip_version) { + kfree_skb(skb); + return -EINVAL; + } switch (*ip_version >> 4) { case 4: diff --git a/trunk/net/ceph/ceph_common.c b/trunk/net/ceph/ceph_common.c index 69e38db28e5f..a8020293f342 100644 --- a/trunk/net/ceph/ceph_common.c +++ b/trunk/net/ceph/ceph_common.c @@ -84,7 +84,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid) return -1; } } else { - pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid); memcpy(&client->fsid, fsid, sizeof(*fsid)); } return 0; diff --git a/trunk/net/ceph/debugfs.c b/trunk/net/ceph/debugfs.c index 54b531a01121..38b5dc1823d4 100644 --- a/trunk/net/ceph/debugfs.c +++ b/trunk/net/ceph/debugfs.c @@ -189,6 +189,9 @@ int ceph_debugfs_client_init(struct ceph_client *client) snprintf(name, sizeof(name), "%pU.client%lld", &client->fsid, client->monc.auth->global_id); + dout("ceph_debugfs_client_init %p %s\n", client, name); + + BUG_ON(client->debugfs_dir); client->debugfs_dir = debugfs_create_dir(name, ceph_debugfs_dir); if (!client->debugfs_dir) goto out; @@ -234,6 +237,7 @@ int ceph_debugfs_client_init(struct ceph_client 
*client) void ceph_debugfs_client_cleanup(struct ceph_client *client) { + dout("ceph_debugfs_client_cleanup %p\n", client); debugfs_remove(client->debugfs_osdmap); debugfs_remove(client->debugfs_monmap); debugfs_remove(client->osdc.debugfs_file); diff --git a/trunk/net/ceph/messenger.c b/trunk/net/ceph/messenger.c index b9796750034a..159aa8bef9e7 100644 --- a/trunk/net/ceph/messenger.c +++ b/trunk/net/ceph/messenger.c @@ -915,7 +915,6 @@ static int prepare_write_connect(struct ceph_connection *con) con->out_connect.authorizer_len = auth ? cpu_to_le32(auth->authorizer_buf_len) : 0; - con_out_kvec_reset(con); con_out_kvec_add(con, sizeof (con->out_connect), &con->out_connect); if (auth && auth->authorizer_buf_len) @@ -1074,16 +1073,13 @@ static int write_partial_msg_pages(struct ceph_connection *con) BUG_ON(kaddr == NULL); base = kaddr + con->out_msg_pos.page_pos + bio_offset; crc = crc32c(crc, base, len); + kunmap(page); msg->footer.data_crc = cpu_to_le32(crc); con->out_msg_pos.did_page_crc = true; } ret = ceph_tcp_sendpage(con->sock, page, con->out_msg_pos.page_pos + bio_offset, len, 1); - - if (do_datacrc) - kunmap(page); - if (ret <= 0) goto out; @@ -1557,6 +1553,7 @@ static int process_connect(struct ceph_connection *con) return -1; } con->auth_retry = 1; + con_out_kvec_reset(con); ret = prepare_write_connect(con); if (ret < 0) return ret; @@ -1577,6 +1574,7 @@ static int process_connect(struct ceph_connection *con) ENTITY_NAME(con->peer_name), ceph_pr_addr(&con->peer_addr.in_addr)); reset_connection(con); + con_out_kvec_reset(con); ret = prepare_write_connect(con); if (ret < 0) return ret; @@ -1601,6 +1599,7 @@ static int process_connect(struct ceph_connection *con) le32_to_cpu(con->out_connect.connect_seq), le32_to_cpu(con->in_reply.connect_seq)); con->connect_seq = le32_to_cpu(con->in_reply.connect_seq); + con_out_kvec_reset(con); ret = prepare_write_connect(con); if (ret < 0) return ret; @@ -1617,6 +1616,7 @@ static int process_connect(struct ceph_connection *con) le32_to_cpu(con->in_reply.global_seq)); get_global_seq(con->msgr, le32_to_cpu(con->in_reply.global_seq)); + con_out_kvec_reset(con); ret = prepare_write_connect(con); if (ret < 0) return ret; @@ -2135,7 +2135,11 @@ static int try_read(struct ceph_connection *con) BUG_ON(con->state != CON_STATE_CONNECTING); con->state = CON_STATE_NEGOTIATING; - /* Banner is good, exchange connection info */ + /* + * Received banner is good, exchange connection info. + * Do not reset out_kvec, as sending our banner raced + * with receiving peer banner after connect completed. + */ ret = prepare_write_connect(con); if (ret < 0) goto out; diff --git a/trunk/net/ceph/mon_client.c b/trunk/net/ceph/mon_client.c index 105d533b55f3..900ea0f043fc 100644 --- a/trunk/net/ceph/mon_client.c +++ b/trunk/net/ceph/mon_client.c @@ -310,6 +310,17 @@ int ceph_monc_open_session(struct ceph_mon_client *monc) } EXPORT_SYMBOL(ceph_monc_open_session); +/* + * We require the fsid and global_id in order to initialize our + * debugfs dir. + */ +static bool have_debugfs_info(struct ceph_mon_client *monc) +{ + dout("have_debugfs_info fsid %d globalid %lld\n", + (int)monc->client->have_fsid, monc->auth->global_id); + return monc->client->have_fsid && monc->auth->global_id > 0; +} + /* * The monitor responds with mount ack indicate mount success. The * included client ticket allows the client to talk to MDSs and OSDs. 
@@ -320,9 +331,12 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, struct ceph_client *client = monc->client; struct ceph_monmap *monmap = NULL, *old = monc->monmap; void *p, *end; + int had_debugfs_info, init_debugfs = 0; mutex_lock(&monc->mutex); + had_debugfs_info = have_debugfs_info(monc); + dout("handle_monmap\n"); p = msg->front.iov_base; end = p + msg->front.iov_len; @@ -344,12 +358,22 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc, if (!client->have_fsid) { client->have_fsid = true; + if (!had_debugfs_info && have_debugfs_info(monc)) { + pr_info("client%lld fsid %pU\n", + ceph_client_id(monc->client), + &monc->client->fsid); + init_debugfs = 1; + } mutex_unlock(&monc->mutex); - /* - * do debugfs initialization without mutex to avoid - * creating a locking dependency - */ - ceph_debugfs_client_init(client); + + if (init_debugfs) { + /* + * do debugfs initialization without mutex to avoid + * creating a locking dependency + */ + ceph_debugfs_client_init(monc->client); + } + goto out_unlocked; } out: @@ -865,8 +889,10 @@ static void handle_auth_reply(struct ceph_mon_client *monc, { int ret; int was_auth = 0; + int had_debugfs_info, init_debugfs = 0; mutex_lock(&monc->mutex); + had_debugfs_info = have_debugfs_info(monc); if (monc->auth->ops) was_auth = monc->auth->ops->is_authenticated(monc->auth); monc->pending_auth = 0; @@ -889,7 +915,22 @@ static void handle_auth_reply(struct ceph_mon_client *monc, __send_subscribe(monc); __resend_generic_request(monc); } + + if (!had_debugfs_info && have_debugfs_info(monc)) { + pr_info("client%lld fsid %pU\n", + ceph_client_id(monc->client), + &monc->client->fsid); + init_debugfs = 1; + } mutex_unlock(&monc->mutex); + + if (init_debugfs) { + /* + * do debugfs initialization without mutex to avoid + * creating a locking dependency + */ + ceph_debugfs_client_init(monc->client); + } } static int __validate_auth(struct ceph_mon_client *monc) diff --git a/trunk/net/core/dev.c b/trunk/net/core/dev.c index 0cb3fe8d8e72..89e33a5d4d93 100644 --- a/trunk/net/core/dev.c +++ b/trunk/net/core/dev.c @@ -1055,6 +1055,8 @@ int dev_change_name(struct net_device *dev, const char *newname) */ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) { + char *new_ifalias; + ASSERT_RTNL(); if (len >= IFALIASZ) @@ -1068,9 +1070,10 @@ int dev_set_alias(struct net_device *dev, const char *alias, size_t len) return 0; } - dev->ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); - if (!dev->ifalias) + new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL); + if (!new_ifalias) return -ENOMEM; + dev->ifalias = new_ifalias; strlcpy(dev->ifalias, alias, len+1); return len; @@ -1639,6 +1642,19 @@ static inline int deliver_skb(struct sk_buff *skb, return pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } +static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb) +{ + if (ptype->af_packet_priv == NULL) + return false; + + if (ptype->id_match) + return ptype->id_match(ptype, skb->sk); + else if ((struct sock *)ptype->af_packet_priv == skb->sk) + return true; + + return false; +} + /* * Support routine. Sends outgoing frames to any network * taps currently in use. 
@@ -1656,8 +1672,7 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) * they originated from - MvS (miquels@drinkel.ow.org) */ if ((ptype->dev == dev || !ptype->dev) && - (ptype->af_packet_priv == NULL || - (struct sock *)ptype->af_packet_priv != skb->sk)) { + (!skb_loop_sk(ptype, skb))) { if (pt_prev) { deliver_skb(skb2, pt_prev, skb->dev); pt_prev = ptype; @@ -2119,7 +2134,8 @@ static bool can_checksum_protocol(netdev_features_t features, __be16 protocol) static netdev_features_t harmonize_features(struct sk_buff *skb, __be16 protocol, netdev_features_t features) { - if (!can_checksum_protocol(features, protocol)) { + if (skb->ip_summed != CHECKSUM_NONE && + !can_checksum_protocol(features, protocol)) { features &= ~NETIF_F_ALL_CSUM; features &= ~NETIF_F_SG; } else if (illegal_highdma(skb->dev, skb)) { @@ -2134,6 +2150,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb) __be16 protocol = skb->protocol; netdev_features_t features = skb->dev->features; + if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs) + features &= ~NETIF_F_GSO_MASK; + if (protocol == htons(ETH_P_8021Q)) { struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; protocol = veh->h_vlan_encapsulated_proto; @@ -2629,15 +2648,16 @@ void __skb_get_rxhash(struct sk_buff *skb) if (!skb_flow_dissect(skb, &keys)) return; - if (keys.ports) { - if ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]) - swap(keys.port16[0], keys.port16[1]); + if (keys.ports) skb->l4_rxhash = 1; - } /* get a consistent hash (same value on both flow directions) */ - if ((__force u32)keys.dst < (__force u32)keys.src) + if (((__force u32)keys.dst < (__force u32)keys.src) || + (((__force u32)keys.dst == (__force u32)keys.src) && + ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) { swap(keys.dst, keys.src); + swap(keys.port16[0], keys.port16[1]); + } hash = jhash_3words((__force u32)keys.dst, (__force u32)keys.src, @@ -3303,7 +3323,7 @@ static int __netif_receive_skb(struct sk_buff *skb) if (pt_prev) { if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) - ret = -ENOMEM; + goto drop; else ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } else { @@ -5726,6 +5746,7 @@ EXPORT_SYMBOL(netdev_refcnt_read); /** * netdev_wait_allrefs - wait until all references are gone. + * @dev: target net_device * * This is called when unregistering network devices. * @@ -5986,6 +6007,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev_net_set(dev, &init_net); dev->gso_max_size = GSO_MAX_SIZE; + dev->gso_max_segs = GSO_MAX_SEGS; INIT_LIST_HEAD(&dev->napi_list); INIT_LIST_HEAD(&dev->unreg_list); diff --git a/trunk/net/core/dst.c b/trunk/net/core/dst.c index 069d51d29414..56d63612e1e4 100644 --- a/trunk/net/core/dst.c +++ b/trunk/net/core/dst.c @@ -149,7 +149,15 @@ int dst_discard(struct sk_buff *skb) } EXPORT_SYMBOL(dst_discard); -const u32 dst_default_metrics[RTAX_MAX]; +const u32 dst_default_metrics[RTAX_MAX + 1] = { + /* This initializer is needed to force linker to place this variable + * into const section. Otherwise it might end into bss section. + * We really want to avoid false sharing on this variable, and catch + * any writes on it. 
+ */ + [RTAX_MAX] = 0xdeadbeef, +}; + void *dst_alloc(struct dst_ops *ops, struct net_device *dev, int initial_ref, int initial_obsolete, unsigned short flags) diff --git a/trunk/net/core/netpoll.c b/trunk/net/core/netpoll.c index b4c90e42b443..e4ba3e70c174 100644 --- a/trunk/net/core/netpoll.c +++ b/trunk/net/core/netpoll.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -54,7 +55,7 @@ static atomic_t trapped; MAX_UDP_CHUNK) static void zap_completion_queue(void); -static void arp_reply(struct sk_buff *skb); +static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo); static unsigned int carrier_timeout = 4; module_param(carrier_timeout, uint, 0644); @@ -170,7 +171,8 @@ static void poll_napi(struct net_device *dev) list_for_each_entry(napi, &dev->napi_list, dev_list) { if (napi->poll_owner != smp_processor_id() && spin_trylock(&napi->poll_lock)) { - budget = poll_one_napi(dev->npinfo, napi, budget); + budget = poll_one_napi(rcu_dereference_bh(dev->npinfo), + napi, budget); spin_unlock(&napi->poll_lock); if (!budget) @@ -185,13 +187,14 @@ static void service_arp_queue(struct netpoll_info *npi) struct sk_buff *skb; while ((skb = skb_dequeue(&npi->arp_tx))) - arp_reply(skb); + netpoll_arp_reply(skb, npi); } } static void netpoll_poll_dev(struct net_device *dev) { const struct net_device_ops *ops; + struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo); if (!dev || !netif_running(dev)) return; @@ -206,17 +209,18 @@ static void netpoll_poll_dev(struct net_device *dev) poll_napi(dev); if (dev->flags & IFF_SLAVE) { - if (dev->npinfo) { + if (ni) { struct net_device *bond_dev = dev->master; struct sk_buff *skb; - while ((skb = skb_dequeue(&dev->npinfo->arp_tx))) { + struct netpoll_info *bond_ni = rcu_dereference_bh(bond_dev->npinfo); + while ((skb = skb_dequeue(&ni->arp_tx))) { skb->dev = bond_dev; - skb_queue_tail(&bond_dev->npinfo->arp_tx, skb); + skb_queue_tail(&bond_ni->arp_tx, skb); } } } - service_arp_queue(dev->npinfo); + service_arp_queue(ni); zap_completion_queue(); } @@ -302,6 +306,7 @@ static int netpoll_owner_active(struct net_device *dev) return 0; } +/* call with IRQ disabled */ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, struct net_device *dev) { @@ -309,8 +314,11 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, unsigned long tries; const struct net_device_ops *ops = dev->netdev_ops; /* It is up to the caller to keep npinfo alive. 
*/ - struct netpoll_info *npinfo = np->dev->npinfo; + struct netpoll_info *npinfo; + + WARN_ON_ONCE(!irqs_disabled()); + npinfo = rcu_dereference_bh(np->dev->npinfo); if (!npinfo || !netif_running(dev) || !netif_device_present(dev)) { __kfree_skb(skb); return; @@ -319,16 +327,22 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, /* don't get messages out of order, and no recursion */ if (skb_queue_len(&npinfo->txq) == 0 && !netpoll_owner_active(dev)) { struct netdev_queue *txq; - unsigned long flags; txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); - local_irq_save(flags); /* try until next clock tick */ for (tries = jiffies_to_usecs(1)/USEC_PER_POLL; tries > 0; --tries) { if (__netif_tx_trylock(txq)) { if (!netif_xmit_stopped(txq)) { + if (vlan_tx_tag_present(skb) && + !(netif_skb_features(skb) & NETIF_F_HW_VLAN_TX)) { + skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb)); + if (unlikely(!skb)) + break; + skb->vlan_tci = 0; + } + status = ops->ndo_start_xmit(skb, dev); if (status == NETDEV_TX_OK) txq_trans_update(txq); @@ -347,10 +361,9 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, } WARN_ONCE(!irqs_disabled(), - "netpoll_send_skb(): %s enabled interrupts in poll (%pF)\n", + "netpoll_send_skb_on_dev(): %s enabled interrupts in poll (%pF)\n", dev->name, ops->ndo_start_xmit); - local_irq_restore(flags); } if (status != NETDEV_TX_OK) { @@ -423,9 +436,8 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len) } EXPORT_SYMBOL(netpoll_send_udp); -static void arp_reply(struct sk_buff *skb) +static void netpoll_arp_reply(struct sk_buff *skb, struct netpoll_info *npinfo) { - struct netpoll_info *npinfo = skb->dev->npinfo; struct arphdr *arp; unsigned char *arp_ptr; int size, type = ARPOP_REPLY, ptype = ETH_P_ARP; @@ -543,13 +555,12 @@ static void arp_reply(struct sk_buff *skb) spin_unlock_irqrestore(&npinfo->rx_lock, flags); } -int __netpoll_rx(struct sk_buff *skb) +int __netpoll_rx(struct sk_buff *skb, struct netpoll_info *npinfo) { int proto, len, ulen; int hits = 0; const struct iphdr *iph; struct udphdr *uh; - struct netpoll_info *npinfo = skb->dev->npinfo; struct netpoll *np, *tmp; if (list_empty(&npinfo->rx_np)) @@ -565,6 +576,12 @@ int __netpoll_rx(struct sk_buff *skb) return 1; } + if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { + skb = vlan_untag(skb); + if (unlikely(!skb)) + goto out; + } + proto = ntohs(eth_hdr(skb)->h_proto); if (proto != ETH_P_IP) goto out; @@ -715,7 +732,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt) } EXPORT_SYMBOL(netpoll_parse_options); -int __netpoll_setup(struct netpoll *np, struct net_device *ndev) +int __netpoll_setup(struct netpoll *np, struct net_device *ndev, gfp_t gfp) { struct netpoll_info *npinfo; const struct net_device_ops *ops; @@ -734,7 +751,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) } if (!ndev->npinfo) { - npinfo = kmalloc(sizeof(*npinfo), GFP_KERNEL); + npinfo = kmalloc(sizeof(*npinfo), gfp); if (!npinfo) { err = -ENOMEM; goto out; @@ -752,7 +769,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev) ops = np->dev->netdev_ops; if (ops->ndo_netpoll_setup) { - err = ops->ndo_netpoll_setup(ndev, npinfo); + err = ops->ndo_netpoll_setup(ndev, npinfo, gfp); if (err) goto free_npinfo; } @@ -857,7 +874,7 @@ int netpoll_setup(struct netpoll *np) refill_skbs(); rtnl_lock(); - err = __netpoll_setup(np, ndev); + err = __netpoll_setup(np, ndev, GFP_KERNEL); rtnl_unlock(); if (err) @@ -878,6 +895,24 @@ static int __init 
netpoll_init(void) } core_initcall(netpoll_init); +static void rcu_cleanup_netpoll_info(struct rcu_head *rcu_head) +{ + struct netpoll_info *npinfo = + container_of(rcu_head, struct netpoll_info, rcu); + + skb_queue_purge(&npinfo->arp_tx); + skb_queue_purge(&npinfo->txq); + + /* we can't call cancel_delayed_work_sync here, as we are in softirq */ + cancel_delayed_work(&npinfo->tx_work); + + /* clean after last, unfinished work */ + __skb_queue_purge(&npinfo->txq); + /* now cancel it again */ + cancel_delayed_work(&npinfo->tx_work); + kfree(npinfo); +} + void __netpoll_cleanup(struct netpoll *np) { struct netpoll_info *npinfo; @@ -903,20 +938,24 @@ void __netpoll_cleanup(struct netpoll *np) ops->ndo_netpoll_cleanup(np->dev); RCU_INIT_POINTER(np->dev->npinfo, NULL); + call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info); + } +} +EXPORT_SYMBOL_GPL(__netpoll_cleanup); - /* avoid racing with NAPI reading npinfo */ - synchronize_rcu_bh(); +static void rcu_cleanup_netpoll(struct rcu_head *rcu_head) +{ + struct netpoll *np = container_of(rcu_head, struct netpoll, rcu); - skb_queue_purge(&npinfo->arp_tx); - skb_queue_purge(&npinfo->txq); - cancel_delayed_work_sync(&npinfo->tx_work); + __netpoll_cleanup(np); + kfree(np); +} - /* clean after last, unfinished work */ - __skb_queue_purge(&npinfo->txq); - kfree(npinfo); - } +void __netpoll_free_rcu(struct netpoll *np) +{ + call_rcu_bh(&np->rcu, rcu_cleanup_netpoll); } -EXPORT_SYMBOL_GPL(__netpoll_cleanup); +EXPORT_SYMBOL_GPL(__netpoll_free_rcu); void netpoll_cleanup(struct netpoll *np) { diff --git a/trunk/net/core/netprio_cgroup.c b/trunk/net/core/netprio_cgroup.c index ed0c0431fcd8..c75e3f9d060f 100644 --- a/trunk/net/core/netprio_cgroup.c +++ b/trunk/net/core/netprio_cgroup.c @@ -101,12 +101,10 @@ static int write_update_netdev_table(struct net_device *dev) u32 max_len; struct netprio_map *map; - rtnl_lock(); max_len = atomic_read(&max_prioidx) + 1; map = rtnl_dereference(dev->priomap); if (!map || map->priomap_len < max_len) ret = extend_netdev_table(dev, max_len); - rtnl_unlock(); return ret; } @@ -256,17 +254,17 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft, if (!dev) goto out_free_devname; + rtnl_lock(); ret = write_update_netdev_table(dev); if (ret < 0) goto out_put_dev; - rcu_read_lock(); - map = rcu_dereference(dev->priomap); + map = rtnl_dereference(dev->priomap); if (map) map->priomap[prioidx] = priority; - rcu_read_unlock(); out_put_dev: + rtnl_unlock(); dev_put(dev); out_free_devname: @@ -277,12 +275,6 @@ static int write_priomap(struct cgroup *cgrp, struct cftype *cft, void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) { struct task_struct *p; - char *tmp = kzalloc(sizeof(char) * PATH_MAX, GFP_KERNEL); - - if (!tmp) { - pr_warn("Unable to attach cgrp due to alloc failure!\n"); - return; - } cgroup_taskset_for_each(p, cgrp, tset) { unsigned int fd; @@ -296,32 +288,24 @@ void net_prio_attach(struct cgroup *cgrp, struct cgroup_taskset *tset) continue; } - rcu_read_lock(); + spin_lock(&files->file_lock); fdt = files_fdtable(files); for (fd = 0; fd < fdt->max_fds; fd++) { - char *path; struct file *file; struct socket *sock; - unsigned long s; - int rv, err = 0; + int err; file = fcheck_files(files, fd); if (!file) continue; - path = d_path(&file->f_path, tmp, PAGE_SIZE); - rv = sscanf(path, "socket:[%lu]", &s); - if (rv <= 0) - continue; - sock = sock_from_file(file, &err); - if (!err) + if (sock) sock_update_netprioidx(sock->sk, p); } - rcu_read_unlock(); + spin_unlock(&files->file_lock); 
task_unlock(p); } - kfree(tmp); } static struct cftype ss_files[] = { diff --git a/trunk/net/core/pktgen.c b/trunk/net/core/pktgen.c index cce9e53528b1..148e73d2c451 100644 --- a/trunk/net/core/pktgen.c +++ b/trunk/net/core/pktgen.c @@ -2721,7 +2721,7 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, /* Eth + IPh + UDPh + mpls */ datalen = pkt_dev->cur_pkt_size - 14 - 20 - 8 - pkt_dev->pkt_overhead; - if (datalen < sizeof(struct pktgen_hdr)) + if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) datalen = sizeof(struct pktgen_hdr); udph->source = htons(pkt_dev->cur_udp_src); diff --git a/trunk/net/core/scm.c b/trunk/net/core/scm.c index 8f6ccfd68ef4..040cebeed45b 100644 --- a/trunk/net/core/scm.c +++ b/trunk/net/core/scm.c @@ -265,6 +265,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm) for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); isk, current); fd_install(new_fd, fp[i]); } diff --git a/trunk/net/core/skbuff.c b/trunk/net/core/skbuff.c index fe00d1208167..e33ebae519c8 100644 --- a/trunk/net/core/skbuff.c +++ b/trunk/net/core/skbuff.c @@ -3502,7 +3502,9 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, if (!skb_cloned(from)) skb_shinfo(from)->nr_frags = 0; - /* if the skb is cloned this does nothing since we set nr_frags to 0 */ + /* if the skb is not cloned this does nothing + * since we set nr_frags to 0. + */ for (i = 0; i < skb_shinfo(from)->nr_frags; i++) skb_frag_ref(from, i); diff --git a/trunk/net/core/sock.c b/trunk/net/core/sock.c index 6b654b3ddfda..a6000fbad294 100644 --- a/trunk/net/core/sock.c +++ b/trunk/net/core/sock.c @@ -691,7 +691,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname, case SO_KEEPALIVE: #ifdef CONFIG_INET - if (sk->sk_protocol == IPPROTO_TCP) + if (sk->sk_protocol == IPPROTO_TCP && + sk->sk_type == SOCK_STREAM) tcp_set_keepalive(sk, valbool); #endif sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool); @@ -1458,6 +1459,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst) } else { sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM; sk->sk_gso_max_size = dst->dev->gso_max_size; + sk->sk_gso_max_segs = dst->dev->gso_max_segs; } } } @@ -1522,7 +1524,14 @@ EXPORT_SYMBOL(sock_rfree); void sock_edemux(struct sk_buff *skb) { - sock_put(skb->sk); + struct sock *sk = skb->sk; + +#ifdef CONFIG_INET + if (sk->sk_state == TCP_TIME_WAIT) + inet_twsk_put(inet_twsk(sk)); + else +#endif + sock_put(sk); } EXPORT_SYMBOL(sock_edemux); diff --git a/trunk/net/dccp/ccid.h b/trunk/net/dccp/ccid.h index 75c3582a7678..fb85d371a8de 100644 --- a/trunk/net/dccp/ccid.h +++ b/trunk/net/dccp/ccid.h @@ -246,7 +246,7 @@ static inline int ccid_hc_rx_getsockopt(struct ccid *ccid, struct sock *sk, u32 __user *optval, int __user *optlen) { int rc = -ENOPROTOOPT; - if (ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) + if (ccid != NULL && ccid->ccid_ops->ccid_hc_rx_getsockopt != NULL) rc = ccid->ccid_ops->ccid_hc_rx_getsockopt(sk, optname, len, optval, optlen); return rc; @@ -257,7 +257,7 @@ static inline int ccid_hc_tx_getsockopt(struct ccid *ccid, struct sock *sk, u32 __user *optval, int __user *optlen) { int rc = -ENOPROTOOPT; - if (ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) + if (ccid != NULL && ccid->ccid_ops->ccid_hc_tx_getsockopt != NULL) rc = ccid->ccid_ops->ccid_hc_tx_getsockopt(sk, optname, len, optval, optlen); return rc; diff --git a/trunk/net/dccp/ccids/ccid3.c b/trunk/net/dccp/ccids/ccid3.c index d65e98798eca..119c04317d48 100644 --- a/trunk/net/dccp/ccids/ccid3.c +++ 
b/trunk/net/dccp/ccids/ccid3.c @@ -535,6 +535,7 @@ static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, case DCCP_SOCKOPT_CCID_TX_INFO: if (len < sizeof(tfrc)) return -EINVAL; + memset(&tfrc, 0, sizeof(tfrc)); tfrc.tfrctx_x = hc->tx_x; tfrc.tfrctx_x_recv = hc->tx_x_recv; tfrc.tfrctx_x_calc = hc->tx_x_calc; diff --git a/trunk/net/ipv4/arp.c b/trunk/net/ipv4/arp.c index 77e87aff419a..47800459e4cb 100644 --- a/trunk/net/ipv4/arp.c +++ b/trunk/net/ipv4/arp.c @@ -1225,7 +1225,7 @@ static int arp_netdev_event(struct notifier_block *this, unsigned long event, switch (event) { case NETDEV_CHANGEADDR: neigh_changeaddr(&arp_tbl, dev); - rt_cache_flush(dev_net(dev), 0); + rt_cache_flush(dev_net(dev)); break; default: break; diff --git a/trunk/net/ipv4/devinet.c b/trunk/net/ipv4/devinet.c index 44bf82e3aef7..e12fad773852 100644 --- a/trunk/net/ipv4/devinet.c +++ b/trunk/net/ipv4/devinet.c @@ -725,7 +725,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) break; case SIOCSIFFLAGS: - ret = -EACCES; + ret = -EPERM; if (!capable(CAP_NET_ADMIN)) goto out; break; @@ -733,7 +733,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) case SIOCSIFBRDADDR: /* Set the broadcast address */ case SIOCSIFDSTADDR: /* Set the destination address */ case SIOCSIFNETMASK: /* Set the netmask for the interface */ - ret = -EACCES; + ret = -EPERM; if (!capable(CAP_NET_ADMIN)) goto out; ret = -EINVAL; @@ -1503,7 +1503,7 @@ static int devinet_conf_proc(ctl_table *ctl, int write, if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1 || i == IPV4_DEVCONF_ROUTE_LOCALNET - 1) if ((new_value == 0) && (old_value != 0)) - rt_cache_flush(net, 0); + rt_cache_flush(net); } return ret; @@ -1537,7 +1537,7 @@ static int devinet_sysctl_forward(ctl_table *ctl, int write, dev_disable_lro(idev->dev); } rtnl_unlock(); - rt_cache_flush(net, 0); + rt_cache_flush(net); } } @@ -1554,7 +1554,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write, struct net *net = ctl->extra2; if (write && *valp != val) - rt_cache_flush(net, 0); + rt_cache_flush(net); return ret; } diff --git a/trunk/net/ipv4/fib_frontend.c b/trunk/net/ipv4/fib_frontend.c index c43ae3fba792..8e2b475da9fa 100644 --- a/trunk/net/ipv4/fib_frontend.c +++ b/trunk/net/ipv4/fib_frontend.c @@ -148,7 +148,7 @@ static void fib_flush(struct net *net) } if (flushed) - rt_cache_flush(net, -1); + rt_cache_flush(net); } /* @@ -999,11 +999,11 @@ static void nl_fib_lookup_exit(struct net *net) net->ipv4.fibnl = NULL; } -static void fib_disable_ip(struct net_device *dev, int force, int delay) +static void fib_disable_ip(struct net_device *dev, int force) { if (fib_sync_down_dev(dev, force)) fib_flush(dev_net(dev)); - rt_cache_flush(dev_net(dev), delay); + rt_cache_flush(dev_net(dev)); arp_ifdown(dev); } @@ -1020,7 +1020,7 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, fib_sync_up(dev); #endif atomic_inc(&net->ipv4.dev_addr_genid); - rt_cache_flush(dev_net(dev), -1); + rt_cache_flush(dev_net(dev)); break; case NETDEV_DOWN: fib_del_ifaddr(ifa, NULL); @@ -1029,9 +1029,9 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event, /* Last address was deleted from this interface. * Disable IP. 
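/* Editor's note -- illustrative sketch, not part of the patch. The memset()
 * added to the ccid3 getsockopt hunk above closes an information leak: an
 * on-stack struct has padding and possibly unwritten fields, and whatever
 * is in them would otherwise reach user space through copy_to_user().
 * Hypothetical names (struct foo_info, foo_getsockopt):
 */
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/uaccess.h>

struct foo_info {
	u32 a;
	u16 b;			/* padding usually follows this field */
	u64 c;
};

static int foo_getsockopt(void __user *optval, int len)
{
	struct foo_info info;

	if (len < sizeof(info))
		return -EINVAL;

	memset(&info, 0, sizeof(info));	/* clear padding and unused fields */
	info.a = 1;
	info.c = 2;

	return copy_to_user(optval, &info, sizeof(info)) ? -EFAULT : 0;
}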
*/ - fib_disable_ip(dev, 1, 0); + fib_disable_ip(dev, 1); } else { - rt_cache_flush(dev_net(dev), -1); + rt_cache_flush(dev_net(dev)); } break; } @@ -1045,7 +1045,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo struct net *net = dev_net(dev); if (event == NETDEV_UNREGISTER) { - fib_disable_ip(dev, 2, -1); + fib_disable_ip(dev, 2); rt_flush_dev(dev); return NOTIFY_DONE; } @@ -1062,14 +1062,14 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo fib_sync_up(dev); #endif atomic_inc(&net->ipv4.dev_addr_genid); - rt_cache_flush(dev_net(dev), -1); + rt_cache_flush(dev_net(dev)); break; case NETDEV_DOWN: - fib_disable_ip(dev, 0, 0); + fib_disable_ip(dev, 0); break; case NETDEV_CHANGEMTU: case NETDEV_CHANGE: - rt_cache_flush(dev_net(dev), 0); + rt_cache_flush(dev_net(dev)); break; case NETDEV_UNREGISTER_BATCH: break; diff --git a/trunk/net/ipv4/fib_rules.c b/trunk/net/ipv4/fib_rules.c index a83d74e498d2..274309d3aded 100644 --- a/trunk/net/ipv4/fib_rules.c +++ b/trunk/net/ipv4/fib_rules.c @@ -259,7 +259,7 @@ static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) static void fib4_rule_flush_cache(struct fib_rules_ops *ops) { - rt_cache_flush(ops->fro_net, -1); + rt_cache_flush(ops->fro_net); } static const struct fib_rules_ops __net_initdata fib4_rules_ops_template = { diff --git a/trunk/net/ipv4/fib_trie.c b/trunk/net/ipv4/fib_trie.c index f0cdb30921c0..d1b93595b4a7 100644 --- a/trunk/net/ipv4/fib_trie.c +++ b/trunk/net/ipv4/fib_trie.c @@ -367,7 +367,7 @@ static void __leaf_free_rcu(struct rcu_head *head) static inline void free_leaf(struct leaf *l) { - call_rcu_bh(&l->rcu, __leaf_free_rcu); + call_rcu(&l->rcu, __leaf_free_rcu); } static inline void free_leaf_info(struct leaf_info *leaf) @@ -1286,7 +1286,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) fib_release_info(fi_drop); if (state & FA_S_ACCESSED) - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(cfg->fc_nlinfo.nl_net); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, NLM_F_REPLACE); @@ -1333,7 +1333,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg) list_add_tail_rcu(&new_fa->fa_list, (fa ? 
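/* Editor's note -- illustrative sketch, not part of the patch. The fib_trie
 * hunk above moves free_leaf() from call_rcu_bh() to call_rcu(): the grace
 * period an updater waits for has to match the lock flavour its readers
 * use (rcu_read_lock() here, not rcu_read_lock_bh()). Hypothetical names
 * (gp, my_item, replace_item):
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_item {
	int value;
	struct rcu_head rcu;
};

static struct my_item __rcu *gp;

static int read_value(void)
{
	struct my_item *p;
	int v = -1;

	rcu_read_lock();			/* plain RCU read side... */
	p = rcu_dereference(gp);
	if (p)
		v = p->value;
	rcu_read_unlock();
	return v;
}

static void my_item_rcu_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_item, rcu));
}

static void replace_item(struct my_item *newp)
{
	struct my_item *old = rcu_dereference_protected(gp, 1);

	rcu_assign_pointer(gp, newp);
	if (old)
		call_rcu(&old->rcu, my_item_rcu_free);	/* ...so wait with call_rcu() */
}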
&fa->fa_list : fa_head)); - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(cfg->fc_nlinfo.nl_net); rtmsg_fib(RTM_NEWROUTE, htonl(key), new_fa, plen, tb->tb_id, &cfg->fc_nlinfo, 0); succeeded: @@ -1708,7 +1708,7 @@ int fib_table_delete(struct fib_table *tb, struct fib_config *cfg) trie_leaf_remove(t, l); if (fa->fa_state & FA_S_ACCESSED) - rt_cache_flush(cfg->fc_nlinfo.nl_net, -1); + rt_cache_flush(cfg->fc_nlinfo.nl_net); fib_release_info(fa->fa_info); alias_free_mem_rcu(fa); diff --git a/trunk/net/ipv4/inet_connection_sock.c b/trunk/net/ipv4/inet_connection_sock.c index db0cf17c00f7..7f75f21d7b83 100644 --- a/trunk/net/ipv4/inet_connection_sock.c +++ b/trunk/net/ipv4/inet_connection_sock.c @@ -404,12 +404,15 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk, { const struct inet_request_sock *ireq = inet_rsk(req); struct inet_sock *newinet = inet_sk(newsk); - struct ip_options_rcu *opt = ireq->opt; + struct ip_options_rcu *opt; struct net *net = sock_net(sk); struct flowi4 *fl4; struct rtable *rt; fl4 = &newinet->cork.fl.u.ip4; + + rcu_read_lock(); + opt = rcu_dereference(newinet->inet_opt); flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, sk->sk_protocol, inet_sk_flowi_flags(sk), @@ -421,11 +424,13 @@ struct dst_entry *inet_csk_route_child_sock(struct sock *sk, goto no_route; if (opt && opt->opt.is_strictroute && rt->rt_gateway) goto route_err; + rcu_read_unlock(); return &rt->dst; route_err: ip_rt_put(rt); no_route: + rcu_read_unlock(); IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES); return NULL; } diff --git a/trunk/net/ipv4/inetpeer.c b/trunk/net/ipv4/inetpeer.c index e1e0a4e8fd34..c7527f6b9ad9 100644 --- a/trunk/net/ipv4/inetpeer.c +++ b/trunk/net/ipv4/inetpeer.c @@ -510,7 +510,10 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base, secure_ipv6_id(daddr->addr.a6)); p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; p->rate_tokens = 0; - p->rate_last = 0; + /* 60*HZ is arbitrary, but chosen enough high so that the first + * calculation of tokens is at its maximum. + */ + p->rate_last = jiffies - 60*HZ; INIT_LIST_HEAD(&p->gc_list); /* Link the node. */ diff --git a/trunk/net/ipv4/ip_output.c b/trunk/net/ipv4/ip_output.c index ba39a52d18c1..c196d749daf2 100644 --- a/trunk/net/ipv4/ip_output.c +++ b/trunk/net/ipv4/ip_output.c @@ -197,7 +197,7 @@ static inline int ip_finish_output2(struct sk_buff *skb) neigh = __ipv4_neigh_lookup_noref(dev, nexthop); if (unlikely(!neigh)) neigh = __neigh_create(&arp_tbl, &nexthop, dev, false); - if (neigh) { + if (!IS_ERR(neigh)) { int res = dst_neigh_output(dst, neigh, skb); rcu_read_unlock_bh(); @@ -1338,10 +1338,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk, iph->ihl = 5; iph->tos = inet->tos; iph->frag_off = df; - ip_select_ident(iph, &rt->dst, sk); iph->ttl = ttl; iph->protocol = sk->sk_protocol; ip_copy_addrs(iph, fl4); + ip_select_ident(iph, &rt->dst, sk); if (opt) { iph->ihl += opt->optlen>>2; @@ -1366,9 +1366,8 @@ struct sk_buff *__ip_make_skb(struct sock *sk, return skb; } -int ip_send_skb(struct sk_buff *skb) +int ip_send_skb(struct net *net, struct sk_buff *skb) { - struct net *net = sock_net(skb->sk); int err; err = ip_local_out(skb); @@ -1391,7 +1390,7 @@ int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4) return 0; /* Netfilter gets whole the not fragmented skb. 
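/* Editor's note -- illustrative sketch, not part of the patch. The
 * ip_finish_output2 hunk above replaces a NULL test with IS_ERR() because
 * __neigh_create() reports failure as an ERR_PTR()-encoded pointer rather
 * than NULL, so the old check let error pointers through. The convention,
 * with hypothetical names (my_thing, make_thing, use_thing):
 */
#include <linux/err.h>
#include <linux/slab.h>

struct my_thing {
	int key;
};

static struct my_thing *make_thing(int key)
{
	struct my_thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (!t)
		return ERR_PTR(-ENOMEM);	/* error is encoded in the pointer */
	t->key = key;
	return t;
}

static int use_thing(int key)
{
	struct my_thing *t = make_thing(key);

	if (IS_ERR(t))				/* a plain NULL test would miss this */
		return PTR_ERR(t);
	kfree(t);
	return 0;
}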
*/ - return ip_send_skb(skb); + return ip_send_skb(sock_net(sk), skb); } /* @@ -1536,6 +1535,7 @@ void ip_send_unicast_reply(struct net *net, struct sk_buff *skb, __be32 daddr, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; + skb_orphan(nskb); skb_set_queue_mapping(nskb, skb_get_queue_mapping(skb)); ip_push_pending_frames(sk, &fl4); } diff --git a/trunk/net/ipv4/ipmr.c b/trunk/net/ipv4/ipmr.c index 8eec8f4a0536..ebdf06f938bf 100644 --- a/trunk/net/ipv4/ipmr.c +++ b/trunk/net/ipv4/ipmr.c @@ -124,6 +124,8 @@ static DEFINE_SPINLOCK(mfc_unres_lock); static struct kmem_cache *mrt_cachep __read_mostly; static struct mr_table *ipmr_new_table(struct net *net, u32 id); +static void ipmr_free_table(struct mr_table *mrt); + static int ip_mr_forward(struct net *net, struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *cache, int local); @@ -131,6 +133,7 @@ static int ipmr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, vifi_t vifi, int assert); static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm); +static void mroute_clean_tables(struct mr_table *mrt); static void ipmr_expire_process(unsigned long arg); #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES @@ -271,7 +274,7 @@ static void __net_exit ipmr_rules_exit(struct net *net) list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { list_del(&mrt->list); - kfree(mrt); + ipmr_free_table(mrt); } fib_rules_unregister(net->ipv4.mr_rules_ops); } @@ -299,7 +302,7 @@ static int __net_init ipmr_rules_init(struct net *net) static void __net_exit ipmr_rules_exit(struct net *net) { - kfree(net->ipv4.mrt); + ipmr_free_table(net->ipv4.mrt); } #endif @@ -336,6 +339,13 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id) return mrt; } +static void ipmr_free_table(struct mr_table *mrt) +{ + del_timer_sync(&mrt->ipmr_expire_timer); + mroute_clean_tables(mrt); + kfree(mrt); +} + /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */ static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v) diff --git a/trunk/net/ipv4/netfilter/nf_nat_sip.c b/trunk/net/ipv4/netfilter/nf_nat_sip.c index ea4a23813d26..9c87cde28ff8 100644 --- a/trunk/net/ipv4/netfilter/nf_nat_sip.c +++ b/trunk/net/ipv4/netfilter/nf_nat_sip.c @@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, hdr, NULL, &matchoff, &matchlen, &addr, &port) > 0) { - unsigned int matchend, poff, plen, buflen, n; + unsigned int olen, matchend, poff, plen, buflen, n; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; /* We're only interested in headers related to this @@ -163,17 +163,18 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, goto next; } + olen = *datalen; if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; - matchend = matchoff + matchlen; + matchend = matchoff + matchlen + *datalen - olen; /* The maddr= parameter (RFC 2361) specifies where to send * the reply. */ if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, "maddr=", &poff, &plen, - &addr) > 0 && + &addr, true) > 0 && addr.ip == ct->tuplehash[dir].tuple.src.u3.ip && addr.ip != ct->tuplehash[!dir].tuple.dst.u3.ip) { buflen = sprintf(buffer, "%pI4", @@ -187,7 +188,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, * from which the server received the request. 
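/* Editor's note -- illustrative sketch, not part of the patch. The new
 * ipmr_free_table() above shows the required teardown order: disarm the
 * timer and wait for a running handler before freeing the memory that
 * embeds it, otherwise the timer can fire into freed memory. Hypothetical
 * names (my_table, my_table_free):
 */
#include <linux/timer.h>
#include <linux/slab.h>

struct my_table {
	struct timer_list expire_timer;
	/* ... table entries ... */
};

static void my_table_free(struct my_table *tbl)
{
	/* Also waits for a handler already running on another CPU. */
	del_timer_sync(&tbl->expire_timer);
	kfree(tbl);
}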
*/ if (ct_sip_parse_address_param(ct, *dptr, matchend, *datalen, "received=", &poff, &plen, - &addr) > 0 && + &addr, false) > 0 && addr.ip == ct->tuplehash[dir].tuple.dst.u3.ip && addr.ip != ct->tuplehash[!dir].tuple.src.u3.ip) { buflen = sprintf(buffer, "%pI4", @@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, ret = nf_ct_expect_related(rtcp_exp); if (ret == 0) break; - else if (ret != -EBUSY) { + else if (ret == -EBUSY) { + nf_ct_unexpect_related(rtp_exp); + continue; + } else if (ret < 0) { nf_ct_unexpect_related(rtp_exp); port = 0; break; diff --git a/trunk/net/ipv4/raw.c b/trunk/net/ipv4/raw.c index ff0f071969ea..d23c6571ba1c 100644 --- a/trunk/net/ipv4/raw.c +++ b/trunk/net/ipv4/raw.c @@ -131,18 +131,20 @@ static struct sock *__raw_v4_lookup(struct net *net, struct sock *sk, * 0 - deliver * 1 - block */ -static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb) +static int icmp_filter(const struct sock *sk, const struct sk_buff *skb) { - int type; + struct icmphdr _hdr; + const struct icmphdr *hdr; - if (!pskb_may_pull(skb, sizeof(struct icmphdr))) + hdr = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (!hdr) return 1; - type = icmp_hdr(skb)->type; - if (type < 32) { + if (hdr->type < 32) { __u32 data = raw_sk(sk)->filter.data; - return ((1 << type) & data) != 0; + return ((1U << hdr->type) & data) != 0; } /* Do not block unknown ICMP types */ diff --git a/trunk/net/ipv4/route.c b/trunk/net/ipv4/route.c index c035251beb07..fd9af60397b5 100644 --- a/trunk/net/ipv4/route.c +++ b/trunk/net/ipv4/route.c @@ -70,7 +70,6 @@ #include #include #include -#include #include #include #include @@ -80,7 +79,6 @@ #include #include #include -#include #include #include #include @@ -88,11 +86,9 @@ #include #include #include -#include #include #include #include -#include #include #include #include @@ -206,11 +202,6 @@ EXPORT_SYMBOL(ip_tos2prio); static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field) -static inline int rt_genid(struct net *net) -{ - return atomic_read(&net->ipv4.rt_genid); -} - #ifdef CONFIG_PROC_FS static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) { @@ -451,27 +442,9 @@ static inline bool rt_is_expired(const struct rtable *rth) return rth->rt_genid != rt_genid(dev_net(rth->dst.dev)); } -/* - * Perturbation of rt_genid by a small quantity [1..256] - * Using 8 bits of shuffling ensure we can call rt_cache_invalidate() - * many times (2^24) without giving recent rt_genid. - * Jenkins hash is strong enough that litle changes of rt_genid are OK. 
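/* Editor's note -- illustrative sketch, not part of the patch. The rewritten
 * icmp_filter() above (and the raw6/mip6 hunks further down) all read the
 * transport header through skb_header_pointer(): it returns a pointer into
 * the skb when the bytes are linear, copies them into the caller's _hdr
 * buffer when they are not, and returns NULL if the packet is too short.
 * Hypothetical helper (icmp_type_is):
 */
#include <linux/skbuff.h>
#include <linux/icmp.h>

static bool icmp_type_is(const struct sk_buff *skb, u8 type)
{
	struct icmphdr _hdr;
	const struct icmphdr *hdr;

	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
				 sizeof(_hdr), &_hdr);
	if (!hdr)		/* truncated packet */
		return false;

	return hdr->type == type;
}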
- */ -static void rt_cache_invalidate(struct net *net) -{ - unsigned char shuffle; - - get_random_bytes(&shuffle, sizeof(shuffle)); - atomic_add(shuffle + 1U, &net->ipv4.rt_genid); -} - -/* - * delay < 0 : invalidate cache (fast : entries will be deleted later) - * delay >= 0 : invalidate & flush cache (can be long) - */ -void rt_cache_flush(struct net *net, int delay) +void rt_cache_flush(struct net *net) { - rt_cache_invalidate(net); + rt_genid_bump(net); } static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, @@ -938,12 +911,14 @@ static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) if (mtu < ip_rt_min_pmtu) mtu = ip_rt_min_pmtu; + rcu_read_lock(); if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) { struct fib_nh *nh = &FIB_RES_NH(res); update_or_create_fnhe(nh, fl4->daddr, 0, mtu, jiffies + ip_rt_mtu_expires); } + rcu_read_unlock(); return mtu; } @@ -960,7 +935,7 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, dst->obsolete = DST_OBSOLETE_KILL; } else { rt->rt_pmtu = mtu; - dst_set_expires(&rt->dst, ip_rt_mtu_expires); + rt->dst.expires = max(1UL, jiffies + ip_rt_mtu_expires); } } @@ -1267,7 +1242,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst) { struct rtable *rt = (struct rtable *) dst; - if (dst->flags & DST_NOCACHE) { + if (!list_empty(&rt->rt_uncached)) { spin_lock_bh(&rt_uncached_lock); list_del(&rt->rt_uncached); spin_unlock_bh(&rt_uncached_lock); @@ -2032,7 +2007,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4) } dev_out = net->loopback_dev; fl4->flowi4_oif = dev_out->ifindex; - res.fi = NULL; flags |= RTCF_LOCAL; goto make_route; } @@ -2348,7 +2322,7 @@ int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb) void ip_rt_multicast_event(struct in_device *in_dev) { - rt_cache_flush(dev_net(in_dev->dev), 0); + rt_cache_flush(dev_net(in_dev->dev)); } #ifdef CONFIG_SYSCTL @@ -2357,16 +2331,7 @@ static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write, size_t *lenp, loff_t *ppos) { if (write) { - int flush_delay; - ctl_table ctl; - struct net *net; - - memcpy(&ctl, __ctl, sizeof(ctl)); - ctl.data = &flush_delay; - proc_dointvec(&ctl, write, buffer, lenp, ppos); - - net = (struct net *)__ctl->extra1; - rt_cache_flush(net, flush_delay); + rt_cache_flush((struct net *)__ctl->extra1); return 0; } @@ -2536,8 +2501,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = { static __net_init int rt_genid_init(struct net *net) { - get_random_bytes(&net->ipv4.rt_genid, - sizeof(net->ipv4.rt_genid)); + atomic_set(&net->rt_genid, 0); get_random_bytes(&net->ipv4.dev_addr_genid, sizeof(net->ipv4.dev_addr_genid)); return 0; diff --git a/trunk/net/ipv4/tcp.c b/trunk/net/ipv4/tcp.c index e7e6eeae49c0..5f6419341821 100644 --- a/trunk/net/ipv4/tcp.c +++ b/trunk/net/ipv4/tcp.c @@ -811,7 +811,9 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now, old_size_goal + mss_now > xmit_size_goal)) { xmit_size_goal = old_size_goal; } else { - tp->xmit_size_goal_segs = xmit_size_goal / mss_now; + tp->xmit_size_goal_segs = + min_t(u16, xmit_size_goal / mss_now, + sk->sk_gso_max_segs); xmit_size_goal = tp->xmit_size_goal_segs * mss_now; } } @@ -1760,8 +1762,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, } #ifdef CONFIG_NET_DMA - if (tp->ucopy.dma_chan) - dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + if (tp->ucopy.dma_chan) { + if (tp->rcv_wnd == 0 && + !skb_queue_empty(&sk->sk_async_wait_queue)) { + 
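/* Editor's note -- illustrative sketch, not part of the patch. After the
 * route.c hunks above, flushing the cache is just a generation bump: each
 * entry records the genid it was created under and is discarded lazily
 * when the per-namespace counter has moved on. Hypothetical types
 * (struct my_cache, struct my_entry):
 */
#include <linux/atomic.h>
#include <linux/types.h>

struct my_cache {
	atomic_t genid;
};

struct my_entry {
	int genid;
	/* ... cached data ... */
};

static void my_entry_init(struct my_cache *c, struct my_entry *e)
{
	e->genid = atomic_read(&c->genid);
}

static bool my_entry_is_stale(struct my_cache *c, const struct my_entry *e)
{
	return e->genid != atomic_read(&c->genid);
}

static void my_cache_flush(struct my_cache *c)
{
	atomic_inc(&c->genid);	/* O(1); stale entries die on next lookup */
}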
tcp_service_net_dma(sk, true); + tcp_cleanup_rbuf(sk, copied); + } else + dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); + } #endif if (copied >= target) { /* Do not sleep, just process backlog. */ @@ -2323,10 +2331,17 @@ static int tcp_repair_options_est(struct tcp_sock *tp, tp->rx_opt.mss_clamp = opt.opt_val; break; case TCPOPT_WINDOW: - if (opt.opt_val > 14) - return -EFBIG; + { + u16 snd_wscale = opt.opt_val & 0xFFFF; + u16 rcv_wscale = opt.opt_val >> 16; + + if (snd_wscale > 14 || rcv_wscale > 14) + return -EFBIG; - tp->rx_opt.snd_wscale = opt.opt_val; + tp->rx_opt.snd_wscale = snd_wscale; + tp->rx_opt.rcv_wscale = rcv_wscale; + tp->rx_opt.wscale_ok = 1; + } break; case TCPOPT_SACK_PERM: if (opt.opt_val != 0) diff --git a/trunk/net/ipv4/tcp_cong.c b/trunk/net/ipv4/tcp_cong.c index 4d4db16e336e..1432cdb0644c 100644 --- a/trunk/net/ipv4/tcp_cong.c +++ b/trunk/net/ipv4/tcp_cong.c @@ -291,7 +291,8 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) left = tp->snd_cwnd - in_flight; if (sk_can_gso(sk) && left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd && - left * tp->mss_cache < sk->sk_gso_max_size) + left * tp->mss_cache < sk->sk_gso_max_size && + left < sk->sk_gso_max_segs) return true; return left <= tcp_max_tso_deferred_mss(tp); } diff --git a/trunk/net/ipv4/tcp_input.c b/trunk/net/ipv4/tcp_input.c index 2fd2bc9e3c64..d377f4854cb8 100644 --- a/trunk/net/ipv4/tcp_input.c +++ b/trunk/net/ipv4/tcp_input.c @@ -2926,13 +2926,14 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack) * tcp_xmit_retransmit_queue(). */ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, - int newly_acked_sacked, bool is_dupack, + int prior_sacked, bool is_dupack, int flag) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); + int newly_acked_sacked = 0; int fast_rexmit = 0; if (WARN_ON(!tp->packets_out && tp->sacked_out)) @@ -2992,6 +2993,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, tcp_add_reno_sack(sk); } else do_lost = tcp_try_undo_partial(sk, pkts_acked); + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; break; case TCP_CA_Loss: if (flag & FLAG_DATA_ACKED) @@ -3013,6 +3015,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, if (is_dupack) tcp_add_reno_sack(sk); } + newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); @@ -3590,7 +3593,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) int prior_packets; int prior_sacked = tp->sacked_out; int pkts_acked = 0; - int newly_acked_sacked = 0; bool frto_cwnd = false; /* If the ack is older than previous acks @@ -3666,8 +3668,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); pkts_acked = prior_packets - tp->packets_out; - newly_acked_sacked = (prior_packets - prior_sacked) - - (tp->packets_out - tp->sacked_out); if (tp->frto_counter) frto_cwnd = tcp_process_frto(sk, flag); @@ -3681,7 +3681,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) tcp_may_raise_cwnd(sk, flag)) tcp_cong_avoid(sk, ack, prior_in_flight); is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); } else { 
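/* Editor's note -- illustrative sketch, not part of the patch. The
 * TCPOPT_WINDOW repair hunk above packs both window-scale factors into
 * the single 32-bit opt_val: the low half carries the send scale, the
 * high half the receive scale.
 */
#include <linux/types.h>

static inline u32 pack_wscale(u16 snd, u16 rcv)
{
	return (u32)snd | ((u32)rcv << 16);
}

static inline void unpack_wscale(u32 val, u16 *snd, u16 *rcv)
{
	*snd = val & 0xFFFF;
	*rcv = val >> 16;
}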
if ((flag & FLAG_DATA_ACKED) && !frto_cwnd) @@ -3698,7 +3698,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) no_queue: /* If data was DSACKed, see if we can undo a cwnd reduction. */ if (flag & FLAG_DSACKING_ACK) - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); /* If this ack opens up a zero window, clear backoff. It was * being used to time the probes, and is probably far higher than @@ -3718,8 +3718,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag) */ if (TCP_SKB_CB(skb)->sacked) { flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); - newly_acked_sacked = tp->sacked_out - prior_sacked; - tcp_fastretrans_alert(sk, pkts_acked, newly_acked_sacked, + tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, is_dupack, flag); } @@ -4662,7 +4661,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb) if (eaten > 0) kfree_skb_partial(skb, fragstolen); - else if (!sock_flag(sk, SOCK_DEAD)) + if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, 0); return; } @@ -5392,6 +5391,8 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, { struct tcp_sock *tp = tcp_sk(sk); + if (unlikely(sk->sk_rx_dst == NULL)) + inet_csk(sk)->icsk_af_ops->sk_rx_dst_set(sk, skb); /* * Header prediction. * The code loosely follows the one in the famous @@ -5555,8 +5556,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb, #endif if (eaten) kfree_skb_partial(skb, fragstolen); - else - sk->sk_data_ready(sk, 0); + sk->sk_data_ready(sk, 0); return 0; } } @@ -5605,7 +5605,7 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb) tcp_set_state(sk, TCP_ESTABLISHED); if (skb != NULL) { - inet_sk_rx_dst_set(sk, skb); + icsk->icsk_af_ops->sk_rx_dst_set(sk, skb); security_inet_conn_established(sk, skb); } diff --git a/trunk/net/ipv4/tcp_ipv4.c b/trunk/net/ipv4/tcp_ipv4.c index 42b2a6a73092..00a748d14062 100644 --- a/trunk/net/ipv4/tcp_ipv4.c +++ b/trunk/net/ipv4/tcp_ipv4.c @@ -417,10 +417,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ tp->mtu_info = info; - if (!sock_owned_by_user(sk)) + if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); - else - set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags); + } else { + if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags)) + sock_hold(sk); + } goto out; } @@ -1462,6 +1464,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb, goto exit_nonewsk; newsk->sk_gso_type = SKB_GSO_TCPV4; + inet_sk_rx_dst_set(newsk, skb); newtp = tcp_sk(newsk); newinet = inet_sk(newsk); @@ -1627,9 +1630,6 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) sk->sk_rx_dst = NULL; } } - if (unlikely(sk->sk_rx_dst == NULL)) - inet_sk_rx_dst_set(sk, skb); - if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) { rsk = sk; goto reset; @@ -1872,10 +1872,21 @@ static struct timewait_sock_ops tcp_timewait_sock_ops = { .twsk_destructor= tcp_twsk_destructor, }; +void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; +} +EXPORT_SYMBOL(inet_sk_rx_dst_set); + const struct inet_connection_sock_af_ops ipv4_specific = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, + .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = 
tcp_v4_conn_request, .syn_recv_sock = tcp_v4_syn_recv_sock, .net_header_len = sizeof(struct iphdr), diff --git a/trunk/net/ipv4/tcp_metrics.c b/trunk/net/ipv4/tcp_metrics.c index 2288a6399e1e..0abe67bb4d3a 100644 --- a/trunk/net/ipv4/tcp_metrics.c +++ b/trunk/net/ipv4/tcp_metrics.c @@ -731,6 +731,18 @@ static int __net_init tcp_net_metrics_init(struct net *net) static void __net_exit tcp_net_metrics_exit(struct net *net) { + unsigned int i; + + for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log) ; i++) { + struct tcp_metrics_block *tm, *next; + + tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1); + while (tm) { + next = rcu_dereference_protected(tm->tcpm_next, 1); + kfree(tm); + tm = next; + } + } kfree(net->ipv4.tcp_metrics_hash); } diff --git a/trunk/net/ipv4/tcp_minisocks.c b/trunk/net/ipv4/tcp_minisocks.c index 232a90c3ec86..6ff7f10dce9d 100644 --- a/trunk/net/ipv4/tcp_minisocks.c +++ b/trunk/net/ipv4/tcp_minisocks.c @@ -387,8 +387,6 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct tcp_sock *oldtp = tcp_sk(sk); struct tcp_cookie_values *oldcvp = oldtp->cookie_values; - inet_sk_rx_dst_set(newsk, skb); - /* TCP Cookie Transactions require space for the cookie pair, * as it differs for each connection. There is no need to * copy any s_data_payload stored at the original socket. diff --git a/trunk/net/ipv4/tcp_output.c b/trunk/net/ipv4/tcp_output.c index 3f1bcff0b10b..d04632673a9e 100644 --- a/trunk/net/ipv4/tcp_output.c +++ b/trunk/net/ipv4/tcp_output.c @@ -910,14 +910,18 @@ void tcp_release_cb(struct sock *sk) if (flags & (1UL << TCP_TSQ_DEFERRED)) tcp_tsq_handler(sk); - if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) + if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { tcp_write_timer_handler(sk); - - if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) + __sock_put(sk); + } + if (flags & (1UL << TCP_DELACK_TIMER_DEFERRED)) { tcp_delack_timer_handler(sk); - - if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) + __sock_put(sk); + } + if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { sk->sk_prot->mtu_reduced(sk); + __sock_put(sk); + } } EXPORT_SYMBOL(tcp_release_cb); @@ -940,7 +944,7 @@ void __init tcp_tasklet_init(void) * We cant xmit new skbs from this context, as we might already * hold qdisc lock. */ -void tcp_wfree(struct sk_buff *skb) +static void tcp_wfree(struct sk_buff *skb) { struct sock *sk = skb->sk; struct tcp_sock *tp = tcp_sk(sk); @@ -1522,21 +1526,21 @@ static void tcp_cwnd_validate(struct sock *sk) * when we would be allowed to send the split-due-to-Nagle skb fully. */ static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, - unsigned int mss_now, unsigned int cwnd) + unsigned int mss_now, unsigned int max_segs) { const struct tcp_sock *tp = tcp_sk(sk); - u32 needed, window, cwnd_len; + u32 needed, window, max_len; window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; - cwnd_len = mss_now * cwnd; + max_len = mss_now * max_segs; - if (likely(cwnd_len <= window && skb != tcp_write_queue_tail(sk))) - return cwnd_len; + if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) + return max_len; needed = min(skb->len, window); - if (cwnd_len <= needed) - return cwnd_len; + if (max_len <= needed) + return max_len; return needed - needed % mss_now; } @@ -1765,7 +1769,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) limit = min(send_win, cong_win); /* If a full-sized TSO skb can be sent, do it. 
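/* Editor's note -- illustrative sketch, not part of the patch. The
 * tcp_net_metrics_exit() hunk above frees an RCU-protected chain at
 * namespace teardown, when no readers can remain, so plain kfree() is
 * safe and rcu_dereference_protected(..., 1) documents that assumption.
 * Generic form with hypothetical struct my_node:
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_node {
	struct my_node __rcu *next;
	/* ... payload ... */
};

static void free_chain(struct my_node __rcu **head)
{
	struct my_node *n = rcu_dereference_protected(*head, 1);

	while (n) {
		struct my_node *next = rcu_dereference_protected(n->next, 1);

		kfree(n);	/* no readers left, no grace period needed */
		n = next;
	}
	RCU_INIT_POINTER(*head, NULL);
}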
*/ - if (limit >= sk->sk_gso_max_size) + if (limit >= min_t(unsigned int, sk->sk_gso_max_size, + sk->sk_gso_max_segs * tp->mss_cache)) goto send_now; /* Middle in queue won't get any more data, full sendable already? */ @@ -1999,7 +2004,9 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, limit = mss_now; if (tso_segs > 1 && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, - cwnd_quota); + min_t(unsigned int, + cwnd_quota, + sk->sk_gso_max_segs)); if (skb->len > limit && unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) diff --git a/trunk/net/ipv4/tcp_timer.c b/trunk/net/ipv4/tcp_timer.c index 6df36ad55a38..b774a03bd1dc 100644 --- a/trunk/net/ipv4/tcp_timer.c +++ b/trunk/net/ipv4/tcp_timer.c @@ -252,7 +252,8 @@ static void tcp_delack_timer(unsigned long data) inet_csk(sk)->icsk_ack.blocked = 1; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); /* deleguate our work to tcp_release_cb() */ - set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); + if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) + sock_hold(sk); } bh_unlock_sock(sk); sock_put(sk); @@ -481,7 +482,8 @@ static void tcp_write_timer(unsigned long data) tcp_write_timer_handler(sk); } else { /* deleguate our work to tcp_release_cb() */ - set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags); + if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags)) + sock_hold(sk); } bh_unlock_sock(sk); sock_put(sk); diff --git a/trunk/net/ipv4/udp.c b/trunk/net/ipv4/udp.c index b4c3582a991f..2814f66dac64 100644 --- a/trunk/net/ipv4/udp.c +++ b/trunk/net/ipv4/udp.c @@ -758,7 +758,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4) uh->check = CSUM_MANGLED_0; send: - err = ip_send_skb(skb); + err = ip_send_skb(sock_net(sk), skb); if (err) { if (err == -ENOBUFS && !inet->recverr) { UDP_INC_STATS_USER(sock_net(sk), @@ -1226,6 +1226,11 @@ int udp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, if (unlikely(err)) { trace_kfree_skb(skb, udp_recvmsg); + if (!peeked) { + atomic_inc(&sk->sk_drops); + UDP_INC_STATS_USER(sock_net(sk), + UDP_MIB_INERRORS, is_udplite); + } goto out_free; } diff --git a/trunk/net/ipv6/addrconf.c b/trunk/net/ipv6/addrconf.c index 79181819a24f..6bc85f7c31e3 100644 --- a/trunk/net/ipv6/addrconf.c +++ b/trunk/net/ipv6/addrconf.c @@ -494,8 +494,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf) struct net_device *dev; struct inet6_dev *idev; - rcu_read_lock(); - for_each_netdev_rcu(net, dev) { + for_each_netdev(net, dev) { idev = __in6_dev_get(dev); if (idev) { int changed = (!idev->cnf.forwarding) ^ (!newf); @@ -504,7 +503,6 @@ static void addrconf_forward_change(struct net *net, __s32 newf) dev_forward_change(idev); } } - rcu_read_unlock(); } static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) diff --git a/trunk/net/ipv6/esp6.c b/trunk/net/ipv6/esp6.c index 6dc7fd353ef5..282f3723ee19 100644 --- a/trunk/net/ipv6/esp6.c +++ b/trunk/net/ipv6/esp6.c @@ -167,8 +167,6 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) struct esp_data *esp = x->data; /* skb is pure payload to encrypt */ - err = -ENOMEM; - aead = esp->aead; alen = crypto_aead_authsize(aead); @@ -203,8 +201,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); - if (!tmp) + if (!tmp) { + err = -ENOMEM; goto error; + } seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); diff --git 
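/* Editor's note -- schematic sketch, not part of the patch, and simplified
 * relative to the real tcp_release_cb() flag handling. The timer and
 * MTU-reduced hunks above pair every deferred-work bit with exactly one
 * socket reference: test_and_set_bit() takes the reference only the first
 * time the bit goes up, and the release side drops it once per bit it
 * services, so the socket cannot be freed while work is still queued.
 * Hypothetical helpers (defer_work, run_deferred):
 */
#include <linux/bitops.h>
#include <net/sock.h>

static void defer_work(struct sock *sk, unsigned long *flags, int bit)
{
	if (!test_and_set_bit(bit, flags))
		sock_hold(sk);		/* one reference per pending bit */
}

static void run_deferred(struct sock *sk, unsigned long *flags, int bit,
			 void (*handler)(struct sock *))
{
	if (test_and_clear_bit(bit, flags)) {
		handler(sk);
		__sock_put(sk);		/* drop the reference from defer_work() */
	}
}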
a/trunk/net/ipv6/inet6_connection_sock.c b/trunk/net/ipv6/inet6_connection_sock.c index 0251a6005be8..c4f934176cab 100644 --- a/trunk/net/ipv6/inet6_connection_sock.c +++ b/trunk/net/ipv6/inet6_connection_sock.c @@ -175,33 +175,12 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst, const struct in6_addr *saddr) { __ip6_dst_store(sk, dst, daddr, saddr); - -#ifdef CONFIG_XFRM - { - struct rt6_info *rt = (struct rt6_info *)dst; - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid); - } -#endif } static inline struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie) { - struct dst_entry *dst; - - dst = __sk_dst_check(sk, cookie); - -#ifdef CONFIG_XFRM - if (dst) { - struct rt6_info *rt = (struct rt6_info *)dst; - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) { - __sk_dst_reset(sk); - dst = NULL; - } - } -#endif - - return dst; + return __sk_dst_check(sk, cookie); } static struct dst_entry *inet6_csk_route_socket(struct sock *sk, diff --git a/trunk/net/ipv6/ip6_fib.c b/trunk/net/ipv6/ip6_fib.c index 13690d650c3e..286acfc21250 100644 --- a/trunk/net/ipv6/ip6_fib.c +++ b/trunk/net/ipv6/ip6_fib.c @@ -819,6 +819,10 @@ int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info) offsetof(struct rt6_info, rt6i_src), allow_create, replace_required); + if (IS_ERR(sn)) { + err = PTR_ERR(sn); + sn = NULL; + } if (!sn) { /* If it is failed, discard just allocated root, and then (in st_failure) stale node diff --git a/trunk/net/ipv6/mip6.c b/trunk/net/ipv6/mip6.c index 5b087c31d87b..0f9bdc5ee9f3 100644 --- a/trunk/net/ipv6/mip6.c +++ b/trunk/net/ipv6/mip6.c @@ -86,28 +86,30 @@ static int mip6_mh_len(int type) static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb) { - struct ip6_mh *mh; + struct ip6_mh _hdr; + const struct ip6_mh *mh; - if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) || - !pskb_may_pull(skb, (skb_transport_offset(skb) + - ((skb_transport_header(skb)[1] + 1) << 3)))) + mh = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (!mh) return -1; - mh = (struct ip6_mh *)skb_transport_header(skb); + if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len) + return -1; if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) { LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n", mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type)); - mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) - - skb_network_header(skb))); + mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) + + skb_network_header_len(skb)); return -1; } if (mh->ip6mh_proto != IPPROTO_NONE) { LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n", mh->ip6mh_proto); - mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) - - skb_network_header(skb))); + mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) + + skb_network_header_len(skb)); return -1; } diff --git a/trunk/net/ipv6/proc.c b/trunk/net/ipv6/proc.c index da2e92d05c15..745a32042950 100644 --- a/trunk/net/ipv6/proc.c +++ b/trunk/net/ipv6/proc.c @@ -307,10 +307,10 @@ static int __net_init ipv6_proc_init_net(struct net *net) goto proc_dev_snmp6_fail; return 0; +proc_dev_snmp6_fail: + proc_net_remove(net, "snmp6"); proc_snmp6_fail: proc_net_remove(net, "sockstat6"); -proc_dev_snmp6_fail: - proc_net_remove(net, "dev_snmp6"); return -ENOMEM; } diff --git a/trunk/net/ipv6/raw.c b/trunk/net/ipv6/raw.c index ef0579d5bca6..4a5f78b50495 100644 --- a/trunk/net/ipv6/raw.c +++ b/trunk/net/ipv6/raw.c @@ -107,21 +107,20 @@ static struct sock *__raw_v6_lookup(struct net 
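/* Editor's note -- illustrative sketch, not part of the patch. The
 * ipv6_proc_init_net() hunk above restores the usual goto-unwind shape:
 * each label undoes only the steps that had already succeeded, in reverse
 * order of setup. step_a/step_b/step_c and their undo_* counterparts are
 * hypothetical:
 */
int step_a(void);
int step_b(void);
int step_c(void);
void undo_step_a(void);
void undo_step_b(void);

static int my_init(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto undo_a;
	err = step_c();
	if (err)
		goto undo_b;
	return 0;

undo_b:
	undo_step_b();
undo_a:
	undo_step_a();
out:
	return err;
}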
*net, struct sock *sk, * 0 - deliver * 1 - block */ -static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb) +static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb) { - struct icmp6hdr *icmph; - struct raw6_sock *rp = raw6_sk(sk); - - if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) { - __u32 *data = &rp->filter.data[0]; - int bit_nr; + struct icmp6hdr *_hdr; + const struct icmp6hdr *hdr; - icmph = (struct icmp6hdr *) skb->data; - bit_nr = icmph->icmp6_type; + hdr = skb_header_pointer(skb, skb_transport_offset(skb), + sizeof(_hdr), &_hdr); + if (hdr) { + const __u32 *data = &raw6_sk(sk)->filter.data[0]; + unsigned int type = hdr->icmp6_type; - return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0; + return (data[type >> 5] & (1U << (type & 31))) != 0; } - return 0; + return 1; } #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE) diff --git a/trunk/net/ipv6/route.c b/trunk/net/ipv6/route.c index 8e80fd279100..854e4018d205 100644 --- a/trunk/net/ipv6/route.c +++ b/trunk/net/ipv6/route.c @@ -226,7 +226,7 @@ static struct rt6_info ip6_null_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -ENETUNREACH, .input = ip6_pkt_discard, .output = ip6_pkt_discard_out, @@ -246,7 +246,7 @@ static struct rt6_info ip6_prohibit_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EACCES, .input = ip6_pkt_prohibit, .output = ip6_pkt_prohibit_out, @@ -261,7 +261,7 @@ static struct rt6_info ip6_blk_hole_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, - .obsolete = -1, + .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EINVAL, .input = dst_discard, .output = dst_discard, @@ -281,13 +281,14 @@ static inline struct rt6_info *ip6_dst_alloc(struct net *net, struct fib6_table *table) { struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, - 0, DST_OBSOLETE_NONE, flags); + 0, DST_OBSOLETE_FORCE_CHK, flags); if (rt) { struct dst_entry *dst = &rt->dst; memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); rt6_init_peer(rt, table ? &table->tb6_peers : net->ipv6.peers); + rt->rt6i_genid = rt_genid(net); } return rt; } @@ -1031,6 +1032,13 @@ static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie) rt = (struct rt6_info *) dst; + /* All IPV6 dsts are created with ->obsolete set to the value + * DST_OBSOLETE_FORCE_CHK which forces validation calls down + * into this function always. 
+ */ + if (rt->rt6i_genid != rt_genid(dev_net(rt->dst.dev))) + return NULL; + if (rt->rt6i_node && (rt->rt6i_node->fn_sernum == cookie)) { if (rt->rt6i_peer_genid != rt6_peer_genid()) { if (!rt6_has_peer(rt)) @@ -1397,8 +1405,6 @@ int ip6_route_add(struct fib6_config *cfg) goto out; } - rt->dst.obsolete = -1; - if (cfg->fc_flags & RTF_EXPIRES) rt6_set_expires(rt, jiffies + clock_t_to_jiffies(cfg->fc_expires)); @@ -2080,7 +2086,6 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, rt->dst.input = ip6_input; rt->dst.output = ip6_output; rt->rt6i_idev = idev; - rt->dst.obsolete = -1; rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP; if (anycast) diff --git a/trunk/net/ipv6/tcp_ipv6.c b/trunk/net/ipv6/tcp_ipv6.c index c66b90f71c9b..acd32e3f1b68 100644 --- a/trunk/net/ipv6/tcp_ipv6.c +++ b/trunk/net/ipv6/tcp_ipv6.c @@ -94,6 +94,18 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk, } #endif +static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb) +{ + struct dst_entry *dst = skb_dst(skb); + const struct rt6_info *rt = (const struct rt6_info *)dst; + + dst_hold(dst); + sk->sk_rx_dst = dst; + inet_sk(sk)->rx_dst_ifindex = skb->skb_iif; + if (rt->rt6i_node) + inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum; +} + static void tcp_v6_hash(struct sock *sk) { if (sk->sk_state != TCP_CLOSE) { @@ -391,8 +403,9 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, tp->mtu_info = ntohl(info); if (!sock_owned_by_user(sk)) tcp_v6_mtu_reduced(sk); - else - set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags); + else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, + &tp->tsq_flags)) + sock_hold(sk); goto out; } @@ -1270,6 +1283,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, newsk->sk_gso_type = SKB_GSO_TCPV6; __ip6_dst_store(newsk, dst, NULL, NULL); + inet6_sk_rx_dst_set(newsk, skb); newtcp6sk = (struct tcp6_sock *)newsk; inet_sk(newsk)->pinet6 = &newtcp6sk->inet6; @@ -1447,7 +1461,17 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) opt_skb = skb_clone(skb, sk_gfp_atomic(sk, GFP_ATOMIC)); if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ + struct dst_entry *dst = sk->sk_rx_dst; + sock_rps_save_rxhash(sk, skb); + if (dst) { + if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || + dst->ops->check(dst, np->rx_dst_cookie) == NULL) { + dst_release(dst); + sk->sk_rx_dst = NULL; + } + } + if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) goto reset; if (opt_skb) @@ -1705,9 +1729,9 @@ static void tcp_v6_early_demux(struct sk_buff *skb) struct dst_entry *dst = sk->sk_rx_dst; struct inet_sock *icsk = inet_sk(sk); if (dst) - dst = dst_check(dst, 0); + dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie); if (dst && - icsk->rx_dst_ifindex == inet6_iif(skb)) + icsk->rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } @@ -1723,6 +1747,7 @@ static const struct inet_connection_sock_af_ops ipv6_specific = { .queue_xmit = inet6_csk_xmit, .send_check = tcp_v6_send_check, .rebuild_header = inet6_sk_rebuild_header, + .sk_rx_dst_set = inet6_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = sizeof(struct ipv6hdr), @@ -1754,6 +1779,7 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = { .queue_xmit = ip_queue_xmit, .send_check = tcp_v4_send_check, .rebuild_header = inet_sk_rebuild_header, + .sk_rx_dst_set = inet_sk_rx_dst_set, .conn_request = tcp_v6_conn_request, .syn_recv_sock = tcp_v6_syn_recv_sock, .net_header_len = 
sizeof(struct iphdr), diff --git a/trunk/net/ipv6/udp.c b/trunk/net/ipv6/udp.c index 99d0077b56b8..07e2bfef6845 100644 --- a/trunk/net/ipv6/udp.c +++ b/trunk/net/ipv6/udp.c @@ -394,6 +394,17 @@ int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, } if (unlikely(err)) { trace_kfree_skb(skb, udpv6_recvmsg); + if (!peeked) { + atomic_inc(&sk->sk_drops); + if (is_udp4) + UDP_INC_STATS_USER(sock_net(sk), + UDP_MIB_INERRORS, + is_udplite); + else + UDP6_INC_STATS_USER(sock_net(sk), + UDP_MIB_INERRORS, + is_udplite); + } goto out_free; } if (!peeked) { diff --git a/trunk/net/ipv6/xfrm6_policy.c b/trunk/net/ipv6/xfrm6_policy.c index ef39812107b1..f8c4c08ffb60 100644 --- a/trunk/net/ipv6/xfrm6_policy.c +++ b/trunk/net/ipv6/xfrm6_policy.c @@ -73,6 +73,13 @@ static int xfrm6_get_tos(const struct flowi *fl) return 0; } +static void xfrm6_init_dst(struct net *net, struct xfrm_dst *xdst) +{ + struct rt6_info *rt = (struct rt6_info *)xdst; + + rt6_init_peer(rt, net->ipv6.peers); +} + static int xfrm6_init_path(struct xfrm_dst *path, struct dst_entry *dst, int nfheader_len) { @@ -286,6 +293,7 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = { .get_saddr = xfrm6_get_saddr, .decode_session = _decode_session6, .get_tos = xfrm6_get_tos, + .init_dst = xfrm6_init_dst, .init_path = xfrm6_init_path, .fill_dst = xfrm6_fill_dst, .blackhole_route = ip6_blackhole_route, diff --git a/trunk/net/l2tp/l2tp_core.c b/trunk/net/l2tp/l2tp_core.c index 393355d37b47..1a9f3723c13c 100644 --- a/trunk/net/l2tp/l2tp_core.c +++ b/trunk/net/l2tp/l2tp_core.c @@ -1347,11 +1347,10 @@ static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) /* Remove from tunnel list */ spin_lock_bh(&pn->l2tp_tunnel_list_lock); list_del_rcu(&tunnel->list); + kfree_rcu(tunnel, rcu); spin_unlock_bh(&pn->l2tp_tunnel_list_lock); - synchronize_rcu(); atomic_dec(&l2tp_tunnel_count); - kfree(tunnel); } /* Create a socket for the tunnel, if one isn't set up by @@ -1502,6 +1501,8 @@ static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2t return err; } +static struct lock_class_key l2tp_socket_class; + int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp) { struct l2tp_tunnel *tunnel = NULL; @@ -1606,6 +1607,8 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 tunnel->old_sk_destruct = sk->sk_destruct; sk->sk_destruct = &l2tp_tunnel_destruct; tunnel->sock = sk; + lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); + sk->sk_allocation = GFP_ATOMIC; /* Add tunnel to our list */ diff --git a/trunk/net/l2tp/l2tp_core.h b/trunk/net/l2tp/l2tp_core.h index a38ec6cdeee1..56d583e083a7 100644 --- a/trunk/net/l2tp/l2tp_core.h +++ b/trunk/net/l2tp/l2tp_core.h @@ -163,6 +163,7 @@ struct l2tp_tunnel_cfg { struct l2tp_tunnel { int magic; /* Should be L2TP_TUNNEL_MAGIC */ + struct rcu_head rcu; rwlock_t hlist_lock; /* protect session_hlist */ struct hlist_head session_hlist[L2TP_HASH_SIZE]; /* hashed list of sessions, diff --git a/trunk/net/l2tp/l2tp_eth.c b/trunk/net/l2tp/l2tp_eth.c index f9ee74deeac2..3bfb34aaee29 100644 --- a/trunk/net/l2tp/l2tp_eth.c +++ b/trunk/net/l2tp/l2tp_eth.c @@ -153,7 +153,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, skb->data, length); } - if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) + if (!pskb_may_pull(skb, ETH_HLEN)) goto error; secpath_reset(skb); diff --git 
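/* Editor's note -- illustrative sketch, not part of the patch. The
 * l2tp_tunnel_free() hunk above replaces a blocking synchronize_rcu() +
 * kfree() pair with kfree_rcu(), which only needs the rcu_head that the
 * neighbouring l2tp_core.h hunk adds to the structure. Hypothetical
 * struct my_entry:
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {
	struct list_head list;
	struct rcu_head rcu;
};

static void my_entry_del(struct my_entry *e)
{
	list_del_rcu(&e->list);
	/* Frees e after a grace period without blocking the caller,
	 * so it is fine to do this under a spinlock. */
	kfree_rcu(e, rcu);
}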
a/trunk/net/l2tp/l2tp_ip6.c b/trunk/net/l2tp/l2tp_ip6.c index 35e1e4bde587..927547171bc7 100644 --- a/trunk/net/l2tp/l2tp_ip6.c +++ b/trunk/net/l2tp/l2tp_ip6.c @@ -410,6 +410,7 @@ static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, lsa->l2tp_family = AF_INET6; lsa->l2tp_flowinfo = 0; lsa->l2tp_scope_id = 0; + lsa->l2tp_unused = 0; if (peer) { if (!lsk->peer_conn_id) return -ENOTCONN; diff --git a/trunk/net/l2tp/l2tp_netlink.c b/trunk/net/l2tp/l2tp_netlink.c index d71cd9229a47..6f936358d664 100644 --- a/trunk/net/l2tp/l2tp_netlink.c +++ b/trunk/net/l2tp/l2tp_netlink.c @@ -80,8 +80,8 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info) hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq, &l2tp_nl_family, 0, L2TP_CMD_NOOP); - if (IS_ERR(hdr)) { - ret = PTR_ERR(hdr); + if (!hdr) { + ret = -EMSGSIZE; goto err_out; } @@ -250,8 +250,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags, hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_TUNNEL_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); + if (!hdr) + return -EMSGSIZE; if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) || nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || @@ -617,8 +617,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags sk = tunnel->sock; hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET); - if (IS_ERR(hdr)) - return PTR_ERR(hdr); + if (!hdr) + return -EMSGSIZE; if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) || nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) || diff --git a/trunk/net/llc/af_llc.c b/trunk/net/llc/af_llc.c index f6fe4d400502..c2190005a114 100644 --- a/trunk/net/llc/af_llc.c +++ b/trunk/net/llc/af_llc.c @@ -969,14 +969,13 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr, struct sockaddr_llc sllc; struct sock *sk = sock->sk; struct llc_sock *llc = llc_sk(sk); - int rc = 0; + int rc = -EBADF; memset(&sllc, 0, sizeof(sllc)); lock_sock(sk); if (sock_flag(sk, SOCK_ZAPPED)) goto out; *uaddrlen = sizeof(sllc); - memset(uaddr, 0, *uaddrlen); if (peer) { rc = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) @@ -1206,7 +1205,7 @@ static int __init llc2_init(void) rc = llc_proc_init(); if (rc != 0) { printk(llc_proc_err_msg); - goto out_unregister_llc_proto; + goto out_station; } rc = llc_sysctl_init(); if (rc) { @@ -1226,7 +1225,8 @@ static int __init llc2_init(void) llc_sysctl_exit(); out_proc: llc_proc_exit(); -out_unregister_llc_proto: +out_station: + llc_station_exit(); proto_unregister(&llc_proto); goto out; } diff --git a/trunk/net/llc/llc_input.c b/trunk/net/llc/llc_input.c index e32cab44ea95..dd3e83328ad5 100644 --- a/trunk/net/llc/llc_input.c +++ b/trunk/net/llc/llc_input.c @@ -42,6 +42,7 @@ static void (*llc_type_handlers[2])(struct llc_sap *sap, void llc_add_pack(int type, void (*handler)(struct llc_sap *sap, struct sk_buff *skb)) { + smp_wmb(); /* ensure initialisation is complete before it's called */ if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) llc_type_handlers[type - 1] = handler; } @@ -50,11 +51,19 @@ void llc_remove_pack(int type) { if (type == LLC_DEST_SAP || type == LLC_DEST_CONN) llc_type_handlers[type - 1] = NULL; + synchronize_net(); } void llc_set_station_handler(void (*handler)(struct sk_buff *skb)) { + /* Ensure initialisation is complete before it's called */ + if (handler) + smp_wmb(); + llc_station_handler = handler; + + if (!handler) + synchronize_net(); } /** @@ -150,6 
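/* Editor's note -- hedged sketch, not part of the patch, written against
 * the generic netlink API as it looked in this kernel series. The
 * l2tp_netlink hunks above fix the failure test: genlmsg_put() returns
 * NULL when the skb has no room, never an ERR_PTR(), so the old IS_ERR()
 * checks could not fire and -EMSGSIZE has to be supplied by the caller.
 * my_family, MY_CMD and MY_ATTR are hypothetical:
 */
#include <net/genetlink.h>

static struct genl_family my_family;		/* hypothetical family */
enum { MY_CMD = 1 };
enum { MY_ATTR = 1 };

static int my_fill(struct sk_buff *skb, u32 pid, u32 seq, int flags, u32 value)
{
	void *hdr;

	hdr = genlmsg_put(skb, pid, seq, &my_family, flags, MY_CMD);
	if (!hdr)				/* failure is NULL, not IS_ERR() */
		return -EMSGSIZE;

	if (nla_put_u32(skb, MY_ATTR, value))
		goto nla_put_failure;

	return genlmsg_end(skb, hdr);

nla_put_failure:
	genlmsg_cancel(skb, hdr);
	return -EMSGSIZE;
}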
+159,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, int dest; int (*rcv)(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); + void (*sta_handler)(struct sk_buff *skb); + void (*sap_handler)(struct llc_sap *sap, struct sk_buff *skb); if (!net_eq(dev_net(dev), &init_net)) goto drop; @@ -182,7 +193,8 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, */ rcv = rcu_dereference(sap->rcv_func); dest = llc_pdu_type(skb); - if (unlikely(!dest || !llc_type_handlers[dest - 1])) { + sap_handler = dest ? ACCESS_ONCE(llc_type_handlers[dest - 1]) : NULL; + if (unlikely(!sap_handler)) { if (rcv) rcv(skb, dev, pt, orig_dev); else @@ -193,7 +205,7 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, if (cskb) rcv(cskb, dev, pt, orig_dev); } - llc_type_handlers[dest - 1](sap, skb); + sap_handler(sap, skb); } llc_sap_put(sap); out: @@ -202,9 +214,10 @@ int llc_rcv(struct sk_buff *skb, struct net_device *dev, kfree_skb(skb); goto out; handle_station: - if (!llc_station_handler) + sta_handler = ACCESS_ONCE(llc_station_handler); + if (!sta_handler) goto drop; - llc_station_handler(skb); + sta_handler(skb); goto out; } diff --git a/trunk/net/llc/llc_station.c b/trunk/net/llc/llc_station.c index 39a8d8924b9c..b2f2bac2c2a2 100644 --- a/trunk/net/llc/llc_station.c +++ b/trunk/net/llc/llc_station.c @@ -268,7 +268,7 @@ static int llc_station_ac_send_null_dsap_xid_c(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } @@ -293,7 +293,7 @@ static int llc_station_ac_send_xid_r(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } @@ -322,7 +322,7 @@ static int llc_station_ac_send_test_r(struct sk_buff *skb) out: return rc; free: - kfree_skb(skb); + kfree_skb(nskb); goto out; } @@ -687,12 +687,8 @@ static void llc_station_rcv(struct sk_buff *skb) llc_station_state_process(skb); } -int __init llc_station_init(void) +void __init llc_station_init(void) { - int rc = -ENOBUFS; - struct sk_buff *skb; - struct llc_station_state_ev *ev; - skb_queue_head_init(&llc_main_station.mac_pdu_q); skb_queue_head_init(&llc_main_station.ev_q.list); spin_lock_init(&llc_main_station.ev_q.lock); @@ -700,23 +696,12 @@ int __init llc_station_init(void) (unsigned long)&llc_main_station); llc_main_station.ack_timer.expires = jiffies + sysctl_llc_station_ack_timeout; - skb = alloc_skb(0, GFP_ATOMIC); - if (!skb) - goto out; - rc = 0; - llc_set_station_handler(llc_station_rcv); - ev = llc_station_ev(skb); - memset(ev, 0, sizeof(*ev)); llc_main_station.maximum_retry = 1; - llc_main_station.state = LLC_STATION_STATE_DOWN; - ev->type = LLC_STATION_EV_TYPE_SIMPLE; - ev->prim_type = LLC_STATION_EV_ENABLE_WITHOUT_DUP_ADDR_CHECK; - rc = llc_station_next_state(skb); -out: - return rc; + llc_main_station.state = LLC_STATION_STATE_UP; + llc_set_station_handler(llc_station_rcv); } -void __exit llc_station_exit(void) +void llc_station_exit(void) { llc_set_station_handler(NULL); } diff --git a/trunk/net/mac80211/cfg.c b/trunk/net/mac80211/cfg.c index d41974aacf51..a58c0b649ba1 100644 --- a/trunk/net/mac80211/cfg.c +++ b/trunk/net/mac80211/cfg.c @@ -1378,6 +1378,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, else memset(next_hop, 0, ETH_ALEN); + memset(pinfo, 0, sizeof(*pinfo)); + pinfo->generation = mesh_paths_generation; pinfo->filled = MPATH_INFO_FRAME_QLEN | @@ -1396,7 +1398,6 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, pinfo->discovery_timeout = 
jiffies_to_msecs(mpath->discovery_timeout); pinfo->discovery_retries = mpath->discovery_retries; - pinfo->flags = 0; if (mpath->flags & MESH_PATH_ACTIVE) pinfo->flags |= NL80211_MPATH_FLAG_ACTIVE; if (mpath->flags & MESH_PATH_RESOLVING) @@ -1405,10 +1406,8 @@ static void mpath_set_pinfo(struct mesh_path *mpath, u8 *next_hop, pinfo->flags |= NL80211_MPATH_FLAG_SN_VALID; if (mpath->flags & MESH_PATH_FIXED) pinfo->flags |= NL80211_MPATH_FLAG_FIXED; - if (mpath->flags & MESH_PATH_RESOLVING) - pinfo->flags |= NL80211_MPATH_FLAG_RESOLVING; - - pinfo->flags = mpath->flags; + if (mpath->flags & MESH_PATH_RESOLVED) + pinfo->flags |= NL80211_MPATH_FLAG_RESOLVED; } static int ieee80211_get_mpath(struct wiphy *wiphy, struct net_device *dev, diff --git a/trunk/net/mac80211/mesh.c b/trunk/net/mac80211/mesh.c index 6fac18c0423f..85572353a7e3 100644 --- a/trunk/net/mac80211/mesh.c +++ b/trunk/net/mac80211/mesh.c @@ -622,6 +622,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) del_timer_sync(&sdata->u.mesh.housekeeping_timer); del_timer_sync(&sdata->u.mesh.mesh_path_root_timer); + del_timer_sync(&sdata->u.mesh.mesh_path_timer); /* * If the timer fired while we waited for it, it will have * requeued the work. Now the work will be running again @@ -634,6 +635,8 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata) local->fif_other_bss--; atomic_dec(&local->iff_allmultis); ieee80211_configure_filter(local); + + sdata->u.mesh.timers_running = 0; } static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata, diff --git a/trunk/net/mac80211/mlme.c b/trunk/net/mac80211/mlme.c index cef0c9e79aba..f76b83341cf9 100644 --- a/trunk/net/mac80211/mlme.c +++ b/trunk/net/mac80211/mlme.c @@ -1430,6 +1430,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, del_timer_sync(&sdata->u.mgd.bcn_mon_timer); del_timer_sync(&sdata->u.mgd.timer); del_timer_sync(&sdata->u.mgd.chswitch_timer); + + sdata->u.mgd.timers_running = 0; } void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, @@ -3246,6 +3248,8 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata, goto out_unlock; err_clear: + memset(ifmgd->bssid, 0, ETH_ALEN); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->auth_data = NULL; err_free: kfree(auth_data); @@ -3437,6 +3441,8 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata, err = 0; goto out; err_clear: + memset(ifmgd->bssid, 0, ETH_ALEN); + ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BSSID); ifmgd->assoc_data = NULL; err_free: kfree(assoc_data); diff --git a/trunk/net/mac80211/scan.c b/trunk/net/mac80211/scan.c index bcaee5d12839..839dd9737989 100644 --- a/trunk/net/mac80211/scan.c +++ b/trunk/net/mac80211/scan.c @@ -299,7 +299,7 @@ static void __ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted, if (local->scan_req != local->int_scan_req) cfg80211_scan_done(local->scan_req, aborted); local->scan_req = NULL; - local->scan_sdata = NULL; + rcu_assign_pointer(local->scan_sdata, NULL); local->scanning = 0; local->scan_channel = NULL; @@ -984,7 +984,6 @@ int ieee80211_request_sched_scan_stop(struct ieee80211_sub_if_data *sdata) kfree(local->sched_scan_ies.ie[i]); drv_sched_scan_stop(local, sdata); - rcu_assign_pointer(local->sched_scan_sdata, NULL); } out: mutex_unlock(&local->mtx); diff --git a/trunk/net/mac80211/tx.c b/trunk/net/mac80211/tx.c index acf712ffb5e6..c5e8c9c31f76 100644 --- a/trunk/net/mac80211/tx.c +++ b/trunk/net/mac80211/tx.c @@ -1811,37 +1811,31 @@ netdev_tx_t 
ieee80211_subif_start_xmit(struct sk_buff *skb, meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata, NULL, NULL); } else { - int is_mesh_mcast = 1; - const u8 *mesh_da; + /* DS -> MBSS (802.11-2012 13.11.3.3). + * For unicast with unknown forwarding information, + * destination might be in the MBSS or if that fails + * forwarded to another mesh gate. In either case + * resolution will be handled in ieee80211_xmit(), so + * leave the original DA. This also works for mcast */ + const u8 *mesh_da = skb->data; + + if (mppath) + mesh_da = mppath->mpp; + else if (mpath) + mesh_da = mpath->dst; + rcu_read_unlock(); - if (is_multicast_ether_addr(skb->data)) - /* DA TA mSA AE:SA */ - mesh_da = skb->data; - else { - static const u8 bcast[ETH_ALEN] = - { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; - if (mppath) { - /* RA TA mDA mSA AE:DA SA */ - mesh_da = mppath->mpp; - is_mesh_mcast = 0; - } else if (mpath) { - mesh_da = mpath->dst; - is_mesh_mcast = 0; - } else { - /* DA TA mSA AE:SA */ - mesh_da = bcast; - } - } hdrlen = ieee80211_fill_mesh_addresses(&hdr, &fc, mesh_da, sdata->vif.addr); - rcu_read_unlock(); - if (is_mesh_mcast) + if (is_multicast_ether_addr(mesh_da)) + /* DA TA mSA AE:SA */ meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata, skb->data + ETH_ALEN, NULL); else + /* RA TA mDA mSA AE:DA SA */ meshhdrlen = ieee80211_new_mesh_header(&mesh_hdr, sdata, diff --git a/trunk/net/netfilter/ipvs/ip_vs_ctl.c b/trunk/net/netfilter/ipvs/ip_vs_ctl.c index 84444dda194b..f51013c07b9f 100644 --- a/trunk/net/netfilter/ipvs/ip_vs_ctl.c +++ b/trunk/net/netfilter/ipvs/ip_vs_ctl.c @@ -1171,8 +1171,10 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u, goto out_err; } svc->stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats); - if (!svc->stats.cpustats) + if (!svc->stats.cpustats) { + ret = -ENOMEM; goto out_err; + } /* I'm the first user of the service */ atomic_set(&svc->usecnt, 0); @@ -2759,6 +2761,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) { struct ip_vs_timeout_user t; + memset(&t, 0, sizeof(t)); __ip_vs_get_timeouts(net, &t); if (copy_to_user(user, &t, sizeof(t)) != 0) ret = -EFAULT; diff --git a/trunk/net/netfilter/nf_conntrack_core.c b/trunk/net/netfilter/nf_conntrack_core.c index cf4875565d67..2ceec64b19f9 100644 --- a/trunk/net/netfilter/nf_conntrack_core.c +++ b/trunk/net/netfilter/nf_conntrack_core.c @@ -249,12 +249,15 @@ static void death_by_event(unsigned long ul_conntrack) { struct nf_conn *ct = (void *)ul_conntrack; struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); + + BUG_ON(ecache == NULL); if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { /* bad luck, let's retry again */ - ct->timeout.expires = jiffies + + ecache->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); - add_timer(&ct->timeout); + add_timer(&ecache->timeout); return; } /* we've got the event delivered, now it's dying */ @@ -268,6 +271,9 @@ static void death_by_event(unsigned long ul_conntrack) void nf_ct_insert_dying_list(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); + + BUG_ON(ecache == NULL); /* add this conntrack to the dying list */ spin_lock_bh(&nf_conntrack_lock); @@ -275,10 +281,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct) &net->ct.dying); spin_unlock_bh(&nf_conntrack_lock); /* set a new timer to retry event delivery */ - setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); - ct->timeout.expires = 
jiffies + + setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); + ecache->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); - add_timer(&ct->timeout); + add_timer(&ecache->timeout); } EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); diff --git a/trunk/net/netfilter/nf_conntrack_expect.c b/trunk/net/netfilter/nf_conntrack_expect.c index 45cf602a76bc..527651a53a45 100644 --- a/trunk/net/netfilter/nf_conntrack_expect.c +++ b/trunk/net/netfilter/nf_conntrack_expect.c @@ -361,23 +361,6 @@ static void evict_oldest_expect(struct nf_conn *master, } } -static inline int refresh_timer(struct nf_conntrack_expect *i) -{ - struct nf_conn_help *master_help = nfct_help(i->master); - const struct nf_conntrack_expect_policy *p; - - if (!del_timer(&i->timeout)) - return 0; - - p = &rcu_dereference_protected( - master_help->helper, - lockdep_is_held(&nf_conntrack_lock) - )->expect_policy[i->class]; - i->timeout.expires = jiffies + p->timeout * HZ; - add_timer(&i->timeout); - return 1; -} - static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) { const struct nf_conntrack_expect_policy *p; @@ -386,7 +369,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) struct nf_conn_help *master_help = nfct_help(master); struct nf_conntrack_helper *helper; struct net *net = nf_ct_exp_net(expect); - struct hlist_node *n; + struct hlist_node *n, *next; unsigned int h; int ret = 1; @@ -395,12 +378,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) goto out; } h = nf_ct_expect_dst_hash(&expect->tuple); - hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { + hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) { if (expect_matches(i, expect)) { - /* Refresh timer: if it's dying, ignore.. 
*/ - if (refresh_timer(i)) { - ret = 0; - goto out; + if (del_timer(&i->timeout)) { + nf_ct_unlink_expect(i); + nf_ct_expect_put(i); + break; } } else if (expect_clash(i, expect)) { ret = -EBUSY; diff --git a/trunk/net/netfilter/nf_conntrack_netlink.c b/trunk/net/netfilter/nf_conntrack_netlink.c index 14f67a2cbcb5..9807f3278fcb 100644 --- a/trunk/net/netfilter/nf_conntrack_netlink.c +++ b/trunk/net/netfilter/nf_conntrack_netlink.c @@ -1896,10 +1896,15 @@ static int ctnetlink_nfqueue_parse(const struct nlattr *attr, struct nf_conn *ct) { struct nlattr *cda[CTA_MAX+1]; + int ret; nla_parse_nested(cda, CTA_MAX, attr, ct_nla_policy); - return ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); + spin_lock_bh(&nf_conntrack_lock); + ret = ctnetlink_nfqueue_parse_ct((const struct nlattr **)cda, ct); + spin_unlock_bh(&nf_conntrack_lock); + + return ret; } static struct nfq_ct_hook ctnetlink_nfqueue_hook = { @@ -2785,7 +2790,8 @@ static int __init ctnetlink_init(void) goto err_unreg_subsys; } - if (register_pernet_subsys(&ctnetlink_net_ops)) { + ret = register_pernet_subsys(&ctnetlink_net_ops); + if (ret < 0) { pr_err("ctnetlink_init: cannot register pernet operations\n"); goto err_unreg_exp_subsys; } diff --git a/trunk/net/netfilter/nf_conntrack_proto_tcp.c b/trunk/net/netfilter/nf_conntrack_proto_tcp.c index a5ac11ebef33..e046b3756aab 100644 --- a/trunk/net/netfilter/nf_conntrack_proto_tcp.c +++ b/trunk/net/netfilter/nf_conntrack_proto_tcp.c @@ -158,21 +158,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { * sCL -> sSS */ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ -/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR }, +/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR }, /* * sNO -> sIV Too late and no reason to do anything * sSS -> sIV Client can't send SYN and then SYN/ACK * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open - * sSR -> sIG - * sES -> sIG Error: SYNs in window outside the SYN_SENT state - * are errors. Receiver will reply with RST - * and close the connection. - * Or we are not in sync and hold a dead connection. - * sFW -> sIG - * sCW -> sIG - * sLA -> sIG - * sTW -> sIG - * sCL -> sIG + * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open + * sES -> sIV Invalid SYN/ACK packets sent by the client + * sFW -> sIV + * sCW -> sIV + * sLA -> sIV + * sTW -> sIV + * sCL -> sIV */ /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, @@ -633,15 +630,9 @@ static bool tcp_in_window(const struct nf_conn *ct, ack = sack = receiver->td_end; } - if (seq == end - && (!tcph->rst - || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT))) + if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT) /* - * Packets contains no data: we assume it is valid - * and check the ack value only. - * However RST segments are always validated by their - * SEQ number, except when seq == 0 (reset sent answering - * SYN. + * RST sent answering SYN. 
*/ seq = end = sender->td_end; diff --git a/trunk/net/netfilter/nf_conntrack_sip.c b/trunk/net/netfilter/nf_conntrack_sip.c index 758a1bacc126..5c0a112aeee6 100644 --- a/trunk/net/netfilter/nf_conntrack_sip.c +++ b/trunk/net/netfilter/nf_conntrack_sip.c @@ -183,12 +183,12 @@ static int media_len(const struct nf_conn *ct, const char *dptr, return len + digits_len(ct, dptr, limit, shift); } -static int parse_addr(const struct nf_conn *ct, const char *cp, - const char **endp, union nf_inet_addr *addr, - const char *limit) +static int sip_parse_addr(const struct nf_conn *ct, const char *cp, + const char **endp, union nf_inet_addr *addr, + const char *limit, bool delim) { const char *end; - int ret = 0; + int ret; if (!ct) return 0; @@ -197,16 +197,28 @@ static int parse_addr(const struct nf_conn *ct, const char *cp, switch (nf_ct_l3num(ct)) { case AF_INET: ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); + if (ret == 0) + return 0; break; case AF_INET6: + if (cp < limit && *cp == '[') + cp++; + else if (delim) + return 0; + ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); + if (ret == 0) + return 0; + + if (end < limit && *end == ']') + end++; + else if (delim) + return 0; break; default: BUG(); } - if (ret == 0 || end == cp) - return 0; if (endp) *endp = end; return 1; @@ -219,7 +231,7 @@ static int epaddr_len(const struct nf_conn *ct, const char *dptr, union nf_inet_addr addr; const char *aux = dptr; - if (!parse_addr(ct, dptr, &dptr, &addr, limit)) { + if (!sip_parse_addr(ct, dptr, &dptr, &addr, limit, true)) { pr_debug("ip: %s parse failed.!\n", dptr); return 0; } @@ -296,7 +308,7 @@ int ct_sip_parse_request(const struct nf_conn *ct, return 0; dptr += shift; - if (!parse_addr(ct, dptr, &end, addr, limit)) + if (!sip_parse_addr(ct, dptr, &end, addr, limit, true)) return -1; if (end < limit && *end == ':') { end++; @@ -550,7 +562,7 @@ int ct_sip_parse_header_uri(const struct nf_conn *ct, const char *dptr, if (ret == 0) return ret; - if (!parse_addr(ct, dptr + *matchoff, &c, addr, limit)) + if (!sip_parse_addr(ct, dptr + *matchoff, &c, addr, limit, true)) return -1; if (*c == ':') { c++; @@ -599,7 +611,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, unsigned int dataoff, unsigned int datalen, const char *name, unsigned int *matchoff, unsigned int *matchlen, - union nf_inet_addr *addr) + union nf_inet_addr *addr, bool delim) { const char *limit = dptr + datalen; const char *start, *end; @@ -613,7 +625,7 @@ int ct_sip_parse_address_param(const struct nf_conn *ct, const char *dptr, return 0; start += strlen(name); - if (!parse_addr(ct, start, &end, addr, limit)) + if (!sip_parse_addr(ct, start, &end, addr, limit, delim)) return 0; *matchoff = start - dptr; *matchlen = end - start; @@ -675,6 +687,47 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, return 1; } +static int sdp_parse_addr(const struct nf_conn *ct, const char *cp, + const char **endp, union nf_inet_addr *addr, + const char *limit) +{ + const char *end; + int ret; + + memset(addr, 0, sizeof(*addr)); + switch (nf_ct_l3num(ct)) { + case AF_INET: + ret = in4_pton(cp, limit - cp, (u8 *)&addr->ip, -1, &end); + break; + case AF_INET6: + ret = in6_pton(cp, limit - cp, (u8 *)&addr->ip6, -1, &end); + break; + default: + BUG(); + } + + if (ret == 0) + return 0; + if (endp) + *endp = end; + return 1; +} + +/* skip ip address. returns its length. 
*/ +static int sdp_addr_len(const struct nf_conn *ct, const char *dptr, + const char *limit, int *shift) +{ + union nf_inet_addr addr; + const char *aux = dptr; + + if (!sdp_parse_addr(ct, dptr, &dptr, &addr, limit)) { + pr_debug("ip: %s parse failed.!\n", dptr); + return 0; + } + + return dptr - aux; +} + /* SDP header parsing: a SDP session description contains an ordered set of * headers, starting with a section containing general session parameters, * optionally followed by multiple media descriptions. @@ -686,10 +739,10 @@ static int ct_sip_parse_transport(struct nf_conn *ct, const char *dptr, */ static const struct sip_header ct_sdp_hdrs[] = { [SDP_HDR_VERSION] = SDP_HDR("v=", NULL, digits_len), - [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", epaddr_len), - [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", epaddr_len), - [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", epaddr_len), - [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", epaddr_len), + [SDP_HDR_OWNER_IP4] = SDP_HDR("o=", "IN IP4 ", sdp_addr_len), + [SDP_HDR_CONNECTION_IP4] = SDP_HDR("c=", "IN IP4 ", sdp_addr_len), + [SDP_HDR_OWNER_IP6] = SDP_HDR("o=", "IN IP6 ", sdp_addr_len), + [SDP_HDR_CONNECTION_IP6] = SDP_HDR("c=", "IN IP6 ", sdp_addr_len), [SDP_HDR_MEDIA] = SDP_HDR("m=", NULL, media_len), }; @@ -775,8 +828,8 @@ static int ct_sip_parse_sdp_addr(const struct nf_conn *ct, const char *dptr, if (ret <= 0) return ret; - if (!parse_addr(ct, dptr + *matchoff, NULL, addr, - dptr + *matchoff + *matchlen)) + if (!sdp_parse_addr(ct, dptr + *matchoff, NULL, addr, + dptr + *matchoff + *matchlen)) return -1; return 1; } @@ -1515,7 +1568,6 @@ static int sip_help_udp(struct sk_buff *skb, unsigned int protoff, } static struct nf_conntrack_helper sip[MAX_PORTS][4] __read_mostly; -static char sip_names[MAX_PORTS][4][sizeof("sip-65535")] __read_mostly; static const struct nf_conntrack_expect_policy sip_exp_policy[SIP_EXPECT_MAX + 1] = { [SIP_EXPECT_SIGNALLING] = { @@ -1585,9 +1637,9 @@ static int __init nf_conntrack_sip_init(void) sip[i][j].me = THIS_MODULE; if (ports[i] == SIP_PORT) - sprintf(sip_names[i][j], "sip"); + sprintf(sip[i][j].name, "sip"); else - sprintf(sip_names[i][j], "sip-%u", i); + sprintf(sip[i][j].name, "sip-%u", i); pr_debug("port #%u: %u\n", i, ports[i]); diff --git a/trunk/net/netfilter/nfnetlink_log.c b/trunk/net/netfilter/nfnetlink_log.c index 169ab59ed9d4..5cfb5bedb2b8 100644 --- a/trunk/net/netfilter/nfnetlink_log.c +++ b/trunk/net/netfilter/nfnetlink_log.c @@ -381,6 +381,7 @@ __build_packet_message(struct nfulnl_instance *inst, struct nlmsghdr *nlh; struct nfgenmsg *nfmsg; sk_buff_data_t old_tail = inst->skb->tail; + struct sock *sk; nlh = nlmsg_put(inst->skb, 0, 0, NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET, @@ -480,7 +481,7 @@ __build_packet_message(struct nfulnl_instance *inst, } if (indev && skb_mac_header_was_set(skb)) { - if (nla_put_be32(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || + if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) || nla_put_be16(inst->skb, NFULA_HWLEN, htons(skb->dev->hard_header_len)) || nla_put(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len, @@ -499,18 +500,19 @@ __build_packet_message(struct nfulnl_instance *inst, } /* UID */ - if (skb->sk) { - read_lock_bh(&skb->sk->sk_callback_lock); - if (skb->sk->sk_socket && skb->sk->sk_socket->file) { - struct file *file = skb->sk->sk_socket->file; + sk = skb->sk; + if (sk && sk->sk_state != TCP_TIME_WAIT) { + read_lock_bh(&sk->sk_callback_lock); + if (sk->sk_socket && sk->sk_socket->file) { + struct file *file = 
sk->sk_socket->file; __be32 uid = htonl(file->f_cred->fsuid); __be32 gid = htonl(file->f_cred->fsgid); - read_unlock_bh(&skb->sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); if (nla_put_be32(inst->skb, NFULA_UID, uid) || nla_put_be32(inst->skb, NFULA_GID, gid)) goto nla_put_failure; } else - read_unlock_bh(&skb->sk->sk_callback_lock); + read_unlock_bh(&sk->sk_callback_lock); } /* local sequence number */ @@ -996,8 +998,10 @@ static int __init nfnetlink_log_init(void) #ifdef CONFIG_PROC_FS if (!proc_create("nfnetlink_log", 0440, - proc_net_netfilter, &nful_file_ops)) + proc_net_netfilter, &nful_file_ops)) { + status = -ENOMEM; goto cleanup_logger; + } #endif return status; diff --git a/trunk/net/netfilter/xt_LOG.c b/trunk/net/netfilter/xt_LOG.c index ff5f75fddb15..91e9af4d1f42 100644 --- a/trunk/net/netfilter/xt_LOG.c +++ b/trunk/net/netfilter/xt_LOG.c @@ -145,6 +145,19 @@ static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb, return 0; } +static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk) +{ + if (!sk || sk->sk_state == TCP_TIME_WAIT) + return; + + read_lock_bh(&sk->sk_callback_lock); + if (sk->sk_socket && sk->sk_socket->file) + sb_add(m, "UID=%u GID=%u ", + sk->sk_socket->file->f_cred->fsuid, + sk->sk_socket->file->f_cred->fsgid); + read_unlock_bh(&sk->sk_callback_lock); +} + /* One level of recursion won't kill us */ static void dump_ipv4_packet(struct sbuff *m, const struct nf_loginfo *info, @@ -361,14 +374,8 @@ static void dump_ipv4_packet(struct sbuff *m, } /* Max length: 15 "UID=4294967295 " */ - if ((logflags & XT_LOG_UID) && !iphoff && skb->sk) { - read_lock_bh(&skb->sk->sk_callback_lock); - if (skb->sk->sk_socket && skb->sk->sk_socket->file) - sb_add(m, "UID=%u GID=%u ", - skb->sk->sk_socket->file->f_cred->fsuid, - skb->sk->sk_socket->file->f_cred->fsgid); - read_unlock_bh(&skb->sk->sk_callback_lock); - } + if ((logflags & XT_LOG_UID) && !iphoff) + dump_sk_uid_gid(m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (!iphoff && skb->mark) @@ -436,8 +443,8 @@ log_packet_common(struct sbuff *m, const struct nf_loginfo *loginfo, const char *prefix) { - sb_add(m, "<%d>%sIN=%s OUT=%s ", loginfo->u.log.level, - prefix, + sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ", + '0' + loginfo->u.log.level, prefix, in ? in->name : "", out ? out->name : ""); #ifdef CONFIG_BRIDGE_NETFILTER @@ -717,14 +724,8 @@ static void dump_ipv6_packet(struct sbuff *m, } /* Max length: 15 "UID=4294967295 " */ - if ((logflags & XT_LOG_UID) && recurse && skb->sk) { - read_lock_bh(&skb->sk->sk_callback_lock); - if (skb->sk->sk_socket && skb->sk->sk_socket->file) - sb_add(m, "UID=%u GID=%u ", - skb->sk->sk_socket->file->f_cred->fsuid, - skb->sk->sk_socket->file->f_cred->fsgid); - read_unlock_bh(&skb->sk->sk_callback_lock); - } + if ((logflags & XT_LOG_UID) && recurse) + dump_sk_uid_gid(m, skb->sk); /* Max length: 16 "MARK=0xFFFFFFFF " */ if (!recurse && skb->mark) diff --git a/trunk/net/netfilter/xt_limit.c b/trunk/net/netfilter/xt_limit.c index 5c22ce8ab309..a4c1e4528cac 100644 --- a/trunk/net/netfilter/xt_limit.c +++ b/trunk/net/netfilter/xt_limit.c @@ -117,11 +117,11 @@ static int limit_mt_check(const struct xt_mtchk_param *par) /* For SMP, we only want to use one set of state. */ r->master = priv; + /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * + 128. */ + priv->prev = jiffies; + priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ if (r->cost == 0) { - /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * - 128. 
*/ - priv->prev = jiffies; - priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ r->credit_cap = priv->credit; /* Credits full. */ r->cost = user2credits(r->avg); } diff --git a/trunk/net/netlink/af_netlink.c b/trunk/net/netlink/af_netlink.c index 5463969da45b..527023823b5c 100644 --- a/trunk/net/netlink/af_netlink.c +++ b/trunk/net/netlink/af_netlink.c @@ -1362,7 +1362,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &scm; - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, true); if (err < 0) return err; @@ -1373,7 +1373,8 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, dst_pid = addr->nl_pid; dst_group = ffs(addr->nl_groups); err = -EPERM; - if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND)) + if ((dst_group || dst_pid) && + !netlink_capable(sock, NL_NONROOT_SEND)) goto out; } else { dst_pid = nlk->dst_pid; @@ -2147,6 +2148,7 @@ static void __init netlink_add_usersock_entry(void) rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners); nl_table[NETLINK_USERSOCK].module = THIS_MODULE; nl_table[NETLINK_USERSOCK].registered = 1; + nl_table[NETLINK_USERSOCK].nl_nonroot = NL_NONROOT_SEND; netlink_table_ungrab(); } diff --git a/trunk/net/netrom/af_netrom.c b/trunk/net/netrom/af_netrom.c index 06592d8b4a2b..7261eb81974f 100644 --- a/trunk/net/netrom/af_netrom.c +++ b/trunk/net/netrom/af_netrom.c @@ -601,7 +601,7 @@ static int nr_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) if (!capable(CAP_NET_BIND_SERVICE)) { dev_put(dev); release_sock(sk); - return -EACCES; + return -EPERM; } nr->user_addr = addr->fsa_digipeater[0]; nr->source_addr = addr->fsa_ax25.sax25_call; @@ -1169,7 +1169,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, msg->msg_flags |= MSG_TRUNC; } - skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); + if (er < 0) { + skb_free_datagram(sk, skb); + release_sock(sk); + return er; + } if (sax != NULL) { sax->sax25_family = AF_NETROM; diff --git a/trunk/net/openvswitch/actions.c b/trunk/net/openvswitch/actions.c index f3f96badf5aa..954405ceae9e 100644 --- a/trunk/net/openvswitch/actions.c +++ b/trunk/net/openvswitch/actions.c @@ -45,7 +45,7 @@ static int make_writable(struct sk_buff *skb, int write_len) return pskb_expand_head(skb, 0, 0, GFP_ATOMIC); } -/* remove VLAN header from packet and update csum accrodingly. */ +/* remove VLAN header from packet and update csum accordingly. 
*/ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci) { struct vlan_hdr *vhdr; diff --git a/trunk/net/openvswitch/datapath.c b/trunk/net/openvswitch/datapath.c index d8277d29e710..cf58cedad083 100644 --- a/trunk/net/openvswitch/datapath.c +++ b/trunk/net/openvswitch/datapath.c @@ -425,10 +425,10 @@ static int validate_sample(const struct nlattr *attr, static int validate_tp_port(const struct sw_flow_key *flow_key) { if (flow_key->eth.type == htons(ETH_P_IP)) { - if (flow_key->ipv4.tp.src && flow_key->ipv4.tp.dst) + if (flow_key->ipv4.tp.src || flow_key->ipv4.tp.dst) return 0; } else if (flow_key->eth.type == htons(ETH_P_IPV6)) { - if (flow_key->ipv6.tp.src && flow_key->ipv6.tp.dst) + if (flow_key->ipv6.tp.src || flow_key->ipv6.tp.dst) return 0; } @@ -460,7 +460,7 @@ static int validate_set(const struct nlattr *a, if (flow_key->eth.type != htons(ETH_P_IP)) return -EINVAL; - if (!flow_key->ipv4.addr.src || !flow_key->ipv4.addr.dst) + if (!flow_key->ip.proto) return -EINVAL; ipv4_key = nla_data(ovs_key); diff --git a/trunk/net/openvswitch/flow.h b/trunk/net/openvswitch/flow.h index 9b75617ca4e0..c30df1a10c67 100644 --- a/trunk/net/openvswitch/flow.h +++ b/trunk/net/openvswitch/flow.h @@ -145,15 +145,17 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies); * OVS_KEY_ATTR_PRIORITY 4 -- 4 8 * OVS_KEY_ATTR_IN_PORT 4 -- 4 8 * OVS_KEY_ATTR_ETHERNET 12 -- 4 16 + * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (outer VLAN ethertype) * OVS_KEY_ATTR_8021Q 4 -- 4 8 - * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 + * OVS_KEY_ATTR_ENCAP 0 -- 4 4 (VLAN encapsulation) + * OVS_KEY_ATTR_ETHERTYPE 2 2 4 8 (inner VLAN ethertype) * OVS_KEY_ATTR_IPV6 40 -- 4 44 * OVS_KEY_ATTR_ICMPV6 2 2 4 8 * OVS_KEY_ATTR_ND 28 -- 4 32 * ------------------------------------------------- - * total 132 + * total 144 */ -#define FLOW_BUFSIZE 132 +#define FLOW_BUFSIZE 144 int ovs_flow_to_nlattrs(const struct sw_flow_key *, struct sk_buff *); int ovs_flow_from_nlattrs(struct sw_flow_key *swkey, int *key_lenp, diff --git a/trunk/net/packet/af_packet.c b/trunk/net/packet/af_packet.c index ceaca7c134a0..c5c9e2a54218 100644 --- a/trunk/net/packet/af_packet.c +++ b/trunk/net/packet/af_packet.c @@ -1079,7 +1079,7 @@ static void *packet_current_rx_frame(struct packet_sock *po, default: WARN(1, "TPACKET version not supported\n"); BUG(); - return 0; + return NULL; } } @@ -1273,6 +1273,14 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po) spin_unlock(&f->lock); } +static bool match_fanout_group(struct packet_type *ptype, struct sock * sk) +{ + if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout) + return true; + + return false; +} + static int fanout_add(struct sock *sk, u16 id, u16 type_flags) { struct packet_sock *po = pkt_sk(sk); @@ -1325,6 +1333,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) match->prot_hook.dev = po->prot_hook.dev; match->prot_hook.func = packet_rcv_fanout; match->prot_hook.af_packet_priv = match; + match->prot_hook.id_match = match_fanout_group; dev_add_pack(&match->prot_hook); list_add(&match->list, &fanout_list); } @@ -1936,7 +1945,6 @@ static void tpacket_destruct_skb(struct sk_buff *skb) if (likely(po->tx_ring.pg_vec)) { ph = skb_shinfo(skb)->destructor_arg; - BUG_ON(__packet_get_status(po, ph) != TP_STATUS_SENDING); BUG_ON(atomic_read(&po->tx_ring.pending) == 0); atomic_dec(&po->tx_ring.pending); __packet_set_status(po, ph, TP_STATUS_AVAILABLE); diff --git a/trunk/net/sched/act_gact.c b/trunk/net/sched/act_gact.c index f10fb8256442..05d60859d8e3 100644 --- 
a/trunk/net/sched/act_gact.c +++ b/trunk/net/sched/act_gact.c @@ -67,6 +67,9 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, struct tcf_common *pc; int ret = 0; int err; +#ifdef CONFIG_GACT_PROB + struct tc_gact_p *p_parm = NULL; +#endif if (nla == NULL) return -EINVAL; @@ -82,6 +85,12 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, #ifndef CONFIG_GACT_PROB if (tb[TCA_GACT_PROB] != NULL) return -EOPNOTSUPP; +#else + if (tb[TCA_GACT_PROB]) { + p_parm = nla_data(tb[TCA_GACT_PROB]); + if (p_parm->ptype >= MAX_RAND) + return -EINVAL; + } #endif pc = tcf_hash_check(parm->index, a, bind, &gact_hash_info); @@ -103,8 +112,7 @@ static int tcf_gact_init(struct nlattr *nla, struct nlattr *est, spin_lock_bh(&gact->tcf_lock); gact->tcf_action = parm->action; #ifdef CONFIG_GACT_PROB - if (tb[TCA_GACT_PROB] != NULL) { - struct tc_gact_p *p_parm = nla_data(tb[TCA_GACT_PROB]); + if (p_parm) { gact->tcfg_paction = p_parm->paction; gact->tcfg_pval = p_parm->pval; gact->tcfg_ptype = p_parm->ptype; @@ -133,7 +141,7 @@ static int tcf_gact(struct sk_buff *skb, const struct tc_action *a, spin_lock(&gact->tcf_lock); #ifdef CONFIG_GACT_PROB - if (gact->tcfg_ptype && gact_rand[gact->tcfg_ptype] != NULL) + if (gact->tcfg_ptype) action = gact_rand[gact->tcfg_ptype](gact); else action = gact->tcf_action; diff --git a/trunk/net/sched/act_ipt.c b/trunk/net/sched/act_ipt.c index 60e281ad0f07..58fb3c7aab9e 100644 --- a/trunk/net/sched/act_ipt.c +++ b/trunk/net/sched/act_ipt.c @@ -185,7 +185,12 @@ static int tcf_ipt_init(struct nlattr *nla, struct nlattr *est, err2: kfree(tname); err1: - kfree(pc); + if (ret == ACT_P_CREATED) { + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); + } return err; } diff --git a/trunk/net/sched/act_mirred.c b/trunk/net/sched/act_mirred.c index fe81cc18e9e0..9c0fd0c78814 100644 --- a/trunk/net/sched/act_mirred.c +++ b/trunk/net/sched/act_mirred.c @@ -200,13 +200,12 @@ static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a, out: if (err) { m->tcf_qstats.overlimits++; - /* should we be asking for packet to be dropped? 
- * may make sense for redirect case only - */ - retval = TC_ACT_SHOT; - } else { + if (m->tcfm_eaction != TCA_EGRESS_MIRROR) + retval = TC_ACT_SHOT; + else + retval = m->tcf_action; + } else retval = m->tcf_action; - } spin_unlock(&m->tcf_lock); return retval; diff --git a/trunk/net/sched/act_pedit.c b/trunk/net/sched/act_pedit.c index 26aa2f6ce257..45c53ab067a6 100644 --- a/trunk/net/sched/act_pedit.c +++ b/trunk/net/sched/act_pedit.c @@ -74,7 +74,10 @@ static int tcf_pedit_init(struct nlattr *nla, struct nlattr *est, p = to_pedit(pc); keys = kmalloc(ksize, GFP_KERNEL); if (keys == NULL) { - kfree(pc); + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); return -ENOMEM; } ret = ACT_P_CREATED; diff --git a/trunk/net/sched/act_simple.c b/trunk/net/sched/act_simple.c index 3922f2a2821b..3714f60f0b3c 100644 --- a/trunk/net/sched/act_simple.c +++ b/trunk/net/sched/act_simple.c @@ -131,7 +131,10 @@ static int tcf_simp_init(struct nlattr *nla, struct nlattr *est, d = to_defact(pc); ret = alloc_defdata(d, defdata); if (ret < 0) { - kfree(pc); + if (est) + gen_kill_estimator(&pc->tcfc_bstats, + &pc->tcfc_rate_est); + kfree_rcu(pc, tcfc_rcu); return ret; } d->tcf_action = parm->action; diff --git a/trunk/net/sched/sch_cbq.c b/trunk/net/sched/sch_cbq.c index 6aabd77d1cfd..564b9fc8efd3 100644 --- a/trunk/net/sched/sch_cbq.c +++ b/trunk/net/sched/sch_cbq.c @@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL) cl = defmap[TC_PRIO_BESTEFFORT]; - if (cl == NULL || cl->level >= head->level) + if (cl == NULL) goto fallback; } - + if (cl->level >= head->level) + goto fallback; #ifdef CONFIG_NET_CLS_ACT switch (result) { case TC_ACT_QUEUED: diff --git a/trunk/net/sched/sch_fq_codel.c b/trunk/net/sched/sch_fq_codel.c index 9fc1c62ec80e..4e606fcb2534 100644 --- a/trunk/net/sched/sch_fq_codel.c +++ b/trunk/net/sched/sch_fq_codel.c @@ -191,7 +191,6 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) if (list_empty(&flow->flowchain)) { list_add_tail(&flow->flowchain, &q->new_flows); - codel_vars_init(&flow->cvars); q->new_flow_count++; flow->deficit = q->quantum; flow->dropped = 0; @@ -418,6 +417,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt) struct fq_codel_flow *flow = q->flows + i; INIT_LIST_HEAD(&flow->flowchain); + codel_vars_init(&flow->cvars); } } if (sch->limit >= 1) diff --git a/trunk/net/sched/sch_gred.c b/trunk/net/sched/sch_gred.c index e901583e4ea5..d42234c0f13b 100644 --- a/trunk/net/sched/sch_gred.c +++ b/trunk/net/sched/sch_gred.c @@ -102,9 +102,8 @@ static inline int gred_wred_mode_check(struct Qdisc *sch) if (q == NULL) continue; - for (n = 0; n < table->DPs; n++) - if (table->tab[n] && table->tab[n] != q && - table->tab[n]->prio == q->prio) + for (n = i + 1; n < table->DPs; n++) + if (table->tab[n] && table->tab[n]->prio == q->prio) return 1; } @@ -137,6 +136,7 @@ static inline void gred_store_wred_set(struct gred_sched *table, struct gred_sched_data *q) { table->wred_set.qavg = q->vars.qavg; + table->wred_set.qidlestart = q->vars.qidlestart; } static inline int gred_use_ecn(struct gred_sched *t) @@ -176,7 +176,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch) skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp; } - /* sum up all the qaves of prios <= to ours to get the new qave */ + /* sum up all the qaves of prios < ours to get the new qave */ if (!gred_wred_mode(t) && gred_rio_mode(t)) { int i; 
@@ -260,16 +260,18 @@ static struct sk_buff *gred_dequeue(struct Qdisc *sch) } else { q->backlog -= qdisc_pkt_len(skb); - if (!q->backlog && !gred_wred_mode(t)) - red_start_of_idle_period(&q->vars); + if (gred_wred_mode(t)) { + if (!sch->qstats.backlog) + red_start_of_idle_period(&t->wred_set); + } else { + if (!q->backlog) + red_start_of_idle_period(&q->vars); + } } return skb; } - if (gred_wred_mode(t) && !red_is_idling(&t->wred_set)) - red_start_of_idle_period(&t->wred_set); - return NULL; } @@ -291,19 +293,20 @@ static unsigned int gred_drop(struct Qdisc *sch) q->backlog -= len; q->stats.other++; - if (!q->backlog && !gred_wred_mode(t)) - red_start_of_idle_period(&q->vars); + if (gred_wred_mode(t)) { + if (!sch->qstats.backlog) + red_start_of_idle_period(&t->wred_set); + } else { + if (!q->backlog) + red_start_of_idle_period(&q->vars); + } } qdisc_drop(skb, sch); return len; } - if (gred_wred_mode(t) && !red_is_idling(&t->wred_set)) - red_start_of_idle_period(&t->wred_set); - return 0; - } static void gred_reset(struct Qdisc *sch) @@ -535,6 +538,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) for (i = 0; i < MAX_DPs; i++) { struct gred_sched_data *q = table->tab[i]; struct tc_gred_qopt opt; + unsigned long qavg; memset(&opt, 0, sizeof(opt)); @@ -566,7 +570,9 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) if (gred_wred_mode(table)) gred_load_wred_set(table, q); - opt.qave = red_calc_qavg(&q->parms, &q->vars, q->vars.qavg); + qavg = red_calc_qavg(&q->parms, &q->vars, + q->vars.qavg >> q->parms.Wlog); + opt.qave = qavg >> q->parms.Wlog; append_opt: if (nla_append(skb, sizeof(opt), &opt) < 0) diff --git a/trunk/net/sched/sch_qfq.c b/trunk/net/sched/sch_qfq.c index 9af01f3df18c..211a21217045 100644 --- a/trunk/net/sched/sch_qfq.c +++ b/trunk/net/sched/sch_qfq.c @@ -203,6 +203,34 @@ static int qfq_calc_index(u32 inv_w, unsigned int maxlen) return index; } +/* Length of the next packet (0 if the queue is empty). */ +static unsigned int qdisc_peek_len(struct Qdisc *sch) +{ + struct sk_buff *skb; + + skb = sch->ops->peek(sch); + return skb ? 
qdisc_pkt_len(skb) : 0; +} + +static void qfq_deactivate_class(struct qfq_sched *, struct qfq_class *); +static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, + unsigned int len); + +static void qfq_update_class_params(struct qfq_sched *q, struct qfq_class *cl, + u32 lmax, u32 inv_w, int delta_w) +{ + int i; + + /* update qfq-specific data */ + cl->lmax = lmax; + cl->inv_w = inv_w; + i = qfq_calc_index(cl->inv_w, cl->lmax); + + cl->grp = &q->groups[i]; + + q->wsum += delta_w; +} + static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca, unsigned long *arg) { @@ -250,6 +278,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, lmax = 1UL << QFQ_MTU_SHIFT; if (cl != NULL) { + bool need_reactivation = false; + if (tca[TCA_RATE]) { err = gen_replace_estimator(&cl->bstats, &cl->rate_est, qdisc_root_sleeping_lock(sch), @@ -258,12 +288,29 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return err; } - if (inv_w != cl->inv_w) { - sch_tree_lock(sch); - q->wsum += delta_w; - cl->inv_w = inv_w; - sch_tree_unlock(sch); + if (lmax == cl->lmax && inv_w == cl->inv_w) + return 0; /* nothing to update */ + + i = qfq_calc_index(inv_w, lmax); + sch_tree_lock(sch); + if (&q->groups[i] != cl->grp && cl->qdisc->q.qlen > 0) { + /* + * shift cl->F back, to not charge the + * class for the not-yet-served head + * packet + */ + cl->F = cl->S; + /* remove class from its slot in the old group */ + qfq_deactivate_class(q, cl); + need_reactivation = true; } + + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); + + if (need_reactivation) /* activate in new group */ + qfq_activate_class(q, cl, qdisc_peek_len(cl->qdisc)); + sch_tree_unlock(sch); + return 0; } @@ -273,11 +320,8 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, cl->refcnt = 1; cl->common.classid = classid; - cl->lmax = lmax; - cl->inv_w = inv_w; - i = qfq_calc_index(cl->inv_w, cl->lmax); - cl->grp = &q->groups[i]; + qfq_update_class_params(q, cl, lmax, inv_w, delta_w); cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); @@ -294,7 +338,6 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, return err; } } - q->wsum += weight; sch_tree_lock(sch); qdisc_class_hash_insert(&q->clhash, &cl->common); @@ -711,15 +754,6 @@ static void qfq_update_eligible(struct qfq_sched *q, u64 old_V) } } -/* What is length of next packet in queue (0 if queue is empty) */ -static unsigned int qdisc_peek_len(struct Qdisc *sch) -{ - struct sk_buff *skb; - - skb = sch->ops->peek(sch); - return skb ? qdisc_pkt_len(skb) : 0; -} - /* * Updates the class, returns true if also the group needs to be updated. 
*/ @@ -831,7 +865,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) if (mask) { struct qfq_group *next = qfq_ffs(q, mask); if (qfq_gt(roundedF, next->F)) { - cl->S = next->F; + if (qfq_gt(limit, next->F)) + cl->S = next->F; + else /* preserve timestamp correctness */ + cl->S = limit; return; } } @@ -843,11 +880,8 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl) static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct qfq_sched *q = qdisc_priv(sch); - struct qfq_group *grp; struct qfq_class *cl; int err; - u64 roundedS; - int s; cl = qfq_classify(skb, sch, &err); if (cl == NULL) { @@ -876,11 +910,25 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) return err; /* If reach this point, queue q was idle */ - grp = cl->grp; + qfq_activate_class(q, cl, qdisc_pkt_len(skb)); + + return err; +} + +/* + * Handle class switch from idle to backlogged. + */ +static void qfq_activate_class(struct qfq_sched *q, struct qfq_class *cl, + unsigned int pkt_len) +{ + struct qfq_group *grp = cl->grp; + u64 roundedS; + int s; + qfq_update_start(q, cl); /* compute new finish time and rounded start. */ - cl->F = cl->S + (u64)qdisc_pkt_len(skb) * cl->inv_w; + cl->F = cl->S + (u64)pkt_len * cl->inv_w; roundedS = qfq_round_down(cl->S, grp->slot_shift); /* @@ -917,8 +965,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch) skip_update: qfq_slot_insert(grp, cl, roundedS); - - return err; } diff --git a/trunk/net/sctp/output.c b/trunk/net/sctp/output.c index 838e18b4d7ea..be50aa234dcd 100644 --- a/trunk/net/sctp/output.c +++ b/trunk/net/sctp/output.c @@ -364,6 +364,25 @@ sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, return retval; } +static void sctp_packet_release_owner(struct sk_buff *skb) +{ + sk_free(skb->sk); +} + +static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk) +{ + skb_orphan(skb); + skb->sk = sk; + skb->destructor = sctp_packet_release_owner; + + /* + * The data chunks have already been accounted for in sctp_sendmsg(), + * therefore only reserve a single byte to keep socket around until + * the packet has been transmitted. + */ + atomic_inc(&sk->sk_wmem_alloc); +} + /* All packets are sent to the network through this function from * sctp_outq_tail(). * @@ -405,7 +424,7 @@ int sctp_packet_transmit(struct sctp_packet *packet) /* Set the owning socket so that we know where to get the * destination IP address. 
*/ - skb_set_owner_w(nskb, sk); + sctp_packet_set_owner_w(nskb, sk); if (!sctp_transport_dst_check(tp)) { sctp_transport_route(tp, NULL, sctp_sk(sk)); diff --git a/trunk/net/socket.c b/trunk/net/socket.c index dfe5b66c97e0..edc3c4af9085 100644 --- a/trunk/net/socket.c +++ b/trunk/net/socket.c @@ -2604,7 +2604,7 @@ static int do_siocgstamp(struct net *net, struct socket *sock, err = sock_do_ioctl(net, sock, cmd, (unsigned long)&ktv); set_fs(old_fs); if (!err) - err = compat_put_timeval(up, &ktv); + err = compat_put_timeval(&ktv, up); return err; } @@ -2620,7 +2620,7 @@ static int do_siocgstampns(struct net *net, struct socket *sock, err = sock_do_ioctl(net, sock, cmd, (unsigned long)&kts); set_fs(old_fs); if (!err) - err = compat_put_timespec(up, &kts); + err = compat_put_timespec(&kts, up); return err; } @@ -2657,6 +2657,7 @@ static int dev_ifconf(struct net *net, struct compat_ifconf __user *uifc32) if (copy_from_user(&ifc32, uifc32, sizeof(struct compat_ifconf))) return -EFAULT; + memset(&ifc, 0, sizeof(ifc)); if (ifc32.ifcbuf == 0) { ifc32.ifc_len = 0; ifc.ifc_len = 0; diff --git a/trunk/net/sunrpc/svc_xprt.c b/trunk/net/sunrpc/svc_xprt.c index 88f2bf671960..bac973a31367 100644 --- a/trunk/net/sunrpc/svc_xprt.c +++ b/trunk/net/sunrpc/svc_xprt.c @@ -316,7 +316,6 @@ static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) */ void svc_xprt_enqueue(struct svc_xprt *xprt) { - struct svc_serv *serv = xprt->xpt_server; struct svc_pool *pool; struct svc_rqst *rqstp; int cpu; @@ -362,8 +361,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) rqstp, rqstp->rq_xprt); rqstp->rq_xprt = xprt; svc_xprt_get(xprt); - rqstp->rq_reserved = serv->sv_max_mesg; - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); pool->sp_stats.threads_woken++; wake_up(&rqstp->rq_wait); } else { @@ -640,8 +637,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) if (xprt) { rqstp->rq_xprt = xprt; svc_xprt_get(xprt); - rqstp->rq_reserved = serv->sv_max_mesg; - atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); /* As there is a shortage of threads and this request * had to be queued, don't allow the thread to wait so @@ -738,6 +733,8 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) else len = xprt->xpt_ops->xpo_recvfrom(rqstp); dprintk("svc: got len=%d\n", len); + rqstp->rq_reserved = serv->sv_max_mesg; + atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); } svc_xprt_received(xprt); @@ -794,7 +791,8 @@ int svc_send(struct svc_rqst *rqstp) /* Grab mutex to serialize outgoing data. 
*/ mutex_lock(&xprt->xpt_mutex); - if (test_bit(XPT_DEAD, &xprt->xpt_flags)) + if (test_bit(XPT_DEAD, &xprt->xpt_flags) + || test_bit(XPT_CLOSE, &xprt->xpt_flags)) len = -ENOTCONN; else len = xprt->xpt_ops->xpo_sendto(rqstp); diff --git a/trunk/net/sunrpc/svcsock.c b/trunk/net/sunrpc/svcsock.c index 18bc130255a7..998aa8c1807c 100644 --- a/trunk/net/sunrpc/svcsock.c +++ b/trunk/net/sunrpc/svcsock.c @@ -1129,9 +1129,9 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp) if (len >= 0) svsk->sk_tcplen += len; if (len != want) { + svc_tcp_save_pages(svsk, rqstp); if (len < 0 && len != -EAGAIN) goto err_other; - svc_tcp_save_pages(svsk, rqstp); dprintk("svc: incomplete TCP record (%d of %d)\n", svsk->sk_tcplen, svsk->sk_reclen); goto err_noclose; diff --git a/trunk/net/sunrpc/xprt.c b/trunk/net/sunrpc/xprt.c index a5a402a7d21f..5d7f61d7559c 100644 --- a/trunk/net/sunrpc/xprt.c +++ b/trunk/net/sunrpc/xprt.c @@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) return false; } -static void xprt_alloc_slot(struct rpc_task *task) +void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) { - struct rpc_xprt *xprt = task->tk_xprt; struct rpc_rqst *req; + spin_lock(&xprt->reserve_lock); if (!list_empty(&xprt->free)) { req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); list_del(&req->rq_list); @@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task) default: task->tk_status = -EAGAIN; } + spin_unlock(&xprt->reserve_lock); return; out_init_req: task->tk_status = 0; task->tk_rqstp = req; xprt_request_init(task, xprt); + spin_unlock(&xprt->reserve_lock); +} +EXPORT_SYMBOL_GPL(xprt_alloc_slot); + +void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task) +{ + /* Note: grabbing the xprt_lock_write() ensures that we throttle + * new slot allocation if the transport is congested (i.e. when + * reconnecting a stream transport or when out of socket write + * buffer space). + */ + if (xprt_lock_write(xprt, task)) { + xprt_alloc_slot(xprt, task); + xprt_release_write(xprt, task); + } } +EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot); static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) { @@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task) if (task->tk_rqstp != NULL) return; - /* Note: grabbing the xprt_lock_write() here is not strictly needed, - * but ensures that we throttle new slot allocation if the transport - * is congested (e.g. if reconnecting or if we're out of socket - * write buffer space). 
- */ task->tk_timeout = 0; task->tk_status = -EAGAIN; - if (!xprt_lock_write(xprt, task)) - return; - - spin_lock(&xprt->reserve_lock); - xprt_alloc_slot(task); - spin_unlock(&xprt->reserve_lock); - xprt_release_write(xprt, task); + xprt->ops->alloc_slot(xprt, task); } static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) diff --git a/trunk/net/sunrpc/xprtrdma/transport.c b/trunk/net/sunrpc/xprtrdma/transport.c index 06cdbff79e4a..5d9202dc7cb1 100644 --- a/trunk/net/sunrpc/xprtrdma/transport.c +++ b/trunk/net/sunrpc/xprtrdma/transport.c @@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq) static struct rpc_xprt_ops xprt_rdma_procs = { .reserve_xprt = xprt_rdma_reserve_xprt, .release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */ + .alloc_slot = xprt_alloc_slot, .release_request = xprt_release_rqst_cong, /* ditto */ .set_retrans_timeout = xprt_set_retrans_timeout_def, /* ditto */ .rpcbind = rpcb_getport_async, /* sunrpc/rpcb_clnt.c */ diff --git a/trunk/net/sunrpc/xprtsock.c b/trunk/net/sunrpc/xprtsock.c index 400567243f84..a35b8e52e551 100644 --- a/trunk/net/sunrpc/xprtsock.c +++ b/trunk/net/sunrpc/xprtsock.c @@ -2473,6 +2473,7 @@ static void bc_destroy(struct rpc_xprt *xprt) static struct rpc_xprt_ops xs_local_ops = { .reserve_xprt = xprt_reserve_xprt, .release_xprt = xs_tcp_release_xprt, + .alloc_slot = xprt_alloc_slot, .rpcbind = xs_local_rpcbind, .set_port = xs_local_set_port, .connect = xs_connect, @@ -2489,6 +2490,7 @@ static struct rpc_xprt_ops xs_udp_ops = { .set_buffer_size = xs_udp_set_buffer_size, .reserve_xprt = xprt_reserve_xprt_cong, .release_xprt = xprt_release_xprt_cong, + .alloc_slot = xprt_alloc_slot, .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, @@ -2506,6 +2508,7 @@ static struct rpc_xprt_ops xs_udp_ops = { static struct rpc_xprt_ops xs_tcp_ops = { .reserve_xprt = xprt_reserve_xprt, .release_xprt = xs_tcp_release_xprt, + .alloc_slot = xprt_lock_and_alloc_slot, .rpcbind = rpcb_getport_async, .set_port = xs_set_port, .connect = xs_connect, diff --git a/trunk/net/unix/af_unix.c b/trunk/net/unix/af_unix.c index e4768c180da2..c5ee4ff61364 100644 --- a/trunk/net/unix/af_unix.c +++ b/trunk/net/unix/af_unix.c @@ -1450,7 +1450,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, false); if (err < 0) return err; @@ -1619,7 +1619,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, if (NULL == siocb->scm) siocb->scm = &tmp_scm; wait_for_unix_gc(); - err = scm_send(sock, msg, siocb->scm); + err = scm_send(sock, msg, siocb->scm, false); if (err < 0) return err; diff --git a/trunk/net/wireless/core.c b/trunk/net/wireless/core.c index 31b40cc4a9c3..dcd64d5b07aa 100644 --- a/trunk/net/wireless/core.c +++ b/trunk/net/wireless/core.c @@ -952,6 +952,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb, */ synchronize_rcu(); INIT_LIST_HEAD(&wdev->list); + /* + * Ensure that all events have been processed and + * freed. 
+ */ + cfg80211_process_wdev_events(wdev); break; case NETDEV_PRE_UP: if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype))) diff --git a/trunk/net/wireless/core.h b/trunk/net/wireless/core.h index 5206c6844fd7..bc7430b54771 100644 --- a/trunk/net/wireless/core.h +++ b/trunk/net/wireless/core.h @@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev, struct net_device *dev, enum nl80211_iftype ntype, u32 *flags, struct vif_params *params); void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev); +void cfg80211_process_wdev_events(struct wireless_dev *wdev); int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, diff --git a/trunk/net/wireless/nl80211.c b/trunk/net/wireless/nl80211.c index 97026f3b215a..1e37dbf00cb3 100644 --- a/trunk/net/wireless/nl80211.c +++ b/trunk/net/wireless/nl80211.c @@ -5633,8 +5633,10 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info) sizeof(connect.ht_capa_mask)); if (info->attrs[NL80211_ATTR_HT_CAPABILITY]) { - if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) + if (!info->attrs[NL80211_ATTR_HT_CAPABILITY_MASK]) { + kfree(connkeys); return -EINVAL; + } memcpy(&connect.ht_capa, nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]), sizeof(connect.ht_capa)); diff --git a/trunk/net/wireless/reg.c b/trunk/net/wireless/reg.c index 2303ee73b50a..72d170ca3406 100644 --- a/trunk/net/wireless/reg.c +++ b/trunk/net/wireless/reg.c @@ -350,6 +350,9 @@ static void reg_regdb_search(struct work_struct *work) struct reg_regdb_search_request *request; const struct ieee80211_regdomain *curdom, *regdom; int i, r; + bool set_reg = false; + + mutex_lock(&cfg80211_mutex); mutex_lock(®_regdb_search_mutex); while (!list_empty(®_regdb_search_list)) { @@ -365,9 +368,7 @@ static void reg_regdb_search(struct work_struct *work) r = reg_copy_regd(®dom, curdom); if (r) break; - mutex_lock(&cfg80211_mutex); - set_regdom(regdom); - mutex_unlock(&cfg80211_mutex); + set_reg = true; break; } } @@ -375,6 +376,11 @@ static void reg_regdb_search(struct work_struct *work) kfree(request); } mutex_unlock(®_regdb_search_mutex); + + if (set_reg) + set_regdom(regdom); + + mutex_unlock(&cfg80211_mutex); } static DECLARE_WORK(reg_regdb_work, reg_regdb_search); @@ -680,6 +686,8 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_NO_IBSS; if (rd_flags & NL80211_RRF_DFS) channel_flags |= IEEE80211_CHAN_RADAR; + if (rd_flags & NL80211_RRF_NO_OFDM) + channel_flags |= IEEE80211_CHAN_NO_OFDM; return channel_flags; } @@ -901,7 +909,21 @@ static void handle_channel(struct wiphy *wiphy, chan->max_antenna_gain = min(chan->orig_mag, (int) MBI_TO_DBI(power_rule->max_antenna_gain)); chan->max_reg_power = (int) MBM_TO_DBM(power_rule->max_eirp); - chan->max_power = min(chan->max_power, chan->max_reg_power); + if (chan->orig_mpwr) { + /* + * Devices that have their own custom regulatory domain + * but also use WIPHY_FLAG_STRICT_REGULATORY will follow the + * passed country IE power settings. 
+ */ + if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE && + wiphy->flags & WIPHY_FLAG_CUSTOM_REGULATORY && + wiphy->flags & WIPHY_FLAG_STRICT_REGULATORY) + chan->max_power = chan->max_reg_power; + else + chan->max_power = min(chan->orig_mpwr, + chan->max_reg_power); + } else + chan->max_power = chan->max_reg_power; } static void handle_band(struct wiphy *wiphy, @@ -1885,6 +1907,7 @@ static void restore_custom_reg_settings(struct wiphy *wiphy) chan->flags = chan->orig_flags; chan->max_antenna_gain = chan->orig_mag; chan->max_power = chan->orig_mpwr; + chan->beacon_found = false; } } } diff --git a/trunk/net/wireless/util.c b/trunk/net/wireless/util.c index 26f8cd30f712..994e2f0cc7a8 100644 --- a/trunk/net/wireless/util.c +++ b/trunk/net/wireless/util.c @@ -735,7 +735,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev) wdev->connect_keys = NULL; } -static void cfg80211_process_wdev_events(struct wireless_dev *wdev) +void cfg80211_process_wdev_events(struct wireless_dev *wdev) { struct cfg80211_event *ev; unsigned long flags; diff --git a/trunk/net/xfrm/xfrm_input.c b/trunk/net/xfrm/xfrm_input.c index 54a0dc2e2f8d..ab2bb42fe094 100644 --- a/trunk/net/xfrm/xfrm_input.c +++ b/trunk/net/xfrm/xfrm_input.c @@ -212,7 +212,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) /* only the first xfrm gets the encap type */ encap_type = 0; - if (async && x->repl->check(x, skb, seq)) { + if (async && x->repl->recheck(x, skb, seq)) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR); goto drop_unlock; } diff --git a/trunk/net/xfrm/xfrm_policy.c b/trunk/net/xfrm/xfrm_policy.c index c5a5165a5927..387848e90078 100644 --- a/trunk/net/xfrm/xfrm_policy.c +++ b/trunk/net/xfrm/xfrm_policy.c @@ -585,6 +585,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) xfrm_pol_hold(policy); net->xfrm.policy_count[dir]++; atomic_inc(&flow_cache_genid); + rt_genid_bump(net); if (delpol) __xfrm_policy_unlink(delpol, dir); policy->index = delpol ? 
delpol->index : xfrm_gen_index(net, dir); @@ -1357,6 +1358,8 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) memset(dst + 1, 0, sizeof(*xdst) - sizeof(*dst)); xdst->flo.ops = &xfrm_bundle_fc_ops; + if (afinfo->init_dst) + afinfo->init_dst(net, xdst); } else xdst = ERR_PTR(-ENOBUFS); @@ -1761,7 +1764,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family, if (!afinfo) { dst_release(dst_orig); - ret = ERR_PTR(-EINVAL); + return ERR_PTR(-EINVAL); } else { ret = afinfo->blackhole_route(net, dst_orig); } diff --git a/trunk/net/xfrm/xfrm_replay.c b/trunk/net/xfrm/xfrm_replay.c index 2f6d11d04a2b..3efb07d3eb27 100644 --- a/trunk/net/xfrm/xfrm_replay.c +++ b/trunk/net/xfrm/xfrm_replay.c @@ -420,6 +420,18 @@ static int xfrm_replay_check_esn(struct xfrm_state *x, return -EINVAL; } +static int xfrm_replay_recheck_esn(struct xfrm_state *x, + struct sk_buff *skb, __be32 net_seq) +{ + if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi != + htonl(xfrm_replay_seqhi(x, net_seq)))) { + x->stats.replay_window++; + return -EINVAL; + } + + return xfrm_replay_check_esn(x, skb, net_seq); +} + static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) { unsigned int bitnr, nr, i; @@ -479,6 +491,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq) static struct xfrm_replay xfrm_replay_legacy = { .advance = xfrm_replay_advance, .check = xfrm_replay_check, + .recheck = xfrm_replay_check, .notify = xfrm_replay_notify, .overflow = xfrm_replay_overflow, }; @@ -486,6 +499,7 @@ static struct xfrm_replay xfrm_replay_legacy = { static struct xfrm_replay xfrm_replay_bmp = { .advance = xfrm_replay_advance_bmp, .check = xfrm_replay_check_bmp, + .recheck = xfrm_replay_check_bmp, .notify = xfrm_replay_notify_bmp, .overflow = xfrm_replay_overflow_bmp, }; @@ -493,6 +507,7 @@ static struct xfrm_replay xfrm_replay_bmp = { static struct xfrm_replay xfrm_replay_esn = { .advance = xfrm_replay_advance_esn, .check = xfrm_replay_check_esn, + .recheck = xfrm_replay_recheck_esn, .notify = xfrm_replay_notify_bmp, .overflow = xfrm_replay_overflow_esn, }; diff --git a/trunk/net/xfrm/xfrm_state.c b/trunk/net/xfrm/xfrm_state.c index 5b228f97d4b3..210be48d8ae3 100644 --- a/trunk/net/xfrm/xfrm_state.c +++ b/trunk/net/xfrm/xfrm_state.c @@ -415,8 +415,17 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) if (x->lft.hard_add_expires_seconds) { long tmo = x->lft.hard_add_expires_seconds + x->curlft.add_time - now; - if (tmo <= 0) - goto expired; + if (tmo <= 0) { + if (x->xflags & XFRM_SOFT_EXPIRE) { + /* enter hard expire without soft expire first?! + * setting a new date could trigger this. 
+ * workarbound: fix x->curflt.add_time by below: + */ + x->curlft.add_time = now - x->saved_tmo - 1; + tmo = x->lft.hard_add_expires_seconds - x->saved_tmo; + } else + goto expired; + } if (tmo < next) next = tmo; } @@ -433,10 +442,14 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer * me) if (x->lft.soft_add_expires_seconds) { long tmo = x->lft.soft_add_expires_seconds + x->curlft.add_time - now; - if (tmo <= 0) + if (tmo <= 0) { warn = 1; - else if (tmo < next) + x->xflags &= ~XFRM_SOFT_EXPIRE; + } else if (tmo < next) { next = tmo; + x->xflags |= XFRM_SOFT_EXPIRE; + x->saved_tmo = tmo; + } } if (x->lft.soft_use_expires_seconds) { long tmo = x->lft.soft_use_expires_seconds + @@ -1981,8 +1994,10 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay) goto error; x->outer_mode = xfrm_get_mode(x->props.mode, family); - if (x->outer_mode == NULL) + if (x->outer_mode == NULL) { + err = -EPROTONOSUPPORT; goto error; + } if (init_replay) { err = xfrm_init_replay(x); diff --git a/trunk/net/xfrm/xfrm_user.c b/trunk/net/xfrm/xfrm_user.c index e75d8e47f35c..289f4bf18ff0 100644 --- a/trunk/net/xfrm/xfrm_user.c +++ b/trunk/net/xfrm/xfrm_user.c @@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p, struct nlattr **attrs) { struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL]; + struct xfrm_replay_state_esn *rs; - if ((p->flags & XFRM_STATE_ESN) && !rt) - return -EINVAL; + if (p->flags & XFRM_STATE_ESN) { + if (!rt) + return -EINVAL; + + rs = nla_data(rt); + + if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8) + return -EINVAL; + + if (nla_len(rt) < xfrm_replay_state_esn_len(rs) && + nla_len(rt) != sizeof(*rs)) + return -EINVAL; + } if (!rt) return 0; @@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es struct nlattr *rp) { struct xfrm_replay_state_esn *up; + int ulen; if (!replay_esn || !rp) return 0; up = nla_data(rp); + ulen = xfrm_replay_state_esn_len(up); - if (xfrm_replay_state_esn_len(replay_esn) != - xfrm_replay_state_esn_len(up)) + if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen) return -EINVAL; return 0; @@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn struct nlattr *rta) { struct xfrm_replay_state_esn *p, *pp, *up; + int klen, ulen; if (!rta) return 0; up = nla_data(rta); + klen = xfrm_replay_state_esn_len(up); + ulen = nla_len(rta) >= klen ? klen : sizeof(*up); - p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); + p = kzalloc(klen, GFP_KERNEL); if (!p) return -ENOMEM; - pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL); + pp = kzalloc(klen, GFP_KERNEL); if (!pp) { kfree(p); return -ENOMEM; } + memcpy(p, up, ulen); + memcpy(pp, up, ulen); + *replay_esn = p; *preplay_esn = pp; @@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info * * somehow made shareable and move it to xfrm_state.c - JHS * */ -static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs) +static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, + int update_esn) { struct nlattr *rp = attrs[XFRMA_REPLAY_VAL]; - struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL]; + struct nlattr *re = update_esn ? 
attrs[XFRMA_REPLAY_ESN_VAL] : NULL; struct nlattr *lt = attrs[XFRMA_LTIME_VAL]; struct nlattr *et = attrs[XFRMA_ETIMER_THRESH]; struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; @@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, goto error; /* override default values from above */ - xfrm_update_ae_params(x, attrs); + xfrm_update_ae_params(x, attrs, 0); return x; @@ -689,6 +709,7 @@ static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh, static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) { + memset(p, 0, sizeof(*p)); memcpy(&p->id, &x->id, sizeof(p->id)); memcpy(&p->sel, &x->sel, sizeof(p->sel)); memcpy(&p->lft, &x->lft, sizeof(p->lft)); @@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb) return -EMSGSIZE; algo = nla_data(nla); - strcpy(algo->alg_name, auth->alg_name); + strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name)); memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8); algo->alg_key_len = auth->alg_key_len; @@ -878,6 +899,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, { struct xfrm_dump_info info; struct sk_buff *skb; + int err; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) @@ -888,9 +910,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb, info.nlmsg_seq = seq; info.nlmsg_flags = 0; - if (dump_one_state(x, 0, &info)) { + err = dump_one_state(x, 0, &info); + if (err) { kfree_skb(skb); - return NULL; + return ERR_PTR(err); } return skb; @@ -1317,6 +1340,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir) { + memset(p, 0, sizeof(*p)); memcpy(&p->sel, &xp->selector, sizeof(p->sel)); memcpy(&p->lft, &xp->lft, sizeof(p->lft)); memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft)); @@ -1421,6 +1445,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb) struct xfrm_user_tmpl *up = &vec[i]; struct xfrm_tmpl *kp = &xp->xfrm_vec[i]; + memset(up, 0, sizeof(*up)); memcpy(&up->id, &kp->id, sizeof(up->id)); up->family = kp->encap_family; memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr)); @@ -1546,6 +1571,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, { struct xfrm_dump_info info; struct sk_buff *skb; + int err; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) @@ -1556,9 +1582,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb, info.nlmsg_seq = seq; info.nlmsg_flags = 0; - if (dump_one_policy(xp, dir, 0, &info) < 0) { + err = dump_one_policy(xp, dir, 0, &info); + if (err) { kfree_skb(skb); - return NULL; + return ERR_PTR(err); } return skb; @@ -1822,7 +1849,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh, goto out; spin_lock_bh(&x->lock); - xfrm_update_ae_params(x, attrs); + xfrm_update_ae_params(x, attrs, 1); spin_unlock_bh(&x->lock); c.event = nlh->nlmsg_type; diff --git a/trunk/scripts/Makefile.fwinst b/trunk/scripts/Makefile.fwinst index 6bf8e87f1dcf..4d908d16c035 100644 --- a/trunk/scripts/Makefile.fwinst +++ b/trunk/scripts/Makefile.fwinst @@ -27,7 +27,7 @@ endif installed-mod-fw := $(addprefix $(INSTALL_FW_PATH)/,$(mod-fw)) installed-fw := $(addprefix $(INSTALL_FW_PATH)/,$(fw-shipped-all)) -installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/. 
+installed-fw-dirs := $(sort $(dir $(installed-fw))) $(INSTALL_FW_PATH)/./ # Workaround for make < 3.81, where .SECONDEXPANSION doesn't work. PHONY += $(INSTALL_FW_PATH)/$$(%) install-all-dirs diff --git a/trunk/scripts/checkpatch.pl b/trunk/scripts/checkpatch.pl index 913d6bdfdda3..ca05ba217f5f 100755 --- a/trunk/scripts/checkpatch.pl +++ b/trunk/scripts/checkpatch.pl @@ -3016,7 +3016,8 @@ sub process { $herectx .= raw_line($linenr, $n) . "\n"; } - if (($stmts =~ tr/;/;/) == 1) { + if (($stmts =~ tr/;/;/) == 1 && + $stmts !~ /^\s*(if|while|for|switch)\b/) { WARN("SINGLE_STATEMENT_DO_WHILE_MACRO", "Single statement macros should not use a do {} while (0) loop\n" . "$herectx"); } diff --git a/trunk/scripts/checksyscalls.sh b/trunk/scripts/checksyscalls.sh index d24810fc6af6..fd8fa9aa7c4e 100755 --- a/trunk/scripts/checksyscalls.sh +++ b/trunk/scripts/checksyscalls.sh @@ -200,7 +200,7 @@ EOF syscall_list() { grep '^[0-9]' "$1" | sort -n | ( while read nr abi name entry ; do - echo <> $T.s disas $T cat $T.dis >> $T.aa -faultline=`cat $T.dis | head -1 | cut -d":" -f2` +faultline=`cat $T.dis | head -1 | cut -d":" -f2-` faultline=`echo "$faultline" | sed -e 's/\[/\\\[/g; s/\]/\\\]/g'` cat $T.oo | sed -e "s/\($faultline\)/\*\1 <-- trapping instruction/g" diff --git a/trunk/scripts/kconfig/streamline_config.pl b/trunk/scripts/kconfig/streamline_config.pl index 2fbbbc1ddea0..33689396953a 100644 --- a/trunk/scripts/kconfig/streamline_config.pl +++ b/trunk/scripts/kconfig/streamline_config.pl @@ -100,7 +100,7 @@ sub dprint { }, ); -sub find_config { +sub read_config { foreach my $conf (@searchconfigs) { my $file = $conf->{"file"}; @@ -115,17 +115,15 @@ sub find_config { print STDERR "using config: '$file'\n"; - open(CIN, "$exec $file |") || die "Failed to run $exec $file"; - return; + open(my $infile, '-|', "$exec $file") || die "Failed to run $exec $file"; + my @x = <$infile>; + close $infile; + return @x; } die "No config file found"; } -find_config; - -# Read in the entire config file into config_file -my @config_file = ; -close CIN; +my @config_file = read_config; # Parse options my $localmodconfig = 0; @@ -135,7 +133,7 @@ sub find_config { "localyesconfig" => \$localyesconfig); # Get the build source and top level Kconfig file (passed in) -my $ksource = $ARGV[0]; +my $ksource = ($ARGV[0] ? $ARGV[0] : '.'); my $kconfig = $ARGV[1]; my $lsmod_file = $ENV{'LSMOD'}; @@ -173,8 +171,8 @@ sub read_kconfig { $source =~ s/\$$env/$ENV{$env}/; } - open(KIN, "$source") || die "Can't open $kconfig"; - while () { + open(my $kinfile, '<', $source) || die "Can't open $kconfig"; + while (<$kinfile>) { chomp; # Make sure that lines ending with \ continue @@ -251,10 +249,10 @@ sub read_kconfig { $state = "NONE"; } } - close(KIN); + close($kinfile); # read in any configs that were found. - foreach $kconfig (@kconfigs) { + foreach my $kconfig (@kconfigs) { if (!defined($read_kconfigs{$kconfig})) { $read_kconfigs{$kconfig} = 1; read_kconfig($kconfig); @@ -295,8 +293,8 @@ sub convert_vars { my $line = ""; my %make_vars; - open(MIN,$makefile) || die "Can't open $makefile"; - while () { + open(my $infile, '<', $makefile) || die "Can't open $makefile"; + while (<$infile>) { # if this line ends with a backslash, continue chomp; if (/^(.*)\\$/) { @@ -343,10 +341,11 @@ sub convert_vars { } } } - close(MIN); + close($infile); } my %modules; +my $linfile; if (defined($lsmod_file)) { if ( ! 
-f $lsmod_file) { @@ -356,13 +355,10 @@ sub convert_vars { die "$lsmod_file not found"; } } - if ( -x $lsmod_file) { - # the file is executable, run it - open(LIN, "$lsmod_file|"); - } else { - # Just read the contents - open(LIN, "$lsmod_file"); - } + + my $otype = ( -x $lsmod_file) ? '-|' : '<'; + open($linfile, $otype, $lsmod_file); + } else { # see what modules are loaded on this system @@ -379,16 +375,16 @@ sub convert_vars { $lsmod = "lsmod"; } - open(LIN,"$lsmod|") || die "Can not call lsmod with $lsmod"; + open($linfile, '-|', $lsmod) || die "Can not call lsmod with $lsmod"; } -while () { +while (<$linfile>) { next if (/^Module/); # Skip the first line. if (/^(\S+)/) { $modules{$1} = 1; } } -close (LIN); +close ($linfile); # add to the configs hash all configs that are needed to enable # a loaded module. This is a direct obj-${CONFIG_FOO} += bar.o @@ -605,6 +601,8 @@ sub loop_select { if (defined($configs{$1})) { if ($localyesconfig) { $setconfigs{$1} = 'y'; + print "$1=y\n"; + next; } else { $setconfigs{$1} = $2; } diff --git a/trunk/scripts/kernel-doc b/trunk/scripts/kernel-doc index 9b0c0b8b4ab4..8fd107a3fac4 100755 --- a/trunk/scripts/kernel-doc +++ b/trunk/scripts/kernel-doc @@ -1786,6 +1786,7 @@ sub dump_function($$) { $prototype =~ s/__init +//; $prototype =~ s/__init_or_module +//; $prototype =~ s/__must_check +//; + $prototype =~ s/__weak +//; $prototype =~ s/^#\s*define\s+//; #ak added $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//; diff --git a/trunk/scripts/link-vmlinux.sh b/trunk/scripts/link-vmlinux.sh index 4629038c9e5a..b3d907eb93a9 100644 --- a/trunk/scripts/link-vmlinux.sh +++ b/trunk/scripts/link-vmlinux.sh @@ -74,8 +74,13 @@ kallsyms() info KSYM ${2} local kallsymopt; + if [ -n "${CONFIG_SYMBOL_PREFIX}" ]; then + kallsymopt="${kallsymopt} \ + --symbol-prefix=${CONFIG_SYMBOL_PREFIX}" + fi + if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then - kallsymopt=--all-symbols + kallsymopt="${kallsymopt} --all-symbols" fi local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \ @@ -211,7 +216,7 @@ if [ -n "${CONFIG_KALLSYMS}" ]; then if ! cmp -s System.map .tmp_System.map; then echo >&2 Inconsistent kallsyms data - echo >&2 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround + echo >&2 Try "make KALLSYMS_EXTRA_PASS=1" as a workaround cleanup exit 1 fi diff --git a/trunk/scripts/recordmcount.h b/trunk/scripts/recordmcount.h index 54e35c1e5948..9d1421e63ff8 100644 --- a/trunk/scripts/recordmcount.h +++ b/trunk/scripts/recordmcount.h @@ -261,11 +261,13 @@ static unsigned get_mcountsym(Elf_Sym const *const sym0, &sym0[Elf_r_sym(relp)]; char const *symname = &str0[w(symp->st_name)]; char const *mcount = gpfx == '_' ? 
"_mcount" : "mcount"; + char const *fentry = "__fentry__"; if (symname[0] == '.') ++symname; /* ppc64 hack */ if (strcmp(mcount, symname) == 0 || - (altmcount && strcmp(altmcount, symname) == 0)) + (altmcount && strcmp(altmcount, symname) == 0) || + (strcmp(fentry, symname) == 0)) mcountsym = Elf_r_sym(relp); return mcountsym; diff --git a/trunk/security/apparmor/.gitignore b/trunk/security/apparmor/.gitignore index 4d995aeaebc0..9cdec70d72b8 100644 --- a/trunk/security/apparmor/.gitignore +++ b/trunk/security/apparmor/.gitignore @@ -1,6 +1,5 @@ # # Generated include files # -af_names.h capability_names.h rlim_names.h diff --git a/trunk/security/keys/keyctl.c b/trunk/security/keys/keyctl.c index 3364fbf46807..6cfc6478863e 100644 --- a/trunk/security/keys/keyctl.c +++ b/trunk/security/keys/keyctl.c @@ -1486,7 +1486,6 @@ long keyctl_session_to_parent(void) oldwork = NULL; parent = me->real_parent; - task_lock(parent); /* the parent mustn't be init and mustn't be a kernel thread */ if (parent->pid <= 1 || !parent->mm) goto unlock; @@ -1530,7 +1529,6 @@ long keyctl_session_to_parent(void) if (!ret) newwork = NULL; unlock: - task_unlock(parent); write_unlock_irq(&tasklist_lock); rcu_read_unlock(); if (oldwork) diff --git a/trunk/security/selinux/include/xfrm.h b/trunk/security/selinux/include/xfrm.h index c220f314709c..65f67cb0aefb 100644 --- a/trunk/security/selinux/include/xfrm.h +++ b/trunk/security/selinux/include/xfrm.h @@ -51,6 +51,7 @@ int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); static inline void selinux_xfrm_notify_policyload(void) { atomic_inc(&flow_cache_genid); + rt_genid_bump(&init_net); } #else static inline int selinux_xfrm_enabled(void) diff --git a/trunk/security/yama/yama_lsm.c b/trunk/security/yama/yama_lsm.c index 83554ee8a587..0cc99a3ea42d 100644 --- a/trunk/security/yama/yama_lsm.c +++ b/trunk/security/yama/yama_lsm.c @@ -279,12 +279,46 @@ static int yama_ptrace_access_check(struct task_struct *child, } if (rc) { - char name[sizeof(current->comm)]; printk_ratelimited(KERN_NOTICE "ptrace of pid %d was attempted by: %s (pid %d)\n", - child->pid, - get_task_comm(name, current), - current->pid); + child->pid, current->comm, current->pid); + } + + return rc; +} + +/** + * yama_ptrace_traceme - validate PTRACE_TRACEME calls + * @parent: task that will become the ptracer of the current task + * + * Returns 0 if following the ptrace is allowed, -ve on error. + */ +static int yama_ptrace_traceme(struct task_struct *parent) +{ + int rc; + + /* If standard caps disallows it, so does Yama. We should + * only tighten restrictions further. + */ + rc = cap_ptrace_traceme(parent); + if (rc) + return rc; + + /* Only disallow PTRACE_TRACEME on more aggressive settings. 
*/ + switch (ptrace_scope) { + case YAMA_SCOPE_CAPABILITY: + if (!ns_capable(task_user_ns(parent), CAP_SYS_PTRACE)) + rc = -EPERM; + break; + case YAMA_SCOPE_NO_ATTACH: + rc = -EPERM; + break; + } + + if (rc) { + printk_ratelimited(KERN_NOTICE + "ptraceme of pid %d was attempted by: %s (pid %d)\n", + current->pid, parent->comm, parent->pid); } return rc; @@ -294,6 +328,7 @@ static struct security_operations yama_ops = { .name = "yama", .ptrace_access_check = yama_ptrace_access_check, + .ptrace_traceme = yama_ptrace_traceme, .task_prctl = yama_task_prctl, .task_free = yama_task_free, }; diff --git a/trunk/sound/arm/pxa2xx-ac97.c b/trunk/sound/arm/pxa2xx-ac97.c index 0d7b25e81643..4e1fda75c1c9 100644 --- a/trunk/sound/arm/pxa2xx-ac97.c +++ b/trunk/sound/arm/pxa2xx-ac97.c @@ -106,7 +106,7 @@ static struct pxa2xx_pcm_client pxa2xx_ac97_pcm_client = { .prepare = pxa2xx_ac97_pcm_prepare, }; -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int pxa2xx_ac97_do_suspend(struct snd_card *card) { @@ -243,7 +243,7 @@ static struct platform_driver pxa2xx_ac97_driver = { .driver = { .name = "pxa2xx-ac97", .owner = THIS_MODULE, -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP .pm = &pxa2xx_ac97_pm_ops, #endif }, diff --git a/trunk/sound/atmel/abdac.c b/trunk/sound/atmel/abdac.c index eb4ceb71123e..277ebce23a45 100644 --- a/trunk/sound/atmel/abdac.c +++ b/trunk/sound/atmel/abdac.c @@ -452,6 +452,7 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev) dac->regs = ioremap(regs->start, resource_size(regs)); if (!dac->regs) { dev_dbg(&pdev->dev, "could not remap register memory\n"); + retval = -ENOMEM; goto out_free_card; } @@ -534,7 +535,7 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev) return retval; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int atmel_abdac_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); diff --git a/trunk/sound/atmel/ac97c.c b/trunk/sound/atmel/ac97c.c index bf47025bdf45..9052aff37f64 100644 --- a/trunk/sound/atmel/ac97c.c +++ b/trunk/sound/atmel/ac97c.c @@ -278,14 +278,9 @@ static int atmel_ac97c_capture_hw_params(struct snd_pcm_substream *substream, if (retval < 0) return retval; /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ - if (cpu_is_at32ap7000()) { - if (retval < 0) - return retval; - /* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */ - if (retval == 1) - if (test_and_clear_bit(DMA_RX_READY, &chip->flags)) - dw_dma_cyclic_free(chip->dma.rx_chan); - } + if (cpu_is_at32ap7000() && retval == 1) + if (test_and_clear_bit(DMA_RX_READY, &chip->flags)) + dw_dma_cyclic_free(chip->dma.rx_chan); /* Set restrictions to params. 
*/ mutex_lock(&opened_mutex); @@ -980,6 +975,7 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev) if (!chip->regs) { dev_dbg(&pdev->dev, "could not remap register memory\n"); + retval = -ENOMEM; goto err_ioremap; } @@ -1134,7 +1130,7 @@ static int __devinit atmel_ac97c_probe(struct platform_device *pdev) return retval; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int atmel_ac97c_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); diff --git a/trunk/sound/core/compress_offload.c b/trunk/sound/core/compress_offload.c index ec2118d0e27a..eb60cb8dbb8a 100644 --- a/trunk/sound/core/compress_offload.c +++ b/trunk/sound/core/compress_offload.c @@ -80,14 +80,12 @@ static int snd_compr_open(struct inode *inode, struct file *f) int maj = imajor(inode); int ret; - if (f->f_flags & O_WRONLY) + if ((f->f_flags & O_ACCMODE) == O_WRONLY) dirn = SND_COMPRESS_PLAYBACK; - else if (f->f_flags & O_RDONLY) + else if ((f->f_flags & O_ACCMODE) == O_RDONLY) dirn = SND_COMPRESS_CAPTURE; - else { - pr_err("invalid direction\n"); + else return -EINVAL; - } if (maj == snd_major) compr = snd_lookup_minor_data(iminor(inode), diff --git a/trunk/sound/core/sgbuf.c b/trunk/sound/core/sgbuf.c index 4e7ec2b49873..d0f00356fc11 100644 --- a/trunk/sound/core/sgbuf.c +++ b/trunk/sound/core/sgbuf.c @@ -101,7 +101,7 @@ void *snd_malloc_sgbuf_pages(struct device *device, if (snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, device, chunk, &tmpb) < 0) { if (!sgbuf->pages) - return NULL; + goto _failed; if (!res_size) goto _failed; size = sgbuf->pages * PAGE_SIZE; diff --git a/trunk/sound/drivers/aloop.c b/trunk/sound/drivers/aloop.c index 1128b35b2b05..5a34355e78e8 100644 --- a/trunk/sound/drivers/aloop.c +++ b/trunk/sound/drivers/aloop.c @@ -1176,7 +1176,7 @@ static int __devexit loopback_remove(struct platform_device *devptr) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int loopback_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); diff --git a/trunk/sound/drivers/dummy.c b/trunk/sound/drivers/dummy.c index f7d3bfc6bca8..54bb6644a598 100644 --- a/trunk/sound/drivers/dummy.c +++ b/trunk/sound/drivers/dummy.c @@ -1064,7 +1064,7 @@ static int __devexit snd_dummy_remove(struct platform_device *devptr) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int snd_dummy_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); diff --git a/trunk/sound/drivers/pcsp/pcsp.c b/trunk/sound/drivers/pcsp/pcsp.c index 6ca59fc6dcb9..ef171295f6d4 100644 --- a/trunk/sound/drivers/pcsp/pcsp.c +++ b/trunk/sound/drivers/pcsp/pcsp.c @@ -199,7 +199,7 @@ static void pcsp_stop_beep(struct snd_pcsp *chip) pcspkr_stop_sound(); } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int pcsp_suspend(struct device *dev) { struct snd_pcsp *chip = dev_get_drvdata(dev); @@ -212,7 +212,7 @@ static SIMPLE_DEV_PM_OPS(pcsp_pm, pcsp_suspend, NULL); #define PCSP_PM_OPS &pcsp_pm #else #define PCSP_PM_OPS NULL -#endif /* CONFIG_PM */ +#endif /* CONFIG_PM_SLEEP */ static void pcsp_shutdown(struct platform_device *dev) { diff --git a/trunk/sound/isa/als100.c b/trunk/sound/isa/als100.c index 2d67c78c9f4b..f7cdaf51512d 100644 --- a/trunk/sound/isa/als100.c +++ b/trunk/sound/isa/als100.c @@ -233,7 +233,7 @@ static int __devinit snd_card_als100_probe(int dev, irq[dev], dma8[dev], dma16[dev]); } - if ((error = snd_sb16dsp_pcm(chip, 0, NULL)) < 0) { + if ((error = snd_sb16dsp_pcm(chip, 0, &chip->pcm)) < 0) { snd_card_free(card); return error; } diff --git 
a/trunk/sound/oss/.gitignore b/trunk/sound/oss/.gitignore index 7efb12b45502..12a3920d6fb6 100644 --- a/trunk/sound/oss/.gitignore +++ b/trunk/sound/oss/.gitignore @@ -1,4 +1,3 @@ #Ignore generated files -maui_boot.h pss_boot.h trix_boot.h diff --git a/trunk/sound/oss/sb_audio.c b/trunk/sound/oss/sb_audio.c index 733b014ec7d1..b2b3c014221a 100644 --- a/trunk/sound/oss/sb_audio.c +++ b/trunk/sound/oss/sb_audio.c @@ -575,13 +575,15 @@ static int jazz16_audio_set_speed(int dev, int speed) if (speed > 0) { int tmp; - int s = speed * devc->channels; + int s; if (speed < 5000) speed = 5000; if (speed > 44100) speed = 44100; + s = speed * devc->channels; + devc->tconst = (256 - ((1000000 + s / 2) / s)) & 0xff; tmp = 256 - devc->tconst; diff --git a/trunk/sound/pci/cs46xx/cs46xx_lib.c b/trunk/sound/pci/cs46xx/cs46xx_lib.c index f75f5ffdfdfb..a71d1c14a0f6 100644 --- a/trunk/sound/pci/cs46xx/cs46xx_lib.c +++ b/trunk/sound/pci/cs46xx/cs46xx_lib.c @@ -94,7 +94,7 @@ static unsigned short snd_cs46xx_codec_read(struct snd_cs46xx *chip, if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) - return -EINVAL; + return 0xffff; chip->active_ctrl(chip, 1); diff --git a/trunk/sound/pci/ctxfi/ctatc.c b/trunk/sound/pci/ctxfi/ctatc.c index 8e40262d4117..2f6e9c762d3f 100644 --- a/trunk/sound/pci/ctxfi/ctatc.c +++ b/trunk/sound/pci/ctxfi/ctatc.c @@ -1725,8 +1725,10 @@ int __devinit ct_atc_create(struct snd_card *card, struct pci_dev *pci, atc_connect_resources(atc); atc->timer = ct_timer_new(atc); - if (!atc->timer) + if (!atc->timer) { + err = -ENOMEM; goto error1; + } err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, atc, &ops); if (err < 0) diff --git a/trunk/sound/pci/emu10k1/memory.c b/trunk/sound/pci/emu10k1/memory.c index 4f502a2bdc3c..0a436626182b 100644 --- a/trunk/sound/pci/emu10k1/memory.c +++ b/trunk/sound/pci/emu10k1/memory.c @@ -326,7 +326,10 @@ snd_emu10k1_alloc_pages(struct snd_emu10k1 *emu, struct snd_pcm_substream *subst for (page = blk->first_page; page <= blk->last_page; page++, idx++) { unsigned long ofs = idx << PAGE_SHIFT; dma_addr_t addr; - addr = snd_pcm_sgbuf_get_addr(substream, ofs); + if (ofs >= runtime->dma_bytes) + addr = emu->silent_page.addr; + else + addr = snd_pcm_sgbuf_get_addr(substream, ofs); if (! 
is_valid_page(emu, addr)) { printk(KERN_ERR "emu: failure page = %d\n", idx); mutex_unlock(&hdr->block_mutex); diff --git a/trunk/sound/pci/hda/hda_auto_parser.c b/trunk/sound/pci/hda/hda_auto_parser.c index 647218d69f68..4f7d2dfcef7b 100644 --- a/trunk/sound/pci/hda/hda_auto_parser.c +++ b/trunk/sound/pci/hda/hda_auto_parser.c @@ -332,13 +332,12 @@ int snd_hda_parse_pin_defcfg(struct hda_codec *codec, if (cfg->dig_outs) snd_printd(" dig-out=0x%x/0x%x\n", cfg->dig_out_pins[0], cfg->dig_out_pins[1]); - snd_printd(" inputs:"); + snd_printd(" inputs:\n"); for (i = 0; i < cfg->num_inputs; i++) { - snd_printd(" %s=0x%x", + snd_printd(" %s=0x%x\n", hda_get_autocfg_input_label(codec, cfg, i), cfg->inputs[i].pin); } - snd_printd("\n"); if (cfg->dig_in_pin) snd_printd(" dig-in=0x%x\n", cfg->dig_in_pin); diff --git a/trunk/sound/pci/hda/hda_beep.c b/trunk/sound/pci/hda/hda_beep.c index 0bc2315b181d..0849aac449f2 100644 --- a/trunk/sound/pci/hda/hda_beep.c +++ b/trunk/sound/pci/hda/hda_beep.c @@ -231,16 +231,22 @@ void snd_hda_detach_beep_device(struct hda_codec *codec) } EXPORT_SYMBOL_HDA(snd_hda_detach_beep_device); +static bool ctl_has_mute(struct snd_kcontrol *kcontrol) +{ + struct hda_codec *codec = snd_kcontrol_chip(kcontrol); + return query_amp_caps(codec, get_amp_nid(kcontrol), + get_amp_direction(kcontrol)) & AC_AMPCAP_MUTE; +} + /* get/put callbacks for beep mute mixer switches */ int snd_hda_mixer_amp_switch_get_beep(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_beep *beep = codec->beep; - if (beep) { + if (beep && (!beep->enabled || !ctl_has_mute(kcontrol))) { ucontrol->value.integer.value[0] = - ucontrol->value.integer.value[1] = - beep->enabled; + ucontrol->value.integer.value[1] = beep->enabled; return 0; } return snd_hda_mixer_amp_switch_get(kcontrol, ucontrol); @@ -252,9 +258,20 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol, { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct hda_beep *beep = codec->beep; - if (beep) - snd_hda_enable_beep_device(codec, - *ucontrol->value.integer.value); + if (beep) { + u8 chs = get_amp_channels(kcontrol); + int enable = 0; + long *valp = ucontrol->value.integer.value; + if (chs & 1) { + enable |= *valp; + valp++; + } + if (chs & 2) + enable |= *valp; + snd_hda_enable_beep_device(codec, enable); + } + if (!ctl_has_mute(kcontrol)) + return 0; return snd_hda_mixer_amp_switch_put(kcontrol, ucontrol); } EXPORT_SYMBOL_HDA(snd_hda_mixer_amp_switch_put_beep); diff --git a/trunk/sound/pci/hda/hda_codec.c b/trunk/sound/pci/hda/hda_codec.c index 88a9c20eb7a2..1c65cc5e3a31 100644 --- a/trunk/sound/pci/hda/hda_codec.c +++ b/trunk/sound/pci/hda/hda_codec.c @@ -1209,6 +1209,9 @@ static void snd_hda_codec_free(struct hda_codec *codec) kfree(codec); } +static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, + hda_nid_t fg, unsigned int power_state); + static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, unsigned int power_state); @@ -1317,6 +1320,10 @@ int /*__devinit*/ snd_hda_codec_new(struct hda_bus *bus, AC_VERB_GET_SUBSYSTEM_ID, 0); } + codec->epss = snd_hda_codec_get_supported_ps(codec, + codec->afg ? codec->afg : codec->mfg, + AC_PWRST_EPSS); + /* power-up all before initialization */ hda_set_power_state(codec, codec->afg ? 
codec->afg : codec->mfg, @@ -1386,6 +1393,44 @@ int snd_hda_codec_configure(struct hda_codec *codec) } EXPORT_SYMBOL_HDA(snd_hda_codec_configure); +/* update the stream-id if changed */ +static void update_pcm_stream_id(struct hda_codec *codec, + struct hda_cvt_setup *p, hda_nid_t nid, + u32 stream_tag, int channel_id) +{ + unsigned int oldval, newval; + + if (p->stream_tag != stream_tag || p->channel_id != channel_id) { + oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); + newval = (stream_tag << 4) | channel_id; + if (oldval != newval) + snd_hda_codec_write(codec, nid, 0, + AC_VERB_SET_CHANNEL_STREAMID, + newval); + p->stream_tag = stream_tag; + p->channel_id = channel_id; + } +} + +/* update the format-id if changed */ +static void update_pcm_format(struct hda_codec *codec, struct hda_cvt_setup *p, + hda_nid_t nid, int format) +{ + unsigned int oldval; + + if (p->format_id != format) { + oldval = snd_hda_codec_read(codec, nid, 0, + AC_VERB_GET_STREAM_FORMAT, 0); + if (oldval != format) { + msleep(1); + snd_hda_codec_write(codec, nid, 0, + AC_VERB_SET_STREAM_FORMAT, + format); + } + p->format_id = format; + } +} + /** * snd_hda_codec_setup_stream - set up the codec for streaming * @codec: the CODEC to set up @@ -1400,7 +1445,6 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, { struct hda_codec *c; struct hda_cvt_setup *p; - unsigned int oldval, newval; int type; int i; @@ -1413,29 +1457,13 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid, p = get_hda_cvt_setup(codec, nid); if (!p) return; - /* update the stream-id if changed */ - if (p->stream_tag != stream_tag || p->channel_id != channel_id) { - oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); - newval = (stream_tag << 4) | channel_id; - if (oldval != newval) - snd_hda_codec_write(codec, nid, 0, - AC_VERB_SET_CHANNEL_STREAMID, - newval); - p->stream_tag = stream_tag; - p->channel_id = channel_id; - } - /* update the format-id if changed */ - if (p->format_id != format) { - oldval = snd_hda_codec_read(codec, nid, 0, - AC_VERB_GET_STREAM_FORMAT, 0); - if (oldval != format) { - msleep(1); - snd_hda_codec_write(codec, nid, 0, - AC_VERB_SET_STREAM_FORMAT, - format); - } - p->format_id = format; - } + + if (codec->pcm_format_first) + update_pcm_format(codec, p, nid, format); + update_pcm_stream_id(codec, p, nid, stream_tag, channel_id); + if (!codec->pcm_format_first) + update_pcm_format(codec, p, nid, format); + p->active = 1; p->dirty = 0; @@ -2325,6 +2353,7 @@ int snd_hda_codec_reset(struct hda_codec *codec) } if (codec->patch_ops.free) codec->patch_ops.free(codec); + memset(&codec->patch_ops, 0, sizeof(codec->patch_ops)); snd_hda_jack_tbl_clear(codec); codec->proc_widget_hook = NULL; codec->spec = NULL; @@ -2340,7 +2369,6 @@ int snd_hda_codec_reset(struct hda_codec *codec) codec->num_pcms = 0; codec->pcm_info = NULL; codec->preset = NULL; - memset(&codec->patch_ops, 0, sizeof(codec->patch_ops)); codec->slave_dig_outs = NULL; codec->spdif_status_reset = 0; module_put(codec->owner); @@ -3497,7 +3525,7 @@ static bool snd_hda_codec_get_supported_ps(struct hda_codec *codec, hda_nid_t fg { int sup = snd_hda_param_read(codec, fg, AC_PAR_POWER_STATE); - if (sup < 0) + if (sup == -1) return false; if (sup & power_state) return true; @@ -3522,8 +3550,7 @@ static void hda_set_power_state(struct hda_codec *codec, hda_nid_t fg, /* this delay seems necessary to avoid click noise at power-down */ if (power_state == AC_PWRST_D3) { /* transition time less than 10ms for power 
down */ - bool epss = snd_hda_codec_get_supported_ps(codec, fg, AC_PWRST_EPSS); - msleep(epss ? 10 : 100); + msleep(codec->epss ? 10 : 100); } /* repeat power states setting at most 10 times*/ @@ -4433,6 +4460,8 @@ static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down) * then there is no need to go through power up here. */ if (codec->power_on) { + if (codec->power_transition < 0) + codec->power_transition = 0; spin_unlock(&codec->power_lock); return; } diff --git a/trunk/sound/pci/hda/hda_codec.h b/trunk/sound/pci/hda/hda_codec.h index c422d330ca54..e5a7e19a8071 100644 --- a/trunk/sound/pci/hda/hda_codec.h +++ b/trunk/sound/pci/hda/hda_codec.h @@ -861,6 +861,8 @@ struct hda_codec { unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */ unsigned int ignore_misc_bit:1; /* ignore MISC_NO_PRESENCE bit */ unsigned int no_jack_detect:1; /* Machine has no jack-detection */ + unsigned int pcm_format_first:1; /* PCM format must be set first */ + unsigned int epss:1; /* supporting EPSS? */ #ifdef CONFIG_SND_HDA_POWER_SAVE unsigned int power_on :1; /* current (global) power-state */ int power_transition; /* power-state in transition */ diff --git a/trunk/sound/pci/hda/hda_intel.c b/trunk/sound/pci/hda/hda_intel.c index c8aced182fd1..c4763c52eaf6 100644 --- a/trunk/sound/pci/hda/hda_intel.c +++ b/trunk/sound/pci/hda/hda_intel.c @@ -151,6 +151,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6}," "{Intel, CPT}," "{Intel, PPT}," "{Intel, LPT}," + "{Intel, LPT_LP}," "{Intel, HPT}," "{Intel, PBG}," "{Intel, SCH}," @@ -2700,6 +2701,8 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), + SND_PCI_QUIRK(0x1043, 0x1ac3, "ASUS X53S", POS_FIX_POSBUF), + SND_PCI_QUIRK(0x1043, 0x1b43, "ASUS K53E", POS_FIX_POSBUF), SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB), SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB), @@ -3270,6 +3273,14 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { { PCI_DEVICE(0x8086, 0x8c20), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, + /* Lynx Point-LP */ + { PCI_DEVICE(0x8086, 0x9c20), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | + AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, + /* Lynx Point-LP */ + { PCI_DEVICE(0x8086, 0x9c21), + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP | + AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_COMBO }, /* Haswell */ { PCI_DEVICE(0x8086, 0x0c0c), .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP | diff --git a/trunk/sound/pci/hda/hda_proc.c b/trunk/sound/pci/hda/hda_proc.c index 7e46258fc700..6894ec66258c 100644 --- a/trunk/sound/pci/hda/hda_proc.c +++ b/trunk/sound/pci/hda/hda_proc.c @@ -412,7 +412,7 @@ static void print_digital_conv(struct snd_info_buffer *buffer, if (digi1 & AC_DIG1_EMPHASIS) snd_iprintf(buffer, " Preemphasis"); if (digi1 & AC_DIG1_COPYRIGHT) - snd_iprintf(buffer, " Copyright"); + snd_iprintf(buffer, " Non-Copyright"); if (digi1 & AC_DIG1_NONAUDIO) snd_iprintf(buffer, " Non-Audio"); if (digi1 & AC_DIG1_PROFESSIONAL) diff --git a/trunk/sound/pci/hda/patch_ca0132.c b/trunk/sound/pci/hda/patch_ca0132.c index d0d3540e39e7..49750a96d649 100644 --- a/trunk/sound/pci/hda/patch_ca0132.c +++ b/trunk/sound/pci/hda/patch_ca0132.c @@ -246,7 +246,7 @@ static void init_output(struct hda_codec *codec, 
hda_nid_t pin, hda_nid_t dac) AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); } - if (dac) + if (dac && (get_wcaps(codec, dac) & AC_WCAP_OUT_AMP)) snd_hda_codec_write(codec, dac, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO); } @@ -261,7 +261,7 @@ static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc) AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)); } - if (adc) + if (adc && (get_wcaps(codec, adc) & AC_WCAP_IN_AMP)) snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)); } @@ -275,6 +275,10 @@ static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int type = dir ? HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type); + if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_MUTE) == 0) { + snd_printdd("Skipping '%s %s Switch' (no mute on node 0x%x)\n", pfx, dirstr[dir], nid); + return 0; + } sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } @@ -286,6 +290,10 @@ static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int type = dir ? HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type); + if ((query_amp_caps(codec, nid, type) & AC_AMPCAP_NUM_STEPS) == 0) { + snd_printdd("Skipping '%s %s Volume' (no amp on node 0x%x)\n", pfx, dirstr[dir], nid); + return 0; + } sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } @@ -464,50 +472,17 @@ static int chipio_read(struct hda_codec *codec, } /* - * PCM stuffs + * PCM callbacks */ -static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid, - u32 stream_tag, - int channel_id, int format) +static int ca0132_playback_pcm_open(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) { - unsigned int oldval, newval; - - if (!nid) - return; - - snd_printdd("ca0132_setup_stream: " - "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n", - nid, stream_tag, channel_id, format); - - /* update the format-id if changed */ - oldval = snd_hda_codec_read(codec, nid, 0, - AC_VERB_GET_STREAM_FORMAT, - 0); - if (oldval != format) { - msleep(20); - snd_hda_codec_write(codec, nid, 0, - AC_VERB_SET_STREAM_FORMAT, - format); - } - - oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); - newval = (stream_tag << 4) | channel_id; - if (oldval != newval) { - snd_hda_codec_write(codec, nid, 0, - AC_VERB_SET_CHANNEL_STREAMID, - newval); - } -} - -static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid) -{ - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0); - snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0); + struct ca0132_spec *spec = codec->spec; + return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream, + hinfo); } -/* - * PCM callbacks - */ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, @@ -515,10 +490,8 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); - - return 0; + return snd_hda_multi_out_analog_prepare(codec, &spec->multiout, + stream_tag, format, substream); } static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, @@ -526,92 +499,45 @@ static int 
ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_cleanup_stream(codec, spec->dacs[0]); - - return 0; + return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); } /* * Digital out */ -static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - unsigned int stream_tag, - unsigned int format, - struct snd_pcm_substream *substream) +static int ca0132_dig_playback_pcm_open(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_setup_stream(codec, spec->dig_out, stream_tag, 0, format); - - return 0; + return snd_hda_multi_out_dig_open(codec, &spec->multiout); } -static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - struct snd_pcm_substream *substream) -{ - struct ca0132_spec *spec = codec->spec; - - ca0132_cleanup_stream(codec, spec->dig_out); - - return 0; -} - -/* - * Analog capture - */ -static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo, +static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_setup_stream(codec, spec->adcs[substream->number], - stream_tag, 0, format); - - return 0; + return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, + stream_tag, format, substream); } -static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, +static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_cleanup_stream(codec, spec->adcs[substream->number]); - - return 0; + return snd_hda_multi_out_dig_cleanup(codec, &spec->multiout); } -/* - * Digital capture - */ -static int ca0132_dig_capture_pcm_prepare(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - unsigned int stream_tag, - unsigned int format, - struct snd_pcm_substream *substream) +static int ca0132_dig_playback_pcm_close(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; - - ca0132_setup_stream(codec, spec->dig_in, stream_tag, 0, format); - - return 0; -} - -static int ca0132_dig_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, - struct hda_codec *codec, - struct snd_pcm_substream *substream) -{ - struct ca0132_spec *spec = codec->spec; - - ca0132_cleanup_stream(codec, spec->dig_in); - - return 0; + return snd_hda_multi_out_dig_close(codec, &spec->multiout); } /* @@ -621,6 +547,7 @@ static struct hda_pcm_stream ca0132_pcm_analog_playback = { .channels_min = 2, .channels_max = 2, .ops = { + .open = ca0132_playback_pcm_open, .prepare = ca0132_playback_pcm_prepare, .cleanup = ca0132_playback_pcm_cleanup }, @@ -630,10 +557,6 @@ static struct hda_pcm_stream ca0132_pcm_analog_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, - .ops = { - .prepare = ca0132_capture_pcm_prepare, - .cleanup = ca0132_capture_pcm_cleanup - }, }; static struct hda_pcm_stream ca0132_pcm_digital_playback = { @@ -641,6 +564,8 @@ static struct hda_pcm_stream ca0132_pcm_digital_playback = { .channels_min = 2, .channels_max = 2, .ops = { + .open = ca0132_dig_playback_pcm_open, + .close = ca0132_dig_playback_pcm_close, .prepare = 
ca0132_dig_playback_pcm_prepare, .cleanup = ca0132_dig_playback_pcm_cleanup }, @@ -650,10 +575,6 @@ static struct hda_pcm_stream ca0132_pcm_digital_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, - .ops = { - .prepare = ca0132_dig_capture_pcm_prepare, - .cleanup = ca0132_dig_capture_pcm_cleanup - }, }; static int ca0132_build_pcms(struct hda_codec *codec) @@ -928,18 +849,16 @@ static int ca0132_build_controls(struct hda_codec *codec) spec->dig_out); if (err < 0) return err; - err = add_out_volume(codec, spec->dig_out, "IEC958"); + err = snd_hda_create_spdif_share_sw(codec, &spec->multiout); if (err < 0) return err; + /* spec->multiout.share_spdif = 1; */ } if (spec->dig_in) { err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in); if (err < 0) return err; - err = add_in_volume(codec, spec->dig_in, "IEC958"); - if (err < 0) - return err; } return 0; } @@ -961,6 +880,9 @@ static void ca0132_config(struct hda_codec *codec) struct ca0132_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; + codec->pcm_format_first = 1; + codec->no_sticky_stream = 1; + /* line-outs */ cfg->line_outs = 1; cfg->line_out_pins[0] = 0x0b; /* front */ @@ -988,14 +910,24 @@ static void ca0132_config(struct hda_codec *codec) /* Mic-in */ spec->input_pins[0] = 0x12; - spec->input_labels[0] = "Mic-In"; + spec->input_labels[0] = "Mic"; spec->adcs[0] = 0x07; /* Line-In */ spec->input_pins[1] = 0x11; - spec->input_labels[1] = "Line-In"; + spec->input_labels[1] = "Line"; spec->adcs[1] = 0x08; spec->num_inputs = 2; + + /* SPDIF I/O */ + spec->dig_out = 0x05; + spec->multiout.dig_out_nid = spec->dig_out; + cfg->dig_out_pins[0] = 0x0c; + cfg->dig_outs = 1; + cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF; + spec->dig_in = 0x09; + cfg->dig_in_pin = 0x0e; + cfg->dig_in_type = HDA_PCM_TYPE_SPDIF; } static void ca0132_init_chip(struct hda_codec *codec) diff --git a/trunk/sound/pci/hda/patch_conexant.c b/trunk/sound/pci/hda/patch_conexant.c index 14361184ae1e..5e22a8f43d2e 100644 --- a/trunk/sound/pci/hda/patch_conexant.c +++ b/trunk/sound/pci/hda/patch_conexant.c @@ -2967,12 +2967,10 @@ static const char * const cxt5066_models[CXT5066_MODELS] = { }; static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { - SND_PCI_QUIRK(0x1025, 0x054c, "Acer Aspire 3830TG", CXT5066_AUTO), SND_PCI_QUIRK_MASK(0x1025, 0xff00, 0x0400, "Acer", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), - SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), @@ -2988,14 +2986,10 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), - SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO), - SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO), SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), - SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo 
G565", CXT5066_AUTO), - SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO), {} }; diff --git a/trunk/sound/pci/hda/patch_hdmi.c b/trunk/sound/pci/hda/patch_hdmi.c index 69b928449789..8f23374fa642 100644 --- a/trunk/sound/pci/hda/patch_hdmi.c +++ b/trunk/sound/pci/hda/patch_hdmi.c @@ -877,8 +877,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo, struct hdmi_eld *eld; struct hdmi_spec_per_cvt *per_cvt = NULL; - hinfo->nid = 0; /* clear the leftover value */ - /* Validate hinfo */ pin_idx = hinfo_to_pin_index(spec, hinfo); if (snd_BUG_ON(pin_idx < 0)) @@ -1163,6 +1161,14 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo, return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format); } +static int generic_hdmi_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, + struct hda_codec *codec, + struct snd_pcm_substream *substream) +{ + snd_hda_codec_cleanup_stream(codec, hinfo->nid); + return 0; +} + static int hdmi_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) @@ -1202,6 +1208,7 @@ static const struct hda_pcm_ops generic_ops = { .open = hdmi_pcm_open, .close = hdmi_pcm_close, .prepare = generic_hdmi_playback_pcm_prepare, + .cleanup = generic_hdmi_playback_pcm_cleanup, }; static int generic_hdmi_build_pcms(struct hda_codec *codec) @@ -1220,7 +1227,6 @@ static int generic_hdmi_build_pcms(struct hda_codec *codec) pstr = &info->stream[SNDRV_PCM_STREAM_PLAYBACK]; pstr->substreams = 1; pstr->ops = generic_ops; - pstr->nid = 1; /* FIXME: just for avoiding a debug WARNING */ /* other pstr fields are set in open */ } diff --git a/trunk/sound/pci/hda/patch_realtek.c b/trunk/sound/pci/hda/patch_realtek.c index 344b221d2102..4f81dd44c837 100644 --- a/trunk/sound/pci/hda/patch_realtek.c +++ b/trunk/sound/pci/hda/patch_realtek.c @@ -6099,6 +6099,8 @@ static const struct alc_fixup alc269_fixups[] = { [ALC269_FIXUP_PCM_44K] = { .type = ALC_FIXUP_FUNC, .v.func = alc269_fixup_pcm_44k, + .chained = true, + .chain_id = ALC269_FIXUP_QUANTA_MUTE }, [ALC269_FIXUP_STEREO_DMIC] = { .type = ALC_FIXUP_FUNC, @@ -6206,9 +6208,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE), SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE), + SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK), + SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK), SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK), - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE), - SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K), + SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), #if 0 diff --git a/trunk/sound/pci/hda/patch_sigmatel.c b/trunk/sound/pci/hda/patch_sigmatel.c index 94040ccf8e8f..3d4722f0a1ca 100644 --- a/trunk/sound/pci/hda/patch_sigmatel.c +++ b/trunk/sound/pci/hda/patch_sigmatel.c @@ -1075,7 +1075,7 @@ static struct snd_kcontrol_new stac_smux_mixer = { static const char * const slave_pfxs[] = { "Front", "Surround", "Center", "LFE", "Side", - "Headphone", "Speaker", "IEC958", + "Headphone", "Speaker", "IEC958", "PCM", NULL }; @@ -4272,7 +4272,8 @@ static int 
stac92xx_init(struct hda_codec *codec) unsigned int gpio; int i; - snd_hda_sequence_write(codec, spec->init); + if (spec->init) + snd_hda_sequence_write(codec, spec->init); /* power down adcs initially */ if (spec->powerdown_adcs) @@ -4542,6 +4543,9 @@ static void stac92xx_line_out_detect(struct hda_codec *codec, struct auto_pin_cfg *cfg = &spec->autocfg; int i; + if (cfg->speaker_outs == 0) + return; + for (i = 0; i < cfg->line_outs; i++) { if (presence) break; @@ -5530,6 +5534,7 @@ static int patch_stac92hd83xxx(struct hda_codec *codec) snd_hda_codec_set_pincfg(codec, 0xf, 0x2181205e); } + codec->epss = 0; /* longer delay needed for D3 */ codec->no_trigger_sense = 1; codec->spec = spec; @@ -5748,7 +5753,6 @@ static int patch_stac92hd71bxx(struct hda_codec *codec) /* fallthru */ case 0x111d76b4: /* 6 Port without Analog Mixer */ case 0x111d76b5: - spec->init = stac92hd71bxx_core_init; codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_nids, @@ -5773,7 +5777,6 @@ static int patch_stac92hd71bxx(struct hda_codec *codec) spec->stream_delay = 40; /* 40 milliseconds */ /* disable VSW */ - spec->init = stac92hd71bxx_core_init; unmute_init++; snd_hda_codec_set_pincfg(codec, 0x0f, 0x40f000f0); snd_hda_codec_set_pincfg(codec, 0x19, 0x40f000f3); @@ -5788,7 +5791,6 @@ static int patch_stac92hd71bxx(struct hda_codec *codec) /* fallthru */ default: - spec->init = stac92hd71bxx_core_init; codec->slave_dig_outs = stac92hd71bxx_slave_dig_outs; spec->num_dmics = stac92xx_connected_ports(codec, stac92hd71bxx_dmic_nids, @@ -5796,6 +5798,9 @@ static int patch_stac92hd71bxx(struct hda_codec *codec) break; } + if (get_wcaps_type(get_wcaps(codec, 0x28)) == AC_WID_VOL_KNB) + spec->init = stac92hd71bxx_core_init; + if (get_wcaps(codec, 0xa) & AC_WCAP_IN_AMP) snd_hda_sequence_write_cache(codec, unmute_init); diff --git a/trunk/sound/pci/hda/patch_via.c b/trunk/sound/pci/hda/patch_via.c index 80d90cb42853..430771776915 100644 --- a/trunk/sound/pci/hda/patch_via.c +++ b/trunk/sound/pci/hda/patch_via.c @@ -1752,6 +1752,14 @@ static int via_suspend(struct hda_codec *codec) { struct via_spec *spec = codec->spec; vt1708_stop_hp_work(spec); + + if (spec->codec_type == VT1802) { + /* Fix pop noise on headphones */ + int i; + for (i = 0; i < spec->autocfg.hp_outs; i++) + snd_hda_set_pin_ctl(codec, spec->autocfg.hp_pins[i], 0); + } + return 0; } #endif diff --git a/trunk/sound/pci/ice1712/prodigy_hifi.c b/trunk/sound/pci/ice1712/prodigy_hifi.c index 764cc93dbca4..075d5aa1fee0 100644 --- a/trunk/sound/pci/ice1712/prodigy_hifi.c +++ b/trunk/sound/pci/ice1712/prodigy_hifi.c @@ -297,6 +297,7 @@ static int ak4396_dac_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem } static const DECLARE_TLV_DB_SCALE(db_scale_wm_dac, -12700, 100, 1); +static const DECLARE_TLV_DB_LINEAR(ak4396_db_scale, TLV_DB_GAIN_MUTE, 0); static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = { { @@ -307,7 +308,7 @@ static struct snd_kcontrol_new prodigy_hd2_controls[] __devinitdata = { .info = ak4396_dac_vol_info, .get = ak4396_dac_vol_get, .put = ak4396_dac_vol_put, - .tlv = { .p = db_scale_wm_dac }, + .tlv = { .p = ak4396_db_scale }, }, }; diff --git a/trunk/sound/pci/lx6464es/lx6464es.c b/trunk/sound/pci/lx6464es/lx6464es.c index d1ab43706735..5579b08bb35b 100644 --- a/trunk/sound/pci/lx6464es/lx6464es.c +++ b/trunk/sound/pci/lx6464es/lx6464es.c @@ -851,6 +851,8 @@ static int __devinit lx_pcm_create(struct lx6464es *chip) /* hardcoded device name & channel count 
*/ err = snd_pcm_new(chip->card, (char *)card_name, 0, 1, 1, &pcm); + if (err < 0) + return err; pcm->private_data = chip; diff --git a/trunk/sound/pci/rme9652/hdspm.c b/trunk/sound/pci/rme9652/hdspm.c index b8ac8710f47f..b12308b5ba2a 100644 --- a/trunk/sound/pci/rme9652/hdspm.c +++ b/trunk/sound/pci/rme9652/hdspm.c @@ -6585,7 +6585,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card, snd_printk(KERN_ERR "HDSPM: " "unable to kmalloc Mixer memory of %d Bytes\n", (int)sizeof(struct hdspm_mixer)); - return err; + return -ENOMEM; } hdspm->port_names_in = NULL; diff --git a/trunk/sound/pci/sis7019.c b/trunk/sound/pci/sis7019.c index 512434efcc31..805ab6e9a78f 100644 --- a/trunk/sound/pci/sis7019.c +++ b/trunk/sound/pci/sis7019.c @@ -1377,8 +1377,9 @@ static int __devinit sis_chip_create(struct snd_card *card, if (rc) goto error_out_cleanup; - if (request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME, - sis)) { + rc = request_irq(pci->irq, sis_interrupt, IRQF_SHARED, KBUILD_MODNAME, + sis); + if (rc) { dev_err(&pci->dev, "unable to allocate irq %d\n", sis->irq); goto error_out_cleanup; } diff --git a/trunk/sound/ppc/powermac.c b/trunk/sound/ppc/powermac.c index f5ceb6f282de..210cafe04890 100644 --- a/trunk/sound/ppc/powermac.c +++ b/trunk/sound/ppc/powermac.c @@ -143,7 +143,7 @@ static int __devexit snd_pmac_remove(struct platform_device *devptr) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int snd_pmac_driver_suspend(struct device *dev) { struct snd_card *card = dev_get_drvdata(dev); diff --git a/trunk/sound/ppc/snd_ps3.c b/trunk/sound/ppc/snd_ps3.c index 1aa52eff526a..9b18b5243a56 100644 --- a/trunk/sound/ppc/snd_ps3.c +++ b/trunk/sound/ppc/snd_ps3.c @@ -1040,6 +1040,7 @@ static int __devinit snd_ps3_driver_probe(struct ps3_system_bus_device *dev) GFP_KERNEL); if (!the_card.null_buffer_start_vaddr) { pr_info("%s: nullbuffer alloc failed\n", __func__); + ret = -ENOMEM; goto clean_preallocate; } pr_debug("%s: null vaddr=%p dma=%#llx\n", __func__, diff --git a/trunk/sound/soc/blackfin/bf6xx-sport.c b/trunk/sound/soc/blackfin/bf6xx-sport.c index 318c5ba5360f..dfb744381c42 100644 --- a/trunk/sound/soc/blackfin/bf6xx-sport.c +++ b/trunk/sound/soc/blackfin/bf6xx-sport.c @@ -413,7 +413,14 @@ EXPORT_SYMBOL(sport_create); void sport_delete(struct sport_device *sport) { + if (sport->tx_desc) + dma_free_coherent(NULL, sport->tx_desc_size, + sport->tx_desc, 0); + if (sport->rx_desc) + dma_free_coherent(NULL, sport->rx_desc_size, + sport->rx_desc, 0); sport_free_resource(sport); + kfree(sport); } EXPORT_SYMBOL(sport_delete); diff --git a/trunk/sound/soc/codecs/ab8500-codec.c b/trunk/sound/soc/codecs/ab8500-codec.c index 3c795921c5f6..23b40186f9b8 100644 --- a/trunk/sound/soc/codecs/ab8500-codec.c +++ b/trunk/sound/soc/codecs/ab8500-codec.c @@ -2406,6 +2406,10 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec) /* Setup AB8500 according to board-settings */ pdata = (struct ab8500_platform_data *)dev_get_platdata(dev->parent); + + /* Inform SoC Core that we have our own I/O arrangements. 
*/ + codec->control_data = (void *)true; + status = ab8500_audio_setup_mics(codec, &pdata->codec->amics); if (status < 0) { pr_err("%s: Failed to setup mics (%d)!\n", __func__, status); diff --git a/trunk/sound/soc/codecs/ad1980.c b/trunk/sound/soc/codecs/ad1980.c index 8c39dddd7d00..11b1b714b8b5 100644 --- a/trunk/sound/soc/codecs/ad1980.c +++ b/trunk/sound/soc/codecs/ad1980.c @@ -186,6 +186,7 @@ static int ad1980_soc_probe(struct snd_soc_codec *codec) printk(KERN_INFO "AD1980 SoC Audio Codec\n"); + codec->control_data = codec; /* we don't use regmap! */ ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) { printk(KERN_ERR "ad1980: failed to register AC97 codec\n"); diff --git a/trunk/sound/soc/codecs/arizona.c b/trunk/sound/soc/codecs/arizona.c index 5c9cacaf2d52..1cf7a32d1b21 100644 --- a/trunk/sound/soc/codecs/arizona.c +++ b/trunk/sound/soc/codecs/arizona.c @@ -426,7 +426,7 @@ static const int arizona_44k1_bclk_rates[] = { 940800, 1411200, 1881600, - 2882400, + 2822400, 3763200, 5644800, 7526400, diff --git a/trunk/sound/soc/codecs/mc13783.c b/trunk/sound/soc/codecs/mc13783.c index 6276e352125f..115a40301810 100644 --- a/trunk/sound/soc/codecs/mc13783.c +++ b/trunk/sound/soc/codecs/mc13783.c @@ -581,6 +581,8 @@ static int mc13783_probe(struct snd_soc_codec *codec) { struct mc13783_priv *priv = snd_soc_codec_get_drvdata(codec); + codec->control_data = priv->mc13xxx; + mc13xxx_lock(priv->mc13xxx); /* these are the reset values */ @@ -657,7 +659,7 @@ static struct snd_soc_dai_driver mc13783_dai_async[] = { .id = MC13783_ID_STEREO_DAC, .playback = { .stream_name = "Playback", - .channels_min = 1, + .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = MC13783_FORMATS, @@ -668,7 +670,7 @@ static struct snd_soc_dai_driver mc13783_dai_async[] = { .id = MC13783_ID_STEREO_CODEC, .capture = { .stream_name = "Capture", - .channels_min = 1, + .channels_min = 2, .channels_max = 2, .rates = MC13783_RATES_RECORD, .formats = MC13783_FORMATS, @@ -690,14 +692,14 @@ static struct snd_soc_dai_driver mc13783_dai_sync[] = { .id = MC13783_ID_SYNC, .playback = { .stream_name = "Playback", - .channels_min = 1, + .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = MC13783_FORMATS, }, .capture = { .stream_name = "Capture", - .channels_min = 1, + .channels_min = 2, .channels_max = 2, .rates = MC13783_RATES_RECORD, .formats = MC13783_FORMATS, diff --git a/trunk/sound/soc/codecs/sgtl5000.c b/trunk/sound/soc/codecs/sgtl5000.c index 8af6a5245b18..df2f99d1d428 100644 --- a/trunk/sound/soc/codecs/sgtl5000.c +++ b/trunk/sound/soc/codecs/sgtl5000.c @@ -239,6 +239,7 @@ static const struct snd_soc_dapm_route sgtl5000_dapm_routes[] = { {"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */ {"LO", NULL, "DAC"}, /* dac --> line_out */ + {"LINE_IN", NULL, "VAG_POWER"}, {"Headphone Mux", "LINE_IN", "LINE_IN"},/* line_in --> hp_mux */ {"HP", NULL, "Headphone Mux"}, /* hp_mux --> hp */ @@ -1357,8 +1358,6 @@ static int sgtl5000_probe(struct snd_soc_codec *codec) if (ret) goto err; - snd_soc_dapm_new_widgets(&codec->dapm); - return 0; err: diff --git a/trunk/sound/soc/codecs/stac9766.c b/trunk/sound/soc/codecs/stac9766.c index 982e437799a8..33c0f3d39c87 100644 --- a/trunk/sound/soc/codecs/stac9766.c +++ b/trunk/sound/soc/codecs/stac9766.c @@ -340,6 +340,7 @@ static int stac9766_codec_probe(struct snd_soc_codec *codec) printk(KERN_INFO "STAC9766 SoC Audio Codec %s\n", STAC9766_VERSION); + codec->control_data = codec; /* we don't use regmap! 
*/ ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) goto codec_err; diff --git a/trunk/sound/soc/codecs/wm2000.c b/trunk/sound/soc/codecs/wm2000.c index 3fd5b29dc933..a3acb7a85f6a 100644 --- a/trunk/sound/soc/codecs/wm2000.c +++ b/trunk/sound/soc/codecs/wm2000.c @@ -702,7 +702,7 @@ static bool wm2000_readable_reg(struct device *dev, unsigned int reg) } static const struct regmap_config wm2000_regmap = { - .reg_bits = 8, + .reg_bits = 16, .val_bits = 8, .max_register = WM2000_REG_IF_CTL, diff --git a/trunk/sound/soc/codecs/wm5102.c b/trunk/sound/soc/codecs/wm5102.c index 6537f16d383e..e33d327396ad 100644 --- a/trunk/sound/soc/codecs/wm5102.c +++ b/trunk/sound/soc/codecs/wm5102.c @@ -128,13 +128,9 @@ SOC_SINGLE_TLV("EQ4 B5 Volume", ARIZONA_EQ4_2, ARIZONA_EQ4_B5_GAIN_SHIFT, ARIZONA_MIXER_CONTROLS("DRC1L", ARIZONA_DRC1LMIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("DRC1R", ARIZONA_DRC1RMIX_INPUT_1_SOURCE), -ARIZONA_MIXER_CONTROLS("DRC2L", ARIZONA_DRC2LMIX_INPUT_1_SOURCE), -ARIZONA_MIXER_CONTROLS("DRC2R", ARIZONA_DRC2RMIX_INPUT_1_SOURCE), SND_SOC_BYTES_MASK("DRC1", ARIZONA_DRC1_CTRL1, 5, ARIZONA_DRC1R_ENA | ARIZONA_DRC1L_ENA), -SND_SOC_BYTES_MASK("DRC2", ARIZONA_DRC2_CTRL1, 5, - ARIZONA_DRC2R_ENA | ARIZONA_DRC2L_ENA), ARIZONA_MIXER_CONTROLS("LHPF1", ARIZONA_HPLP1MIX_INPUT_1_SOURCE), ARIZONA_MIXER_CONTROLS("LHPF2", ARIZONA_HPLP2MIX_INPUT_1_SOURCE), @@ -236,8 +232,6 @@ ARIZONA_MIXER_ENUMS(EQ4, ARIZONA_EQ4MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC1L, ARIZONA_DRC1LMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(DRC1R, ARIZONA_DRC1RMIX_INPUT_1_SOURCE); -ARIZONA_MIXER_ENUMS(DRC2L, ARIZONA_DRC2LMIX_INPUT_1_SOURCE); -ARIZONA_MIXER_ENUMS(DRC2R, ARIZONA_DRC2RMIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF1, ARIZONA_HPLP1MIX_INPUT_1_SOURCE); ARIZONA_MIXER_ENUMS(LHPF2, ARIZONA_HPLP2MIX_INPUT_1_SOURCE); @@ -349,10 +343,6 @@ SND_SOC_DAPM_PGA("DRC1L", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1L_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_PGA("DRC1R", ARIZONA_DRC1_CTRL1, ARIZONA_DRC1R_ENA_SHIFT, 0, NULL, 0), -SND_SOC_DAPM_PGA("DRC2L", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2L_ENA_SHIFT, 0, - NULL, 0), -SND_SOC_DAPM_PGA("DRC2R", ARIZONA_DRC2_CTRL1, ARIZONA_DRC2R_ENA_SHIFT, 0, - NULL, 0), SND_SOC_DAPM_PGA("LHPF1", ARIZONA_HPLPF1_1, ARIZONA_LHPF1_ENA_SHIFT, 0, NULL, 0), @@ -466,8 +456,6 @@ ARIZONA_MIXER_WIDGETS(EQ4, "EQ4"), ARIZONA_MIXER_WIDGETS(DRC1L, "DRC1L"), ARIZONA_MIXER_WIDGETS(DRC1R, "DRC1R"), -ARIZONA_MIXER_WIDGETS(DRC2L, "DRC2L"), -ARIZONA_MIXER_WIDGETS(DRC2R, "DRC2R"), ARIZONA_MIXER_WIDGETS(LHPF1, "LHPF1"), ARIZONA_MIXER_WIDGETS(LHPF2, "LHPF2"), @@ -553,8 +541,6 @@ SND_SOC_DAPM_OUTPUT("SPKDAT1R"), { name, "EQ4", "EQ4" }, \ { name, "DRC1L", "DRC1L" }, \ { name, "DRC1R", "DRC1R" }, \ - { name, "DRC2L", "DRC2L" }, \ - { name, "DRC2R", "DRC2R" }, \ { name, "LHPF1", "LHPF1" }, \ { name, "LHPF2", "LHPF2" }, \ { name, "LHPF3", "LHPF3" }, \ @@ -639,6 +625,15 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = { { "AIF2 Capture", NULL, "SYSCLK" }, { "AIF3 Capture", NULL, "SYSCLK" }, + { "IN1L PGA", NULL, "IN1L" }, + { "IN1R PGA", NULL, "IN1R" }, + + { "IN2L PGA", NULL, "IN2L" }, + { "IN2R PGA", NULL, "IN2R" }, + + { "IN3L PGA", NULL, "IN3L" }, + { "IN3R PGA", NULL, "IN3R" }, + ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"), ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"), ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"), @@ -675,8 +670,6 @@ static const struct snd_soc_dapm_route wm5102_dapm_routes[] = { ARIZONA_MIXER_ROUTES("DRC1L", "DRC1L"), ARIZONA_MIXER_ROUTES("DRC1R", "DRC1R"), - ARIZONA_MIXER_ROUTES("DRC2L", "DRC2L"), - 
ARIZONA_MIXER_ROUTES("DRC2R", "DRC2R"), ARIZONA_MIXER_ROUTES("LHPF1", "LHPF1"), ARIZONA_MIXER_ROUTES("LHPF2", "LHPF2"), diff --git a/trunk/sound/soc/codecs/wm5110.c b/trunk/sound/soc/codecs/wm5110.c index 8033f7065189..01ebbcc5c6a4 100644 --- a/trunk/sound/soc/codecs/wm5110.c +++ b/trunk/sound/soc/codecs/wm5110.c @@ -681,6 +681,18 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = { { "AIF2 Capture", NULL, "SYSCLK" }, { "AIF3 Capture", NULL, "SYSCLK" }, + { "IN1L PGA", NULL, "IN1L" }, + { "IN1R PGA", NULL, "IN1R" }, + + { "IN2L PGA", NULL, "IN2L" }, + { "IN2R PGA", NULL, "IN2R" }, + + { "IN3L PGA", NULL, "IN3L" }, + { "IN3R PGA", NULL, "IN3R" }, + + { "IN4L PGA", NULL, "IN4L" }, + { "IN4R PGA", NULL, "IN4R" }, + ARIZONA_MIXER_ROUTES("OUT1L", "HPOUT1L"), ARIZONA_MIXER_ROUTES("OUT1R", "HPOUT1R"), ARIZONA_MIXER_ROUTES("OUT2L", "HPOUT2L"), diff --git a/trunk/sound/soc/codecs/wm8904.c b/trunk/sound/soc/codecs/wm8904.c index 0013afe48e66..dc4262eea4b7 100644 --- a/trunk/sound/soc/codecs/wm8904.c +++ b/trunk/sound/soc/codecs/wm8904.c @@ -100,7 +100,7 @@ static const struct reg_default wm8904_reg_defaults[] = { { 14, 0x0000 }, /* R14 - Power Management 2 */ { 15, 0x0000 }, /* R15 - Power Management 3 */ { 18, 0x0000 }, /* R18 - Power Management 6 */ - { 19, 0x945E }, /* R20 - Clock Rates 0 */ + { 20, 0x945E }, /* R20 - Clock Rates 0 */ { 21, 0x0C05 }, /* R21 - Clock Rates 1 */ { 22, 0x0006 }, /* R22 - Clock Rates 2 */ { 24, 0x0050 }, /* R24 - Audio Interface 0 */ diff --git a/trunk/sound/soc/codecs/wm8962.c b/trunk/sound/soc/codecs/wm8962.c index eaf65863ec21..ce6720073798 100644 --- a/trunk/sound/soc/codecs/wm8962.c +++ b/trunk/sound/soc/codecs/wm8962.c @@ -2501,6 +2501,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec, /* VMID 2*250k */ snd_soc_update_bits(codec, WM8962_PWR_MGMT_1, WM8962_VMID_SEL_MASK, 0x100); + + if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) + msleep(100); break; case SND_SOC_BIAS_OFF: @@ -3730,21 +3733,6 @@ static int wm8962_runtime_resume(struct device *dev) regcache_sync(wm8962->regmap); - regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP, - WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA, - WM8962_STARTUP_BIAS_ENA | WM8962_VMID_BUF_ENA); - - /* Bias enable at 2*50k for ramp */ - regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1, - WM8962_VMID_SEL_MASK | WM8962_BIAS_ENA, - WM8962_BIAS_ENA | 0x180); - - msleep(5); - - /* VMID back to 2x250k for standby */ - regmap_update_bits(wm8962->regmap, WM8962_PWR_MGMT_1, - WM8962_VMID_SEL_MASK, 0x100); - return 0; } diff --git a/trunk/sound/soc/codecs/wm8994.c b/trunk/sound/soc/codecs/wm8994.c index bb62f4b3d563..6c9eeca85b95 100644 --- a/trunk/sound/soc/codecs/wm8994.c +++ b/trunk/sound/soc/codecs/wm8994.c @@ -2649,7 +2649,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream, return -EINVAL; } - bclk_rate = params_rate(params) * 2; + bclk_rate = params_rate(params) * 4; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: bclk_rate *= 16; @@ -3253,10 +3253,13 @@ static void wm8994_mic_work(struct work_struct *work) int ret; int report; + pm_runtime_get_sync(dev); + ret = regmap_read(regmap, WM8994_INTERRUPT_RAW_STATUS_2, ®); if (ret < 0) { dev_err(dev, "Failed to read microphone status: %d\n", ret); + pm_runtime_put(dev); return; } @@ -3299,6 +3302,8 @@ static void wm8994_mic_work(struct work_struct *work) snd_soc_jack_report(priv->micdet[1].jack, report, SND_JACK_HEADSET | SND_JACK_BTN_0); + + pm_runtime_put(dev); } static irqreturn_t wm8994_mic_irq(int irq, void *data) @@ 
-3421,12 +3426,15 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data) int reg; bool present; + pm_runtime_get_sync(codec->dev); + mutex_lock(&wm8994->accdet_lock); reg = snd_soc_read(codec, WM1811_JACKDET_CTRL); if (reg < 0) { dev_err(codec->dev, "Failed to read jack status: %d\n", reg); mutex_unlock(&wm8994->accdet_lock); + pm_runtime_put(codec->dev); return IRQ_NONE; } @@ -3491,6 +3499,7 @@ static irqreturn_t wm1811_jackdet_irq(int irq, void *data) SND_JACK_MECHANICAL | SND_JACK_HEADSET | wm8994->btn_mask); + pm_runtime_put(codec->dev); return IRQ_HANDLED; } @@ -3602,6 +3611,8 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data) if (!(snd_soc_read(codec, WM8958_MIC_DETECT_1) & WM8958_MICD_ENA)) return IRQ_HANDLED; + pm_runtime_get_sync(codec->dev); + /* We may occasionally read a detection without an impedence * range being provided - if that happens loop again. */ @@ -3612,6 +3623,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data) dev_err(codec->dev, "Failed to read mic detect status: %d\n", reg); + pm_runtime_put(codec->dev); return IRQ_NONE; } @@ -3639,6 +3651,7 @@ static irqreturn_t wm8958_mic_irq(int irq, void *data) dev_warn(codec->dev, "Accessory detection with no callback\n"); out: + pm_runtime_put(codec->dev); return IRQ_HANDLED; } @@ -4025,6 +4038,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) break; case WM8958: if (wm8994->revision < 1) { + snd_soc_dapm_add_routes(dapm, wm8994_intercon, + ARRAY_SIZE(wm8994_intercon)); snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, ARRAY_SIZE(wm8994_revd_intercon)); snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon, diff --git a/trunk/sound/soc/codecs/wm9712.c b/trunk/sound/soc/codecs/wm9712.c index 099e6ec32125..c6d2076a796b 100644 --- a/trunk/sound/soc/codecs/wm9712.c +++ b/trunk/sound/soc/codecs/wm9712.c @@ -148,7 +148,7 @@ SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1), SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1), SOC_ENUM("Capture Volume Steps", wm9712_enum[6]), -SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1), +SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 0), SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0), SOC_SINGLE_TLV("Mic 1 Volume", AC97_MIC, 8, 31, 1, main_tlv), @@ -272,7 +272,7 @@ SOC_DAPM_ENUM("Route", wm9712_enum[9]); /* Mic select */ static const struct snd_kcontrol_new wm9712_mic_src_controls = -SOC_DAPM_ENUM("Route", wm9712_enum[7]); +SOC_DAPM_ENUM("Mic Source Select", wm9712_enum[7]); /* diff select */ static const struct snd_kcontrol_new wm9712_diff_sel_controls = @@ -291,7 +291,9 @@ SND_SOC_DAPM_MUX("Left Capture Select", SND_SOC_NOPM, 0, 0, &wm9712_capture_selectl_controls), SND_SOC_DAPM_MUX("Right Capture Select", SND_SOC_NOPM, 0, 0, &wm9712_capture_selectr_controls), -SND_SOC_DAPM_MUX("Mic Select Source", SND_SOC_NOPM, 0, 0, +SND_SOC_DAPM_MUX("Left Mic Select Source", SND_SOC_NOPM, 0, 0, + &wm9712_mic_src_controls), +SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0, &wm9712_mic_src_controls), SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0, &wm9712_diff_sel_controls), @@ -319,6 +321,7 @@ SND_SOC_DAPM_PGA("Out 3 PGA", AC97_INT_PAGING, 5, 1, NULL, 0), SND_SOC_DAPM_PGA("Line PGA", AC97_INT_PAGING, 2, 1, NULL, 0), SND_SOC_DAPM_PGA("Phone PGA", AC97_INT_PAGING, 1, 1, NULL, 0), SND_SOC_DAPM_PGA("Mic PGA", AC97_INT_PAGING, 0, 1, NULL, 0), +SND_SOC_DAPM_PGA("Differential Mic", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_MICBIAS("Mic Bias", AC97_INT_PAGING, 10, 1), SND_SOC_DAPM_OUTPUT("MONOOUT"), 
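[Editor's note] The wm8994 hunks above bracket the microphone and jack-detect handlers with pm_runtime_get_sync()/pm_runtime_put() so the device is guaranteed to be powered while its status registers are read, and the reference is dropped on every exit path, including the error returns. A rough sketch of that bracketing pattern; example_read_status() and example_report() are invented helpers standing in for the real register read and jack report:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>

int example_read_status(struct device *dev);            /* assumed helper */
void example_report(struct device *dev, int status);    /* assumed helper */

static irqreturn_t example_det_irq(int irq, void *data)
{
        struct device *dev = data;
        int status;

        pm_runtime_get_sync(dev);       /* device must be powered to read it */

        status = example_read_status(dev);
        if (status < 0) {
                pm_runtime_put(dev);    /* drop the reference on error paths too */
                return IRQ_NONE;
        }

        example_report(dev, status);

        pm_runtime_put(dev);
        return IRQ_HANDLED;
}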
SND_SOC_DAPM_OUTPUT("HPOUTL"), @@ -379,6 +382,18 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = { {"Mic PGA", NULL, "MIC1"}, {"Mic PGA", NULL, "MIC2"}, + /* microphones */ + {"Differential Mic", NULL, "MIC1"}, + {"Differential Mic", NULL, "MIC2"}, + {"Left Mic Select Source", "Mic 1", "MIC1"}, + {"Left Mic Select Source", "Mic 2", "MIC2"}, + {"Left Mic Select Source", "Stereo", "MIC1"}, + {"Left Mic Select Source", "Differential", "Differential Mic"}, + {"Right Mic Select Source", "Mic 1", "MIC1"}, + {"Right Mic Select Source", "Mic 2", "MIC2"}, + {"Right Mic Select Source", "Stereo", "MIC2"}, + {"Right Mic Select Source", "Differential", "Differential Mic"}, + /* left capture selector */ {"Left Capture Select", "Mic", "MIC1"}, {"Left Capture Select", "Speaker Mixer", "Speaker Mixer"}, @@ -619,6 +634,7 @@ static int wm9712_soc_probe(struct snd_soc_codec *codec) { int ret = 0; + codec->control_data = codec; /* we don't use regmap! */ ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) { printk(KERN_ERR "wm9712: failed to register AC97 codec\n"); diff --git a/trunk/sound/soc/codecs/wm9713.c b/trunk/sound/soc/codecs/wm9713.c index 3eb19fb71d17..d0b8a3287a85 100644 --- a/trunk/sound/soc/codecs/wm9713.c +++ b/trunk/sound/soc/codecs/wm9713.c @@ -1196,6 +1196,7 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec) if (wm9713 == NULL) return -ENOMEM; snd_soc_codec_set_drvdata(codec, wm9713); + codec->control_data = wm9713; /* we don't use regmap! */ ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) diff --git a/trunk/sound/soc/davinci/davinci-mcasp.c b/trunk/sound/soc/davinci/davinci-mcasp.c index 95441bfc8190..ce5e5cd254dd 100644 --- a/trunk/sound/soc/davinci/davinci-mcasp.c +++ b/trunk/sound/soc/davinci/davinci-mcasp.c @@ -380,14 +380,20 @@ static void mcasp_start_tx(struct davinci_audio_dev *dev) static void davinci_mcasp_start(struct davinci_audio_dev *dev, int stream) { if (stream == SNDRV_PCM_STREAM_PLAYBACK) { - if (dev->txnumevt) /* enable FIFO */ + if (dev->txnumevt) { /* enable FIFO */ + mcasp_clr_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, + FIFO_ENABLE); mcasp_set_bits(dev->base + DAVINCI_MCASP_WFIFOCTL, FIFO_ENABLE); + } mcasp_start_tx(dev); } else { - if (dev->rxnumevt) /* enable FIFO */ + if (dev->rxnumevt) { /* enable FIFO */ + mcasp_clr_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, + FIFO_ENABLE); mcasp_set_bits(dev->base + DAVINCI_MCASP_RFIFOCTL, FIFO_ENABLE); + } mcasp_start_rx(dev); } } diff --git a/trunk/sound/soc/fsl/imx-sgtl5000.c b/trunk/sound/soc/fsl/imx-sgtl5000.c index fb21b17f17f5..199408ec4261 100644 --- a/trunk/sound/soc/fsl/imx-sgtl5000.c +++ b/trunk/sound/soc/fsl/imx-sgtl5000.c @@ -94,7 +94,7 @@ static int __devinit imx_sgtl5000_probe(struct platform_device *pdev) dev_err(&pdev->dev, "audmux internal port setup failed\n"); return ret; } - imx_audmux_v2_configure_port(ext_port, + ret = imx_audmux_v2_configure_port(ext_port, IMX_AUDMUX_V2_PTCR_SYN, IMX_AUDMUX_V2_PDCR_RXDSEL(int_port)); if (ret) { diff --git a/trunk/sound/soc/fsl/imx-ssi.c b/trunk/sound/soc/fsl/imx-ssi.c index 28dd76c7cb1c..81d7728cf67f 100644 --- a/trunk/sound/soc/fsl/imx-ssi.c +++ b/trunk/sound/soc/fsl/imx-ssi.c @@ -380,13 +380,14 @@ static int imx_ssi_dai_probe(struct snd_soc_dai *dai) static struct snd_soc_dai_driver imx_ssi_dai = { .probe = imx_ssi_dai_probe, .playback = { - .channels_min = 1, + /* The SSI does not support monaural audio. 
*/ + .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { - .channels_min = 1, + .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = SNDRV_PCM_FMTBIT_S16_LE, diff --git a/trunk/sound/soc/mxs/Kconfig b/trunk/sound/soc/mxs/Kconfig index 99a997f19bb9..b6fa77678d97 100644 --- a/trunk/sound/soc/mxs/Kconfig +++ b/trunk/sound/soc/mxs/Kconfig @@ -10,7 +10,7 @@ menuconfig SND_MXS_SOC if SND_MXS_SOC config SND_SOC_MXS_SGTL5000 - tristate "SoC Audio support for i.MX boards with sgtl5000" + tristate "SoC Audio support for MXS boards with sgtl5000" depends on I2C select SND_SOC_SGTL5000 help diff --git a/trunk/sound/soc/mxs/mxs-saif.c b/trunk/sound/soc/mxs/mxs-saif.c index aba71bfa33b1..b3030718c228 100644 --- a/trunk/sound/soc/mxs/mxs-saif.c +++ b/trunk/sound/soc/mxs/mxs-saif.c @@ -394,9 +394,14 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *cpu_dai) { struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai); + struct mxs_saif *master_saif; u32 scr, stat; int ret; + master_saif = mxs_saif_get_master(saif); + if (!master_saif) + return -EINVAL; + /* mclk should already be set */ if (!saif->mclk && saif->mclk_in_use) { dev_err(cpu_dai->dev, "set mclk first\n"); @@ -420,6 +425,25 @@ static int mxs_saif_hw_params(struct snd_pcm_substream *substream, return ret; } + /* prepare clk in hw_param, enable in trigger */ + clk_prepare(saif->clk); + if (saif != master_saif) { + /* + * Set an initial clock rate for the saif internal logic to work + * properly. This is important when working in EXTMASTER mode + * that uses the other saif's BITCLK&LRCLK but it still needs a + * basic clock which should be fast enough for the internal + * logic. 
+ */ + clk_enable(saif->clk); + ret = clk_set_rate(saif->clk, 24000000); + clk_disable(saif->clk); + if (ret) + return ret; + + clk_prepare(master_saif->clk); + } + scr = __raw_readl(saif->base + SAIF_CTRL); scr &= ~BM_SAIF_CTRL_WORD_LENGTH; diff --git a/trunk/sound/soc/omap/am3517evm.c b/trunk/sound/soc/omap/am3517evm.c index 009533ab8d18..df65f98211ec 100644 --- a/trunk/sound/soc/omap/am3517evm.c +++ b/trunk/sound/soc/omap/am3517evm.c @@ -59,7 +59,7 @@ static int am3517evm_hw_params(struct snd_pcm_substream *substream, return ret; } - snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_FSR_SRC_FSX, 0, + ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_FSR_SRC_FSX, 0, SND_SOC_CLOCK_IN); if (ret < 0) { printk(KERN_ERR "can't set CPU system clock OMAP_MCBSP_FSR_SRC_FSX\n"); diff --git a/trunk/sound/soc/omap/mcbsp.c b/trunk/sound/soc/omap/mcbsp.c index 34835e8a9160..d33c48baaf71 100644 --- a/trunk/sound/soc/omap/mcbsp.c +++ b/trunk/sound/soc/omap/mcbsp.c @@ -745,7 +745,7 @@ int omap_mcbsp_6pin_src_mux(struct omap_mcbsp *mcbsp, u8 mux) { const char *signal, *src; - if (mcbsp->pdata->mux_signal) + if (!mcbsp->pdata->mux_signal) return -EINVAL; switch (mux) { diff --git a/trunk/sound/soc/omap/omap-mcbsp.c b/trunk/sound/soc/omap/omap-mcbsp.c index 1046083e90a0..acdd3ef14e08 100644 --- a/trunk/sound/soc/omap/omap-mcbsp.c +++ b/trunk/sound/soc/omap/omap-mcbsp.c @@ -820,3 +820,4 @@ module_platform_driver(asoc_mcbsp_driver); MODULE_AUTHOR("Jarkko Nikula "); MODULE_DESCRIPTION("OMAP I2S SoC Interface"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:omap-mcbsp"); diff --git a/trunk/sound/soc/omap/omap-pcm.c b/trunk/sound/soc/omap/omap-pcm.c index 5a649da9122a..f0feb06615f8 100644 --- a/trunk/sound/soc/omap/omap-pcm.c +++ b/trunk/sound/soc/omap/omap-pcm.c @@ -441,3 +441,4 @@ module_platform_driver(omap_pcm_driver); MODULE_AUTHOR("Jarkko Nikula "); MODULE_DESCRIPTION("OMAP PCM DMA module"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:omap-pcm-audio"); diff --git a/trunk/sound/soc/samsung/dma.c b/trunk/sound/soc/samsung/dma.c index f3ebc38c10fe..b70964ea448c 100644 --- a/trunk/sound/soc/samsung/dma.c +++ b/trunk/sound/soc/samsung/dma.c @@ -34,9 +34,7 @@ static const struct snd_pcm_hardware dma_hardware = { .info = SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP | - SNDRV_PCM_INFO_MMAP_VALID | - SNDRV_PCM_INFO_PAUSE | - SNDRV_PCM_INFO_RESUME, + SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U8 | @@ -248,15 +246,11 @@ static int dma_trigger(struct snd_pcm_substream *substream, int cmd) switch (cmd) { case SNDRV_PCM_TRIGGER_START: - case SNDRV_PCM_TRIGGER_RESUME: - case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: prtd->state |= ST_RUNNING; prtd->params->ops->trigger(prtd->params->ch); break; case SNDRV_PCM_TRIGGER_STOP: - case SNDRV_PCM_TRIGGER_SUSPEND: - case SNDRV_PCM_TRIGGER_PAUSE_PUSH: prtd->state &= ~ST_RUNNING; prtd->params->ops->stop(prtd->params->ch); break; diff --git a/trunk/sound/soc/samsung/pcm.c b/trunk/sound/soc/samsung/pcm.c index b7b2a1f91425..89b064650f14 100644 --- a/trunk/sound/soc/samsung/pcm.c +++ b/trunk/sound/soc/samsung/pcm.c @@ -20,7 +20,7 @@ #include #include -#include +#include #include "dma.h" #include "pcm.h" diff --git a/trunk/sound/soc/soc-core.c b/trunk/sound/soc/soc-core.c index f219b2f7ee68..c501af6d8dbe 100644 --- a/trunk/sound/soc/soc-core.c +++ b/trunk/sound/soc/soc-core.c @@ -826,7 +826,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num) } if (!rtd->cpu_dai) { - 
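[Editor's note] The mxs-saif hunk above prepares the clock during hw_params and only briefly enables it so clk_set_rate() can give the slave instance a working initial rate; the enable proper is deferred to the trigger callback. A loose sketch of that prepare/enable/set-rate/disable sequence, with a placeholder rate and no claim to match the driver's exact error handling:

#include <linux/clk.h>

static int example_clk_preset(struct clk *clk)
{
        int ret;

        ret = clk_prepare(clk);         /* sleepable part, done from hw_params */
        if (ret)
                return ret;

        ret = clk_enable(clk);          /* brief enable so the rate can be set */
        if (ret) {
                clk_unprepare(clk);
                return ret;
        }

        ret = clk_set_rate(clk, 24000000);      /* placeholder initial rate */
        clk_disable(clk);               /* stays prepared; enabled again in trigger */

        return ret;
}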
dev_dbg(card->dev, "CPU DAI %s not registered\n", + dev_err(card->dev, "CPU DAI %s not registered\n", dai_link->cpu_dai_name); return -EPROBE_DEFER; } @@ -857,14 +857,14 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num) } if (!rtd->codec_dai) { - dev_dbg(card->dev, "CODEC DAI %s not registered\n", + dev_err(card->dev, "CODEC DAI %s not registered\n", dai_link->codec_dai_name); return -EPROBE_DEFER; } } if (!rtd->codec) { - dev_dbg(card->dev, "CODEC %s not registered\n", + dev_err(card->dev, "CODEC %s not registered\n", dai_link->codec_name); return -EPROBE_DEFER; } @@ -888,7 +888,7 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num) rtd->platform = platform; } if (!rtd->platform) { - dev_dbg(card->dev, "platform %s not registered\n", + dev_err(card->dev, "platform %s not registered\n", dai_link->platform_name); return -EPROBE_DEFER; } @@ -1096,7 +1096,7 @@ static int soc_probe_codec(struct snd_soc_card *card, } /* If the driver didn't set I/O up try regmap */ - if (!codec->control_data) + if (!codec->write && dev_get_regmap(codec->dev, NULL)) snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); if (driver->controls) @@ -1481,6 +1481,8 @@ static int soc_check_aux_dev(struct snd_soc_card *card, int num) return 0; } + dev_err(card->dev, "%s not registered\n", aux_dev->codec_name); + return -EPROBE_DEFER; } diff --git a/trunk/sound/soc/soc-dapm.c b/trunk/sound/soc/soc-dapm.c index dd7c49fafd75..f90139b5f50d 100644 --- a/trunk/sound/soc/soc-dapm.c +++ b/trunk/sound/soc/soc-dapm.c @@ -291,8 +291,11 @@ static int snd_soc_dapm_set_bias_level(struct snd_soc_dapm_context *dapm, if (dapm->codec->driver->set_bias_level) ret = dapm->codec->driver->set_bias_level(dapm->codec, level); - } else + else + dapm->bias_level = level; + } else if (!card || dapm != &card->dapm) { dapm->bias_level = level; + } if (ret != 0) goto out; diff --git a/trunk/sound/soc/soc-jack.c b/trunk/sound/soc/soc-jack.c index 7f8b3b7428bb..0c172938b82a 100644 --- a/trunk/sound/soc/soc-jack.c +++ b/trunk/sound/soc/soc-jack.c @@ -103,7 +103,7 @@ void snd_soc_jack_report(struct snd_soc_jack *jack, int status, int mask) } /* Report before the DAPM sync to help users updating micbias status */ - blocking_notifier_call_chain(&jack->notifier, status, jack); + blocking_notifier_call_chain(&jack->notifier, jack->status, jack); snd_soc_dapm_sync(dapm); diff --git a/trunk/sound/soc/spear/spear_pcm.c b/trunk/sound/soc/spear/spear_pcm.c index 97c2cac8e92c..8c7f23729446 100644 --- a/trunk/sound/soc/spear/spear_pcm.c +++ b/trunk/sound/soc/spear/spear_pcm.c @@ -138,7 +138,7 @@ static void spear_pcm_free(struct snd_pcm *pcm) continue; buf = &substream->dma_buffer; - if (!buf && !buf->area) + if (!buf || !buf->area) continue; dma_free_writecombine(pcm->card->dev, buf->bytes, diff --git a/trunk/sound/soc/tegra/tegra_alc5632.c b/trunk/sound/soc/tegra/tegra_alc5632.c index d684df294c0c..76cb1b363b71 100644 --- a/trunk/sound/soc/tegra/tegra_alc5632.c +++ b/trunk/sound/soc/tegra/tegra_alc5632.c @@ -89,7 +89,6 @@ static struct snd_soc_jack_gpio tegra_alc5632_hp_jack_gpio = { .name = "Headset detection", .report = SND_JACK_HEADSET, .debounce_time = 150, - .invert = 1, }; static const struct snd_soc_dapm_widget tegra_alc5632_dapm_widgets[] = { @@ -177,7 +176,7 @@ static __devinit int tegra_alc5632_probe(struct platform_device *pdev) } alc5632->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0); - if (alc5632->gpio_hp_det == -ENODEV) + if (alc5632->gpio_hp_det == -EPROBE_DEFER) return -EPROBE_DEFER; ret = 
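[Editor's note] The spear_pcm fix above is a plain short-circuit correction: with &&, a NULL buf would still be dereferenced by buf->area, and a non-NULL buf with no area would never be skipped; with ||, the pointer is only dereferenced once it is known to be non-NULL. A tiny sketch of the corrected test, using an invented buffer type:

#include <linux/types.h>

struct example_buf {
        void *area;
};

static bool example_buf_unusable(const struct example_buf *buf)
{
        /* || short-circuits: buf->area is only read when buf != NULL */
        return !buf || !buf->area;
}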
snd_soc_of_parse_card_name(card, "nvidia,model"); diff --git a/trunk/sound/soc/tegra/tegra_pcm.c b/trunk/sound/soc/tegra/tegra_pcm.c index 5658bcec1931..8d6900c1ee47 100644 --- a/trunk/sound/soc/tegra/tegra_pcm.c +++ b/trunk/sound/soc/tegra/tegra_pcm.c @@ -334,11 +334,11 @@ static int tegra_pcm_hw_params(struct snd_pcm_substream *substream, if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; slave_config.dst_addr = dmap->addr; - slave_config.src_maxburst = 0; + slave_config.dst_maxburst = 4; } else { slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; slave_config.src_addr = dmap->addr; - slave_config.dst_maxburst = 0; + slave_config.src_maxburst = 4; } slave_config.slave_id = dmap->req_sel; diff --git a/trunk/sound/soc/tegra/tegra_wm8903.c b/trunk/sound/soc/tegra/tegra_wm8903.c index 0c5bb33d258e..d4f14e492341 100644 --- a/trunk/sound/soc/tegra/tegra_wm8903.c +++ b/trunk/sound/soc/tegra/tegra_wm8903.c @@ -284,27 +284,27 @@ static __devinit int tegra_wm8903_driver_probe(struct platform_device *pdev) } else if (np) { pdata->gpio_spkr_en = of_get_named_gpio(np, "nvidia,spkr-en-gpios", 0); - if (pdata->gpio_spkr_en == -ENODEV) + if (pdata->gpio_spkr_en == -EPROBE_DEFER) return -EPROBE_DEFER; pdata->gpio_hp_mute = of_get_named_gpio(np, "nvidia,hp-mute-gpios", 0); - if (pdata->gpio_hp_mute == -ENODEV) + if (pdata->gpio_hp_mute == -EPROBE_DEFER) return -EPROBE_DEFER; pdata->gpio_hp_det = of_get_named_gpio(np, "nvidia,hp-det-gpios", 0); - if (pdata->gpio_hp_det == -ENODEV) + if (pdata->gpio_hp_det == -EPROBE_DEFER) return -EPROBE_DEFER; pdata->gpio_int_mic_en = of_get_named_gpio(np, "nvidia,int-mic-en-gpios", 0); - if (pdata->gpio_int_mic_en == -ENODEV) + if (pdata->gpio_int_mic_en == -EPROBE_DEFER) return -EPROBE_DEFER; pdata->gpio_ext_mic_en = of_get_named_gpio(np, "nvidia,ext-mic-en-gpios", 0); - if (pdata->gpio_ext_mic_en == -ENODEV) + if (pdata->gpio_ext_mic_en == -EPROBE_DEFER) return -EPROBE_DEFER; } diff --git a/trunk/sound/soc/ux500/ux500_msp_dai.c b/trunk/sound/soc/ux500/ux500_msp_dai.c index 62ac0285bfaf..057e28ef770e 100644 --- a/trunk/sound/soc/ux500/ux500_msp_dai.c +++ b/trunk/sound/soc/ux500/ux500_msp_dai.c @@ -21,7 +21,7 @@ #include #include -#include +#include #include #include diff --git a/trunk/sound/soc/ux500/ux500_msp_i2s.c b/trunk/sound/soc/ux500/ux500_msp_i2s.c index ee14d2dac2f5..eb85113d472a 100644 --- a/trunk/sound/soc/ux500/ux500_msp_i2s.c +++ b/trunk/sound/soc/ux500/ux500_msp_i2s.c @@ -19,7 +19,7 @@ #include #include -#include +#include #include @@ -663,7 +663,6 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev, struct ux500_msp **msp_p, struct msp_i2s_platform_data *platform_data) { - int ret = 0; struct resource *res = NULL; struct i2s_controller *i2s_cont; struct ux500_msp *msp; @@ -685,15 +684,14 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev, if (res == NULL) { dev_err(&pdev->dev, "%s: ERROR: Unable to get resource!\n", __func__); - ret = -ENOMEM; - goto err_res; + return -ENOMEM; } - msp->registers = ioremap(res->start, (res->end - res->start + 1)); + msp->registers = devm_ioremap(&pdev->dev, res->start, + resource_size(res)); if (msp->registers == NULL) { dev_err(&pdev->dev, "%s: ERROR: ioremap failed!\n", __func__); - ret = -ENOMEM; - goto err_res; + return -ENOMEM; } msp->msp_state = MSP_STATE_IDLE; @@ -705,7 +703,7 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev, dev_err(&pdev->dev, "%s: ERROR: Failed to allocate I2S-controller!\n", __func__); - goto 
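[Editor's note] The tegra hunks above change which error triggers probe deferral: of_get_named_gpio() reports failures as a negative errno in its return value, and -EPROBE_DEFER (not -ENODEV) is what indicates the GPIO controller has not probed yet. A hedged sketch of that check with an invented device-tree property name:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

static int example_get_det_gpio(struct device_node *np)
{
        int gpio = of_get_named_gpio(np, "example,det-gpios", 0);

        if (gpio == -EPROBE_DEFER)
                return -EPROBE_DEFER;   /* GPIO provider not ready; retry probe later */

        return gpio;                    /* a valid GPIO number, or another error code */
}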
err_i2s_cont; + return -ENOMEM; } i2s_cont->dev.parent = &pdev->dev; i2s_cont->data = (void *)msp; @@ -716,14 +714,6 @@ int ux500_msp_i2s_init_msp(struct platform_device *pdev, msp->i2s_cont = i2s_cont; return 0; - -err_i2s_cont: - iounmap(msp->registers); - -err_res: - devm_kfree(&pdev->dev, msp); - - return ret; } void ux500_msp_i2s_cleanup_msp(struct platform_device *pdev, @@ -732,11 +722,6 @@ void ux500_msp_i2s_cleanup_msp(struct platform_device *pdev, dev_dbg(msp->dev, "%s: Enter (id = %d).\n", __func__, msp->id); device_unregister(&msp->i2s_cont->dev); - devm_kfree(&pdev->dev, msp->i2s_cont); - - iounmap(msp->registers); - - devm_kfree(&pdev->dev, msp); } MODULE_LICENSE("GPL v2"); diff --git a/trunk/sound/soc/ux500/ux500_msp_i2s.h b/trunk/sound/soc/ux500/ux500_msp_i2s.h index 7f71b4a0d4bc..2d9136da9865 100644 --- a/trunk/sound/soc/ux500/ux500_msp_i2s.h +++ b/trunk/sound/soc/ux500/ux500_msp_i2s.h @@ -17,7 +17,7 @@ #include -#include +#include #define MSP_INPUT_FREQ_APB 48000000 diff --git a/trunk/sound/usb/card.c b/trunk/sound/usb/card.c index d5b5c3388e28..4a469f0cb6d4 100644 --- a/trunk/sound/usb/card.c +++ b/trunk/sound/usb/card.c @@ -553,7 +553,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, struct snd_usb_audio *chip) { struct snd_card *card; - struct list_head *p; + struct list_head *p, *n; if (chip == (void *)-1L) return; @@ -570,7 +570,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, snd_usb_stream_disconnect(p); } /* release the endpoint resources */ - list_for_each(p, &chip->ep_list) { + list_for_each_safe(p, n, &chip->ep_list) { snd_usb_endpoint_free(p); } /* release the midi resources */ diff --git a/trunk/sound/usb/endpoint.c b/trunk/sound/usb/endpoint.c index 0f647d22cb4a..060dccb9ec75 100644 --- a/trunk/sound/usb/endpoint.c +++ b/trunk/sound/usb/endpoint.c @@ -141,7 +141,7 @@ int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep) * * For implicit feedback, next_packet_size() is unused. */ -static int next_packet_size(struct snd_usb_endpoint *ep) +int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep) { unsigned long flags; int ret; @@ -177,15 +177,6 @@ static void retire_inbound_urb(struct snd_usb_endpoint *ep, ep->retire_data_urb(ep->data_subs, urb); } -static void prepare_outbound_urb_sizes(struct snd_usb_endpoint *ep, - struct snd_urb_ctx *ctx) -{ - int i; - - for (i = 0; i < ctx->packets; ++i) - ctx->packet_size[i] = next_packet_size(ep); -} - /* * Prepare a PLAYBACK urb for submission to the bus. 
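[Editor's note] The ux500 conversion above replaces ioremap()/iounmap() with devm_ioremap(), so the mapping is released automatically when the device is unbound and the manual unwind labels can be deleted. A minimal sketch of the same pattern (function name is illustrative):

#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static void __iomem *example_map_registers(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (!res)
                return NULL;

        /* Released automatically on driver detach; no iounmap() needed. */
        return devm_ioremap(&pdev->dev, res->start, resource_size(res));
}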
*/ @@ -206,7 +197,13 @@ static void prepare_outbound_urb(struct snd_usb_endpoint *ep, /* no data provider, so send silence */ unsigned int offs = 0; for (i = 0; i < ctx->packets; ++i) { - int counts = ctx->packet_size[i]; + int counts; + + if (ctx->packet_size[i]) + counts = ctx->packet_size[i]; + else + counts = snd_usb_endpoint_next_packet_size(ep); + urb->iso_frame_desc[i].offset = offs * ep->stride; urb->iso_frame_desc[i].length = counts * ep->stride; offs += counts; @@ -370,7 +367,6 @@ static void snd_complete_urb(struct urb *urb) goto exit_clear; } - prepare_outbound_urb_sizes(ep, ctx); prepare_outbound_urb(ep, ctx); } else { retire_inbound_urb(ep, ctx); @@ -799,7 +795,9 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, /** * snd_usb_endpoint_start: start an snd_usb_endpoint * - * @ep: the endpoint to start + * @ep: the endpoint to start + * @can_sleep: flag indicating whether the operation is executed in + * non-atomic context * * A call to this function will increment the use count of the endpoint. * In case it is not already running, the URBs for this endpoint will be @@ -809,7 +807,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, * * Returns an error if the URB submission failed, 0 in all other cases. */ -int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) +int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep) { int err; unsigned int i; @@ -822,8 +820,9 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) return 0; /* just to be sure */ - deactivate_urbs(ep, 0, 1); - wait_clear_urbs(ep); + deactivate_urbs(ep, 0, can_sleep); + if (can_sleep) + wait_clear_urbs(ep); ep->active_mask = 0; ep->unlink_mask = 0; @@ -854,7 +853,6 @@ int snd_usb_endpoint_start(struct snd_usb_endpoint *ep) goto __error; if (usb_pipeout(ep->pipe)) { - prepare_outbound_urb_sizes(ep, urb->context); prepare_outbound_urb(ep, urb->context); } else { prepare_inbound_urb(ep, urb->context); diff --git a/trunk/sound/usb/endpoint.h b/trunk/sound/usb/endpoint.h index ee2723fb174f..cbbbdf226d66 100644 --- a/trunk/sound/usb/endpoint.h +++ b/trunk/sound/usb/endpoint.h @@ -13,7 +13,7 @@ int snd_usb_endpoint_set_params(struct snd_usb_endpoint *ep, struct audioformat *fmt, struct snd_usb_endpoint *sync_ep); -int snd_usb_endpoint_start(struct snd_usb_endpoint *ep); +int snd_usb_endpoint_start(struct snd_usb_endpoint *ep, int can_sleep); void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep, int force, int can_sleep, int wait); int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); @@ -21,6 +21,7 @@ int snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); void snd_usb_endpoint_free(struct list_head *head); int snd_usb_endpoint_implict_feedback_sink(struct snd_usb_endpoint *ep); +int snd_usb_endpoint_next_packet_size(struct snd_usb_endpoint *ep); void snd_usb_handle_sync_urb(struct snd_usb_endpoint *ep, struct snd_usb_endpoint *sender, diff --git a/trunk/sound/usb/pcm.c b/trunk/sound/usb/pcm.c index a1298f379428..f782ce19bf5a 100644 --- a/trunk/sound/usb/pcm.c +++ b/trunk/sound/usb/pcm.c @@ -212,7 +212,7 @@ int snd_usb_init_pitch(struct snd_usb_audio *chip, int iface, } } -static int start_endpoints(struct snd_usb_substream *subs) +static int start_endpoints(struct snd_usb_substream *subs, int can_sleep) { int err; @@ -225,7 +225,7 @@ static int start_endpoints(struct snd_usb_substream *subs) snd_printdd(KERN_DEBUG "Starting data EP @%p\n", ep); ep->data_subs = subs; - err = snd_usb_endpoint_start(ep); + err = snd_usb_endpoint_start(ep, can_sleep); if (err < 
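[Editor's note] The snd_usb_audio_disconnect() hunk further above switches from list_for_each() to list_for_each_safe() because each iteration frees the node the cursor points at, so the next pointer has to be fetched before the node is released. A self-contained sketch of why the safe variant is required when freeing entries (types and names are made up):

#include <linux/list.h>
#include <linux/slab.h>

struct example_node {
        struct list_head list;
        /* payload omitted in this sketch */
};

static void example_free_all(struct list_head *head)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, head) {
                struct example_node *node = list_entry(p, struct example_node, list);

                list_del(&node->list);
                kfree(node);    /* safe: the next pointer was saved in 'n' first */
        }
}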
0) { clear_bit(SUBSTREAM_FLAG_DATA_EP_STARTED, &subs->flags); return err; @@ -236,10 +236,25 @@ static int start_endpoints(struct snd_usb_substream *subs) !test_and_set_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags)) { struct snd_usb_endpoint *ep = subs->sync_endpoint; + if (subs->data_endpoint->iface != subs->sync_endpoint->iface || + subs->data_endpoint->alt_idx != subs->sync_endpoint->alt_idx) { + err = usb_set_interface(subs->dev, + subs->sync_endpoint->iface, + subs->sync_endpoint->alt_idx); + if (err < 0) { + snd_printk(KERN_ERR + "%d:%d:%d: cannot set interface (%d)\n", + subs->dev->devnum, + subs->sync_endpoint->iface, + subs->sync_endpoint->alt_idx, err); + return -EIO; + } + } + snd_printdd(KERN_DEBUG "Starting sync EP @%p\n", ep); ep->sync_slave = subs->data_endpoint; - err = snd_usb_endpoint_start(ep); + err = snd_usb_endpoint_start(ep, can_sleep); if (err < 0) { clear_bit(SUBSTREAM_FLAG_SYNC_EP_STARTED, &subs->flags); return err; @@ -547,7 +562,7 @@ static int snd_usb_pcm_prepare(struct snd_pcm_substream *substream) /* for playback, submit the URBs now; otherwise, the first hwptr_done * updates for all URBs would happen at the same time when starting */ if (subs->direction == SNDRV_PCM_STREAM_PLAYBACK) - return start_endpoints(subs); + return start_endpoints(subs, 1); return 0; } @@ -1029,6 +1044,7 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, struct urb *urb) { struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; + struct snd_usb_endpoint *ep = subs->data_endpoint; struct snd_urb_ctx *ctx = urb->context; unsigned int counts, frames, bytes; int i, stride, period_elapsed = 0; @@ -1040,7 +1056,11 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, urb->number_of_packets = 0; spin_lock_irqsave(&subs->lock, flags); for (i = 0; i < ctx->packets; i++) { - counts = ctx->packet_size[i]; + if (ctx->packet_size[i]) + counts = ctx->packet_size[i]; + else + counts = snd_usb_endpoint_next_packet_size(ep); + /* set up descriptor */ urb->iso_frame_desc[i].offset = frames * stride; urb->iso_frame_desc[i].length = counts * stride; @@ -1091,7 +1111,16 @@ static void prepare_playback_urb(struct snd_usb_substream *subs, subs->hwptr_done += bytes; if (subs->hwptr_done >= runtime->buffer_size * stride) subs->hwptr_done -= runtime->buffer_size * stride; + + /* update delay with exact number of samples queued */ + runtime->delay = subs->last_delay; runtime->delay += frames; + subs->last_delay = runtime->delay; + + /* realign last_frame_number */ + subs->last_frame_number = usb_get_current_frame_number(subs->dev); + subs->last_frame_number &= 0xFF; /* keep 8 LSBs */ + spin_unlock_irqrestore(&subs->lock, flags); urb->transfer_buffer_length = bytes; if (period_elapsed) @@ -1109,12 +1138,32 @@ static void retire_playback_urb(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime = subs->pcm_substream->runtime; int stride = runtime->frame_bits >> 3; int processed = urb->transfer_buffer_length / stride; + int est_delay; + + /* ignore the delay accounting when procssed=0 is given, i.e. 
+ * silent payloads are procssed before handling the actual data + */ + if (!processed) + return; spin_lock_irqsave(&subs->lock, flags); - if (processed > runtime->delay) - runtime->delay = 0; + est_delay = snd_usb_pcm_delay(subs, runtime->rate); + /* update delay with exact number of samples played */ + if (processed > subs->last_delay) + subs->last_delay = 0; else - runtime->delay -= processed; + subs->last_delay -= processed; + runtime->delay = subs->last_delay; + + /* + * Report when delay estimate is off by more than 2ms. + * The error should be lower than 2ms since the estimate relies + * on two reads of a counter updated every ms. + */ + if (abs(est_delay - subs->last_delay) * 1000 > runtime->rate * 2) + snd_printk(KERN_DEBUG "delay: estimated %d, actual %d\n", + est_delay, subs->last_delay); + spin_unlock_irqrestore(&subs->lock, flags); } @@ -1172,7 +1221,7 @@ static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream switch (cmd) { case SNDRV_PCM_TRIGGER_START: - err = start_endpoints(subs); + err = start_endpoints(subs, 0); if (err < 0) return err; diff --git a/trunk/tools/lib/traceevent/Makefile b/trunk/tools/lib/traceevent/Makefile index 14131cb0522d..04d959fa0226 100644 --- a/trunk/tools/lib/traceevent/Makefile +++ b/trunk/tools/lib/traceevent/Makefile @@ -129,7 +129,7 @@ CFLAGS ?= -g -Wall # Append required CFLAGS override CFLAGS += $(CONFIG_FLAGS) $(INCLUDES) $(PLUGIN_DIR_SQ) -override CFLAGS += $(udis86-flags) +override CFLAGS += $(udis86-flags) -D_GNU_SOURCE ifeq ($(VERBOSE),1) Q = diff --git a/trunk/tools/lib/traceevent/event-parse.c b/trunk/tools/lib/traceevent/event-parse.c index 5f34aa371b56..47264b4652b9 100644 --- a/trunk/tools/lib/traceevent/event-parse.c +++ b/trunk/tools/lib/traceevent/event-parse.c @@ -24,13 +24,14 @@ * Frederic Weisbecker gave his permission to relicense the code to * the Lesser General Public License. 
*/ -#define _GNU_SOURCE #include #include #include #include #include #include +#include +#include #include "event-parse.h" #include "event-utils.h" @@ -117,14 +118,7 @@ void breakpoint(void) struct print_arg *alloc_arg(void) { - struct print_arg *arg; - - arg = malloc_or_die(sizeof(*arg)); - if (!arg) - return NULL; - memset(arg, 0, sizeof(*arg)); - - return arg; + return calloc(1, sizeof(struct print_arg)); } struct cmdline { @@ -158,7 +152,9 @@ static int cmdline_init(struct pevent *pevent) struct cmdline *cmdlines; int i; - cmdlines = malloc_or_die(sizeof(*cmdlines) * pevent->cmdline_count); + cmdlines = malloc(sizeof(*cmdlines) * pevent->cmdline_count); + if (!cmdlines) + return -1; i = 0; while (cmdlist) { @@ -186,8 +182,8 @@ static char *find_cmdline(struct pevent *pevent, int pid) if (!pid) return ""; - if (!pevent->cmdlines) - cmdline_init(pevent); + if (!pevent->cmdlines && cmdline_init(pevent)) + return ""; key.pid = pid; @@ -215,8 +211,8 @@ int pevent_pid_is_registered(struct pevent *pevent, int pid) if (!pid) return 1; - if (!pevent->cmdlines) - cmdline_init(pevent); + if (!pevent->cmdlines && cmdline_init(pevent)) + return 0; key.pid = pid; @@ -258,10 +254,14 @@ static int add_new_comm(struct pevent *pevent, const char *comm, int pid) return -1; } - cmdlines[pevent->cmdline_count].pid = pid; cmdlines[pevent->cmdline_count].comm = strdup(comm); - if (!cmdlines[pevent->cmdline_count].comm) - die("malloc comm"); + if (!cmdlines[pevent->cmdline_count].comm) { + free(cmdlines); + errno = ENOMEM; + return -1; + } + + cmdlines[pevent->cmdline_count].pid = pid; if (cmdlines[pevent->cmdline_count].comm) pevent->cmdline_count++; @@ -288,10 +288,15 @@ int pevent_register_comm(struct pevent *pevent, const char *comm, int pid) if (pevent->cmdlines) return add_new_comm(pevent, comm, pid); - item = malloc_or_die(sizeof(*item)); + item = malloc(sizeof(*item)); + if (!item) + return -1; + item->comm = strdup(comm); - if (!item->comm) - die("malloc comm"); + if (!item->comm) { + free(item); + return -1; + } item->pid = pid; item->next = pevent->cmdlist; @@ -355,7 +360,10 @@ static int func_map_init(struct pevent *pevent) struct func_map *func_map; int i; - func_map = malloc_or_die(sizeof(*func_map) * (pevent->func_count + 1)); + func_map = malloc(sizeof(*func_map) * (pevent->func_count + 1)); + if (!func_map) + return -1; + funclist = pevent->funclist; i = 0; @@ -455,25 +463,36 @@ pevent_find_function_address(struct pevent *pevent, unsigned long long addr) int pevent_register_function(struct pevent *pevent, char *func, unsigned long long addr, char *mod) { - struct func_list *item; + struct func_list *item = malloc(sizeof(*item)); - item = malloc_or_die(sizeof(*item)); + if (!item) + return -1; item->next = pevent->funclist; item->func = strdup(func); - if (mod) + if (!item->func) + goto out_free; + + if (mod) { item->mod = strdup(mod); - else + if (!item->mod) + goto out_free_func; + } else item->mod = NULL; item->addr = addr; - if (!item->func || (mod && !item->mod)) - die("malloc func"); - pevent->funclist = item; pevent->func_count++; return 0; + +out_free_func: + free(item->func); + item->func = NULL; +out_free: + free(item); + errno = ENOMEM; + return -1; } /** @@ -524,14 +543,16 @@ static int printk_cmp(const void *a, const void *b) return 0; } -static void printk_map_init(struct pevent *pevent) +static int printk_map_init(struct pevent *pevent) { struct printk_list *printklist; struct printk_list *item; struct printk_map *printk_map; int i; - printk_map = 
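[Editor's note] The event-parse.c hunks above follow one theme: drop malloc_or_die()/die() so the library reports allocation failures to its caller instead of terminating the process, and use calloc() where the buffer must start zeroed. A standalone userspace sketch of that convention; the structure and function names are placeholders, not the library's own:

#include <errno.h>
#include <stdlib.h>

struct example_arg {
        int type;
        /* further fields omitted in this sketch */
};

static struct example_arg *example_alloc_arg(void)
{
        /* calloc() returns zeroed memory, replacing malloc() + memset() */
        return calloc(1, sizeof(struct example_arg));
}

static int example_register(struct example_arg **out)
{
        struct example_arg *arg = example_alloc_arg();

        if (!arg) {
                errno = ENOMEM;         /* report the failure, do not die() */
                return -1;
        }

        *out = arg;
        return 0;
}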
malloc_or_die(sizeof(*printk_map) * (pevent->printk_count + 1)); + printk_map = malloc(sizeof(*printk_map) * (pevent->printk_count + 1)); + if (!printk_map) + return -1; printklist = pevent->printklist; @@ -549,6 +570,8 @@ static void printk_map_init(struct pevent *pevent) pevent->printk_map = printk_map; pevent->printklist = NULL; + + return 0; } static struct printk_map * @@ -557,8 +580,8 @@ find_printk(struct pevent *pevent, unsigned long long addr) struct printk_map *printk; struct printk_map key; - if (!pevent->printk_map) - printk_map_init(pevent); + if (!pevent->printk_map && printk_map_init(pevent)) + return NULL; key.addr = addr; @@ -580,21 +603,27 @@ find_printk(struct pevent *pevent, unsigned long long addr) int pevent_register_print_string(struct pevent *pevent, char *fmt, unsigned long long addr) { - struct printk_list *item; + struct printk_list *item = malloc(sizeof(*item)); - item = malloc_or_die(sizeof(*item)); + if (!item) + return -1; item->next = pevent->printklist; - item->printk = strdup(fmt); item->addr = addr; + item->printk = strdup(fmt); if (!item->printk) - die("malloc fmt"); + goto out_free; pevent->printklist = item; pevent->printk_count++; return 0; + +out_free: + free(item); + errno = ENOMEM; + return -1; } /** @@ -619,24 +648,18 @@ void pevent_print_printk(struct pevent *pevent) static struct event_format *alloc_event(void) { - struct event_format *event; - - event = malloc(sizeof(*event)); - if (!event) - return NULL; - memset(event, 0, sizeof(*event)); - - return event; + return calloc(1, sizeof(struct event_format)); } -static void add_event(struct pevent *pevent, struct event_format *event) +static int add_event(struct pevent *pevent, struct event_format *event) { int i; + struct event_format **events = realloc(pevent->events, sizeof(event) * + (pevent->nr_events + 1)); + if (!events) + return -1; - pevent->events = realloc(pevent->events, sizeof(event) * - (pevent->nr_events + 1)); - if (!pevent->events) - die("Can not allocate events"); + pevent->events = events; for (i = 0; i < pevent->nr_events; i++) { if (pevent->events[i]->id > event->id) @@ -651,6 +674,8 @@ static void add_event(struct pevent *pevent, struct event_format *event) pevent->nr_events++; event->pevent = pevent; + + return 0; } static int event_item_type(enum event_type type) @@ -827,9 +852,9 @@ static enum event_type __read_token(char **tok) switch (type) { case EVENT_NEWLINE: case EVENT_DELIM: - *tok = malloc_or_die(2); - (*tok)[0] = ch; - (*tok)[1] = 0; + if (asprintf(tok, "%c", ch) < 0) + return EVENT_ERROR; + return type; case EVENT_OP: @@ -1240,8 +1265,10 @@ static int event_read_fields(struct event_format *event, struct format_field **f last_token = token; - field = malloc_or_die(sizeof(*field)); - memset(field, 0, sizeof(*field)); + field = calloc(1, sizeof(*field)); + if (!field) + goto fail; + field->event = event; /* read the rest of the type */ @@ -1282,7 +1309,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f } if (!field->type) { - die("no type found"); + do_warning("%s: no type found", __func__); goto fail; } field->name = last_token; @@ -1329,7 +1356,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f free_token(token); type = read_token(&token); if (type == EVENT_NONE) { - die("failed to find token"); + do_warning("failed to find token"); goto fail; } } @@ -1538,6 +1565,14 @@ process_cond(struct event_format *event, struct print_arg *top, char **tok) left = alloc_arg(); right = alloc_arg(); + if (!arg 
|| !left || !right) { + do_warning("%s: not enough memory!", __func__); + /* arg will be freed at out_free */ + free_arg(left); + free_arg(right); + goto out_free; + } + arg->type = PRINT_OP; arg->op.left = left; arg->op.right = right; @@ -1580,6 +1615,12 @@ process_array(struct event_format *event, struct print_arg *top, char **tok) char *token = NULL; arg = alloc_arg(); + if (!arg) { + do_warning("%s: not enough memory!", __func__); + /* '*tok' is set to top->op.op. No need to free. */ + *tok = NULL; + return EVENT_ERROR; + } *tok = NULL; type = process_arg(event, arg, &token); @@ -1595,8 +1636,7 @@ process_array(struct event_format *event, struct print_arg *top, char **tok) return type; out_free: - free_token(*tok); - *tok = NULL; + free_token(token); free_arg(arg); return EVENT_ERROR; } @@ -1682,7 +1722,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) if (arg->type == PRINT_OP && !arg->op.left) { /* handle single op */ if (token[1]) { - die("bad op token %s", token); + do_warning("bad op token %s", token); goto out_free; } switch (token[0]) { @@ -1699,10 +1739,16 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) /* make an empty left */ left = alloc_arg(); + if (!left) + goto out_warn_free; + left->type = PRINT_NULL; arg->op.left = left; right = alloc_arg(); + if (!right) + goto out_warn_free; + arg->op.right = right; /* do not free the token, it belongs to an op */ @@ -1712,6 +1758,9 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) } else if (strcmp(token, "?") == 0) { left = alloc_arg(); + if (!left) + goto out_warn_free; + /* copy the top arg to the left */ *left = *arg; @@ -1720,6 +1769,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) arg->op.left = left; arg->op.prio = 0; + /* it will set arg->op.right */ type = process_cond(event, arg, tok); } else if (strcmp(token, ">>") == 0 || @@ -1739,6 +1789,8 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) strcmp(token, "!=") == 0) { left = alloc_arg(); + if (!left) + goto out_warn_free; /* copy the top arg to the left */ *left = *arg; @@ -1746,6 +1798,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) arg->type = PRINT_OP; arg->op.op = token; arg->op.left = left; + arg->op.right = NULL; if (set_op_prio(arg) == -1) { event->flags |= EVENT_FL_FAILED; @@ -1762,12 +1815,14 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) type == EVENT_DELIM && (strcmp(token, ")") == 0)) { char *new_atom; - if (left->type != PRINT_ATOM) - die("bad pointer type"); + if (left->type != PRINT_ATOM) { + do_warning("bad pointer type"); + goto out_free; + } new_atom = realloc(left->atom.atom, strlen(left->atom.atom) + 3); if (!new_atom) - goto out_free; + goto out_warn_free; left->atom.atom = new_atom; strcat(left->atom.atom, " *"); @@ -1779,12 +1834,18 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) } right = alloc_arg(); + if (!right) + goto out_warn_free; + type = process_arg_token(event, right, tok, type); arg->op.right = right; } else if (strcmp(token, "[") == 0) { left = alloc_arg(); + if (!left) + goto out_warn_free; + *left = *arg; arg->type = PRINT_OP; @@ -1793,6 +1854,7 @@ process_op(struct event_format *event, struct print_arg *arg, char **tok) arg->op.prio = 0; + /* it will set arg->op.right */ type = process_array(event, arg, tok); } else { @@ -1816,14 +1878,16 @@ process_op(struct event_format *event, struct print_arg *arg, char 
**tok) return type; - out_free: +out_warn_free: + do_warning("%s: not enough memory!", __func__); +out_free: free_token(token); *tok = NULL; return EVENT_ERROR; } static enum event_type -process_entry(struct event_format *event __unused, struct print_arg *arg, +process_entry(struct event_format *event __maybe_unused, struct print_arg *arg, char **tok) { enum event_type type; @@ -1880,7 +1944,11 @@ eval_type_str(unsigned long long val, const char *type, int pointer) return val; } - ref = malloc_or_die(len); + ref = malloc(len); + if (!ref) { + do_warning("%s: not enough memory!", __func__); + return val; + } memcpy(ref, type, len); /* chop off the " *" */ @@ -1957,8 +2025,10 @@ eval_type_str(unsigned long long val, const char *type, int pointer) static unsigned long long eval_type(unsigned long long val, struct print_arg *arg, int pointer) { - if (arg->type != PRINT_TYPE) - die("expected type argument"); + if (arg->type != PRINT_TYPE) { + do_warning("expected type argument"); + return 0; + } return eval_type_str(val, arg->typecast.type, pointer); } @@ -2143,7 +2213,7 @@ static char *arg_eval (struct print_arg *arg) case PRINT_STRING: case PRINT_BSTRING: default: - die("invalid eval type %d", arg->type); + do_warning("invalid eval type %d", arg->type); break; } @@ -2166,6 +2236,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char ** break; arg = alloc_arg(); + if (!arg) + goto out_free; free_token(token); type = process_arg(event, arg, &token); @@ -2179,30 +2251,33 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char ** if (test_type_token(type, token, EVENT_DELIM, ",")) goto out_free; - field = malloc_or_die(sizeof(*field)); - memset(field, 0, sizeof(*field)); + field = calloc(1, sizeof(*field)); + if (!field) + goto out_free; value = arg_eval(arg); if (value == NULL) - goto out_free; + goto out_free_field; field->value = strdup(value); if (field->value == NULL) - goto out_free; + goto out_free_field; free_arg(arg); arg = alloc_arg(); + if (!arg) + goto out_free; free_token(token); type = process_arg(event, arg, &token); if (test_type_token(type, token, EVENT_OP, "}")) - goto out_free; + goto out_free_field; value = arg_eval(arg); if (value == NULL) - goto out_free; + goto out_free_field; field->str = strdup(value); if (field->str == NULL) - goto out_free; + goto out_free_field; free_arg(arg); arg = NULL; @@ -2216,6 +2291,8 @@ process_fields(struct event_format *event, struct print_flag_sym **list, char ** *tok = token; return type; +out_free_field: + free_flag_sym(field); out_free: free_arg(arg); free_token(token); @@ -2235,6 +2312,10 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok) arg->type = PRINT_FLAGS; field = alloc_arg(); + if (!field) { + do_warning("%s: not enough memory!", __func__); + goto out_free; + } type = process_arg(event, field, &token); @@ -2243,7 +2324,7 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok) type = process_op(event, field, &token); if (test_type_token(type, token, EVENT_DELIM, ",")) - goto out_free; + goto out_free_field; free_token(token); arg->flags.field = field; @@ -2265,7 +2346,9 @@ process_flags(struct event_format *event, struct print_arg *arg, char **tok) type = read_token_item(tok); return type; - out_free: +out_free_field: + free_arg(field); +out_free: free_token(token); *tok = NULL; return EVENT_ERROR; @@ -2282,10 +2365,14 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok) arg->type = PRINT_SYMBOL; 
field = alloc_arg(); + if (!field) { + do_warning("%s: not enough memory!", __func__); + goto out_free; + } type = process_arg(event, field, &token); if (test_type_token(type, token, EVENT_DELIM, ",")) - goto out_free; + goto out_free_field; arg->symbol.field = field; @@ -2297,7 +2384,9 @@ process_symbols(struct event_format *event, struct print_arg *arg, char **tok) type = read_token_item(tok); return type; - out_free: +out_free_field: + free_arg(field); +out_free: free_token(token); *tok = NULL; return EVENT_ERROR; @@ -2314,6 +2403,11 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok) arg->type = PRINT_HEX; field = alloc_arg(); + if (!field) { + do_warning("%s: not enough memory!", __func__); + goto out_free; + } + type = process_arg(event, field, &token); if (test_type_token(type, token, EVENT_DELIM, ",")) @@ -2324,6 +2418,12 @@ process_hex(struct event_format *event, struct print_arg *arg, char **tok) free_token(token); field = alloc_arg(); + if (!field) { + do_warning("%s: not enough memory!", __func__); + *tok = NULL; + return EVENT_ERROR; + } + type = process_arg(event, field, &token); if (test_type_token(type, token, EVENT_DELIM, ")")) @@ -2381,6 +2481,12 @@ process_dynamic_array(struct event_format *event, struct print_arg *arg, char ** free_token(token); arg = alloc_arg(); + if (!field) { + do_warning("%s: not enough memory!", __func__); + *tok = NULL; + return EVENT_ERROR; + } + type = process_arg(event, arg, &token); if (type == EVENT_ERROR) goto out_free_arg; @@ -2434,10 +2540,16 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok) /* make this a typecast and contine */ /* prevous must be an atom */ - if (arg->type != PRINT_ATOM) - die("previous needed to be PRINT_ATOM"); + if (arg->type != PRINT_ATOM) { + do_warning("previous needed to be PRINT_ATOM"); + goto out_free; + } item_arg = alloc_arg(); + if (!item_arg) { + do_warning("%s: not enough memory!", __func__); + goto out_free; + } arg->type = PRINT_TYPE; arg->typecast.type = arg->atom.atom; @@ -2457,7 +2569,8 @@ process_paren(struct event_format *event, struct print_arg *arg, char **tok) static enum event_type -process_str(struct event_format *event __unused, struct print_arg *arg, char **tok) +process_str(struct event_format *event __maybe_unused, struct print_arg *arg, + char **tok) { enum event_type type; char *token; @@ -2532,6 +2645,11 @@ process_func_handler(struct event_format *event, struct pevent_function_handler next_arg = &(arg->func.args); for (i = 0; i < func->nr_args; i++) { farg = alloc_arg(); + if (!farg) { + do_warning("%s: not enough memory!", __func__); + return EVENT_ERROR; + } + type = process_arg(event, farg, &token); if (i < (func->nr_args - 1)) test = ","; @@ -2676,7 +2794,8 @@ process_arg_token(struct event_format *event, struct print_arg *arg, case EVENT_ERROR ... 
EVENT_NEWLINE: default: - die("unexpected type %d", type); + do_warning("unexpected type %d", type); + return EVENT_ERROR; } *tok = token; @@ -2697,6 +2816,10 @@ static int event_read_print_args(struct event_format *event, struct print_arg ** } arg = alloc_arg(); + if (!arg) { + do_warning("%s: not enough memory!", __func__); + return -1; + } type = process_arg(event, arg, &token); @@ -2768,10 +2891,8 @@ static int event_read_print(struct event_format *event) if (type == EVENT_DQUOTE) { char *cat; - cat = malloc_or_die(strlen(event->print_fmt.format) + - strlen(token) + 1); - strcpy(cat, event->print_fmt.format); - strcat(cat, token); + if (asprintf(&cat, "%s%s", event->print_fmt.format, token) < 0) + goto fail; free_token(token); free_token(event->print_fmt.format); event->print_fmt.format = NULL; @@ -2925,8 +3046,10 @@ static int get_common_info(struct pevent *pevent, * All events should have the same common elements. * Pick any event to find where the type is; */ - if (!pevent->events) - die("no event_list!"); + if (!pevent->events) { + do_warning("no event_list!"); + return -1; + } event = pevent->events[0]; field = pevent_find_common_field(event, type); @@ -3084,7 +3207,8 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg if (!arg->field.field) { arg->field.field = pevent_find_any_field(event, arg->field.name); if (!arg->field.field) - die("field %s not found", arg->field.name); + goto out_warning_field; + } /* must be a number */ val = pevent_read_number(pevent, data + arg->field.field->offset, @@ -3145,8 +3269,10 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg if (!larg->field.field) { larg->field.field = pevent_find_any_field(event, larg->field.name); - if (!larg->field.field) - die("field %s not found", larg->field.name); + if (!larg->field.field) { + arg = larg; + goto out_warning_field; + } } field_size = larg->field.field->elementsize; offset = larg->field.field->offset + @@ -3182,7 +3308,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg val = left != right; break; default: - die("unknown op '%s'", arg->op.op); + goto out_warning_op; } break; case '~': @@ -3212,7 +3338,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg val = left <= right; break; default: - die("unknown op '%s'", arg->op.op); + goto out_warning_op; } break; case '>': @@ -3227,12 +3353,13 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg val = left >= right; break; default: - die("unknown op '%s'", arg->op.op); + goto out_warning_op; } break; case '=': if (arg->op.op[1] != '=') - die("unknown op '%s'", arg->op.op); + goto out_warning_op; + val = left == right; break; case '-': @@ -3248,13 +3375,21 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg val = left * right; break; default: - die("unknown op '%s'", arg->op.op); + goto out_warning_op; } break; default: /* not sure what to do there */ return 0; } return val; + +out_warning_op: + do_warning("%s: unknown op '%s'", __func__, arg->op.op); + return 0; + +out_warning_field: + do_warning("%s: field %s not found", __func__, arg->field.name); + return 0; } struct flag { @@ -3331,8 +3466,10 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, field = arg->field.field; if (!field) { field = pevent_find_any_field(event, arg->field.name); - if (!field) - die("field %s not found", arg->field.name); + if (!field) { + str = arg->field.name; + goto 
out_warning_field; + } arg->field.field = field; } /* Zero sized fields, mean the rest of the data */ @@ -3349,7 +3486,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, trace_seq_printf(s, "%lx", addr); break; } - str = malloc_or_die(len + 1); + str = malloc(len + 1); + if (!str) { + do_warning("%s: not enough memory!", __func__); + return; + } memcpy(str, data + field->offset, len); str[len] = 0; print_str_to_seq(s, format, len_arg, str); @@ -3389,7 +3530,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, str = arg->hex.field->field.name; field = pevent_find_any_field(event, str); if (!field) - die("field %s not found", str); + goto out_warning_field; arg->hex.field->field.field = field; } hex = data + field->offset; @@ -3441,6 +3582,11 @@ static void print_str_arg(struct trace_seq *s, void *data, int size, /* well... */ break; } + + return; + +out_warning_field: + do_warning("%s: field %s not found", __func__, arg->field.name); } static unsigned long long @@ -3467,7 +3613,11 @@ process_defined_func(struct trace_seq *s, void *data, int size, farg = arg->func.args; param = func_handle->params; - args = malloc_or_die(sizeof(*args) * func_handle->nr_args); + ret = ULLONG_MAX; + args = malloc(sizeof(*args) * func_handle->nr_args); + if (!args) + goto out; + for (i = 0; i < func_handle->nr_args; i++) { switch (param->type) { case PEVENT_FUNC_ARG_INT: @@ -3479,13 +3629,19 @@ process_defined_func(struct trace_seq *s, void *data, int size, trace_seq_init(&str); print_str_arg(&str, data, size, event, "%s", -1, farg); trace_seq_terminate(&str); - string = malloc_or_die(sizeof(*string)); + string = malloc(sizeof(*string)); + if (!string) { + do_warning("%s(%d): malloc str", __func__, __LINE__); + goto out_free; + } string->next = strings; string->str = strdup(str.buffer); - if (!string->str) - die("malloc str"); - - args[i] = (unsigned long long)string->str; + if (!string->str) { + free(string); + do_warning("%s(%d): malloc str", __func__, __LINE__); + goto out_free; + } + args[i] = (uintptr_t)string->str; strings = string; trace_seq_destroy(&str); break; @@ -3494,14 +3650,15 @@ process_defined_func(struct trace_seq *s, void *data, int size, * Something went totally wrong, this is not * an input error, something in this code broke. 
*/ - die("Unexpected end of arguments\n"); - break; + do_warning("Unexpected end of arguments\n"); + goto out_free; } farg = farg->next; param = param->next; } ret = (*func_handle->func)(s, args); +out_free: free(args); while (strings) { string = strings; @@ -3515,6 +3672,18 @@ process_defined_func(struct trace_seq *s, void *data, int size, return ret; } +static void free_args(struct print_arg *args) +{ + struct print_arg *next; + + while (args) { + next = args->next; + + free_arg(args); + args = next; + } +} + static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struct event_format *event) { struct pevent *pevent = event->pevent; @@ -3530,11 +3699,15 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc if (!field) { field = pevent_find_field(event, "buf"); - if (!field) - die("can't find buffer field for binary printk"); + if (!field) { + do_warning("can't find buffer field for binary printk"); + return NULL; + } ip_field = pevent_find_field(event, "ip"); - if (!ip_field) - die("can't find ip field for binary printk"); + if (!ip_field) { + do_warning("can't find ip field for binary printk"); + return NULL; + } pevent->bprint_buf_field = field; pevent->bprint_ip_field = ip_field; } @@ -3545,13 +3718,18 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc * The first arg is the IP pointer. */ args = alloc_arg(); + if (!args) { + do_warning("%s(%d): not enough memory!", __func__, __LINE__); + return NULL; + } arg = args; arg->next = NULL; next = &arg->next; arg->type = PRINT_ATOM; - arg->atom.atom = malloc_or_die(32); - sprintf(arg->atom.atom, "%lld", ip); + + if (asprintf(&arg->atom.atom, "%lld", ip) < 0) + goto out_free; /* skip the first "%pf : " */ for (ptr = fmt + 6, bptr = data + field->offset; @@ -3606,10 +3784,17 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc val = pevent_read_number(pevent, bptr, vsize); bptr += vsize; arg = alloc_arg(); + if (!arg) { + do_warning("%s(%d): not enough memory!", + __func__, __LINE__); + goto out_free; + } arg->next = NULL; arg->type = PRINT_ATOM; - arg->atom.atom = malloc_or_die(32); - sprintf(arg->atom.atom, "%lld", val); + if (asprintf(&arg->atom.atom, "%lld", val) < 0) { + free(arg); + goto out_free; + } *next = arg; next = &arg->next; /* @@ -3622,11 +3807,16 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc break; case 's': arg = alloc_arg(); + if (!arg) { + do_warning("%s(%d): not enough memory!", + __func__, __LINE__); + goto out_free; + } arg->next = NULL; arg->type = PRINT_BSTRING; arg->string.string = strdup(bptr); if (!arg->string.string) - break; + goto out_free; bptr += strlen(bptr) + 1; *next = arg; next = &arg->next; @@ -3637,22 +3827,15 @@ static struct print_arg *make_bprint_args(char *fmt, void *data, int size, struc } return args; -} -static void free_args(struct print_arg *args) -{ - struct print_arg *next; - - while (args) { - next = args->next; - - free_arg(args); - args = next; - } +out_free: + free_args(args); + return NULL; } static char * -get_bprint_format(void *data, int size __unused, struct event_format *event) +get_bprint_format(void *data, int size __maybe_unused, + struct event_format *event) { struct pevent *pevent = event->pevent; unsigned long long addr; @@ -3665,8 +3848,10 @@ get_bprint_format(void *data, int size __unused, struct event_format *event) if (!field) { field = pevent_find_field(event, "fmt"); - if (!field) - die("can't find format field for binary 
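[Editor's note] Several hunks above replace malloc_or_die() + sprintf() pairs with asprintf(), which sizes and allocates the output buffer itself and returns a negative value when allocation fails. A small userspace illustration with a hypothetical helper (not the parser's code); asprintf() is a GNU extension, hence the _GNU_SOURCE define before the includes:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>

static char *example_concat(const char *fmt, const char *token)
{
        char *cat;

        /* asprintf() allocates exactly as much as the result needs */
        if (asprintf(&cat, "%s%s", fmt, token) < 0)
                return NULL;    /* allocation failed; caller handles NULL */

        return cat;
}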
printk"); + if (!field) { + do_warning("can't find format field for binary printk"); + return NULL; + } pevent->bprint_fmt_field = field; } @@ -3674,9 +3859,8 @@ get_bprint_format(void *data, int size __unused, struct event_format *event) printk = find_printk(pevent, addr); if (!printk) { - format = malloc_or_die(45); - sprintf(format, "%%pf : (NO FORMAT FOUND at %llx)\n", - addr); + if (asprintf(&format, "%%pf : (NO FORMAT FOUND at %llx)\n", addr) < 0) + return NULL; return format; } @@ -3684,8 +3868,8 @@ get_bprint_format(void *data, int size __unused, struct event_format *event) /* Remove any quotes. */ if (*p == '"') p++; - format = malloc_or_die(strlen(p) + 10); - sprintf(format, "%s : %s", "%pf", p); + if (asprintf(&format, "%s : %s", "%pf", p) < 0) + return NULL; /* remove ending quotes and new line since we will add one too */ p = format + strlen(format) - 1; if (*p == '"') @@ -3720,8 +3904,11 @@ static void print_mac_arg(struct trace_seq *s, int mac, void *data, int size, if (!arg->field.field) { arg->field.field = pevent_find_any_field(event, arg->field.name); - if (!arg->field.field) - die("field %s not found", arg->field.name); + if (!arg->field.field) { + do_warning("%s: field %s not found", + __func__, arg->field.name); + return; + } } if (arg->field.field->size != 6) { trace_seq_printf(s, "INVALIDMAC"); @@ -3888,8 +4075,11 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event goto cont_process; case '*': /* The argument is the length. */ - if (!arg) - die("no argument match"); + if (!arg) { + do_warning("no argument match"); + event->flags |= EVENT_FL_FAILED; + goto out_failed; + } len_arg = eval_num_arg(data, size, event, arg); len_as_arg = 1; arg = arg->next; @@ -3922,15 +4112,21 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event case 'x': case 'X': case 'u': - if (!arg) - die("no argument match"); + if (!arg) { + do_warning("no argument match"); + event->flags |= EVENT_FL_FAILED; + goto out_failed; + } len = ((unsigned long)ptr + 1) - (unsigned long)saveptr; /* should never happen */ - if (len > 31) - die("bad format!"); + if (len > 31) { + do_warning("bad format!"); + event->flags |= EVENT_FL_FAILED; + len = 31; + } memcpy(format, saveptr, len); format[len] = 0; @@ -3994,19 +4190,26 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event trace_seq_printf(s, format, (long long)val); break; default: - die("bad count (%d)", ls); + do_warning("bad count (%d)", ls); + event->flags |= EVENT_FL_FAILED; } break; case 's': - if (!arg) - die("no matching argument"); + if (!arg) { + do_warning("no matching argument"); + event->flags |= EVENT_FL_FAILED; + goto out_failed; + } len = ((unsigned long)ptr + 1) - (unsigned long)saveptr; /* should never happen */ - if (len > 31) - die("bad format!"); + if (len > 31) { + do_warning("bad format!"); + event->flags |= EVENT_FL_FAILED; + len = 31; + } memcpy(format, saveptr, len); format[len] = 0; @@ -4024,6 +4227,11 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event trace_seq_putc(s, *ptr); } + if (event->flags & EVENT_FL_FAILED) { +out_failed: + trace_seq_printf(s, "[FAILED TO PARSE]"); + } + if (args) { free_args(args); free(bprint_fmt); @@ -4356,7 +4564,10 @@ get_event_fields(const char *type, const char *name, struct format_field *field; int i = 0; - fields = malloc_or_die(sizeof(*fields) * (count + 1)); + fields = malloc(sizeof(*fields) * (count + 1)); + if (!fields) + return NULL; + for (field = list; field; field 
= field->next) { fields[i++] = field; if (i == count + 1) { @@ -4672,8 +4883,7 @@ static int find_event_handle(struct pevent *pevent, struct event_format *event) } /** - * pevent_parse_event - parse the event format - * @pevent: the handle to the pevent + * __pevent_parse_format - parse the event format * @buf: the buffer storing the event format string * @size: the size of @buf * @sys: the system the event belongs to @@ -4685,28 +4895,27 @@ static int find_event_handle(struct pevent *pevent, struct event_format *event) * * /sys/kernel/debug/tracing/events/.../.../format */ -int pevent_parse_event(struct pevent *pevent, - const char *buf, unsigned long size, - const char *sys) +enum pevent_errno __pevent_parse_format(struct event_format **eventp, + struct pevent *pevent, const char *buf, + unsigned long size, const char *sys) { struct event_format *event; int ret; init_input_buf(buf, size); - event = alloc_event(); + *eventp = event = alloc_event(); if (!event) - return -ENOMEM; + return PEVENT_ERRNO__MEM_ALLOC_FAILED; event->name = event_read_name(); if (!event->name) { /* Bad event? */ - free(event); - return -1; + ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; + goto event_alloc_failed; } if (strcmp(sys, "ftrace") == 0) { - event->flags |= EVENT_FL_ISFTRACE; if (strcmp(event->name, "bprint") == 0) @@ -4714,74 +4923,189 @@ int pevent_parse_event(struct pevent *pevent, } event->id = event_read_id(); - if (event->id < 0) - die("failed to read event id"); + if (event->id < 0) { + ret = PEVENT_ERRNO__READ_ID_FAILED; + /* + * This isn't an allocation error actually. + * But as the ID is critical, just bail out. + */ + goto event_alloc_failed; + } event->system = strdup(sys); - if (!event->system) - die("failed to allocate system"); - - /* Add pevent to event so that it can be referenced */ - event->pevent = pevent; + if (!event->system) { + ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; + goto event_alloc_failed; + } ret = event_read_format(event); if (ret < 0) { - do_warning("failed to read event format for %s", event->name); - goto event_failed; + ret = PEVENT_ERRNO__READ_FORMAT_FAILED; + goto event_parse_failed; } /* * If the event has an override, don't print warnings if the event * print format fails to parse. 
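 * Note that pevent may be NULL here: pevent_parse_format(), added
 * below, calls __pevent_parse_format() without a handle, which is why
 * the handler lookup is now guarded with a pevent check.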
*/ - if (find_event_handle(pevent, event)) + if (pevent && find_event_handle(pevent, event)) show_warning = 0; ret = event_read_print(event); - if (ret < 0) { - do_warning("failed to read event print fmt for %s", - event->name); - show_warning = 1; - goto event_failed; - } show_warning = 1; - add_event(pevent, event); + if (ret < 0) { + ret = PEVENT_ERRNO__READ_PRINT_FAILED; + goto event_parse_failed; + } if (!ret && (event->flags & EVENT_FL_ISFTRACE)) { struct format_field *field; struct print_arg *arg, **list; /* old ftrace had no args */ - list = &event->print_fmt.args; for (field = event->format.fields; field; field = field->next) { arg = alloc_arg(); - *list = arg; - list = &arg->next; + if (!arg) { + event->flags |= EVENT_FL_FAILED; + return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED; + } arg->type = PRINT_FIELD; arg->field.name = strdup(field->name); if (!arg->field.name) { - do_warning("failed to allocate field name"); event->flags |= EVENT_FL_FAILED; - return -1; + free_arg(arg); + return PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED; } arg->field.field = field; + *list = arg; + list = &arg->next; } return 0; } + return 0; + + event_parse_failed: + event->flags |= EVENT_FL_FAILED; + return ret; + + event_alloc_failed: + free(event->system); + free(event->name); + free(event); + *eventp = NULL; + return ret; +} + +/** + * pevent_parse_format - parse the event format + * @buf: the buffer storing the event format string + * @size: the size of @buf + * @sys: the system the event belongs to + * + * This parses the event format and creates an event structure + * to quickly parse raw data for a given event. + * + * These files currently come from: + * + * /sys/kernel/debug/tracing/events/.../.../format + */ +enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf, + unsigned long size, const char *sys) +{ + return __pevent_parse_format(eventp, NULL, buf, size, sys); +} + +/** + * pevent_parse_event - parse the event format + * @pevent: the handle to the pevent + * @buf: the buffer storing the event format string + * @size: the size of @buf + * @sys: the system the event belongs to + * + * This parses the event format and creates an event structure + * to quickly parse raw data for a given event. 
+ * + * These files currently come from: + * + * /sys/kernel/debug/tracing/events/.../.../format + */ +enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, + unsigned long size, const char *sys) +{ + struct event_format *event = NULL; + int ret = __pevent_parse_format(&event, pevent, buf, size, sys); + + if (event == NULL) + return ret; + + /* Add pevent to event so that it can be referenced */ + event->pevent = pevent; + + if (add_event(pevent, event)) { + ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; + goto event_add_failed; + } + #define PRINT_ARGS 0 if (PRINT_ARGS && event->print_fmt.args) print_args(event->print_fmt.args); return 0; - event_failed: - event->flags |= EVENT_FL_FAILED; - /* still add it even if it failed */ - add_event(pevent, event); - return -1; +event_add_failed: + pevent_free_format(event); + return ret; +} + +#undef _PE +#define _PE(code, str) str +static const char * const pevent_error_str[] = { + PEVENT_ERRORS +}; +#undef _PE + +int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum, + char *buf, size_t buflen) +{ + int idx; + const char *msg; + + if (errnum >= 0) { + msg = strerror_r(errnum, buf, buflen); + if (msg != buf) { + size_t len = strlen(msg); + memcpy(buf, msg, min(buflen - 1, len)); + *(buf + min(buflen - 1, len)) = '\0'; + } + return 0; + } + + if (errnum <= __PEVENT_ERRNO__START || + errnum >= __PEVENT_ERRNO__END) + return -1; + + idx = errnum - __PEVENT_ERRNO__START - 1; + msg = pevent_error_str[idx]; + + switch (errnum) { + case PEVENT_ERRNO__MEM_ALLOC_FAILED: + case PEVENT_ERRNO__PARSE_EVENT_FAILED: + case PEVENT_ERRNO__READ_ID_FAILED: + case PEVENT_ERRNO__READ_FORMAT_FAILED: + case PEVENT_ERRNO__READ_PRINT_FAILED: + case PEVENT_ERRNO__OLD_FTRACE_ARG_FAILED: + snprintf(buf, buflen, "%s", msg); + break; + + default: + /* cannot reach here */ + break; + } + + return 0; } int get_field_val(struct trace_seq *s, struct format_field *field, @@ -5000,6 +5324,7 @@ int pevent_register_print_function(struct pevent *pevent, struct pevent_func_params *param; enum pevent_func_arg_type type; va_list ap; + int ret; func_handle = find_func_handler(pevent, name); if (func_handle) { @@ -5012,14 +5337,20 @@ int pevent_register_print_function(struct pevent *pevent, remove_func_handler(pevent, name); } - func_handle = malloc_or_die(sizeof(*func_handle)); - memset(func_handle, 0, sizeof(*func_handle)); + func_handle = calloc(1, sizeof(*func_handle)); + if (!func_handle) { + do_warning("Failed to allocate function handler"); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } func_handle->ret_type = ret_type; func_handle->name = strdup(name); func_handle->func = func; - if (!func_handle->name) - die("Failed to allocate function name"); + if (!func_handle->name) { + do_warning("Failed to allocate function name"); + free(func_handle); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } next_param = &(func_handle->params); va_start(ap, name); @@ -5029,11 +5360,17 @@ int pevent_register_print_function(struct pevent *pevent, break; if (type < 0 || type >= PEVENT_FUNC_ARG_MAX_TYPES) { - warning("Invalid argument type %d", type); + do_warning("Invalid argument type %d", type); + ret = PEVENT_ERRNO__INVALID_ARG_TYPE; goto out_free; } - param = malloc_or_die(sizeof(*param)); + param = malloc(sizeof(*param)); + if (!param) { + do_warning("Failed to allocate function param"); + ret = PEVENT_ERRNO__MEM_ALLOC_FAILED; + goto out_free; + } param->type = type; param->next = NULL; @@ -5051,7 +5388,7 @@ int pevent_register_print_function(struct pevent *pevent, out_free: 
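	/*
	 * Error path: drop the partially built handler and return the
	 * specific pevent_errno (previously a bare -1).
	 */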
va_end(ap); free_func_handle(func_handle); - return -1; + return ret; } /** @@ -5103,8 +5440,12 @@ int pevent_register_event_handler(struct pevent *pevent, not_found: /* Save for later use. */ - handle = malloc_or_die(sizeof(*handle)); - memset(handle, 0, sizeof(*handle)); + handle = calloc(1, sizeof(*handle)); + if (!handle) { + do_warning("Failed to allocate event handler"); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; + } + handle->id = id; if (event_name) handle->event_name = strdup(event_name); @@ -5113,7 +5454,11 @@ int pevent_register_event_handler(struct pevent *pevent, if ((event_name && !handle->event_name) || (sys_name && !handle->sys_name)) { - die("Failed to allocate event/sys name"); + do_warning("Failed to allocate event/sys name"); + free((void *)handle->event_name); + free((void *)handle->sys_name); + free(handle); + return PEVENT_ERRNO__MEM_ALLOC_FAILED; } handle->func = func; @@ -5129,13 +5474,10 @@ int pevent_register_event_handler(struct pevent *pevent, */ struct pevent *pevent_alloc(void) { - struct pevent *pevent; + struct pevent *pevent = calloc(1, sizeof(*pevent)); - pevent = malloc(sizeof(*pevent)); - if (!pevent) - return NULL; - memset(pevent, 0, sizeof(*pevent)); - pevent->ref_count = 1; + if (pevent) + pevent->ref_count = 1; return pevent; } @@ -5164,7 +5506,7 @@ static void free_formats(struct format *format) free_format_fields(format->fields); } -static void free_event(struct event_format *event) +void pevent_free_format(struct event_format *event) { free(event->name); free(event->system); @@ -5250,7 +5592,7 @@ void pevent_free(struct pevent *pevent) } for (i = 0; i < pevent->nr_events; i++) - free_event(pevent->events[i]); + pevent_free_format(pevent->events[i]); while (pevent->handlers) { handle = pevent->handlers; diff --git a/trunk/tools/lib/traceevent/event-parse.h b/trunk/tools/lib/traceevent/event-parse.h index 5772ad8cb386..24a4bbabc5d5 100644 --- a/trunk/tools/lib/traceevent/event-parse.h +++ b/trunk/tools/lib/traceevent/event-parse.h @@ -24,8 +24,8 @@ #include #include -#ifndef __unused -#define __unused __attribute__ ((unused)) +#ifndef __maybe_unused +#define __maybe_unused __attribute__((unused)) #endif /* ----------------------- trace_seq ----------------------- */ @@ -49,7 +49,7 @@ struct pevent_record { int cpu; int ref_count; int locked; /* Do not free, even if ref_count is zero */ - void *private; + void *priv; #if DEBUG_RECORD struct pevent_record *prev; struct pevent_record *next; @@ -106,7 +106,7 @@ struct plugin_option { char *plugin_alias; char *description; char *value; - void *private; + void *priv; int set; }; @@ -345,6 +345,35 @@ enum pevent_flag { PEVENT_NSEC_OUTPUT = 1, /* output in NSECS */ }; +#define PEVENT_ERRORS \ + _PE(MEM_ALLOC_FAILED, "failed to allocate memory"), \ + _PE(PARSE_EVENT_FAILED, "failed to parse event"), \ + _PE(READ_ID_FAILED, "failed to read event id"), \ + _PE(READ_FORMAT_FAILED, "failed to read event format"), \ + _PE(READ_PRINT_FAILED, "failed to read event print fmt"), \ + _PE(OLD_FTRACE_ARG_FAILED,"failed to allocate field name for ftrace"),\ + _PE(INVALID_ARG_TYPE, "invalid argument type") + +#undef _PE +#define _PE(__code, __str) PEVENT_ERRNO__ ## __code +enum pevent_errno { + PEVENT_ERRNO__SUCCESS = 0, + + /* + * Choose an arbitrary negative big number not to clash with standard + * errno since SUS requires the errno has distinct positive values. + * See 'Issue 6' in the link below. 
+ * + * http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html + */ + __PEVENT_ERRNO__START = -100000, + + PEVENT_ERRORS, + + __PEVENT_ERRNO__END, +}; +#undef _PE + struct cmdline; struct cmdline_list; struct func_map; @@ -509,8 +538,11 @@ void pevent_print_event(struct pevent *pevent, struct trace_seq *s, int pevent_parse_header_page(struct pevent *pevent, char *buf, unsigned long size, int long_size); -int pevent_parse_event(struct pevent *pevent, const char *buf, - unsigned long size, const char *sys); +enum pevent_errno pevent_parse_event(struct pevent *pevent, const char *buf, + unsigned long size, const char *sys); +enum pevent_errno pevent_parse_format(struct event_format **eventp, const char *buf, + unsigned long size, const char *sys); +void pevent_free_format(struct event_format *event); void *pevent_get_field_raw(struct trace_seq *s, struct event_format *event, const char *name, struct pevent_record *record, @@ -561,6 +593,8 @@ int pevent_data_pid(struct pevent *pevent, struct pevent_record *rec); const char *pevent_data_comm_from_pid(struct pevent *pevent, int pid); void pevent_event_info(struct trace_seq *s, struct event_format *event, struct pevent_record *record); +int pevent_strerror(struct pevent *pevent, enum pevent_errno errnum, + char *buf, size_t buflen); struct event_format **pevent_list_events(struct pevent *pevent, enum event_sort_type); struct format_field **pevent_event_common_fields(struct event_format *event); diff --git a/trunk/tools/lib/traceevent/event-utils.h b/trunk/tools/lib/traceevent/event-utils.h index 08296383d1e6..bc075006966e 100644 --- a/trunk/tools/lib/traceevent/event-utils.h +++ b/trunk/tools/lib/traceevent/event-utils.h @@ -39,6 +39,12 @@ void __vdie(const char *fmt, ...); void __vwarning(const char *fmt, ...); void __vpr_stat(const char *fmt, ...); +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + static inline char *strim(char *string) { char *ret; diff --git a/trunk/tools/perf/.gitignore b/trunk/tools/perf/.gitignore index 26b823b61aa1..8f8fbc227a46 100644 --- a/trunk/tools/perf/.gitignore +++ b/trunk/tools/perf/.gitignore @@ -21,3 +21,5 @@ config.mak config.mak.autogen *-bison.* *-flex.* +*.pyc +*.pyo diff --git a/trunk/tools/perf/Documentation/Makefile b/trunk/tools/perf/Documentation/Makefile index ca600e09c8d4..9f2e44f2b17a 100644 --- a/trunk/tools/perf/Documentation/Makefile +++ b/trunk/tools/perf/Documentation/Makefile @@ -195,10 +195,10 @@ install-pdf: pdf #install-html: html # '$(SHELL_PATH_SQ)' ./install-webdoc.sh $(DESTDIR)$(htmldir) -../PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE - $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) PERF-VERSION-FILE +$(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE + $(QUIET_SUBDIR0)../ $(QUIET_SUBDIR1) $(OUTPUT)PERF-VERSION-FILE --include ../PERF-VERSION-FILE +-include $(OUTPUT)PERF-VERSION-FILE # # Determine "include::" file references in asciidoc files. diff --git a/trunk/tools/perf/Documentation/jit-interface.txt b/trunk/tools/perf/Documentation/jit-interface.txt new file mode 100644 index 000000000000..a8656f564915 --- /dev/null +++ b/trunk/tools/perf/Documentation/jit-interface.txt @@ -0,0 +1,15 @@ +perf supports a simple JIT interface to resolve symbols for dynamic code generated +by a JIT. + +The JIT has to write a /tmp/perf-%d.map (%d = pid of process) file + +This is a text file. 
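Before moving on, a brief usage sketch for the reworked traceevent API above (a sketch, not part of the patch): the PEVENT_ERRORS X-macro expands once into the pevent_errno enum and once into the string table behind pevent_strerror(), so a caller can turn any negative return from pevent_parse_event() into a message. The buffer, size and system name below are assumed to come from reading a .../format file.

#include <stdio.h>
#include "event-parse.h"	/* tools/lib/traceevent */

/* Hypothetical helper, assuming buf/size hold the contents of a
 * .../format file and sys names its subsystem. */
static int parse_format_buf(struct pevent *pevent, const char *buf,
			    unsigned long size, const char *sys)
{
	enum pevent_errno err;
	char msg[128];

	err = pevent_parse_event(pevent, buf, size, sys);
	if (err != PEVENT_ERRNO__SUCCESS) {
		pevent_strerror(pevent, err, msg, sizeof(msg));
		fprintf(stderr, "cannot parse %s format: %s\n", sys, msg);
		return -1;
	}
	return 0;
}

pevent_parse_format() works the same way for callers that only want the parsed event_format back, without registering it in a pevent handle.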
+ +Each line has the following format, fields separated with spaces: + +START SIZE symbolname + +START and SIZE are hex numbers without 0x. +symbolname is the rest of the line, so it could contain special characters. + +The ownership of the file has to match the process. diff --git a/trunk/tools/perf/Documentation/perf-annotate.txt b/trunk/tools/perf/Documentation/perf-annotate.txt index c89f9e1453f7..c8ffd9fd5c6a 100644 --- a/trunk/tools/perf/Documentation/perf-annotate.txt +++ b/trunk/tools/perf/Documentation/perf-annotate.txt @@ -85,6 +85,9 @@ OPTIONS -M:: --disassembler-style=:: Set disassembler style for objdump. +--objdump=:: + Path to objdump binary. + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/trunk/tools/perf/Documentation/perf-diff.txt b/trunk/tools/perf/Documentation/perf-diff.txt index 74d7481ed7a6..ab7f667de1b1 100644 --- a/trunk/tools/perf/Documentation/perf-diff.txt +++ b/trunk/tools/perf/Documentation/perf-diff.txt @@ -17,6 +17,9 @@ captured via perf record. If no parameters are passed it will assume perf.data.old and perf.data. +The differential profile is displayed only for events matching both +specified perf.data files. + OPTIONS ------- -M:: diff --git a/trunk/tools/perf/Documentation/perf-kvm.txt b/trunk/tools/perf/Documentation/perf-kvm.txt index dd84cb2f0a88..326f2cb333cb 100644 --- a/trunk/tools/perf/Documentation/perf-kvm.txt +++ b/trunk/tools/perf/Documentation/perf-kvm.txt @@ -12,7 +12,7 @@ SYNOPSIS [--guestkallsyms= --guestmodules= | --guestvmlinux=]] {top|record|report|diff|buildid-list} 'perf kvm' [--host] [--guest] [--guestkallsyms= --guestmodules= - | --guestvmlinux=] {top|record|report|diff|buildid-list} + | --guestvmlinux=] {top|record|report|diff|buildid-list|stat} DESCRIPTION ----------- @@ -38,6 +38,18 @@ There are a couple of variants of perf kvm: so that other tools can be used to fetch packages with matching symbol tables for use by perf report. + 'perf kvm stat ' to run a command and gather performance counter + statistics. + Especially, perf 'kvm stat record/report' generates a statistical analysis + of KVM events. Currently, vmexit, mmio and ioport events are supported. + 'perf kvm stat record ' records kvm events and the events between + start and end . + And this command produces a file which contains tracing results of kvm + events. + + 'perf kvm stat report' reports statistical data which includes events + handled time, samples, and so on. + OPTIONS ------- -i:: @@ -68,7 +80,21 @@ OPTIONS --guestvmlinux=:: Guest os kernel vmlinux. +STAT REPORT OPTIONS +------------------- +--vcpu=:: + analyze events which occures on this vcpu. (default: all vcpus) + +--events=:: + events to be analyzed. Possible values: vmexit, mmio, ioport. + (default: vmexit) +-k:: +--key=:: + Sorting key. Possible values: sample (default, sort by samples + number), time (sort by average time). + SEE ALSO -------- linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1], -linkperf:perf-diff[1], linkperf:perf-buildid-list[1] +linkperf:perf-diff[1], linkperf:perf-buildid-list[1], +linkperf:perf-stat[1] diff --git a/trunk/tools/perf/Documentation/perf-list.txt b/trunk/tools/perf/Documentation/perf-list.txt index ddc22525228d..d1e39dc8c810 100644 --- a/trunk/tools/perf/Documentation/perf-list.txt +++ b/trunk/tools/perf/Documentation/perf-list.txt @@ -15,24 +15,43 @@ DESCRIPTION This command displays the symbolic event types which can be selected in the various perf commands with the -e option. 
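As a concrete illustration of the JIT map format described above (a sketch, not part of the patch; the helper name and the append-per-symbol strategy are just one possible approach):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>
#include <unistd.h>

/* Append one "START SIZE symbolname" line to /tmp/perf-<pid>.map. */
static void jit_map_add(const void *code, size_t size, const char *name)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/tmp/perf-%d.map", (int)getpid());
	f = fopen(path, "a");
	if (!f)
		return;
	/* START and SIZE in hex without a 0x prefix; the symbol name is
	 * the rest of the line. */
	fprintf(f, "%lx %zx %s\n",
		(unsigned long)(uintptr_t)code, size, name);
	fclose(f);
}

A JIT would call this once per emitted function, e.g. jit_map_add(code_ptr, code_len, "jitted_frob"), so that perf report/annotate can resolve samples landing in the generated code.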
+[[EVENT_MODIFIERS]] EVENT MODIFIERS --------------- Events can optionally have a modifer by appending a colon and one or -more modifiers. Modifiers allow the user to restrict when events are -counted with 'u' for user-space, 'k' for kernel, 'h' for hypervisor. -Additional modifiers are 'G' for guest counting (in KVM guests) and 'H' -for host counting (not in KVM guests). +more modifiers. Modifiers allow the user to restrict the events to be +counted. The following modifiers exist: + + u - user-space counting + k - kernel counting + h - hypervisor counting + G - guest counting (in KVM guests) + H - host counting (not in KVM guests) + p - precise level The 'p' modifier can be used for specifying how precise the instruction -address should be. The 'p' modifier is currently only implemented for -Intel PEBS and can be specified multiple times: - 0 - SAMPLE_IP can have arbitrary skid - 1 - SAMPLE_IP must have constant skid - 2 - SAMPLE_IP requested to have 0 skid - 3 - SAMPLE_IP must have 0 skid +address should be. The 'p' modifier can be specified multiple times: + + 0 - SAMPLE_IP can have arbitrary skid + 1 - SAMPLE_IP must have constant skid + 2 - SAMPLE_IP requested to have 0 skid + 3 - SAMPLE_IP must have 0 skid + +For Intel systems precise event sampling is implemented with PEBS +which supports up to precise-level 2. -The PEBS implementation now supports up to 2. +On AMD systems it is implemented using IBS (up to precise-level 2). +The precise modifier works with event types 0x76 (cpu-cycles, CPU +clocks not halted) and 0xC1 (micro-ops retired). Both events map to +IBS execution sampling (IBS op) with the IBS Op Counter Control bit +(IbsOpCntCtl) set respectively (see AMD64 Architecture Programmer’s +Manual Volume 2: System Programming, 13.3 Instruction-Based +Sampling). Examples to use IBS: + + perf record -a -e cpu-cycles:p ... # use ibs op counting cycles + perf record -a -e r076:p ... # same as -e cpu-cycles:p + perf record -a -e r0C1:p ... # use ibs op counting micro-ops RAW HARDWARE EVENT DESCRIPTOR ----------------------------- @@ -44,6 +63,11 @@ layout of IA32_PERFEVTSELx MSRs (see [Intel® 64 and IA-32 Architectures Softwar of IA32_PERFEVTSELx MSRs) or AMD's PerfEvtSeln (see [AMD64 Architecture Programmer’s Manual Volume 2: System Programming], Page 344, Figure 13-7 Performance Event-Select Register (PerfEvtSeln)). +Note: Only the following bit fields can be set in x86 counter +registers: event, umask, edge, inv, cmask. Esp. guest/host only and +OS/user mode flags must be setup using <>. + Example: If the Intel docs for a QM720 Core i7 describe an event as: @@ -91,4 +115,4 @@ SEE ALSO linkperf:perf-stat[1], linkperf:perf-top[1], linkperf:perf-record[1], http://www.intel.com/Assets/PDF/manual/253669.pdf[Intel® 64 and IA-32 Architectures Software Developer's Manual Volume 3B: System Programming Guide], -http://support.amd.com/us/Processor_TechDocs/24593.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming] +http://support.amd.com/us/Processor_TechDocs/24593_APM_v2.pdf[AMD64 Architecture Programmer’s Manual Volume 2: System Programming] diff --git a/trunk/tools/perf/Documentation/perf-report.txt b/trunk/tools/perf/Documentation/perf-report.txt index 495210a612c4..f4d91bebd59d 100644 --- a/trunk/tools/perf/Documentation/perf-report.txt +++ b/trunk/tools/perf/Documentation/perf-report.txt @@ -168,6 +168,9 @@ OPTIONS branch stacks and it will automatically switch to the branch view mode, unless --no-branch-stack is used. +--objdump=:: + Path to objdump binary. 
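Returning to the precise-level modifiers documented above for perf list: at the attribute level the ':p' suffixes set perf_event_attr.precise_ip. A rough sketch (not from the patch; the sample period is arbitrary and the ':u' mapping is approximate) of what an event like cpu-cycles:upp corresponds to:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Open a cycles event with zero-skid sampling requested (':pp'). */
static int open_precise_cycles(pid_t pid, int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;	/* arbitrary for the example */
	attr.sample_type = PERF_SAMPLE_IP;
	attr.precise_ip = 2;		/* 'pp': SAMPLE_IP requested to have 0 skid */
	attr.exclude_kernel = 1;	/* roughly the ':u' modifier */

	/* perf_event_open() has no glibc wrapper; use syscall(2). */
	return syscall(__NR_perf_event_open, &attr, pid, cpu,
		       /*group_fd=*/-1, /*flags=*/0);
}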
+ SEE ALSO -------- linkperf:perf-stat[1], linkperf:perf-annotate[1] diff --git a/trunk/tools/perf/Documentation/perf-script-perl.txt b/trunk/tools/perf/Documentation/perf-script-perl.txt index 3152cca15501..d00bef231340 100644 --- a/trunk/tools/perf/Documentation/perf-script-perl.txt +++ b/trunk/tools/perf/Documentation/perf-script-perl.txt @@ -116,8 +116,8 @@ search path and 'use'ing a few support modules (see module descriptions below): ---- - use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/perf-script-Util/lib"; - use lib "./perf-script-Util/lib"; + use lib "$ENV{'PERF_EXEC_PATH'}/scripts/perl/Perf-Trace-Util/lib"; + use lib "./Perf-Trace-Util/lib"; use Perf::Trace::Core; use Perf::Trace::Context; use Perf::Trace::Util; diff --git a/trunk/tools/perf/Documentation/perf-script-python.txt b/trunk/tools/perf/Documentation/perf-script-python.txt index 471022069119..a4027f221a53 100644 --- a/trunk/tools/perf/Documentation/perf-script-python.txt +++ b/trunk/tools/perf/Documentation/perf-script-python.txt @@ -129,7 +129,7 @@ import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ - '/scripts/python/perf-script-Util/lib/Perf/Trace') + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * @@ -216,7 +216,7 @@ import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ - '/scripts/python/perf-script-Util/lib/Perf/Trace') + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * @@ -279,7 +279,7 @@ import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ - '/scripts/python/perf-script-Util/lib/Perf/Trace') + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * @@ -391,7 +391,7 @@ drwxr-xr-x 4 trz trz 4096 2010-01-26 22:30 . drwxr-xr-x 4 trz trz 4096 2010-01-26 22:29 .. drwxr-xr-x 2 trz trz 4096 2010-01-26 22:29 bin -rw-r--r-- 1 trz trz 2548 2010-01-26 22:29 check-perf-script.py -drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 perf-script-Util +drwxr-xr-x 3 trz trz 4096 2010-01-26 22:49 Perf-Trace-Util -rw-r--r-- 1 trz trz 1462 2010-01-26 22:30 syscall-counts.py ---- @@ -518,7 +518,7 @@ descriptions below): import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ - '/scripts/python/perf-script-Util/lib/Perf/Trace') + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * diff --git a/trunk/tools/perf/Documentation/perf-trace.txt b/trunk/tools/perf/Documentation/perf-trace.txt new file mode 100644 index 000000000000..3a2ae37310a9 --- /dev/null +++ b/trunk/tools/perf/Documentation/perf-trace.txt @@ -0,0 +1,53 @@ +perf-trace(1) +============= + +NAME +---- +perf-trace - strace inspired tool + +SYNOPSIS +-------- +[verse] +'perf trace' + +DESCRIPTION +----------- +This command will show the events associated with the target, initially +syscalls, but other system events like pagefaults, task lifetime events, +scheduling events, etc. + +Initially this is a live mode only tool, but eventually will work with +perf.data files like the other tools, allowing a detached 'record' from +analysis phases. + +OPTIONS +------- + +--all-cpus:: + System-wide collection from all CPUs. + +-p:: +--pid=:: + Record events on existing process ID (comma separated list). + +--tid=:: + Record events on existing thread ID (comma separated list). + +--uid=:: + Record events in threads owned by uid. Name or number. + +--no-inherit:: + Child tasks do not inherit counters. 
+ +--mmap-pages=:: + Number of mmap data pages. Must be a power of two. + +--cpu:: +Collect samples only on the list of CPUs provided. Multiple CPUs can be provided as a +comma-separated list with no space: 0,1. Ranges of CPUs are specified with -: 0-2. +In per-thread mode with inheritance mode on (default), Events are captured only when +the thread executes on the designated CPUs. Default is to monitor all CPUs. + +SEE ALSO +-------- +linkperf:perf-record[1], linkperf:perf-script[1] diff --git a/trunk/tools/perf/MANIFEST b/trunk/tools/perf/MANIFEST index b4b572e8c100..80db3f4bcf7a 100644 --- a/trunk/tools/perf/MANIFEST +++ b/trunk/tools/perf/MANIFEST @@ -10,8 +10,12 @@ include/linux/stringify.h lib/rbtree.c include/linux/swab.h arch/*/include/asm/unistd*.h +arch/*/include/asm/perf_regs.h arch/*/lib/memcpy*.S arch/*/lib/memset*.S include/linux/poison.h include/linux/magic.h include/linux/hw_breakpoint.h +arch/x86/include/asm/svm.h +arch/x86/include/asm/vmx.h +arch/x86/include/asm/kvm_host.h diff --git a/trunk/tools/perf/Makefile b/trunk/tools/perf/Makefile index 77f124fe57ad..e5e71e7d95a0 100644 --- a/trunk/tools/perf/Makefile +++ b/trunk/tools/perf/Makefile @@ -37,7 +37,14 @@ include config/utilities.mak # # Define NO_NEWT if you do not want TUI support. # +# Define NO_GTK2 if you do not want GTK+ GUI support. +# # Define NO_DEMANGLE if you do not want C++ symbol demangling. +# +# Define NO_LIBELF if you do not want libelf dependency (e.g. cross-builds) +# +# Define NO_LIBUNWIND if you do not want libunwind dependency for dwarf +# backtrace post unwind. $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) @@ -50,16 +57,19 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ -e s/s390x/s390/ -e s/parisc64/parisc/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ -e s/sh[234].*/sh/ ) +NO_PERF_REGS := 1 CC = $(CROSS_COMPILE)gcc AR = $(CROSS_COMPILE)ar # Additional ARCH settings for x86 ifeq ($(ARCH),i386) - ARCH := x86 + override ARCH := x86 + NO_PERF_REGS := 0 + LIBUNWIND_LIBS = -lunwind -lunwind-x86 endif ifeq ($(ARCH),x86_64) - ARCH := x86 + override ARCH := x86 IS_X86_64 := 0 ifeq (, $(findstring m32,$(EXTRA_CFLAGS))) IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -xc - | tail -n 1) @@ -69,6 +79,8 @@ ifeq ($(ARCH),x86_64) ARCH_CFLAGS := -DARCH_X86_64 ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S endif + NO_PERF_REGS := 0 + LIBUNWIND_LIBS = -lunwind -lunwind-x86_64 endif # Treat warnings as errors unless directed not to @@ -89,7 +101,7 @@ ifdef PARSER_DEBUG PARSER_DEBUG_CFLAGS := -DPARSER_DEBUG endif -CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS) +CFLAGS = -fno-omit-frame-pointer -ggdb3 -funwind-tables -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) $(PARSER_DEBUG_CFLAGS) EXTLIBS = -lpthread -lrt -lelf -lm ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE ALL_LDFLAGS = $(LDFLAGS) @@ -186,10 +198,10 @@ SCRIPTS = $(patsubst %.sh,%,$(SCRIPT_SH)) TRACE_EVENT_DIR = ../lib/traceevent/ -ifeq ("$(origin O)", "command line") - TE_PATH=$(OUTPUT)/ +ifneq ($(OUTPUT),) + TE_PATH=$(OUTPUT) else - TE_PATH=$(TRACE_EVENT_DIR)/ + TE_PATH=$(TRACE_EVENT_DIR) endif LIBTRACEEVENT = $(TE_PATH)libtraceevent.a @@ -221,13 +233,13 @@ export PERL_PATH FLEX = flex BISON= bison -$(OUTPUT)util/parse-events-flex.c: util/parse-events.l 
+$(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) -t util/parse-events.l > $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c: util/parse-events.y $(QUIET_BISON)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $(OUTPUT)util/parse-events-bison.c -$(OUTPUT)util/pmu-flex.c: util/pmu.l +$(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c $(QUIET_FLEX)$(FLEX) --header-file=$(OUTPUT)util/pmu-flex.h -t util/pmu.l > $(OUTPUT)util/pmu-flex.c $(OUTPUT)util/pmu-bison.c: util/pmu.y @@ -252,6 +264,7 @@ LIB_H += util/include/linux/ctype.h LIB_H += util/include/linux/kernel.h LIB_H += util/include/linux/list.h LIB_H += util/include/linux/export.h +LIB_H += util/include/linux/magic.h LIB_H += util/include/linux/poison.h LIB_H += util/include/linux/prefetch.h LIB_H += util/include/linux/rbtree.h @@ -319,6 +332,12 @@ LIB_H += $(ARCH_INCLUDE) LIB_H += util/cgroup.h LIB_H += $(TRACE_EVENT_DIR)event-parse.h LIB_H += util/target.h +LIB_H += util/rblist.h +LIB_H += util/intlist.h +LIB_H += util/perf_regs.h +LIB_H += util/unwind.h +LIB_H += ui/helpline.h +LIB_H += util/vdso.h LIB_OBJS += $(OUTPUT)util/abspath.o LIB_OBJS += $(OUTPUT)util/alias.o @@ -354,6 +373,7 @@ LIB_OBJS += $(OUTPUT)util/usage.o LIB_OBJS += $(OUTPUT)util/wrapper.o LIB_OBJS += $(OUTPUT)util/sigchain.o LIB_OBJS += $(OUTPUT)util/symbol.o +LIB_OBJS += $(OUTPUT)util/symbol-elf.o LIB_OBJS += $(OUTPUT)util/dso-test-data.o LIB_OBJS += $(OUTPUT)util/color.o LIB_OBJS += $(OUTPUT)util/pager.o @@ -383,11 +403,17 @@ LIB_OBJS += $(OUTPUT)util/xyarray.o LIB_OBJS += $(OUTPUT)util/cpumap.o LIB_OBJS += $(OUTPUT)util/cgroup.o LIB_OBJS += $(OUTPUT)util/target.o +LIB_OBJS += $(OUTPUT)util/rblist.o +LIB_OBJS += $(OUTPUT)util/intlist.o +LIB_OBJS += $(OUTPUT)util/vdso.o +LIB_OBJS += $(OUTPUT)util/stat.o -BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o +LIB_OBJS += $(OUTPUT)ui/helpline.o +LIB_OBJS += $(OUTPUT)ui/hist.o +LIB_OBJS += $(OUTPUT)ui/stdio/hist.o +BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o BUILTIN_OBJS += $(OUTPUT)builtin-bench.o - # Benchmark modules BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o @@ -445,34 +471,73 @@ PYRF_OBJS += $(OUTPUT)util/xyarray.o -include config.mak.autogen -include config.mak -ifndef NO_DWARF -FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS) -ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y) - msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. 
Please install new elfutils-devel/libdw-dev); +ifdef NO_LIBELF NO_DWARF := 1 -endif # Dwarf support -endif # NO_DWARF - --include arch/$(ARCH)/Makefile - -ifneq ($(OUTPUT),) - BASIC_CFLAGS += -I$(OUTPUT) -endif - + NO_DEMANGLE := 1 + NO_LIBUNWIND := 1 +else FLAGS_LIBELF=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) ifneq ($(call try-cc,$(SOURCE_LIBELF),$(FLAGS_LIBELF)),y) FLAGS_GLIBC=$(ALL_CFLAGS) $(ALL_LDFLAGS) ifneq ($(call try-cc,$(SOURCE_GLIBC),$(FLAGS_GLIBC)),y) msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static); else - msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel); + NO_LIBELF := 1 + NO_DWARF := 1 + NO_DEMANGLE := 1 endif endif +endif # NO_LIBELF + +ifndef NO_LIBUNWIND +# for linking with debug library, run like: +# make DEBUG=1 LIBUNWIND_DIR=/opt/libunwind/ +ifdef LIBUNWIND_DIR + LIBUNWIND_CFLAGS := -I$(LIBUNWIND_DIR)/include + LIBUNWIND_LDFLAGS := -L$(LIBUNWIND_DIR)/lib +endif + +FLAGS_UNWIND=$(LIBUNWIND_CFLAGS) $(ALL_CFLAGS) $(LIBUNWIND_LDFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(LIBUNWIND_LIBS) +ifneq ($(call try-cc,$(SOURCE_LIBUNWIND),$(FLAGS_UNWIND)),y) + msg := $(warning No libunwind found, disabling post unwind support. Please install libunwind-dev[el] >= 0.99); + NO_LIBUNWIND := 1 +endif # Libunwind support +endif # NO_LIBUNWIND + +-include arch/$(ARCH)/Makefile + +ifneq ($(OUTPUT),) + BASIC_CFLAGS += -I$(OUTPUT) +endif + +ifdef NO_LIBELF +BASIC_CFLAGS += -DNO_LIBELF_SUPPORT + +EXTLIBS := $(filter-out -lelf,$(EXTLIBS)) + +# Remove ELF/DWARF dependent codes +LIB_OBJS := $(filter-out $(OUTPUT)util/symbol-elf.o,$(LIB_OBJS)) +LIB_OBJS := $(filter-out $(OUTPUT)util/dwarf-aux.o,$(LIB_OBJS)) +LIB_OBJS := $(filter-out $(OUTPUT)util/probe-event.o,$(LIB_OBJS)) +LIB_OBJS := $(filter-out $(OUTPUT)util/probe-finder.o,$(LIB_OBJS)) + +BUILTIN_OBJS := $(filter-out $(OUTPUT)builtin-probe.o,$(BUILTIN_OBJS)) + +# Use minimal symbol handling +LIB_OBJS += $(OUTPUT)util/symbol-minimal.o + +else # NO_LIBELF ifneq ($(call try-cc,$(SOURCE_ELF_MMAP),$(FLAGS_COMMON)),y) BASIC_CFLAGS += -DLIBELF_NO_MMAP endif +FLAGS_DWARF=$(ALL_CFLAGS) -ldw -lelf $(ALL_LDFLAGS) $(EXTLIBS) +ifneq ($(call try-cc,$(SOURCE_DWARF),$(FLAGS_DWARF)),y) + msg := $(warning No libdw.h found or old libdw.h found or elfutils is older than 0.138, disables dwarf support. 
Please install new elfutils-devel/libdw-dev); + NO_DWARF := 1 +endif # Dwarf support + ifndef NO_DWARF ifeq ($(origin PERF_HAVE_DWARF_REGS), undefined) msg := $(warning DWARF register mappings have not been defined for architecture $(ARCH), DWARF support disabled); @@ -483,6 +548,29 @@ else LIB_OBJS += $(OUTPUT)util/dwarf-aux.o endif # PERF_HAVE_DWARF_REGS endif # NO_DWARF +endif # NO_LIBELF + +ifdef NO_LIBUNWIND + BASIC_CFLAGS += -DNO_LIBUNWIND_SUPPORT +else + EXTLIBS += $(LIBUNWIND_LIBS) + BASIC_CFLAGS := $(LIBUNWIND_CFLAGS) $(BASIC_CFLAGS) + BASIC_LDFLAGS := $(LIBUNWIND_LDFLAGS) $(BASIC_LDFLAGS) + LIB_OBJS += $(OUTPUT)util/unwind.o +endif + +ifdef NO_LIBAUDIT + BASIC_CFLAGS += -DNO_LIBAUDIT_SUPPORT +else + FLAGS_LIBAUDIT = $(ALL_CFLAGS) $(ALL_LDFLAGS) -laudit + ifneq ($(call try-cc,$(SOURCE_LIBAUDIT),$(FLAGS_LIBAUDIT)),y) + msg := $(warning No libaudit.h found, disables 'trace' tool, please install audit-libs-devel or libaudit-dev); + BASIC_CFLAGS += -DNO_LIBAUDIT_SUPPORT + else + BUILTIN_OBJS += $(OUTPUT)builtin-trace.o + EXTLIBS += -laudit + endif +endif ifdef NO_NEWT BASIC_CFLAGS += -DNO_NEWT_SUPPORT @@ -500,14 +588,13 @@ else LIB_OBJS += $(OUTPUT)ui/browsers/annotate.o LIB_OBJS += $(OUTPUT)ui/browsers/hists.o LIB_OBJS += $(OUTPUT)ui/browsers/map.o - LIB_OBJS += $(OUTPUT)ui/helpline.o LIB_OBJS += $(OUTPUT)ui/progress.o LIB_OBJS += $(OUTPUT)ui/util.o LIB_OBJS += $(OUTPUT)ui/tui/setup.o LIB_OBJS += $(OUTPUT)ui/tui/util.o + LIB_OBJS += $(OUTPUT)ui/tui/helpline.o LIB_H += ui/browser.h LIB_H += ui/browsers/map.h - LIB_H += ui/helpline.h LIB_H += ui/keysyms.h LIB_H += ui/libslang.h LIB_H += ui/progress.h @@ -519,7 +606,7 @@ endif ifdef NO_GTK2 BASIC_CFLAGS += -DNO_GTK2_SUPPORT else - FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0) + FLAGS_GTK2=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) $(shell pkg-config --libs --cflags gtk+-2.0 2>/dev/null) ifneq ($(call try-cc,$(SOURCE_GTK2),$(FLAGS_GTK2)),y) msg := $(warning GTK2 not found, disables GTK2 support. Please install gtk2-devel or libgtk2.0-dev); BASIC_CFLAGS += -DNO_GTK2_SUPPORT @@ -527,11 +614,12 @@ else ifeq ($(call try-cc,$(SOURCE_GTK2_INFOBAR),$(FLAGS_GTK2)),y) BASIC_CFLAGS += -DHAVE_GTK_INFO_BAR endif - BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0) - EXTLIBS += $(shell pkg-config --libs gtk+-2.0) + BASIC_CFLAGS += $(shell pkg-config --cflags gtk+-2.0 2>/dev/null) + EXTLIBS += $(shell pkg-config --libs gtk+-2.0 2>/dev/null) LIB_OBJS += $(OUTPUT)ui/gtk/browser.o LIB_OBJS += $(OUTPUT)ui/gtk/setup.o LIB_OBJS += $(OUTPUT)ui/gtk/util.o + LIB_OBJS += $(OUTPUT)ui/gtk/helpline.o # Make sure that it'd be included only once. 
ifneq ($(findstring -DNO_NEWT_SUPPORT,$(BASIC_CFLAGS)),) LIB_OBJS += $(OUTPUT)ui/setup.o @@ -640,7 +728,7 @@ else EXTLIBS += -liberty BASIC_CFLAGS += -DHAVE_CPLUS_DEMANGLE else - FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -lbfd + FLAGS_BFD=$(ALL_CFLAGS) $(ALL_LDFLAGS) $(EXTLIBS) -DPACKAGE='perf' -lbfd has_bfd := $(call try-cc,$(SOURCE_BFD),$(FLAGS_BFD)) ifeq ($(has_bfd),y) EXTLIBS += -lbfd @@ -670,6 +758,13 @@ else endif endif +ifeq ($(NO_PERF_REGS),0) + ifeq ($(ARCH),x86) + LIB_H += arch/x86/include/perf_regs.h + endif +else + BASIC_CFLAGS += -DNO_PERF_REGS +endif ifdef NO_STRLCPY BASIC_CFLAGS += -DNO_STRLCPY @@ -679,6 +774,14 @@ else endif endif +ifdef NO_BACKTRACE + BASIC_CFLAGS += -DNO_BACKTRACE +else + ifneq ($(call try-cc,$(SOURCE_BACKTRACE),),y) + BASIC_CFLAGS += -DNO_BACKTRACE + endif +endif + ifdef ASCIIDOC8 export ASCIIDOC8 endif @@ -696,6 +799,7 @@ perfexecdir_SQ = $(subst ','\'',$(perfexecdir)) template_dir_SQ = $(subst ','\'',$(template_dir)) htmldir_SQ = $(subst ','\'',$(htmldir)) prefix_SQ = $(subst ','\'',$(prefix)) +sysconfdir_SQ = $(subst ','\'',$(sysconfdir)) SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) @@ -763,10 +867,10 @@ $(OUTPUT)perf.o perf.spec \ # over the general rule for .o $(OUTPUT)util/%-flex.o: $(OUTPUT)util/%-flex.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -Iutil/ -w $< + $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -w $< $(OUTPUT)util/%-bison.o: $(OUTPUT)util/%-bison.c $(OUTPUT)PERF-CFLAGS - $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -Iutil/ -w $< + $(QUIET_CC)$(CC) -o $@ -c -Iutil/ $(ALL_CFLAGS) -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w $< $(OUTPUT)%.o: %.c $(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(ALL_CFLAGS) $< @@ -838,7 +942,10 @@ $(LIB_FILE): $(LIB_OBJS) # libtraceevent.a $(LIBTRACEEVENT): - $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) $(COMMAND_O) libtraceevent.a + $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) libtraceevent.a + +$(LIBTRACEEVENT)-clean: + $(QUIET_SUBDIR0)$(TRACE_EVENT_DIR) $(QUIET_SUBDIR1) O=$(OUTPUT) clean help: @echo 'Perf make targets:' @@ -947,6 +1054,8 @@ install: all $(INSTALL) scripts/python/Perf-Trace-Util/lib/Perf/Trace/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/Perf-Trace-Util/lib/Perf/Trace' $(INSTALL) scripts/python/*.py -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python' $(INSTALL) scripts/python/bin/* -t '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/scripts/python/bin' + $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d' + $(INSTALL) bash_completion '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf' install-python_ext: $(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)' @@ -977,13 +1086,14 @@ quick-install-html: ### Cleaning rules -clean: +clean: $(LIBTRACEEVENT)-clean $(RM) $(LIB_OBJS) $(BUILTIN_OBJS) $(LIB_FILE) $(OUTPUT)perf-archive $(OUTPUT)perf.o $(LANG_BINDINGS) $(RM) $(ALL_PROGRAMS) perf $(RM) *.spec *.pyc *.pyo */*.pyc */*.pyo $(OUTPUT)common-cmds.h TAGS tags cscope* $(MAKE) -C Documentation/ clean $(RM) $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)PERF-CFLAGS - $(RM) $(OUTPUT)util/*-{bison,flex}* + $(RM) $(OUTPUT)util/*-bison* + $(RM) $(OUTPUT)util/*-flex* $(python-clean) .PHONY: all install clean strip $(LIBTRACEEVENT) diff --git a/trunk/tools/perf/arch/x86/Makefile b/trunk/tools/perf/arch/x86/Makefile index 744e629797be..815841c04eb2 100644 --- a/trunk/tools/perf/arch/x86/Makefile +++ b/trunk/tools/perf/arch/x86/Makefile @@ -2,4 +2,7 @@ ifndef NO_DWARF 
PERF_HAVE_DWARF_REGS := 1 LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/dwarf-regs.o endif +ifndef NO_LIBUNWIND +LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/unwind.o +endif LIB_OBJS += $(OUTPUT)arch/$(ARCH)/util/header.o diff --git a/trunk/tools/perf/arch/x86/include/perf_regs.h b/trunk/tools/perf/arch/x86/include/perf_regs.h new file mode 100644 index 000000000000..46fc9f15c6b3 --- /dev/null +++ b/trunk/tools/perf/arch/x86/include/perf_regs.h @@ -0,0 +1,80 @@ +#ifndef ARCH_PERF_REGS_H +#define ARCH_PERF_REGS_H + +#include +#include "../../util/types.h" +#include "../../../../../arch/x86/include/asm/perf_regs.h" + +#ifndef ARCH_X86_64 +#define PERF_REGS_MASK ((1ULL << PERF_REG_X86_32_MAX) - 1) +#else +#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ + (1ULL << PERF_REG_X86_ES) | \ + (1ULL << PERF_REG_X86_FS) | \ + (1ULL << PERF_REG_X86_GS)) +#define PERF_REGS_MASK (((1ULL << PERF_REG_X86_64_MAX) - 1) & ~REG_NOSUPPORT) +#endif +#define PERF_REG_IP PERF_REG_X86_IP +#define PERF_REG_SP PERF_REG_X86_SP + +static inline const char *perf_reg_name(int id) +{ + switch (id) { + case PERF_REG_X86_AX: + return "AX"; + case PERF_REG_X86_BX: + return "BX"; + case PERF_REG_X86_CX: + return "CX"; + case PERF_REG_X86_DX: + return "DX"; + case PERF_REG_X86_SI: + return "SI"; + case PERF_REG_X86_DI: + return "DI"; + case PERF_REG_X86_BP: + return "BP"; + case PERF_REG_X86_SP: + return "SP"; + case PERF_REG_X86_IP: + return "IP"; + case PERF_REG_X86_FLAGS: + return "FLAGS"; + case PERF_REG_X86_CS: + return "CS"; + case PERF_REG_X86_SS: + return "SS"; + case PERF_REG_X86_DS: + return "DS"; + case PERF_REG_X86_ES: + return "ES"; + case PERF_REG_X86_FS: + return "FS"; + case PERF_REG_X86_GS: + return "GS"; +#ifdef ARCH_X86_64 + case PERF_REG_X86_R8: + return "R8"; + case PERF_REG_X86_R9: + return "R9"; + case PERF_REG_X86_R10: + return "R10"; + case PERF_REG_X86_R11: + return "R11"; + case PERF_REG_X86_R12: + return "R12"; + case PERF_REG_X86_R13: + return "R13"; + case PERF_REG_X86_R14: + return "R14"; + case PERF_REG_X86_R15: + return "R15"; +#endif /* ARCH_X86_64 */ + default: + return NULL; + } + + return NULL; +} + +#endif /* ARCH_PERF_REGS_H */ diff --git a/trunk/tools/perf/arch/x86/util/unwind.c b/trunk/tools/perf/arch/x86/util/unwind.c new file mode 100644 index 000000000000..78d956eff96f --- /dev/null +++ b/trunk/tools/perf/arch/x86/util/unwind.c @@ -0,0 +1,111 @@ + +#include +#include +#include "perf_regs.h" +#include "../../util/unwind.h" + +#ifdef ARCH_X86_64 +int unwind__arch_reg_id(int regnum) +{ + int id; + + switch (regnum) { + case UNW_X86_64_RAX: + id = PERF_REG_X86_AX; + break; + case UNW_X86_64_RDX: + id = PERF_REG_X86_DX; + break; + case UNW_X86_64_RCX: + id = PERF_REG_X86_CX; + break; + case UNW_X86_64_RBX: + id = PERF_REG_X86_BX; + break; + case UNW_X86_64_RSI: + id = PERF_REG_X86_SI; + break; + case UNW_X86_64_RDI: + id = PERF_REG_X86_DI; + break; + case UNW_X86_64_RBP: + id = PERF_REG_X86_BP; + break; + case UNW_X86_64_RSP: + id = PERF_REG_X86_SP; + break; + case UNW_X86_64_R8: + id = PERF_REG_X86_R8; + break; + case UNW_X86_64_R9: + id = PERF_REG_X86_R9; + break; + case UNW_X86_64_R10: + id = PERF_REG_X86_R10; + break; + case UNW_X86_64_R11: + id = PERF_REG_X86_R11; + break; + case UNW_X86_64_R12: + id = PERF_REG_X86_R12; + break; + case UNW_X86_64_R13: + id = PERF_REG_X86_R13; + break; + case UNW_X86_64_R14: + id = PERF_REG_X86_R14; + break; + case UNW_X86_64_R15: + id = PERF_REG_X86_R15; + break; + case UNW_X86_64_RIP: + id = PERF_REG_X86_IP; + break; + default: + pr_err("unwind: invalid reg 
id %d\n", regnum); + return -EINVAL; + } + + return id; +} +#else +int unwind__arch_reg_id(int regnum) +{ + int id; + + switch (regnum) { + case UNW_X86_EAX: + id = PERF_REG_X86_AX; + break; + case UNW_X86_EDX: + id = PERF_REG_X86_DX; + break; + case UNW_X86_ECX: + id = PERF_REG_X86_CX; + break; + case UNW_X86_EBX: + id = PERF_REG_X86_BX; + break; + case UNW_X86_ESI: + id = PERF_REG_X86_SI; + break; + case UNW_X86_EDI: + id = PERF_REG_X86_DI; + break; + case UNW_X86_EBP: + id = PERF_REG_X86_BP; + break; + case UNW_X86_ESP: + id = PERF_REG_X86_SP; + break; + case UNW_X86_EIP: + id = PERF_REG_X86_IP; + break; + default: + pr_err("unwind: invalid reg id %d\n", regnum); + return -EINVAL; + } + + return id; +} +#endif /* ARCH_X86_64 */ diff --git a/trunk/tools/perf/bash_completion b/trunk/tools/perf/bash_completion new file mode 100644 index 000000000000..1958fa539d0f --- /dev/null +++ b/trunk/tools/perf/bash_completion @@ -0,0 +1,26 @@ +# perf completion + +have perf && +_perf() +{ + local cur cmd + + COMPREPLY=() + _get_comp_words_by_ref cur prev + + cmd=${COMP_WORDS[0]} + + # List perf subcommands + if [ $COMP_CWORD -eq 1 ]; then + cmds=$($cmd --list-cmds) + COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) ) + # List possible events for -e option + elif [[ $prev == "-e" && "${COMP_WORDS[1]}" == @(record|stat|top) ]]; then + cmds=$($cmd list --raw-dump) + COMPREPLY=( $( compgen -W '$cmds' -- "$cur" ) ) + # Fall down to list regular files + else + _filedir + fi +} && +complete -F _perf perf diff --git a/trunk/tools/perf/bench/bench.h b/trunk/tools/perf/bench/bench.h index a09bece6dad2..8f89998eeaf4 100644 --- a/trunk/tools/perf/bench/bench.h +++ b/trunk/tools/perf/bench/bench.h @@ -3,7 +3,8 @@ extern int bench_sched_messaging(int argc, const char **argv, const char *prefix); extern int bench_sched_pipe(int argc, const char **argv, const char *prefix); -extern int bench_mem_memcpy(int argc, const char **argv, const char *prefix __used); +extern int bench_mem_memcpy(int argc, const char **argv, + const char *prefix __maybe_unused); extern int bench_mem_memset(int argc, const char **argv, const char *prefix); #define BENCH_FORMAT_DEFAULT_STR "default" diff --git a/trunk/tools/perf/bench/mem-memcpy.c b/trunk/tools/perf/bench/mem-memcpy.c index 02dad5d3359b..93c83e3cb4a7 100644 --- a/trunk/tools/perf/bench/mem-memcpy.c +++ b/trunk/tools/perf/bench/mem-memcpy.c @@ -177,7 +177,7 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault) } while (0) int bench_mem_memcpy(int argc, const char **argv, - const char *prefix __used) + const char *prefix __maybe_unused) { int i; size_t len; diff --git a/trunk/tools/perf/bench/mem-memset.c b/trunk/tools/perf/bench/mem-memset.c index 350cc9557265..c6e4bc523492 100644 --- a/trunk/tools/perf/bench/mem-memset.c +++ b/trunk/tools/perf/bench/mem-memset.c @@ -171,7 +171,7 @@ static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault) } while (0) int bench_mem_memset(int argc, const char **argv, - const char *prefix __used) + const char *prefix __maybe_unused) { int i; size_t len; diff --git a/trunk/tools/perf/bench/sched-messaging.c b/trunk/tools/perf/bench/sched-messaging.c index d1d1b30f99c1..cc1190a0849b 100644 --- a/trunk/tools/perf/bench/sched-messaging.c +++ b/trunk/tools/perf/bench/sched-messaging.c @@ -267,7 +267,7 @@ static const char * const bench_sched_message_usage[] = { }; int bench_sched_messaging(int argc, const char **argv, - const char *prefix __used) + const char *prefix __maybe_unused) { unsigned int i, 
total_children; struct timeval start, stop, diff; diff --git a/trunk/tools/perf/bench/sched-pipe.c b/trunk/tools/perf/bench/sched-pipe.c index 0c7454f8b8a9..69cfba8d4c6c 100644 --- a/trunk/tools/perf/bench/sched-pipe.c +++ b/trunk/tools/perf/bench/sched-pipe.c @@ -43,7 +43,7 @@ static const char * const bench_sched_pipe_usage[] = { }; int bench_sched_pipe(int argc, const char **argv, - const char *prefix __used) + const char *prefix __maybe_unused) { int pipe_1[2], pipe_2[2]; int m = 0, i; @@ -55,14 +55,14 @@ int bench_sched_pipe(int argc, const char **argv, * discarding returned value of read(), write() * causes error in building environment for perf */ - int __used ret, wait_stat; - pid_t pid, retpid; + int __maybe_unused ret, wait_stat; + pid_t pid, retpid __maybe_unused; argc = parse_options(argc, argv, options, bench_sched_pipe_usage, 0); - assert(!pipe(pipe_1)); - assert(!pipe(pipe_2)); + BUG_ON(pipe(pipe_1)); + BUG_ON(pipe(pipe_2)); pid = fork(); assert(pid >= 0); diff --git a/trunk/tools/perf/builtin-annotate.c b/trunk/tools/perf/builtin-annotate.c index 67522cf87405..9ea38540b873 100644 --- a/trunk/tools/perf/builtin-annotate.c +++ b/trunk/tools/perf/builtin-annotate.c @@ -239,7 +239,7 @@ static const char * const annotate_usage[] = { NULL }; -int cmd_annotate(int argc, const char **argv, const char *prefix __used) +int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_annotate annotate = { .tool = { @@ -282,6 +282,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __used) "Display raw encoding of assembly instructions (default)"), OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", "Specify disassembler style (e.g. -M intel for intel syntax)"), + OPT_STRING(0, "objdump", &objdump_path, "path", + "objdump binary to use for disassembly and annotations"), OPT_END() }; diff --git a/trunk/tools/perf/builtin-bench.c b/trunk/tools/perf/builtin-bench.c index 1f3100216448..cae9a5fd2ecf 100644 --- a/trunk/tools/perf/builtin-bench.c +++ b/trunk/tools/perf/builtin-bench.c @@ -173,7 +173,7 @@ static void all_subsystem(void) all_suite(&subsystems[i]); } -int cmd_bench(int argc, const char **argv, const char *prefix __used) +int cmd_bench(int argc, const char **argv, const char *prefix __maybe_unused) { int i, j, status = 0; diff --git a/trunk/tools/perf/builtin-buildid-cache.c b/trunk/tools/perf/builtin-buildid-cache.c index 29ad20e67919..83654557e108 100644 --- a/trunk/tools/perf/builtin-buildid-cache.c +++ b/trunk/tools/perf/builtin-buildid-cache.c @@ -43,15 +43,16 @@ static int build_id_cache__add_file(const char *filename, const char *debugdir) } build_id__sprintf(build_id, sizeof(build_id), sbuild_id); - err = build_id_cache__add_s(sbuild_id, debugdir, filename, false); + err = build_id_cache__add_s(sbuild_id, debugdir, filename, + false, false); if (verbose) pr_info("Adding %s %s: %s\n", sbuild_id, filename, err ? 
"FAIL" : "Ok"); return err; } -static int build_id_cache__remove_file(const char *filename __used, - const char *debugdir __used) +static int build_id_cache__remove_file(const char *filename __maybe_unused, + const char *debugdir __maybe_unused) { u8 build_id[BUILD_ID_SIZE]; char sbuild_id[BUILD_ID_SIZE * 2 + 1]; @@ -119,7 +120,8 @@ static int __cmd_buildid_cache(void) return 0; } -int cmd_buildid_cache(int argc, const char **argv, const char *prefix __used) +int cmd_buildid_cache(int argc, const char **argv, + const char *prefix __maybe_unused) { argc = parse_options(argc, argv, buildid_cache_options, buildid_cache_usage, 0); diff --git a/trunk/tools/perf/builtin-buildid-list.c b/trunk/tools/perf/builtin-buildid-list.c index 6b2bcfbde150..1159feeebb19 100644 --- a/trunk/tools/perf/builtin-buildid-list.c +++ b/trunk/tools/perf/builtin-buildid-list.c @@ -16,8 +16,6 @@ #include "util/session.h" #include "util/symbol.h" -#include - static const char *input_name; static bool force; static bool show_kernel; @@ -71,7 +69,7 @@ static int perf_session__list_build_ids(void) { struct perf_session *session; - elf_version(EV_CURRENT); + symbol__elf_init(); session = perf_session__new(input_name, O_RDONLY, force, false, &build_id__mark_dso_hit_ops); @@ -105,7 +103,8 @@ static int __cmd_buildid_list(void) return perf_session__list_build_ids(); } -int cmd_buildid_list(int argc, const char **argv, const char *prefix __used) +int cmd_buildid_list(int argc, const char **argv, + const char *prefix __maybe_unused) { argc = parse_options(argc, argv, options, buildid_list_usage, 0); setup_pager(); diff --git a/trunk/tools/perf/builtin-diff.c b/trunk/tools/perf/builtin-diff.c index d29d350fb2b7..761f4197a9e2 100644 --- a/trunk/tools/perf/builtin-diff.c +++ b/trunk/tools/perf/builtin-diff.c @@ -10,6 +10,7 @@ #include "util/event.h" #include "util/hist.h" #include "util/evsel.h" +#include "util/evlist.h" #include "util/session.h" #include "util/tool.h" #include "util/sort.h" @@ -24,11 +25,6 @@ static char diff__default_sort_order[] = "dso,symbol"; static bool force; static bool show_displacement; -struct perf_diff { - struct perf_tool tool; - struct perf_session *session; -}; - static int hists__add_entry(struct hists *self, struct addr_location *al, u64 period) { @@ -37,14 +33,12 @@ static int hists__add_entry(struct hists *self, return -ENOMEM; } -static int diff__process_sample_event(struct perf_tool *tool, +static int diff__process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel __used, + struct perf_evsel *evsel, struct machine *machine) { - struct perf_diff *_diff = container_of(tool, struct perf_diff, tool); - struct perf_session *session = _diff->session; struct addr_location al; if (perf_event__preprocess_sample(event, machine, &al, sample, NULL) < 0) { @@ -56,26 +50,24 @@ static int diff__process_sample_event(struct perf_tool *tool, if (al.filtered || al.sym == NULL) return 0; - if (hists__add_entry(&session->hists, &al, sample->period)) { + if (hists__add_entry(&evsel->hists, &al, sample->period)) { pr_warning("problem incrementing symbol period, skipping event\n"); return -1; } - session->hists.stats.total_period += sample->period; + evsel->hists.stats.total_period += sample->period; return 0; } -static struct perf_diff diff = { - .tool = { - .sample = diff__process_sample_event, - .mmap = perf_event__process_mmap, - .comm = perf_event__process_comm, - .exit = perf_event__process_task, - .fork = 
perf_event__process_task, - .lost = perf_event__process_lost, - .ordered_samples = true, - .ordering_requires_timestamps = true, - }, +static struct perf_tool tool = { + .sample = diff__process_sample_event, + .mmap = perf_event__process_mmap, + .comm = perf_event__process_comm, + .exit = perf_event__process_task, + .fork = perf_event__process_task, + .lost = perf_event__process_lost, + .ordered_samples = true, + .ordering_requires_timestamps = true, }; static void perf_session__insert_hist_entry_by_name(struct rb_root *root, @@ -146,34 +138,71 @@ static void hists__match(struct hists *older, struct hists *newer) } } +static struct perf_evsel *evsel_match(struct perf_evsel *evsel, + struct perf_evlist *evlist) +{ + struct perf_evsel *e; + + list_for_each_entry(e, &evlist->entries, node) + if (perf_evsel__match2(evsel, e)) + return e; + + return NULL; +} + static int __cmd_diff(void) { int ret, i; #define older (session[0]) #define newer (session[1]) struct perf_session *session[2]; + struct perf_evlist *evlist_new, *evlist_old; + struct perf_evsel *evsel; + bool first = true; older = perf_session__new(input_old, O_RDONLY, force, false, - &diff.tool); + &tool); newer = perf_session__new(input_new, O_RDONLY, force, false, - &diff.tool); + &tool); if (session[0] == NULL || session[1] == NULL) return -ENOMEM; for (i = 0; i < 2; ++i) { - diff.session = session[i]; - ret = perf_session__process_events(session[i], &diff.tool); + ret = perf_session__process_events(session[i], &tool); if (ret) goto out_delete; - hists__output_resort(&session[i]->hists); } - if (show_displacement) - hists__resort_entries(&older->hists); + evlist_old = older->evlist; + evlist_new = newer->evlist; + + list_for_each_entry(evsel, &evlist_new->entries, node) + hists__output_resort(&evsel->hists); + + list_for_each_entry(evsel, &evlist_old->entries, node) { + hists__output_resort(&evsel->hists); + + if (show_displacement) + hists__resort_entries(&evsel->hists); + } + + list_for_each_entry(evsel, &evlist_new->entries, node) { + struct perf_evsel *evsel_old; + + evsel_old = evsel_match(evsel, evlist_old); + if (!evsel_old) + continue; + + fprintf(stdout, "%s# Event '%s'\n#\n", first ? 
"" : "\n", + perf_evsel__name(evsel)); + + first = false; + + hists__match(&evsel_old->hists, &evsel->hists); + hists__fprintf(&evsel->hists, &evsel_old->hists, + show_displacement, true, 0, 0, stdout); + } - hists__match(&older->hists, &newer->hists); - hists__fprintf(&newer->hists, &older->hists, - show_displacement, true, 0, 0, stdout); out_delete: for (i = 0; i < 2; ++i) perf_session__delete(session[i]); @@ -213,7 +242,7 @@ static const struct option options[] = { OPT_END() }; -int cmd_diff(int argc, const char **argv, const char *prefix __used) +int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) { sort_order = diff__default_sort_order; argc = parse_options(argc, argv, options, diff_usage, 0); @@ -235,6 +264,7 @@ int cmd_diff(int argc, const char **argv, const char *prefix __used) if (symbol__init() < 0) return -1; + perf_hpp__init(true, show_displacement); setup_sorting(diff_usage, options); setup_pager(); diff --git a/trunk/tools/perf/builtin-evlist.c b/trunk/tools/perf/builtin-evlist.c index 0dd5a058f766..1fb164164fd0 100644 --- a/trunk/tools/perf/builtin-evlist.c +++ b/trunk/tools/perf/builtin-evlist.c @@ -113,7 +113,7 @@ static const char * const evlist_usage[] = { NULL }; -int cmd_evlist(int argc, const char **argv, const char *prefix __used) +int cmd_evlist(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_attr_details details = { .verbose = false, }; const char *input_name = NULL; diff --git a/trunk/tools/perf/builtin-help.c b/trunk/tools/perf/builtin-help.c index 6d5a8a7faf48..25c8b942ff85 100644 --- a/trunk/tools/perf/builtin-help.c +++ b/trunk/tools/perf/builtin-help.c @@ -24,13 +24,14 @@ static struct man_viewer_info_list { } *man_viewer_info_list; enum help_format { + HELP_FORMAT_NONE, HELP_FORMAT_MAN, HELP_FORMAT_INFO, HELP_FORMAT_WEB, }; static bool show_all = false; -static enum help_format help_format = HELP_FORMAT_MAN; +static enum help_format help_format = HELP_FORMAT_NONE; static struct option builtin_help_options[] = { OPT_BOOLEAN('a', "all", &show_all, "print all available commands"), OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN), @@ -54,7 +55,9 @@ static enum help_format parse_help_format(const char *format) return HELP_FORMAT_INFO; if (!strcmp(format, "web") || !strcmp(format, "html")) return HELP_FORMAT_WEB; - die("unrecognized help format '%s'", format); + + pr_err("unrecognized help format '%s'", format); + return HELP_FORMAT_NONE; } static const char *get_man_viewer_info(const char *name) @@ -259,6 +262,8 @@ static int perf_help_config(const char *var, const char *value, void *cb) if (!value) return config_error_nonbool(var); help_format = parse_help_format(value); + if (help_format == HELP_FORMAT_NONE) + return -1; return 0; } if (!strcmp(var, "man.viewer")) { @@ -352,7 +357,7 @@ static void exec_viewer(const char *name, const char *page) warning("'%s': unknown man viewer.", name); } -static void show_man_page(const char *perf_cmd) +static int show_man_page(const char *perf_cmd) { struct man_viewer_list *viewer; const char *page = cmd_to_page(perf_cmd); @@ -365,28 +370,35 @@ static void show_man_page(const char *perf_cmd) if (fallback) exec_viewer(fallback, page); exec_viewer("man", page); - die("no man viewer handled the request"); + + pr_err("no man viewer handled the request"); + return -1; } -static void show_info_page(const char *perf_cmd) +static int show_info_page(const char *perf_cmd) { const char *page = cmd_to_page(perf_cmd); setenv("INFOPATH", 
system_path(PERF_INFO_PATH), 1); execlp("info", "info", "perfman", page, NULL); + return -1; } -static void get_html_page_path(struct strbuf *page_path, const char *page) +static int get_html_page_path(struct strbuf *page_path, const char *page) { struct stat st; const char *html_path = system_path(PERF_HTML_PATH); /* Check that we have a perf documentation directory. */ if (stat(mkpath("%s/perf.html", html_path), &st) - || !S_ISREG(st.st_mode)) - die("'%s': not a documentation directory.", html_path); + || !S_ISREG(st.st_mode)) { + pr_err("'%s': not a documentation directory.", html_path); + return -1; + } strbuf_init(page_path, 0); strbuf_addf(page_path, "%s/%s.html", html_path, page); + + return 0; } /* @@ -401,19 +413,23 @@ static void open_html(const char *path) } #endif -static void show_html_page(const char *perf_cmd) +static int show_html_page(const char *perf_cmd) { const char *page = cmd_to_page(perf_cmd); struct strbuf page_path; /* it leaks but we exec bellow */ - get_html_page_path(&page_path, page); + if (get_html_page_path(&page_path, page) != 0) + return -1; open_html(page_path.buf); + + return 0; } -int cmd_help(int argc, const char **argv, const char *prefix __used) +int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused) { const char *alias; + int rc = 0; load_command_list("perf-", &main_cmds, &other_cmds); @@ -444,16 +460,20 @@ int cmd_help(int argc, const char **argv, const char *prefix __used) switch (help_format) { case HELP_FORMAT_MAN: - show_man_page(argv[0]); + rc = show_man_page(argv[0]); break; case HELP_FORMAT_INFO: - show_info_page(argv[0]); + rc = show_info_page(argv[0]); break; case HELP_FORMAT_WEB: - show_html_page(argv[0]); + rc = show_html_page(argv[0]); + break; + case HELP_FORMAT_NONE: + /* fall-through */ default: + rc = -1; break; } - return 0; + return rc; } diff --git a/trunk/tools/perf/builtin-inject.c b/trunk/tools/perf/builtin-inject.c index 3beab489afc5..1eaa6617c814 100644 --- a/trunk/tools/perf/builtin-inject.c +++ b/trunk/tools/perf/builtin-inject.c @@ -17,9 +17,9 @@ static char const *input_name = "-"; static bool inject_build_ids; -static int perf_event__repipe_synth(struct perf_tool *tool __used, +static int perf_event__repipe_synth(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct machine *machine __used) + struct machine *machine __maybe_unused) { uint32_t size; void *buf = event; @@ -40,7 +40,8 @@ static int perf_event__repipe_synth(struct perf_tool *tool __used, static int perf_event__repipe_op2_synth(struct perf_tool *tool, union perf_event *event, - struct perf_session *session __used) + struct perf_session *session + __maybe_unused) { return perf_event__repipe_synth(tool, event, NULL); } @@ -52,13 +53,14 @@ static int perf_event__repipe_event_type_synth(struct perf_tool *tool, } static int perf_event__repipe_tracing_data_synth(union perf_event *event, - struct perf_session *session __used) + struct perf_session *session + __maybe_unused) { return perf_event__repipe_synth(NULL, event, NULL); } static int perf_event__repipe_attr(union perf_event *event, - struct perf_evlist **pevlist __used) + struct perf_evlist **pevlist __maybe_unused) { int ret; ret = perf_event__process_attr(event, pevlist); @@ -70,7 +72,7 @@ static int perf_event__repipe_attr(union perf_event *event, static int perf_event__repipe(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct machine *machine) { return 
perf_event__repipe_synth(tool, event, machine); @@ -78,8 +80,8 @@ static int perf_event__repipe(struct perf_tool *tool, static int perf_event__repipe_sample(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, - struct perf_evsel *evsel __used, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, struct machine *machine) { return perf_event__repipe_synth(tool, event, machine); @@ -163,7 +165,7 @@ static int dso__inject_build_id(struct dso *self, struct perf_tool *tool, static int perf_event__inject_buildid(struct perf_tool *tool, union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel __used, + struct perf_evsel *evsel __maybe_unused, struct machine *machine) { struct addr_location al; @@ -191,10 +193,13 @@ static int perf_event__inject_buildid(struct perf_tool *tool, * If this fails, too bad, let the other side * account this as unresolved. */ - } else + } else { +#ifndef NO_LIBELF_SUPPORT pr_warning("no symbols found in %s, maybe " "install a debug package?\n", al.map->dso->long_name); +#endif + } } } @@ -221,7 +226,7 @@ struct perf_tool perf_inject = { extern volatile int session_done; -static void sig_handler(int sig __attribute__((__unused__))) +static void sig_handler(int sig __maybe_unused) { session_done = 1; } @@ -264,7 +269,7 @@ static const struct option options[] = { OPT_END() }; -int cmd_inject(int argc, const char **argv, const char *prefix __used) +int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused) { argc = parse_options(argc, argv, options, report_usage, 0); diff --git a/trunk/tools/perf/builtin-kmem.c b/trunk/tools/perf/builtin-kmem.c index ce35015f2dc6..bc912c68f49a 100644 --- a/trunk/tools/perf/builtin-kmem.c +++ b/trunk/tools/perf/builtin-kmem.c @@ -1,6 +1,8 @@ #include "builtin.h" #include "perf.h" +#include "util/evlist.h" +#include "util/evsel.h" #include "util/util.h" #include "util/cache.h" #include "util/symbol.h" @@ -57,46 +59,52 @@ static unsigned long nr_allocs, nr_cross_allocs; #define PATH_SYS_NODE "/sys/devices/system/node" -struct perf_kmem { - struct perf_tool tool; - struct perf_session *session; -}; - -static void init_cpunode_map(void) +static int init_cpunode_map(void) { FILE *fp; - int i; + int i, err = -1; fp = fopen("/sys/devices/system/cpu/kernel_max", "r"); if (!fp) { max_cpu_num = 4096; - return; + return 0; + } + + if (fscanf(fp, "%d", &max_cpu_num) < 1) { + pr_err("Failed to read 'kernel_max' from sysfs"); + goto out_close; } - if (fscanf(fp, "%d", &max_cpu_num) < 1) - die("Failed to read 'kernel_max' from sysfs"); max_cpu_num++; cpunode_map = calloc(max_cpu_num, sizeof(int)); - if (!cpunode_map) - die("calloc"); + if (!cpunode_map) { + pr_err("%s: calloc failed\n", __func__); + goto out_close; + } + for (i = 0; i < max_cpu_num; i++) cpunode_map[i] = -1; + + err = 0; +out_close: fclose(fp); + return err; } -static void setup_cpunode_map(void) +static int setup_cpunode_map(void) { struct dirent *dent1, *dent2; DIR *dir1, *dir2; unsigned int cpu, mem; char buf[PATH_MAX]; - init_cpunode_map(); + if (init_cpunode_map()) + return -1; dir1 = opendir(PATH_SYS_NODE); if (!dir1) - return; + return -1; while ((dent1 = readdir(dir1)) != NULL) { if (dent1->d_type != DT_DIR || @@ -116,10 +124,11 @@ static void setup_cpunode_map(void) closedir(dir2); } closedir(dir1); + return 0; } -static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, - int bytes_req, int bytes_alloc, int cpu) +static int insert_alloc_stat(unsigned long 
call_site, unsigned long ptr, + int bytes_req, int bytes_alloc, int cpu) { struct rb_node **node = &root_alloc_stat.rb_node; struct rb_node *parent = NULL; @@ -143,8 +152,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, data->bytes_alloc += bytes_alloc; } else { data = malloc(sizeof(*data)); - if (!data) - die("malloc"); + if (!data) { + pr_err("%s: malloc failed\n", __func__); + return -1; + } data->ptr = ptr; data->pingpong = 0; data->hit = 1; @@ -156,9 +167,10 @@ static void insert_alloc_stat(unsigned long call_site, unsigned long ptr, } data->call_site = call_site; data->alloc_cpu = cpu; + return 0; } -static void insert_caller_stat(unsigned long call_site, +static int insert_caller_stat(unsigned long call_site, int bytes_req, int bytes_alloc) { struct rb_node **node = &root_caller_stat.rb_node; @@ -183,8 +195,10 @@ static void insert_caller_stat(unsigned long call_site, data->bytes_alloc += bytes_alloc; } else { data = malloc(sizeof(*data)); - if (!data) - die("malloc"); + if (!data) { + pr_err("%s: malloc failed\n", __func__); + return -1; + } data->call_site = call_site; data->pingpong = 0; data->hit = 1; @@ -194,39 +208,43 @@ static void insert_caller_stat(unsigned long call_site, rb_link_node(&data->node, parent, node); rb_insert_color(&data->node, &root_caller_stat); } + + return 0; } -static void process_alloc_event(void *data, - struct event_format *event, - int cpu, - u64 timestamp __used, - struct thread *thread __used, - int node) +static int perf_evsel__process_alloc_event(struct perf_evsel *evsel, + struct perf_sample *sample) { - unsigned long call_site; - unsigned long ptr; - int bytes_req; - int bytes_alloc; - int node1, node2; - - ptr = raw_field_value(event, "ptr", data); - call_site = raw_field_value(event, "call_site", data); - bytes_req = raw_field_value(event, "bytes_req", data); - bytes_alloc = raw_field_value(event, "bytes_alloc", data); + unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"), + call_site = perf_evsel__intval(evsel, sample, "call_site"); + int bytes_req = perf_evsel__intval(evsel, sample, "bytes_req"), + bytes_alloc = perf_evsel__intval(evsel, sample, "bytes_alloc"); - insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu); - insert_caller_stat(call_site, bytes_req, bytes_alloc); + if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, sample->cpu) || + insert_caller_stat(call_site, bytes_req, bytes_alloc)) + return -1; total_requested += bytes_req; total_allocated += bytes_alloc; - if (node) { - node1 = cpunode_map[cpu]; - node2 = raw_field_value(event, "node", data); + nr_allocs++; + return 0; +} + +static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel, + struct perf_sample *sample) +{ + int ret = perf_evsel__process_alloc_event(evsel, sample); + + if (!ret) { + int node1 = cpunode_map[sample->cpu], + node2 = perf_evsel__intval(evsel, sample, "node"); + if (node1 != node2) nr_cross_allocs++; } - nr_allocs++; + + return ret; } static int ptr_cmp(struct alloc_stat *, struct alloc_stat *); @@ -257,66 +275,37 @@ static struct alloc_stat *search_alloc_stat(unsigned long ptr, return NULL; } -static void process_free_event(void *data, - struct event_format *event, - int cpu, - u64 timestamp __used, - struct thread *thread __used) +static int perf_evsel__process_free_event(struct perf_evsel *evsel, + struct perf_sample *sample) { - unsigned long ptr; + unsigned long ptr = perf_evsel__intval(evsel, sample, "ptr"); struct alloc_stat *s_alloc, *s_caller; - ptr = 
raw_field_value(event, "ptr", data); - s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp); if (!s_alloc) - return; + return 0; - if (cpu != s_alloc->alloc_cpu) { + if ((short)sample->cpu != s_alloc->alloc_cpu) { s_alloc->pingpong++; s_caller = search_alloc_stat(0, s_alloc->call_site, &root_caller_stat, callsite_cmp); - assert(s_caller); + if (!s_caller) + return -1; s_caller->pingpong++; } s_alloc->alloc_cpu = -1; -} -static void process_raw_event(struct perf_tool *tool, - union perf_event *raw_event __used, void *data, - int cpu, u64 timestamp, struct thread *thread) -{ - struct perf_kmem *kmem = container_of(tool, struct perf_kmem, tool); - struct event_format *event; - int type; - - type = trace_parse_common_type(kmem->session->pevent, data); - event = pevent_find_event(kmem->session->pevent, type); - - if (!strcmp(event->name, "kmalloc") || - !strcmp(event->name, "kmem_cache_alloc")) { - process_alloc_event(data, event, cpu, timestamp, thread, 0); - return; - } - - if (!strcmp(event->name, "kmalloc_node") || - !strcmp(event->name, "kmem_cache_alloc_node")) { - process_alloc_event(data, event, cpu, timestamp, thread, 1); - return; - } - - if (!strcmp(event->name, "kfree") || - !strcmp(event->name, "kmem_cache_free")) { - process_free_event(data, event, cpu, timestamp, thread); - return; - } + return 0; } -static int process_sample_event(struct perf_tool *tool, +typedef int (*tracepoint_handler)(struct perf_evsel *evsel, + struct perf_sample *sample); + +static int process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel __used, + struct perf_evsel *evsel, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, event->ip.pid); @@ -329,18 +318,18 @@ static int process_sample_event(struct perf_tool *tool, dump_printf(" ... 
thread: %s:%d\n", thread->comm, thread->pid); - process_raw_event(tool, event, sample->raw_data, sample->cpu, - sample->time, thread); + if (evsel->handler.func != NULL) { + tracepoint_handler f = evsel->handler.func; + return f(evsel, sample); + } return 0; } -static struct perf_kmem perf_kmem = { - .tool = { - .sample = process_sample_event, - .comm = perf_event__process_comm, - .ordered_samples = true, - }, +static struct perf_tool perf_kmem = { + .sample = process_sample_event, + .comm = perf_event__process_comm, + .ordered_samples = true, }; static double fragmentation(unsigned long n_req, unsigned long n_alloc) @@ -496,22 +485,32 @@ static int __cmd_kmem(void) { int err = -EINVAL; struct perf_session *session; - - session = perf_session__new(input_name, O_RDONLY, 0, false, - &perf_kmem.tool); + const struct perf_evsel_str_handler kmem_tracepoints[] = { + { "kmem:kmalloc", perf_evsel__process_alloc_event, }, + { "kmem:kmem_cache_alloc", perf_evsel__process_alloc_event, }, + { "kmem:kmalloc_node", perf_evsel__process_alloc_node_event, }, + { "kmem:kmem_cache_alloc_node", perf_evsel__process_alloc_node_event, }, + { "kmem:kfree", perf_evsel__process_free_event, }, + { "kmem:kmem_cache_free", perf_evsel__process_free_event, }, + }; + + session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem); if (session == NULL) return -ENOMEM; - perf_kmem.session = session; - if (perf_session__create_kernel_maps(session) < 0) goto out_delete; if (!perf_session__has_traces(session, "kmem record")) goto out_delete; + if (perf_session__set_tracepoints_handlers(session, kmem_tracepoints)) { + pr_err("Initializing perf session tracepoint handlers failed\n"); + return -1; + } + setup_pager(); - err = perf_session__process_events(session, &perf_kmem.tool); + err = perf_session__process_events(session, &perf_kmem); if (err != 0) goto out_delete; sort_result(); @@ -635,8 +634,10 @@ static int sort_dimension__add(const char *tok, struct list_head *list) for (i = 0; i < NUM_AVAIL_SORTS; i++) { if (!strcmp(avail_sorts[i]->name, tok)) { sort = malloc(sizeof(*sort)); - if (!sort) - die("malloc"); + if (!sort) { + pr_err("%s: malloc failed\n", __func__); + return -1; + } memcpy(sort, avail_sorts[i], sizeof(*sort)); list_add_tail(&sort->list, list); return 0; @@ -651,8 +652,10 @@ static int setup_sorting(struct list_head *sort_list, const char *arg) char *tok; char *str = strdup(arg); - if (!str) - die("strdup"); + if (!str) { + pr_err("%s: strdup failed\n", __func__); + return -1; + } while (true) { tok = strsep(&str, ","); @@ -669,8 +672,8 @@ static int setup_sorting(struct list_head *sort_list, const char *arg) return 0; } -static int parse_sort_opt(const struct option *opt __used, - const char *arg, int unset __used) +static int parse_sort_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) { if (!arg) return -1; @@ -683,22 +686,24 @@ static int parse_sort_opt(const struct option *opt __used, return 0; } -static int parse_caller_opt(const struct option *opt __used, - const char *arg __used, int unset __used) +static int parse_caller_opt(const struct option *opt __maybe_unused, + const char *arg __maybe_unused, + int unset __maybe_unused) { caller_flag = (alloc_flag + 1); return 0; } -static int parse_alloc_opt(const struct option *opt __used, - const char *arg __used, int unset __used) +static int parse_alloc_opt(const struct option *opt __maybe_unused, + const char *arg __maybe_unused, + int unset __maybe_unused) { alloc_flag = (caller_flag + 1); return 0; } 
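The builtin-kmem.c conversion above follows the same pattern as the other tools in this series: the old strcmp()-on-event-name dispatch is dropped, each tracepoint is bound to a handler through perf_session__set_tracepoints_handlers(), process_sample_event() simply invokes evsel->handler.func, and die() calls become pr_err() plus an error return. A minimal self-contained sketch of that dispatch idea, using entirely hypothetical names rather than perf's internal API, might look like:

	/* Sketch of a name-to-handler dispatch table; handlers return an
	 * error code instead of aborting, as in the patched tools. */
	#include <stdio.h>
	#include <string.h>

	struct sample { unsigned long ptr; int cpu; };

	typedef int (*tracepoint_handler)(const struct sample *s);

	static int handle_alloc(const struct sample *s)
	{
		printf("alloc ptr=%#lx on cpu %d\n", s->ptr, s->cpu);
		return 0;
	}

	static int handle_free(const struct sample *s)
	{
		printf("free  ptr=%#lx on cpu %d\n", s->ptr, s->cpu);
		return 0;
	}

	static const struct {
		const char *name;
		tracepoint_handler func;
	} handlers[] = {
		{ "kmem:kmalloc", handle_alloc },
		{ "kmem:kfree",   handle_free  },
	};

	static tracepoint_handler find_handler(const char *name)
	{
		size_t i;

		for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++)
			if (!strcmp(handlers[i].name, name))
				return handlers[i].func;
		return NULL;		/* unknown events are simply skipped */
	}

	int main(void)
	{
		struct sample s = { .ptr = 0xdead10cc, .cpu = 1 };
		tracepoint_handler f = find_handler("kmem:kmalloc");

		return f ? f(&s) : 0;	/* propagate the handler's result */
	}

The table is registered once up front, so per-sample processing stays free of event-name string comparisons.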
-static int parse_line_opt(const struct option *opt __used, - const char *arg, int unset __used) +static int parse_line_opt(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) { int lines; @@ -768,7 +773,7 @@ static int __cmd_record(int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -int cmd_kmem(int argc, const char **argv, const char *prefix __used) +int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused) { argc = parse_options(argc, argv, kmem_options, kmem_usage, 0); @@ -780,7 +785,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __used) if (!strncmp(argv[0], "rec", 3)) { return __cmd_record(argc, argv); } else if (!strcmp(argv[0], "stat")) { - setup_cpunode_map(); + if (setup_cpunode_map()) + return -1; if (list_empty(&caller_sort)) setup_sorting(&caller_sort, default_sort_order); diff --git a/trunk/tools/perf/builtin-kvm.c b/trunk/tools/perf/builtin-kvm.c index 9fc6e0fa3dce..a28c9cad9048 100644 --- a/trunk/tools/perf/builtin-kvm.c +++ b/trunk/tools/perf/builtin-kvm.c @@ -1,6 +1,7 @@ #include "builtin.h" #include "perf.h" +#include "util/evsel.h" #include "util/util.h" #include "util/cache.h" #include "util/symbol.h" @@ -10,8 +11,10 @@ #include "util/parse-options.h" #include "util/trace-event.h" - #include "util/debug.h" +#include "util/debugfs.h" +#include "util/tool.h" +#include "util/stat.h" #include @@ -19,11 +22,836 @@ #include #include -static const char *file_name; +#include "../../arch/x86/include/asm/svm.h" +#include "../../arch/x86/include/asm/vmx.h" +#include "../../arch/x86/include/asm/kvm.h" + +struct event_key { + #define INVALID_KEY (~0ULL) + u64 key; + int info; +}; + +struct kvm_events_ops { + bool (*is_begin_event)(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key); + bool (*is_end_event)(struct perf_evsel *evsel, + struct perf_sample *sample, struct event_key *key); + void (*decode_key)(struct event_key *key, char decode[20]); + const char *name; +}; + +static void exit_event_get_key(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key) +{ + key->info = 0; + key->key = perf_evsel__intval(evsel, sample, "exit_reason"); +} + +static bool kvm_exit_event(struct perf_evsel *evsel) +{ + return !strcmp(evsel->name, "kvm:kvm_exit"); +} + +static bool exit_event_begin(struct perf_evsel *evsel, + struct perf_sample *sample, struct event_key *key) +{ + if (kvm_exit_event(evsel)) { + exit_event_get_key(evsel, sample, key); + return true; + } + + return false; +} + +static bool kvm_entry_event(struct perf_evsel *evsel) +{ + return !strcmp(evsel->name, "kvm:kvm_entry"); +} + +static bool exit_event_end(struct perf_evsel *evsel, + struct perf_sample *sample __maybe_unused, + struct event_key *key __maybe_unused) +{ + return kvm_entry_event(evsel); +} + +struct exit_reasons_table { + unsigned long exit_code; + const char *reason; +}; + +struct exit_reasons_table vmx_exit_reasons[] = { + VMX_EXIT_REASONS +}; + +struct exit_reasons_table svm_exit_reasons[] = { + SVM_EXIT_REASONS +}; + +static int cpu_isa; + +static const char *get_exit_reason(u64 exit_code) +{ + int table_size = ARRAY_SIZE(svm_exit_reasons); + struct exit_reasons_table *table = svm_exit_reasons; + + if (cpu_isa == 1) { + table = vmx_exit_reasons; + table_size = ARRAY_SIZE(vmx_exit_reasons); + } + + while (table_size--) { + if (table->exit_code == exit_code) + return table->reason; + table++; + } + + pr_err("unknown kvm exit code:%lld on %s\n", + (unsigned long 
long)exit_code, cpu_isa ? "VMX" : "SVM"); + return "UNKNOWN"; +} + +static void exit_event_decode_key(struct event_key *key, char decode[20]) +{ + const char *exit_reason = get_exit_reason(key->key); + + scnprintf(decode, 20, "%s", exit_reason); +} + +static struct kvm_events_ops exit_events = { + .is_begin_event = exit_event_begin, + .is_end_event = exit_event_end, + .decode_key = exit_event_decode_key, + .name = "VM-EXIT" +}; + + /* + * For the mmio events, we treat: + * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry + * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...). + */ +static void mmio_event_get_key(struct perf_evsel *evsel, struct perf_sample *sample, + struct event_key *key) +{ + key->key = perf_evsel__intval(evsel, sample, "gpa"); + key->info = perf_evsel__intval(evsel, sample, "type"); +} + +#define KVM_TRACE_MMIO_READ_UNSATISFIED 0 +#define KVM_TRACE_MMIO_READ 1 +#define KVM_TRACE_MMIO_WRITE 2 + +static bool mmio_event_begin(struct perf_evsel *evsel, + struct perf_sample *sample, struct event_key *key) +{ + /* MMIO read begin event in kernel. */ + if (kvm_exit_event(evsel)) + return true; + + /* MMIO write begin event in kernel. */ + if (!strcmp(evsel->name, "kvm:kvm_mmio") && + perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) { + mmio_event_get_key(evsel, sample, key); + return true; + } + + return false; +} + +static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample, + struct event_key *key) +{ + /* MMIO write end event in kernel. */ + if (kvm_entry_event(evsel)) + return true; + + /* MMIO read end event in kernel.*/ + if (!strcmp(evsel->name, "kvm:kvm_mmio") && + perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) { + mmio_event_get_key(evsel, sample, key); + return true; + } + + return false; +} + +static void mmio_event_decode_key(struct event_key *key, char decode[20]) +{ + scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key, + key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R"); +} + +static struct kvm_events_ops mmio_events = { + .is_begin_event = mmio_event_begin, + .is_end_event = mmio_event_end, + .decode_key = mmio_event_decode_key, + .name = "MMIO Access" +}; + + /* The time of emulation pio access is from kvm_pio to kvm_entry. */ +static void ioport_event_get_key(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key) +{ + key->key = perf_evsel__intval(evsel, sample, "port"); + key->info = perf_evsel__intval(evsel, sample, "rw"); +} + +static bool ioport_event_begin(struct perf_evsel *evsel, + struct perf_sample *sample, + struct event_key *key) +{ + if (!strcmp(evsel->name, "kvm:kvm_pio")) { + ioport_event_get_key(evsel, sample, key); + return true; + } + + return false; +} + +static bool ioport_event_end(struct perf_evsel *evsel, + struct perf_sample *sample __maybe_unused, + struct event_key *key __maybe_unused) +{ + return kvm_entry_event(evsel); +} + +static void ioport_event_decode_key(struct event_key *key, char decode[20]) +{ + scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key, + key->info ? 
"POUT" : "PIN"); +} + +static struct kvm_events_ops ioport_events = { + .is_begin_event = ioport_event_begin, + .is_end_event = ioport_event_end, + .decode_key = ioport_event_decode_key, + .name = "IO Port Access" +}; + +static const char *report_event = "vmexit"; +struct kvm_events_ops *events_ops; + +static bool register_kvm_events_ops(void) +{ + bool ret = true; + + if (!strcmp(report_event, "vmexit")) + events_ops = &exit_events; + else if (!strcmp(report_event, "mmio")) + events_ops = &mmio_events; + else if (!strcmp(report_event, "ioport")) + events_ops = &ioport_events; + else { + pr_err("Unknown report event:%s\n", report_event); + ret = false; + } + + return ret; +} + +struct kvm_event_stats { + u64 time; + struct stats stats; +}; + +struct kvm_event { + struct list_head hash_entry; + struct rb_node rb; + + struct event_key key; + + struct kvm_event_stats total; + + #define DEFAULT_VCPU_NUM 8 + int max_vcpu; + struct kvm_event_stats *vcpu; +}; + +struct vcpu_event_record { + int vcpu_id; + u64 start_time; + struct kvm_event *last_event; +}; + +#define EVENTS_BITS 12 +#define EVENTS_CACHE_SIZE (1UL << EVENTS_BITS) + +static u64 total_time; +static u64 total_count; +static struct list_head kvm_events_cache[EVENTS_CACHE_SIZE]; + +static void init_kvm_event_record(void) +{ + int i; + + for (i = 0; i < (int)EVENTS_CACHE_SIZE; i++) + INIT_LIST_HEAD(&kvm_events_cache[i]); +} + +static int kvm_events_hash_fn(u64 key) +{ + return key & (EVENTS_CACHE_SIZE - 1); +} + +static bool kvm_event_expand(struct kvm_event *event, int vcpu_id) +{ + int old_max_vcpu = event->max_vcpu; + + if (vcpu_id < event->max_vcpu) + return true; + + while (event->max_vcpu <= vcpu_id) + event->max_vcpu += DEFAULT_VCPU_NUM; + + event->vcpu = realloc(event->vcpu, + event->max_vcpu * sizeof(*event->vcpu)); + if (!event->vcpu) { + pr_err("Not enough memory\n"); + return false; + } + + memset(event->vcpu + old_max_vcpu, 0, + (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu)); + return true; +} + +static struct kvm_event *kvm_alloc_init_event(struct event_key *key) +{ + struct kvm_event *event; + + event = zalloc(sizeof(*event)); + if (!event) { + pr_err("Not enough memory\n"); + return NULL; + } + + event->key = *key; + return event; +} + +static struct kvm_event *find_create_kvm_event(struct event_key *key) +{ + struct kvm_event *event; + struct list_head *head; + + BUG_ON(key->key == INVALID_KEY); + + head = &kvm_events_cache[kvm_events_hash_fn(key->key)]; + list_for_each_entry(event, head, hash_entry) + if (event->key.key == key->key && event->key.info == key->info) + return event; + + event = kvm_alloc_init_event(key); + if (!event) + return NULL; + + list_add(&event->hash_entry, head); + return event; +} + +static bool handle_begin_event(struct vcpu_event_record *vcpu_record, + struct event_key *key, u64 timestamp) +{ + struct kvm_event *event = NULL; + + if (key->key != INVALID_KEY) + event = find_create_kvm_event(key); + + vcpu_record->last_event = event; + vcpu_record->start_time = timestamp; + return true; +} + +static void +kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff) +{ + kvm_stats->time += time_diff; + update_stats(&kvm_stats->stats, time_diff); +} + +static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event) +{ + struct kvm_event_stats *kvm_stats = &event->total; + + if (vcpu_id != -1) + kvm_stats = &event->vcpu[vcpu_id]; + + return rel_stddev_stats(stddev_stats(&kvm_stats->stats), + avg_stats(&kvm_stats->stats)); +} + +static bool update_kvm_event(struct 
kvm_event *event, int vcpu_id, + u64 time_diff) +{ + kvm_update_event_stats(&event->total, time_diff); + + if (!kvm_event_expand(event, vcpu_id)) + return false; + + kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff); + return true; +} + +static bool handle_end_event(struct vcpu_event_record *vcpu_record, + struct event_key *key, u64 timestamp) +{ + struct kvm_event *event; + u64 time_begin, time_diff; + + event = vcpu_record->last_event; + time_begin = vcpu_record->start_time; + + /* The begin event is not caught. */ + if (!time_begin) + return true; + + /* + * In some case, the 'begin event' only records the start timestamp, + * the actual event is recognized in the 'end event' (e.g. mmio-event). + */ + + /* Both begin and end events did not get the key. */ + if (!event && key->key == INVALID_KEY) + return true; + + if (!event) + event = find_create_kvm_event(key); + + if (!event) + return false; + + vcpu_record->last_event = NULL; + vcpu_record->start_time = 0; + + BUG_ON(timestamp < time_begin); + + time_diff = timestamp - time_begin; + return update_kvm_event(event, vcpu_record->vcpu_id, time_diff); +} + +static +struct vcpu_event_record *per_vcpu_record(struct thread *thread, + struct perf_evsel *evsel, + struct perf_sample *sample) +{ + /* Only kvm_entry records vcpu id. */ + if (!thread->priv && kvm_entry_event(evsel)) { + struct vcpu_event_record *vcpu_record; + + vcpu_record = zalloc(sizeof(*vcpu_record)); + if (!vcpu_record) { + pr_err("%s: Not enough memory\n", __func__); + return NULL; + } + + vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id"); + thread->priv = vcpu_record; + } + + return thread->priv; +} + +static bool handle_kvm_event(struct thread *thread, struct perf_evsel *evsel, + struct perf_sample *sample) +{ + struct vcpu_event_record *vcpu_record; + struct event_key key = {.key = INVALID_KEY}; + + vcpu_record = per_vcpu_record(thread, evsel, sample); + if (!vcpu_record) + return true; + + if (events_ops->is_begin_event(evsel, sample, &key)) + return handle_begin_event(vcpu_record, &key, sample->time); + + if (events_ops->is_end_event(evsel, sample, &key)) + return handle_end_event(vcpu_record, &key, sample->time); + + return true; +} + +typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int); +struct kvm_event_key { + const char *name; + key_cmp_fun key; +}; + +static int trace_vcpu = -1; +#define GET_EVENT_KEY(func, field) \ +static u64 get_event_ ##func(struct kvm_event *event, int vcpu) \ +{ \ + if (vcpu == -1) \ + return event->total.field; \ + \ + if (vcpu >= event->max_vcpu) \ + return 0; \ + \ + return event->vcpu[vcpu].field; \ +} + +#define COMPARE_EVENT_KEY(func, field) \ +GET_EVENT_KEY(func, field) \ +static int compare_kvm_event_ ## func(struct kvm_event *one, \ + struct kvm_event *two, int vcpu)\ +{ \ + return get_event_ ##func(one, vcpu) > \ + get_event_ ##func(two, vcpu); \ +} + +GET_EVENT_KEY(time, time); +COMPARE_EVENT_KEY(count, stats.n); +COMPARE_EVENT_KEY(mean, stats.mean); + +#define DEF_SORT_NAME_KEY(name, compare_key) \ + { #name, compare_kvm_event_ ## compare_key } + +static struct kvm_event_key keys[] = { + DEF_SORT_NAME_KEY(sample, count), + DEF_SORT_NAME_KEY(time, mean), + { NULL, NULL } +}; + +static const char *sort_key = "sample"; +static key_cmp_fun compare; + +static bool select_key(void) +{ + int i; + + for (i = 0; keys[i].name; i++) { + if (!strcmp(keys[i].name, sort_key)) { + compare = keys[i].key; + return true; + } + } + + pr_err("Unknown compare key:%s\n", sort_key); + return false; +} + 
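The kvm_event_stats bookkeeping above feeds every measured time delta into update_stats(), both per event and, after kvm_event_expand(), per vcpu; the report path later turns that into a relative standard deviation through avg_stats(), stddev_stats() and rel_stddev_stats() from util/stat.h. As a rough self-contained analogue of that accumulation, not perf's implementation, an online mean/stddev over a handful of exit durations could be kept like this:

	/* Illustrative online statistics (Welford's method); link with -lm.
	 * perf's util/stat.h helpers play this role in the patch itself. */
	#include <math.h>
	#include <stdio.h>

	struct run_stats {
		unsigned long long n;
		double mean;
		double m2;	/* sum of squared deviations from the mean */
	};

	static void run_stats_update(struct run_stats *st, double x)
	{
		double delta = x - st->mean;

		st->n++;
		st->mean += delta / st->n;
		st->m2 += delta * (x - st->mean);
	}

	static double run_stats_stddev(const struct run_stats *st)
	{
		return st->n > 1 ? sqrt(st->m2 / (st->n - 1)) : 0.0;
	}

	int main(void)
	{
		/* Pretend these are VM-exit durations in nanoseconds. */
		static const double deltas[] = { 1200, 900, 1500, 1100, 980 };
		struct run_stats st = { 0, 0.0, 0.0 };
		unsigned int i;

		for (i = 0; i < sizeof(deltas) / sizeof(deltas[0]); i++)
			run_stats_update(&st, deltas[i]);

		printf("samples: %llu  avg: %.2fus ( +-%5.2f%% )\n",
		       st.n, st.mean / 1e3,
		       st.mean ? 100.0 * run_stats_stddev(&st) / st.mean : 0.0);
		return 0;
	}

The percentage printed here mirrors the "( +-%7.2f%% )" column of print_result(): the standard deviation expressed relative to the mean, which keeps jitter comparable across events with very different absolute costs.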
+static struct rb_root result; +static void insert_to_result(struct kvm_event *event, key_cmp_fun bigger, + int vcpu) +{ + struct rb_node **rb = &result.rb_node; + struct rb_node *parent = NULL; + struct kvm_event *p; + + while (*rb) { + p = container_of(*rb, struct kvm_event, rb); + parent = *rb; + + if (bigger(event, p, vcpu)) + rb = &(*rb)->rb_left; + else + rb = &(*rb)->rb_right; + } + + rb_link_node(&event->rb, parent, rb); + rb_insert_color(&event->rb, &result); +} + +static void update_total_count(struct kvm_event *event, int vcpu) +{ + total_count += get_event_count(event, vcpu); + total_time += get_event_time(event, vcpu); +} + +static bool event_is_valid(struct kvm_event *event, int vcpu) +{ + return !!get_event_count(event, vcpu); +} + +static void sort_result(int vcpu) +{ + unsigned int i; + struct kvm_event *event; + + for (i = 0; i < EVENTS_CACHE_SIZE; i++) + list_for_each_entry(event, &kvm_events_cache[i], hash_entry) + if (event_is_valid(event, vcpu)) { + update_total_count(event, vcpu); + insert_to_result(event, compare, vcpu); + } +} + +/* returns left most element of result, and erase it */ +static struct kvm_event *pop_from_result(void) +{ + struct rb_node *node = rb_first(&result); + + if (!node) + return NULL; + + rb_erase(node, &result); + return container_of(node, struct kvm_event, rb); +} + +static void print_vcpu_info(int vcpu) +{ + pr_info("Analyze events for "); + + if (vcpu == -1) + pr_info("all VCPUs:\n\n"); + else + pr_info("VCPU %d:\n\n", vcpu); +} + +static void print_result(int vcpu) +{ + char decode[20]; + struct kvm_event *event; + + pr_info("\n\n"); + print_vcpu_info(vcpu); + pr_info("%20s ", events_ops->name); + pr_info("%10s ", "Samples"); + pr_info("%9s ", "Samples%"); + + pr_info("%9s ", "Time%"); + pr_info("%16s ", "Avg time"); + pr_info("\n\n"); + + while ((event = pop_from_result())) { + u64 ecount, etime; + + ecount = get_event_count(event, vcpu); + etime = get_event_time(event, vcpu); + + events_ops->decode_key(&event->key, decode); + pr_info("%20s ", decode); + pr_info("%10llu ", (unsigned long long)ecount); + pr_info("%8.2f%% ", (double)ecount / total_count * 100); + pr_info("%8.2f%% ", (double)etime / total_time * 100); + pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3, + kvm_event_rel_stddev(vcpu, event)); + pr_info("\n"); + } + + pr_info("\nTotal Samples:%lld, Total events handled time:%.2fus.\n\n", + (unsigned long long)total_count, total_time / 1e3); +} + +static int process_sample_event(struct perf_tool *tool __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine) +{ + struct thread *thread = machine__findnew_thread(machine, sample->tid); + + if (thread == NULL) { + pr_debug("problem processing %d event, skipping it.\n", + event->header.type); + return -1; + } + + if (!handle_kvm_event(thread, evsel, sample)) + return -1; + + return 0; +} + +static struct perf_tool eops = { + .sample = process_sample_event, + .comm = perf_event__process_comm, + .ordered_samples = true, +}; + +static int get_cpu_isa(struct perf_session *session) +{ + char *cpuid = session->header.env.cpuid; + int isa; + + if (strstr(cpuid, "Intel")) + isa = 1; + else if (strstr(cpuid, "AMD")) + isa = 0; + else { + pr_err("CPU %s is not supported.\n", cpuid); + isa = -ENOTSUP; + } + + return isa; +} + +static const char *file_name; + +static int read_events(void) +{ + struct perf_session *kvm_session; + int ret; + + kvm_session = perf_session__new(file_name, O_RDONLY, 0, false, &eops); + 
if (!kvm_session) { + pr_err("Initializing perf session failed\n"); + return -EINVAL; + } + + if (!perf_session__has_traces(kvm_session, "kvm record")) + return -EINVAL; + + /* + * Do not use 'isa' recorded in kvm_exit tracepoint since it is not + * traced in the old kernel. + */ + ret = get_cpu_isa(kvm_session); + + if (ret < 0) + return ret; + + cpu_isa = ret; + + return perf_session__process_events(kvm_session, &eops); +} + +static bool verify_vcpu(int vcpu) +{ + if (vcpu != -1 && vcpu < 0) { + pr_err("Invalid vcpu:%d.\n", vcpu); + return false; + } + + return true; +} + +static int kvm_events_report_vcpu(int vcpu) +{ + int ret = -EINVAL; + + if (!verify_vcpu(vcpu)) + goto exit; + + if (!select_key()) + goto exit; + + if (!register_kvm_events_ops()) + goto exit; + + init_kvm_event_record(); + setup_pager(); + + ret = read_events(); + if (ret) + goto exit; + + sort_result(vcpu); + print_result(vcpu); +exit: + return ret; +} + +static const char * const record_args[] = { + "record", + "-R", + "-f", + "-m", "1024", + "-c", "1", + "-e", "kvm:kvm_entry", + "-e", "kvm:kvm_exit", + "-e", "kvm:kvm_mmio", + "-e", "kvm:kvm_pio", +}; + +#define STRDUP_FAIL_EXIT(s) \ + ({ char *_p; \ + _p = strdup(s); \ + if (!_p) \ + return -ENOMEM; \ + _p; \ + }) + +static int kvm_events_record(int argc, const char **argv) +{ + unsigned int rec_argc, i, j; + const char **rec_argv; + + rec_argc = ARRAY_SIZE(record_args) + argc + 2; + rec_argv = calloc(rec_argc + 1, sizeof(char *)); + + if (rec_argv == NULL) + return -ENOMEM; + + for (i = 0; i < ARRAY_SIZE(record_args); i++) + rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]); + + rec_argv[i++] = STRDUP_FAIL_EXIT("-o"); + rec_argv[i++] = STRDUP_FAIL_EXIT(file_name); + + for (j = 1; j < (unsigned int)argc; j++, i++) + rec_argv[i] = argv[j]; + + return cmd_record(i, rec_argv, NULL); +} + +static const char * const kvm_events_report_usage[] = { + "perf kvm stat report []", + NULL +}; + +static const struct option kvm_events_report_options[] = { + OPT_STRING(0, "event", &report_event, "report event", + "event for reporting: vmexit, mmio, ioport"), + OPT_INTEGER(0, "vcpu", &trace_vcpu, + "vcpu id to report"), + OPT_STRING('k', "key", &sort_key, "sort-key", + "key for sorting: sample(sort by samples number)" + " time (sort by avg time)"), + OPT_END() +}; + +static int kvm_events_report(int argc, const char **argv) +{ + symbol__init(); + + if (argc) { + argc = parse_options(argc, argv, + kvm_events_report_options, + kvm_events_report_usage, 0); + if (argc) + usage_with_options(kvm_events_report_usage, + kvm_events_report_options); + } + + return kvm_events_report_vcpu(trace_vcpu); +} + +static void print_kvm_stat_usage(void) +{ + printf("Usage: perf kvm stat \n\n"); + + printf("# Available commands:\n"); + printf("\trecord: record kvm events\n"); + printf("\treport: report statistical data of kvm events\n"); + + printf("\nOtherwise, it is the alias of 'perf stat':\n"); +} + +static int kvm_cmd_stat(int argc, const char **argv) +{ + if (argc == 1) { + print_kvm_stat_usage(); + goto perf_stat; + } + + if (!strncmp(argv[1], "rec", 3)) + return kvm_events_record(argc - 1, argv + 1); + + if (!strncmp(argv[1], "rep", 3)) + return kvm_events_report(argc - 1 , argv + 1); + +perf_stat: + return cmd_stat(argc, argv, NULL); +} + static char name_buffer[256]; static const char * const kvm_usage[] = { - "perf kvm [] {top|record|report|diff|buildid-list}", + "perf kvm [] {top|record|report|diff|buildid-list|stat}", NULL }; @@ -102,7 +930,7 @@ static int __cmd_buildid_list(int argc, const 
char **argv) return cmd_buildid_list(i, rec_argv, NULL); } -int cmd_kvm(int argc, const char **argv, const char *prefix __used) +int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused) { perf_host = 0; perf_guest = 1; @@ -135,6 +963,8 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __used) return cmd_top(argc, argv, NULL); else if (!strncmp(argv[0], "buildid-list", 12)) return __cmd_buildid_list(argc, argv); + else if (!strncmp(argv[0], "stat", 4)) + return kvm_cmd_stat(argc, argv); else usage_with_options(kvm_usage, kvm_options); diff --git a/trunk/tools/perf/builtin-list.c b/trunk/tools/perf/builtin-list.c index 6313b6eb3ebb..1948eceb517a 100644 --- a/trunk/tools/perf/builtin-list.c +++ b/trunk/tools/perf/builtin-list.c @@ -14,20 +14,20 @@ #include "util/parse-events.h" #include "util/cache.h" -int cmd_list(int argc, const char **argv, const char *prefix __used) +int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) { setup_pager(); if (argc == 1) - print_events(NULL); + print_events(NULL, false); else { int i; for (i = 1; i < argc; ++i) { - if (i > 1) + if (i > 2) putchar('\n'); if (strncmp(argv[i], "tracepoint", 10) == 0) - print_tracepoint_events(NULL, NULL); + print_tracepoint_events(NULL, NULL, false); else if (strcmp(argv[i], "hw") == 0 || strcmp(argv[i], "hardware") == 0) print_events_type(PERF_TYPE_HARDWARE); @@ -36,13 +36,15 @@ int cmd_list(int argc, const char **argv, const char *prefix __used) print_events_type(PERF_TYPE_SOFTWARE); else if (strcmp(argv[i], "cache") == 0 || strcmp(argv[i], "hwcache") == 0) - print_hwcache_events(NULL); + print_hwcache_events(NULL, false); + else if (strcmp(argv[i], "--raw-dump") == 0) + print_events(NULL, true); else { char *sep = strchr(argv[i], ':'), *s; int sep_idx; if (sep == NULL) { - print_events(argv[i]); + print_events(argv[i], false); continue; } sep_idx = sep - argv[i]; @@ -51,7 +53,7 @@ int cmd_list(int argc, const char **argv, const char *prefix __used) return -1; s[sep_idx] = '\0'; - print_tracepoint_events(s, s + sep_idx + 1); + print_tracepoint_events(s, s + sep_idx + 1, false); free(s); } } diff --git a/trunk/tools/perf/builtin-lock.c b/trunk/tools/perf/builtin-lock.c index b3c428548868..7d6e09949880 100644 --- a/trunk/tools/perf/builtin-lock.c +++ b/trunk/tools/perf/builtin-lock.c @@ -1,6 +1,8 @@ #include "builtin.h" #include "perf.h" +#include "util/evlist.h" +#include "util/evsel.h" #include "util/util.h" #include "util/cache.h" #include "util/symbol.h" @@ -40,7 +42,7 @@ struct lock_stat { struct rb_node rb; /* used for sorting */ /* - * FIXME: raw_field_value() returns unsigned long long, + * FIXME: perf_evsel__intval() returns u64, * so address of lockdep_map should be dealed as 64bit. * Is there more better solution? 
*/ @@ -160,8 +162,10 @@ static struct thread_stat *thread_stat_findnew_after_first(u32 tid) return st; st = zalloc(sizeof(struct thread_stat)); - if (!st) - die("memory allocation failed\n"); + if (!st) { + pr_err("memory allocation failed\n"); + return NULL; + } st->tid = tid; INIT_LIST_HEAD(&st->seq_list); @@ -180,8 +184,10 @@ static struct thread_stat *thread_stat_findnew_first(u32 tid) struct thread_stat *st; st = zalloc(sizeof(struct thread_stat)); - if (!st) - die("memory allocation failed\n"); + if (!st) { + pr_err("memory allocation failed\n"); + return NULL; + } st->tid = tid; INIT_LIST_HEAD(&st->seq_list); @@ -247,18 +253,20 @@ struct lock_key keys[] = { { NULL, NULL } }; -static void select_key(void) +static int select_key(void) { int i; for (i = 0; keys[i].name; i++) { if (!strcmp(keys[i].name, sort_key)) { compare = keys[i].key; - return; + return 0; } } - die("Unknown compare key:%s\n", sort_key); + pr_err("Unknown compare key: %s\n", sort_key); + + return -1; } static void insert_to_result(struct lock_stat *st, @@ -323,61 +331,24 @@ static struct lock_stat *lock_stat_findnew(void *addr, const char *name) return new; alloc_failed: - die("memory allocation failed\n"); + pr_err("memory allocation failed\n"); + return NULL; } static const char *input_name; -struct raw_event_sample { - u32 size; - char data[0]; -}; - -struct trace_acquire_event { - void *addr; - const char *name; - int flag; -}; - -struct trace_acquired_event { - void *addr; - const char *name; -}; +struct trace_lock_handler { + int (*acquire_event)(struct perf_evsel *evsel, + struct perf_sample *sample); -struct trace_contended_event { - void *addr; - const char *name; -}; + int (*acquired_event)(struct perf_evsel *evsel, + struct perf_sample *sample); -struct trace_release_event { - void *addr; - const char *name; -}; + int (*contended_event)(struct perf_evsel *evsel, + struct perf_sample *sample); -struct trace_lock_handler { - void (*acquire_event)(struct trace_acquire_event *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); - - void (*acquired_event)(struct trace_acquired_event *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); - - void (*contended_event)(struct trace_contended_event *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); - - void (*release_event)(struct trace_release_event *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); + int (*release_event)(struct perf_evsel *evsel, + struct perf_sample *sample); }; static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr) @@ -390,8 +361,10 @@ static struct lock_seq_stat *get_seq(struct thread_stat *ts, void *addr) } seq = zalloc(sizeof(struct lock_seq_stat)); - if (!seq) - die("Not enough memory\n"); + if (!seq) { + pr_err("memory allocation failed\n"); + return NULL; + } seq->state = SEQ_STATE_UNINITIALIZED; seq->addr = addr; @@ -414,33 +387,42 @@ enum acquire_flags { READ_LOCK = 2, }; -static void -report_lock_acquire_event(struct trace_acquire_event *acquire_event, - struct event_format *__event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int report_lock_acquire_event(struct perf_evsel *evsel, + struct perf_sample *sample) { + void *addr; struct lock_stat *ls; struct thread_stat *ts; struct lock_seq_stat *seq; + const char *name = perf_evsel__strval(evsel, sample, "name"); + u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); + int flag = 
perf_evsel__intval(evsel, sample, "flag"); + + memcpy(&addr, &tmp, sizeof(void *)); - ls = lock_stat_findnew(acquire_event->addr, acquire_event->name); + ls = lock_stat_findnew(addr, name); + if (!ls) + return -1; if (ls->discard) - return; + return 0; - ts = thread_stat_findnew(thread->pid); - seq = get_seq(ts, acquire_event->addr); + ts = thread_stat_findnew(sample->tid); + if (!ts) + return -1; + + seq = get_seq(ts, addr); + if (!seq) + return -1; switch (seq->state) { case SEQ_STATE_UNINITIALIZED: case SEQ_STATE_RELEASED: - if (!acquire_event->flag) { + if (!flag) { seq->state = SEQ_STATE_ACQUIRING; } else { - if (acquire_event->flag & TRY_LOCK) + if (flag & TRY_LOCK) ls->nr_trylock++; - if (acquire_event->flag & READ_LOCK) + if (flag & READ_LOCK) ls->nr_readlock++; seq->state = SEQ_STATE_READ_ACQUIRED; seq->read_count = 1; @@ -448,7 +430,7 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event, } break; case SEQ_STATE_READ_ACQUIRED: - if (acquire_event->flag & READ_LOCK) { + if (flag & READ_LOCK) { seq->read_count++; ls->nr_acquired++; goto end; @@ -473,38 +455,46 @@ report_lock_acquire_event(struct trace_acquire_event *acquire_event, } ls->nr_acquire++; - seq->prev_event_time = timestamp; + seq->prev_event_time = sample->time; end: - return; + return 0; } -static void -report_lock_acquired_event(struct trace_acquired_event *acquired_event, - struct event_format *__event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int report_lock_acquired_event(struct perf_evsel *evsel, + struct perf_sample *sample) { + void *addr; struct lock_stat *ls; struct thread_stat *ts; struct lock_seq_stat *seq; u64 contended_term; + const char *name = perf_evsel__strval(evsel, sample, "name"); + u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); + + memcpy(&addr, &tmp, sizeof(void *)); - ls = lock_stat_findnew(acquired_event->addr, acquired_event->name); + ls = lock_stat_findnew(addr, name); + if (!ls) + return -1; if (ls->discard) - return; + return 0; + + ts = thread_stat_findnew(sample->tid); + if (!ts) + return -1; - ts = thread_stat_findnew(thread->pid); - seq = get_seq(ts, acquired_event->addr); + seq = get_seq(ts, addr); + if (!seq) + return -1; switch (seq->state) { case SEQ_STATE_UNINITIALIZED: /* orphan event, do nothing */ - return; + return 0; case SEQ_STATE_ACQUIRING: break; case SEQ_STATE_CONTENDED: - contended_term = timestamp - seq->prev_event_time; + contended_term = sample->time - seq->prev_event_time; ls->wait_time_total += contended_term; if (contended_term < ls->wait_time_min) ls->wait_time_min = contended_term; @@ -529,33 +519,41 @@ report_lock_acquired_event(struct trace_acquired_event *acquired_event, seq->state = SEQ_STATE_ACQUIRED; ls->nr_acquired++; - seq->prev_event_time = timestamp; + seq->prev_event_time = sample->time; end: - return; + return 0; } -static void -report_lock_contended_event(struct trace_contended_event *contended_event, - struct event_format *__event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int report_lock_contended_event(struct perf_evsel *evsel, + struct perf_sample *sample) { + void *addr; struct lock_stat *ls; struct thread_stat *ts; struct lock_seq_stat *seq; + const char *name = perf_evsel__strval(evsel, sample, "name"); + u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); - ls = lock_stat_findnew(contended_event->addr, contended_event->name); + memcpy(&addr, &tmp, sizeof(void *)); + + ls = lock_stat_findnew(addr, name); + if 
(!ls) + return -1; if (ls->discard) - return; + return 0; + + ts = thread_stat_findnew(sample->tid); + if (!ts) + return -1; - ts = thread_stat_findnew(thread->pid); - seq = get_seq(ts, contended_event->addr); + seq = get_seq(ts, addr); + if (!seq) + return -1; switch (seq->state) { case SEQ_STATE_UNINITIALIZED: /* orphan event, do nothing */ - return; + return 0; case SEQ_STATE_ACQUIRING: break; case SEQ_STATE_RELEASED: @@ -576,28 +574,36 @@ report_lock_contended_event(struct trace_contended_event *contended_event, seq->state = SEQ_STATE_CONTENDED; ls->nr_contended++; - seq->prev_event_time = timestamp; + seq->prev_event_time = sample->time; end: - return; + return 0; } -static void -report_lock_release_event(struct trace_release_event *release_event, - struct event_format *__event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int report_lock_release_event(struct perf_evsel *evsel, + struct perf_sample *sample) { + void *addr; struct lock_stat *ls; struct thread_stat *ts; struct lock_seq_stat *seq; + const char *name = perf_evsel__strval(evsel, sample, "name"); + u64 tmp = perf_evsel__intval(evsel, sample, "lockdep_addr"); + + memcpy(&addr, &tmp, sizeof(void *)); - ls = lock_stat_findnew(release_event->addr, release_event->name); + ls = lock_stat_findnew(addr, name); + if (!ls) + return -1; if (ls->discard) - return; + return 0; + + ts = thread_stat_findnew(sample->tid); + if (!ts) + return -1; - ts = thread_stat_findnew(thread->pid); - seq = get_seq(ts, release_event->addr); + seq = get_seq(ts, addr); + if (!seq) + return -1; switch (seq->state) { case SEQ_STATE_UNINITIALIZED: @@ -631,7 +637,7 @@ report_lock_release_event(struct trace_release_event *release_event, list_del(&seq->list); free(seq); end: - return; + return 0; } /* lock oriented handlers */ @@ -645,96 +651,36 @@ static struct trace_lock_handler report_lock_ops = { static struct trace_lock_handler *trace_handler; -static void -process_lock_acquire_event(void *data, - struct event_format *event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) -{ - struct trace_acquire_event acquire_event; - u64 tmp; /* this is required for casting... */ - - tmp = raw_field_value(event, "lockdep_addr", data); - memcpy(&acquire_event.addr, &tmp, sizeof(void *)); - acquire_event.name = (char *)raw_field_ptr(event, "name", data); - acquire_event.flag = (int)raw_field_value(event, "flag", data); - - if (trace_handler->acquire_event) - trace_handler->acquire_event(&acquire_event, event, cpu, timestamp, thread); -} - -static void -process_lock_acquired_event(void *data, - struct event_format *event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int perf_evsel__process_lock_acquire(struct perf_evsel *evsel, + struct perf_sample *sample) { - struct trace_acquired_event acquired_event; - u64 tmp; /* this is required for casting... 
*/ - - tmp = raw_field_value(event, "lockdep_addr", data); - memcpy(&acquired_event.addr, &tmp, sizeof(void *)); - acquired_event.name = (char *)raw_field_ptr(event, "name", data); - if (trace_handler->acquire_event) - trace_handler->acquired_event(&acquired_event, event, cpu, timestamp, thread); + return trace_handler->acquire_event(evsel, sample); + return 0; } -static void -process_lock_contended_event(void *data, - struct event_format *event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int perf_evsel__process_lock_acquired(struct perf_evsel *evsel, + struct perf_sample *sample) { - struct trace_contended_event contended_event; - u64 tmp; /* this is required for casting... */ - - tmp = raw_field_value(event, "lockdep_addr", data); - memcpy(&contended_event.addr, &tmp, sizeof(void *)); - contended_event.name = (char *)raw_field_ptr(event, "name", data); - - if (trace_handler->acquire_event) - trace_handler->contended_event(&contended_event, event, cpu, timestamp, thread); + if (trace_handler->acquired_event) + return trace_handler->acquired_event(evsel, sample); + return 0; } -static void -process_lock_release_event(void *data, - struct event_format *event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int perf_evsel__process_lock_contended(struct perf_evsel *evsel, + struct perf_sample *sample) { - struct trace_release_event release_event; - u64 tmp; /* this is required for casting... */ - - tmp = raw_field_value(event, "lockdep_addr", data); - memcpy(&release_event.addr, &tmp, sizeof(void *)); - release_event.name = (char *)raw_field_ptr(event, "name", data); - - if (trace_handler->acquire_event) - trace_handler->release_event(&release_event, event, cpu, timestamp, thread); + if (trace_handler->contended_event) + return trace_handler->contended_event(evsel, sample); + return 0; } -static void -process_raw_event(void *data, int cpu, u64 timestamp, struct thread *thread) +static int perf_evsel__process_lock_release(struct perf_evsel *evsel, + struct perf_sample *sample) { - struct event_format *event; - int type; - - type = trace_parse_common_type(session->pevent, data); - event = pevent_find_event(session->pevent, type); - - if (!strcmp(event->name, "lock_acquire")) - process_lock_acquire_event(data, event, cpu, timestamp, thread); - if (!strcmp(event->name, "lock_acquired")) - process_lock_acquired_event(data, event, cpu, timestamp, thread); - if (!strcmp(event->name, "lock_contended")) - process_lock_contended_event(data, event, cpu, timestamp, thread); - if (!strcmp(event->name, "lock_release")) - process_lock_release_event(data, event, cpu, timestamp, thread); + if (trace_handler->release_event) + return trace_handler->release_event(evsel, sample); + return 0; } static void print_bad_events(int bad, int total) @@ -836,20 +782,29 @@ static void dump_map(void) } } -static void dump_info(void) +static int dump_info(void) { + int rc = 0; + if (info_threads) dump_threads(); else if (info_map) dump_map(); - else - die("Unknown type of information\n"); + else { + rc = -1; + pr_err("Unknown type of information\n"); + } + + return rc; } -static int process_sample_event(struct perf_tool *tool __used, +typedef int (*tracepoint_handler)(struct perf_evsel *evsel, + struct perf_sample *sample); + +static int process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel __used, + struct perf_evsel *evsel, struct machine *machine) { 
 	struct thread *thread = machine__findnew_thread(machine, sample->tid);
@@ -860,7 +815,10 @@ static int process_sample_event(struct perf_tool *tool __used,
 		return -1;
 	}
 
-	process_raw_event(sample->raw_data, sample->cpu, sample->time, thread);
+	if (evsel->handler.func != NULL) {
+		tracepoint_handler f = evsel->handler.func;
+		return f(evsel, sample);
+	}
 
 	return 0;
 }
@@ -871,11 +829,25 @@ static struct perf_tool eops = {
 	.ordered_samples = true,
 };
 
+static const struct perf_evsel_str_handler lock_tracepoints[] = {
+	{ "lock:lock_acquire", perf_evsel__process_lock_acquire, }, /* CONFIG_LOCKDEP */
+	{ "lock:lock_acquired", perf_evsel__process_lock_acquired, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+	{ "lock:lock_contended", perf_evsel__process_lock_contended, }, /* CONFIG_LOCKDEP, CONFIG_LOCK_STAT */
+	{ "lock:lock_release", perf_evsel__process_lock_release, }, /* CONFIG_LOCKDEP */
+};
+
 static int read_events(void)
 {
 	session = perf_session__new(input_name, O_RDONLY, 0, false, &eops);
-	if (!session)
-		die("Initializing perf session failed\n");
+	if (!session) {
+		pr_err("Initializing perf session failed\n");
+		return -1;
+	}
+
+	if (perf_session__set_tracepoints_handlers(session, lock_tracepoints)) {
+		pr_err("Initializing perf session tracepoint handlers failed\n");
+		return -1;
+	}
 
 	return perf_session__process_events(session, &eops);
 }
@@ -892,13 +864,18 @@ static void sort_result(void)
 	}
 }
 
-static void __cmd_report(void)
+static int __cmd_report(void)
 {
 	setup_pager();
-	select_key();
-	read_events();
+
+	if ((select_key() != 0) ||
+	    (read_events() != 0))
+		return -1;
+
 	sort_result();
 	print_result();
+
+	return 0;
 }
 
 static const char * const report_usage[] = {
@@ -944,10 +921,6 @@ static const char *record_args[] = {
 	"-f",
 	"-m", "1024",
 	"-c", "1",
-	"-e", "lock:lock_acquire",
-	"-e", "lock:lock_acquired",
-	"-e", "lock:lock_contended",
-	"-e", "lock:lock_release",
 };
 
 static int __cmd_record(int argc, const char **argv)
@@ -955,15 +928,31 @@ static int __cmd_record(int argc, const char **argv)
 	unsigned int rec_argc, i, j;
 	const char **rec_argv;
 
+	for (i = 0; i < ARRAY_SIZE(lock_tracepoints); i++) {
+		if (!is_valid_tracepoint(lock_tracepoints[i].name)) {
+			pr_err("tracepoint %s is not enabled. "
" + "Are CONFIG_LOCKDEP and CONFIG_LOCK_STAT enabled?\n", + lock_tracepoints[i].name); + return 1; + } + } + rec_argc = ARRAY_SIZE(record_args) + argc - 1; - rec_argv = calloc(rec_argc + 1, sizeof(char *)); + /* factor of 2 is for -e in front of each tracepoint */ + rec_argc += 2 * ARRAY_SIZE(lock_tracepoints); + rec_argv = calloc(rec_argc + 1, sizeof(char *)); if (rec_argv == NULL) return -ENOMEM; for (i = 0; i < ARRAY_SIZE(record_args); i++) rec_argv[i] = strdup(record_args[i]); + for (j = 0; j < ARRAY_SIZE(lock_tracepoints); j++) { + rec_argv[i++] = "-e"; + rec_argv[i++] = strdup(lock_tracepoints[j].name); + } + for (j = 1; j < (unsigned int)argc; j++, i++) rec_argv[i] = argv[j]; @@ -972,9 +961,10 @@ static int __cmd_record(int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -int cmd_lock(int argc, const char **argv, const char *prefix __used) +int cmd_lock(int argc, const char **argv, const char *prefix __maybe_unused) { unsigned int i; + int rc = 0; symbol__init(); for (i = 0; i < LOCKHASH_SIZE; i++) @@ -1009,11 +999,13 @@ int cmd_lock(int argc, const char **argv, const char *prefix __used) /* recycling report_lock_ops */ trace_handler = &report_lock_ops; setup_pager(); - read_events(); - dump_info(); + if (read_events() != 0) + rc = -1; + else + rc = dump_info(); } else { usage_with_options(lock_usage, lock_options); } - return 0; + return rc; } diff --git a/trunk/tools/perf/builtin-probe.c b/trunk/tools/perf/builtin-probe.c index e215ae61b2ae..118aa8946573 100644 --- a/trunk/tools/perf/builtin-probe.c +++ b/trunk/tools/perf/builtin-probe.c @@ -143,8 +143,8 @@ static int parse_probe_event_argv(int argc, const char **argv) return ret; } -static int opt_add_probe_event(const struct option *opt __used, - const char *str, int unset __used) +static int opt_add_probe_event(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { if (str) { params.mod_events = true; @@ -153,8 +153,8 @@ static int opt_add_probe_event(const struct option *opt __used, return 0; } -static int opt_del_probe_event(const struct option *opt __used, - const char *str, int unset __used) +static int opt_del_probe_event(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { if (str) { params.mod_events = true; @@ -166,7 +166,7 @@ static int opt_del_probe_event(const struct option *opt __used, } static int opt_set_target(const struct option *opt, const char *str, - int unset __used) + int unset __maybe_unused) { int ret = -ENOENT; @@ -188,8 +188,8 @@ static int opt_set_target(const struct option *opt, const char *str, } #ifdef DWARF_SUPPORT -static int opt_show_lines(const struct option *opt __used, - const char *str, int unset __used) +static int opt_show_lines(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { int ret = 0; @@ -209,8 +209,8 @@ static int opt_show_lines(const struct option *opt __used, return ret; } -static int opt_show_vars(const struct option *opt __used, - const char *str, int unset __used) +static int opt_show_vars(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { struct perf_probe_event *pev = ¶ms.events[params.nevents]; int ret; @@ -229,8 +229,8 @@ static int opt_show_vars(const struct option *opt __used, } #endif -static int opt_set_filter(const struct option *opt __used, - const char *str, int unset __used) +static int opt_set_filter(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { const char *err; @@ 
-327,7 +327,7 @@ static const struct option options[] = { OPT_END() }; -int cmd_probe(int argc, const char **argv, const char *prefix __used) +int cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused) { int ret; diff --git a/trunk/tools/perf/builtin-record.c b/trunk/tools/perf/builtin-record.c index f5a6452931e6..f14cb5fdb91f 100644 --- a/trunk/tools/perf/builtin-record.c +++ b/trunk/tools/perf/builtin-record.c @@ -31,6 +31,15 @@ #include #include +#define CALLCHAIN_HELP "do call-graph (stack chain/backtrace) recording: " + +#ifdef NO_LIBUNWIND_SUPPORT +static char callchain_help[] = CALLCHAIN_HELP "[fp]"; +#else +static unsigned long default_stack_dump_size = 8192; +static char callchain_help[] = CALLCHAIN_HELP "[fp] dwarf"; +#endif + enum write_mode_t { WRITE_FORCE, WRITE_APPEND @@ -62,32 +71,38 @@ static void advance_output(struct perf_record *rec, size_t size) rec->bytes_written += size; } -static void write_output(struct perf_record *rec, void *buf, size_t size) +static int write_output(struct perf_record *rec, void *buf, size_t size) { while (size) { int ret = write(rec->output, buf, size); - if (ret < 0) - die("failed to write"); + if (ret < 0) { + pr_err("failed to write\n"); + return -1; + } size -= ret; buf += ret; rec->bytes_written += ret; } + + return 0; } static int process_synthesized_event(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { struct perf_record *rec = container_of(tool, struct perf_record, tool); - write_output(rec, event, event->header.size); + if (write_output(rec, event, event->header.size) < 0) + return -1; + return 0; } -static void perf_record__mmap_read(struct perf_record *rec, +static int perf_record__mmap_read(struct perf_record *rec, struct perf_mmap *md) { unsigned int head = perf_mmap__read_head(md); @@ -95,9 +110,10 @@ static void perf_record__mmap_read(struct perf_record *rec, unsigned char *data = md->base + rec->page_size; unsigned long size; void *buf; + int rc = 0; if (old == head) - return; + return 0; rec->samples++; @@ -108,17 +124,26 @@ static void perf_record__mmap_read(struct perf_record *rec, size = md->mask + 1 - (old & md->mask); old += size; - write_output(rec, buf, size); + if (write_output(rec, buf, size) < 0) { + rc = -1; + goto out; + } } buf = &data[old & md->mask]; size = head - old; old += size; - write_output(rec, buf, size); + if (write_output(rec, buf, size) < 0) { + rc = -1; + goto out; + } md->prev = old; perf_mmap__write_tail(md, old); + +out: + return rc; } static volatile int done = 0; @@ -134,7 +159,7 @@ static void sig_handler(int sig) signr = sig; } -static void perf_record__sig_exit(int exit_status __used, void *arg) +static void perf_record__sig_exit(int exit_status __maybe_unused, void *arg) { struct perf_record *rec = arg; int status; @@ -163,31 +188,32 @@ static bool perf_evlist__equal(struct perf_evlist *evlist, if (evlist->nr_entries != other->nr_entries) return false; - pair = list_entry(other->entries.next, struct perf_evsel, node); + pair = perf_evlist__first(other); list_for_each_entry(pos, &evlist->entries, node) { if (memcmp(&pos->attr, &pair->attr, sizeof(pos->attr) != 0)) return false; - pair = list_entry(pair->node.next, struct perf_evsel, node); + pair = perf_evsel__next(pair); } return true; } -static void perf_record__open(struct perf_record *rec) +static int perf_record__open(struct perf_record *rec) { - struct 
perf_evsel *pos, *first; + struct perf_evsel *pos; struct perf_evlist *evlist = rec->evlist; struct perf_session *session = rec->session; struct perf_record_opts *opts = &rec->opts; - - first = list_entry(evlist->entries.next, struct perf_evsel, node); + int rc = 0; perf_evlist__config_attrs(evlist, opts); + if (opts->group) + perf_evlist__set_leader(evlist); + list_for_each_entry(pos, &evlist->entries, node) { struct perf_event_attr *attr = &pos->attr; - struct xyarray *group_fd = NULL; /* * Check if parse_single_tracepoint_event has already asked for * PERF_SAMPLE_TIME. @@ -202,24 +228,24 @@ static void perf_record__open(struct perf_record *rec) */ bool time_needed = attr->sample_type & PERF_SAMPLE_TIME; - if (opts->group && pos != first) - group_fd = first->fd; fallback_missing_features: if (opts->exclude_guest_missing) attr->exclude_guest = attr->exclude_host = 0; retry_sample_id: attr->sample_id_all = opts->sample_id_all_missing ? 0 : 1; try_again: - if (perf_evsel__open(pos, evlist->cpus, evlist->threads, - opts->group, group_fd) < 0) { + if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) { int err = errno; if (err == EPERM || err == EACCES) { ui__error_paranoid(); - exit(EXIT_FAILURE); + rc = -err; + goto out; } else if (err == ENODEV && opts->target.cpu_list) { - die("No such device - did you specify" - " an out-of-range profile CPU?\n"); + pr_err("No such device - did you specify" + " an out-of-range profile CPU?\n"); + rc = -err; + goto out; } else if (err == EINVAL) { if (!opts->exclude_guest_missing && (attr->exclude_guest || attr->exclude_host)) { @@ -266,42 +292,57 @@ static void perf_record__open(struct perf_record *rec) if (err == ENOENT) { ui__error("The %s event is not supported.\n", perf_evsel__name(pos)); - exit(EXIT_FAILURE); + rc = -err; + goto out; } printf("\n"); - error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", - err, strerror(err)); + error("sys_perf_event_open() syscall returned with %d " + "(%s) for event %s. /bin/dmesg may provide " + "additional information.\n", + err, strerror(err), perf_evsel__name(pos)); #if defined(__i386__) || defined(__x86_64__) - if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP) - die("No hardware sampling interrupt available." - " No APIC? If so then you can boot the kernel" - " with the \"lapic\" boot parameter to" - " force-enable it.\n"); + if (attr->type == PERF_TYPE_HARDWARE && + err == EOPNOTSUPP) { + pr_err("No hardware sampling interrupt available." + " No APIC? 
If so then you can boot the kernel" + " with the \"lapic\" boot parameter to" + " force-enable it.\n"); + rc = -err; + goto out; + } #endif - die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); + pr_err("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); + rc = -err; + goto out; } } - if (perf_evlist__set_filters(evlist)) { + if (perf_evlist__apply_filters(evlist)) { error("failed to set filter with %d (%s)\n", errno, strerror(errno)); - exit(-1); + rc = -1; + goto out; } if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) { - if (errno == EPERM) - die("Permission error mapping pages.\n" - "Consider increasing " - "/proc/sys/kernel/perf_event_mlock_kb,\n" - "or try again with a smaller value of -m/--mmap_pages.\n" - "(current value: %d)\n", opts->mmap_pages); - else if (!is_power_of_2(opts->mmap_pages)) - die("--mmap_pages/-m value must be a power of two."); - - die("failed to mmap with %d (%s)\n", errno, strerror(errno)); + if (errno == EPERM) { + pr_err("Permission error mapping pages.\n" + "Consider increasing " + "/proc/sys/kernel/perf_event_mlock_kb,\n" + "or try again with a smaller value of -m/--mmap_pages.\n" + "(current value: %d)\n", opts->mmap_pages); + rc = -errno; + } else if (!is_power_of_2(opts->mmap_pages)) { + pr_err("--mmap_pages/-m value must be a power of two."); + rc = -EINVAL; + } else { + pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno)); + rc = -errno; + } + goto out; } if (rec->file_new) @@ -309,11 +350,14 @@ static void perf_record__open(struct perf_record *rec) else { if (!perf_evlist__equal(session->evlist, evlist)) { fprintf(stderr, "incompatible append\n"); - exit(-1); + rc = -1; + goto out; } } - perf_session__update_sample_type(session); + perf_session__set_id_hdr_size(session); +out: + return rc; } static int process_buildids(struct perf_record *rec) @@ -329,10 +373,13 @@ static int process_buildids(struct perf_record *rec) size, &build_id__mark_dso_hit_ops); } -static void perf_record__exit(int status __used, void *arg) +static void perf_record__exit(int status, void *arg) { struct perf_record *rec = arg; + if (status != 0) + return; + if (!rec->opts.pipe_output) { rec->session->header.data_size += rec->bytes_written; @@ -387,17 +434,26 @@ static struct perf_event_header finished_round_event = { .type = PERF_RECORD_FINISHED_ROUND, }; -static void perf_record__mmap_read_all(struct perf_record *rec) +static int perf_record__mmap_read_all(struct perf_record *rec) { int i; + int rc = 0; for (i = 0; i < rec->evlist->nr_mmaps; i++) { - if (rec->evlist->mmap[i].base) - perf_record__mmap_read(rec, &rec->evlist->mmap[i]); + if (rec->evlist->mmap[i].base) { + if (perf_record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) { + rc = -1; + goto out; + } + } } if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA)) - write_output(rec, &finished_round_event, sizeof(finished_round_event)); + rc = write_output(rec, &finished_round_event, + sizeof(finished_round_event)); + +out: + return rc; } static int __cmd_record(struct perf_record *rec, int argc, const char **argv) @@ -457,7 +513,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) output = open(output_name, flags, S_IRUSR | S_IWUSR); if (output < 0) { perror("failed to create output file"); - exit(-1); + return -1; } rec->output = output; @@ -497,7 +553,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) } } - perf_record__open(rec); + if (perf_record__open(rec) != 0) { + err = -1; + goto out_delete_session; + 
} /* * perf_session__delete(session) will be called at perf_record__exit() @@ -507,19 +566,20 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) if (opts->pipe_output) { err = perf_header__write_pipe(output); if (err < 0) - return err; + goto out_delete_session; } else if (rec->file_new) { err = perf_session__write_header(session, evsel_list, output, false); if (err < 0) - return err; + goto out_delete_session; } if (!rec->no_buildid && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) { pr_err("Couldn't generate buildids. " "Use --no-buildid to profile anyway.\n"); - return -1; + err = -1; + goto out_delete_session; } rec->post_processing_offset = lseek(output, 0, SEEK_CUR); @@ -527,7 +587,8 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) machine = perf_session__find_host_machine(session); if (!machine) { pr_err("Couldn't find native kernel information.\n"); - return -1; + err = -1; + goto out_delete_session; } if (opts->pipe_output) { @@ -535,14 +596,14 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) process_synthesized_event); if (err < 0) { pr_err("Couldn't synthesize attrs.\n"); - return err; + goto out_delete_session; } err = perf_event__synthesize_event_types(tool, process_synthesized_event, machine); if (err < 0) { pr_err("Couldn't synthesize event_types.\n"); - return err; + goto out_delete_session; } if (have_tracepoints(&evsel_list->entries)) { @@ -558,7 +619,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) process_synthesized_event); if (err <= 0) { pr_err("Couldn't record tracing data.\n"); - return err; + goto out_delete_session; } advance_output(rec, err); } @@ -586,20 +647,24 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) perf_event__synthesize_guest_os); if (!opts->target.system_wide) - perf_event__synthesize_thread_map(tool, evsel_list->threads, + err = perf_event__synthesize_thread_map(tool, evsel_list->threads, process_synthesized_event, machine); else - perf_event__synthesize_threads(tool, process_synthesized_event, + err = perf_event__synthesize_threads(tool, process_synthesized_event, machine); + if (err != 0) + goto out_delete_session; + if (rec->realtime_prio) { struct sched_param param; param.sched_priority = rec->realtime_prio; if (sched_setscheduler(0, SCHED_FIFO, ¶m)) { pr_err("Could not set realtime priority.\n"); - exit(-1); + err = -1; + goto out_delete_session; } } @@ -614,7 +679,10 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv) for (;;) { int hits = rec->samples; - perf_record__mmap_read_all(rec); + if (perf_record__mmap_read_all(rec) < 0) { + err = -1; + goto out_delete_session; + } if (hits == rec->samples) { if (done) @@ -732,6 +800,106 @@ parse_branch_stack(const struct option *opt, const char *str, int unset) return ret; } +#ifndef NO_LIBUNWIND_SUPPORT +static int get_stack_size(char *str, unsigned long *_size) +{ + char *endptr; + unsigned long size; + unsigned long max_size = round_down(USHRT_MAX, sizeof(u64)); + + size = strtoul(str, &endptr, 0); + + do { + if (*endptr) + break; + + size = round_up(size, sizeof(u64)); + if (!size || size > max_size) + break; + + *_size = size; + return 0; + + } while (0); + + pr_err("callchain: Incorrect stack dump size (max %ld): %s\n", + max_size, str); + return -1; +} +#endif /* !NO_LIBUNWIND_SUPPORT */ + +static int +parse_callchain_opt(const struct option *opt __maybe_unused, const char *arg, + int unset) 
+{ + struct perf_record *rec = (struct perf_record *)opt->value; + char *tok, *name, *saveptr = NULL; + char *buf; + int ret = -1; + + /* --no-call-graph */ + if (unset) + return 0; + + /* We specified default option if none is provided. */ + BUG_ON(!arg); + + /* We need buffer that we know we can write to. */ + buf = malloc(strlen(arg) + 1); + if (!buf) + return -ENOMEM; + + strcpy(buf, arg); + + tok = strtok_r((char *)buf, ",", &saveptr); + name = tok ? : (char *)buf; + + do { + /* Framepointer style */ + if (!strncmp(name, "fp", sizeof("fp"))) { + if (!strtok_r(NULL, ",", &saveptr)) { + rec->opts.call_graph = CALLCHAIN_FP; + ret = 0; + } else + pr_err("callchain: No more arguments " + "needed for -g fp\n"); + break; + +#ifndef NO_LIBUNWIND_SUPPORT + /* Dwarf style */ + } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) { + ret = 0; + rec->opts.call_graph = CALLCHAIN_DWARF; + rec->opts.stack_dump_size = default_stack_dump_size; + + tok = strtok_r(NULL, ",", &saveptr); + if (tok) { + unsigned long size = 0; + + ret = get_stack_size(tok, &size); + rec->opts.stack_dump_size = size; + } + + if (!ret) + pr_debug("callchain: stack dump size %d\n", + rec->opts.stack_dump_size); +#endif /* !NO_LIBUNWIND_SUPPORT */ + } else { + pr_err("callchain: Unknown -g option " + "value: %s\n", arg); + break; + } + + } while (0); + + free(buf); + + if (!ret) + pr_debug("callchain: type %d\n", rec->opts.call_graph); + + return ret; +} + static const char * const record_usage[] = { "perf record [] []", "perf record [] -- []", @@ -803,8 +971,9 @@ const struct option record_options[] = { "number of mmap data pages"), OPT_BOOLEAN(0, "group", &record.opts.group, "put the counters into a counter group"), - OPT_BOOLEAN('g', "call-graph", &record.opts.call_graph, - "do call-graph (stack chain/backtrace) recording"), + OPT_CALLBACK_DEFAULT('g', "call-graph", &record, "mode[,dump_size]", + callchain_help, &parse_callchain_opt, + "fp"), OPT_INCR('v', "verbose", &verbose, "be more verbose (show counter open errors, etc)"), OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"), @@ -836,7 +1005,7 @@ const struct option record_options[] = { OPT_END() }; -int cmd_record(int argc, const char **argv, const char *prefix __used) +int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused) { int err = -ENOMEM; struct perf_evsel *pos; @@ -844,8 +1013,6 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) struct perf_record *rec = &record; char errbuf[BUFSIZ]; - perf_header__set_cmdline(argc, argv); - evsel_list = perf_evlist__new(NULL, NULL); if (evsel_list == NULL) return -ENOMEM; diff --git a/trunk/tools/perf/builtin-report.c b/trunk/tools/perf/builtin-report.c index 69b1c1185159..1da243dfbc3e 100644 --- a/trunk/tools/perf/builtin-report.c +++ b/trunk/tools/perf/builtin-report.c @@ -69,8 +69,8 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool, if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, al->thread, - sample->callchain, &parent); + err = machine__resolve_callchain(machine, evsel, al->thread, + sample, &parent); if (err) return err; } @@ -93,7 +93,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool, struct annotation *notes; err = -ENOMEM; bx = he->branch_info; - if (bx->from.sym && use_browser > 0) { + if (bx->from.sym && use_browser == 1 && sort__has_sym) { notes = symbol__annotation(bx->from.sym); if (!notes->src && symbol__alloc_hist(bx->from.sym) < 0) @@ 
-107,7 +107,7 @@ static int perf_report__add_branch_hist_entry(struct perf_tool *tool, goto out; } - if (bx->to.sym && use_browser > 0) { + if (bx->to.sym && use_browser == 1 && sort__has_sym) { notes = symbol__annotation(bx->to.sym); if (!notes->src && symbol__alloc_hist(bx->to.sym) < 0) @@ -140,8 +140,8 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, struct hist_entry *he; if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, al->thread, - sample->callchain, &parent); + err = machine__resolve_callchain(machine, evsel, al->thread, + sample, &parent); if (err) return err; } @@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel, * so we don't allocated the extra space needed because the stdio * code will not use it. */ - if (he->ms.sym != NULL && use_browser > 0) { + if (he->ms.sym != NULL && use_browser == 1 && sort__has_sym) { struct annotation *notes = symbol__annotation(he->ms.sym); assert(evsel != NULL); @@ -223,9 +223,9 @@ static int process_sample_event(struct perf_tool *tool, static int process_read_event(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct perf_evsel *evsel, - struct machine *machine __used) + struct machine *machine __maybe_unused) { struct perf_report *rep = container_of(tool, struct perf_report, tool); @@ -249,8 +249,9 @@ static int process_read_event(struct perf_tool *tool, static int perf_report__setup_sample_type(struct perf_report *rep) { struct perf_session *self = rep->session; + u64 sample_type = perf_evlist__sample_type(self->evlist); - if (!self->fd_pipe && !(self->sample_type & PERF_SAMPLE_CALLCHAIN)) { + if (!self->fd_pipe && !(sample_type & PERF_SAMPLE_CALLCHAIN)) { if (sort__has_parent) { ui__error("Selected --sort parent, but no " "callchain data. Did you call " @@ -274,7 +275,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep) if (sort__branch_mode == 1) { if (!self->fd_pipe && - !(self->sample_type & PERF_SAMPLE_BRANCH_STACK)) { + !(sample_type & PERF_SAMPLE_BRANCH_STACK)) { ui__error("Selected -b but no branch data. 
" "Did you call perf record without -b?\n"); return -1; @@ -286,7 +287,7 @@ static int perf_report__setup_sample_type(struct perf_report *rep) extern volatile int session_done; -static void sig_handler(int sig __used) +static void sig_handler(int sig __maybe_unused) { session_done = 1; } @@ -396,17 +397,17 @@ static int __cmd_report(struct perf_report *rep) desc); } - if (dump_trace) { - perf_session__fprintf_nr_events(session, stdout); - goto out_delete; - } - if (verbose > 3) perf_session__fprintf(session, stdout); if (verbose > 2) perf_session__fprintf_dsos(session, stdout); + if (dump_trace) { + perf_session__fprintf_nr_events(session, stdout); + goto out_delete; + } + nr_samples = 0; list_for_each_entry(pos, &session->evlist->entries, node) { struct hists *hists = &pos->hists; @@ -532,13 +533,14 @@ parse_callchain_opt(const struct option *opt, const char *arg, int unset) } static int -parse_branch_mode(const struct option *opt __used, const char *str __used, int unset) +parse_branch_mode(const struct option *opt __maybe_unused, + const char *str __maybe_unused, int unset) { sort__branch_mode = !unset; return 0; } -int cmd_report(int argc, const char **argv, const char *prefix __used) +int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_session *session; struct stat st; @@ -637,6 +639,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) "Show a column with the sum of periods"), OPT_CALLBACK_NOOPT('b', "branch-stack", &sort__branch_mode, "", "use branch records for histogram filling", parse_branch_mode), + OPT_STRING(0, "objdump", &objdump_path, "path", + "objdump binary to use for disassembly and annotations"), OPT_END() }; @@ -685,15 +689,19 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) if (strcmp(report.input_name, "-") != 0) setup_browser(true); - else + else { use_browser = 0; + perf_hpp__init(false, false); + } + + setup_sorting(report_usage, options); /* * Only in the newt browser we are doing integrated annotation, * so don't allocate extra space that won't be used in the stdio * implementation. 
*/ - if (use_browser > 0) { + if (use_browser == 1 && sort__has_sym) { symbol_conf.priv_size = sizeof(struct annotation); report.annotate_init = symbol__annotate_init; /* @@ -716,8 +724,6 @@ int cmd_report(int argc, const char **argv, const char *prefix __used) if (symbol__init() < 0) goto error; - setup_sorting(report_usage, options); - if (parent_pattern != default_parent_pattern) { if (sort_dimension__add("parent") < 0) goto error; diff --git a/trunk/tools/perf/builtin-sched.c b/trunk/tools/perf/builtin-sched.c index 7a9ad2b1ee76..9b9e32eaa805 100644 --- a/trunk/tools/perf/builtin-sched.c +++ b/trunk/tools/perf/builtin-sched.c @@ -23,31 +23,12 @@ #include #include -static const char *input_name; - -static char default_sort_order[] = "avg, max, switch, runtime"; -static const char *sort_order = default_sort_order; - -static int profile_cpu = -1; - #define PR_SET_NAME 15 /* Set process name */ #define MAX_CPUS 4096 - -static u64 run_measurement_overhead; -static u64 sleep_measurement_overhead; - #define COMM_LEN 20 #define SYM_LEN 129 - #define MAX_PID 65536 -static unsigned long nr_tasks; - -struct perf_sched { - struct perf_tool tool; - struct perf_session *session; -}; - struct sched_atom; struct task_desc { @@ -85,44 +66,6 @@ struct sched_atom { struct task_desc *wakee; }; -static struct task_desc *pid_to_task[MAX_PID]; - -static struct task_desc **tasks; - -static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER; -static u64 start_time; - -static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER; - -static unsigned long nr_run_events; -static unsigned long nr_sleep_events; -static unsigned long nr_wakeup_events; - -static unsigned long nr_sleep_corrections; -static unsigned long nr_run_events_optimized; - -static unsigned long targetless_wakeups; -static unsigned long multitarget_wakeups; - -static u64 cpu_usage; -static u64 runavg_cpu_usage; -static u64 parent_cpu_usage; -static u64 runavg_parent_cpu_usage; - -static unsigned long nr_runs; -static u64 sum_runtime; -static u64 sum_fluct; -static u64 run_avg; - -static unsigned int replay_repeat = 10; -static unsigned long nr_timestamps; -static unsigned long nr_unordered_timestamps; -static unsigned long nr_state_machine_bugs; -static unsigned long nr_context_switch_bugs; -static unsigned long nr_events; -static unsigned long nr_lost_chunks; -static unsigned long nr_lost_events; - #define TASK_STATE_TO_CHAR_STR "RSDTtZX" enum thread_state { @@ -154,11 +97,79 @@ struct work_atoms { typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *); -static struct rb_root atom_root, sorted_atom_root; +struct perf_sched; + +struct trace_sched_handler { + int (*switch_event)(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample, struct machine *machine); -static u64 all_runtime; -static u64 all_count; + int (*runtime_event)(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample, struct machine *machine); + int (*wakeup_event)(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample, struct machine *machine); + + int (*fork_event)(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample); + + int (*migrate_task_event)(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine); +}; + +struct perf_sched { + struct perf_tool tool; + const char *input_name; + const char *sort_order; + unsigned long nr_tasks; + struct task_desc *pid_to_task[MAX_PID]; + struct 
task_desc **tasks; + const struct trace_sched_handler *tp_handler; + pthread_mutex_t start_work_mutex; + pthread_mutex_t work_done_wait_mutex; + int profile_cpu; +/* + * Track the current task - that way we can know whether there's any + * weird events, such as a task being switched away that is not current. + */ + int max_cpu; + u32 curr_pid[MAX_CPUS]; + struct thread *curr_thread[MAX_CPUS]; + char next_shortname1; + char next_shortname2; + unsigned int replay_repeat; + unsigned long nr_run_events; + unsigned long nr_sleep_events; + unsigned long nr_wakeup_events; + unsigned long nr_sleep_corrections; + unsigned long nr_run_events_optimized; + unsigned long targetless_wakeups; + unsigned long multitarget_wakeups; + unsigned long nr_runs; + unsigned long nr_timestamps; + unsigned long nr_unordered_timestamps; + unsigned long nr_state_machine_bugs; + unsigned long nr_context_switch_bugs; + unsigned long nr_events; + unsigned long nr_lost_chunks; + unsigned long nr_lost_events; + u64 run_measurement_overhead; + u64 sleep_measurement_overhead; + u64 start_time; + u64 cpu_usage; + u64 runavg_cpu_usage; + u64 parent_cpu_usage; + u64 runavg_parent_cpu_usage; + u64 sum_runtime; + u64 sum_fluct; + u64 run_avg; + u64 all_runtime; + u64 all_count; + u64 cpu_last_switched[MAX_CPUS]; + struct rb_root atom_root, sorted_atom_root; + struct list_head sort_list, cmp_pid; +}; static u64 get_nsecs(void) { @@ -169,13 +180,13 @@ static u64 get_nsecs(void) return ts.tv_sec * 1000000000ULL + ts.tv_nsec; } -static void burn_nsecs(u64 nsecs) +static void burn_nsecs(struct perf_sched *sched, u64 nsecs) { u64 T0 = get_nsecs(), T1; do { T1 = get_nsecs(); - } while (T1 + run_measurement_overhead < T0 + nsecs); + } while (T1 + sched->run_measurement_overhead < T0 + nsecs); } static void sleep_nsecs(u64 nsecs) @@ -188,24 +199,24 @@ static void sleep_nsecs(u64 nsecs) nanosleep(&ts, NULL); } -static void calibrate_run_measurement_overhead(void) +static void calibrate_run_measurement_overhead(struct perf_sched *sched) { u64 T0, T1, delta, min_delta = 1000000000ULL; int i; for (i = 0; i < 10; i++) { T0 = get_nsecs(); - burn_nsecs(0); + burn_nsecs(sched, 0); T1 = get_nsecs(); delta = T1-T0; min_delta = min(min_delta, delta); } - run_measurement_overhead = min_delta; + sched->run_measurement_overhead = min_delta; printf("run measurement overhead: %" PRIu64 " nsecs\n", min_delta); } -static void calibrate_sleep_measurement_overhead(void) +static void calibrate_sleep_measurement_overhead(struct perf_sched *sched) { u64 T0, T1, delta, min_delta = 1000000000ULL; int i; @@ -218,7 +229,7 @@ static void calibrate_sleep_measurement_overhead(void) min_delta = min(min_delta, delta); } min_delta -= 10000; - sleep_measurement_overhead = min_delta; + sched->sleep_measurement_overhead = min_delta; printf("sleep measurement overhead: %" PRIu64 " nsecs\n", min_delta); } @@ -251,8 +262,8 @@ static struct sched_atom *last_event(struct task_desc *task) return task->atoms[task->nr_events - 1]; } -static void -add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) +static void add_sched_event_run(struct perf_sched *sched, struct task_desc *task, + u64 timestamp, u64 duration) { struct sched_atom *event, *curr_event = last_event(task); @@ -261,7 +272,7 @@ add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) * to it: */ if (curr_event && curr_event->type == SCHED_EVENT_RUN) { - nr_run_events_optimized++; + sched->nr_run_events_optimized++; curr_event->duration += duration; return; } @@ -271,12 +282,11 @@ 
add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration) event->type = SCHED_EVENT_RUN; event->duration = duration; - nr_run_events++; + sched->nr_run_events++; } -static void -add_sched_event_wakeup(struct task_desc *task, u64 timestamp, - struct task_desc *wakee) +static void add_sched_event_wakeup(struct perf_sched *sched, struct task_desc *task, + u64 timestamp, struct task_desc *wakee) { struct sched_atom *event, *wakee_event; @@ -286,11 +296,11 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, wakee_event = last_event(wakee); if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) { - targetless_wakeups++; + sched->targetless_wakeups++; return; } if (wakee_event->wait_sem) { - multitarget_wakeups++; + sched->multitarget_wakeups++; return; } @@ -299,89 +309,89 @@ add_sched_event_wakeup(struct task_desc *task, u64 timestamp, wakee_event->specific_wait = 1; event->wait_sem = wakee_event->wait_sem; - nr_wakeup_events++; + sched->nr_wakeup_events++; } -static void -add_sched_event_sleep(struct task_desc *task, u64 timestamp, - u64 task_state __used) +static void add_sched_event_sleep(struct perf_sched *sched, struct task_desc *task, + u64 timestamp, u64 task_state __maybe_unused) { struct sched_atom *event = get_new_event(task, timestamp); event->type = SCHED_EVENT_SLEEP; - nr_sleep_events++; + sched->nr_sleep_events++; } -static struct task_desc *register_pid(unsigned long pid, const char *comm) +static struct task_desc *register_pid(struct perf_sched *sched, + unsigned long pid, const char *comm) { struct task_desc *task; BUG_ON(pid >= MAX_PID); - task = pid_to_task[pid]; + task = sched->pid_to_task[pid]; if (task) return task; task = zalloc(sizeof(*task)); task->pid = pid; - task->nr = nr_tasks; + task->nr = sched->nr_tasks; strcpy(task->comm, comm); /* * every task starts in sleeping state - this gets ignored * if there's no wakeup pointing to this sleep state: */ - add_sched_event_sleep(task, 0, 0); + add_sched_event_sleep(sched, task, 0, 0); - pid_to_task[pid] = task; - nr_tasks++; - tasks = realloc(tasks, nr_tasks*sizeof(struct task_task *)); - BUG_ON(!tasks); - tasks[task->nr] = task; + sched->pid_to_task[pid] = task; + sched->nr_tasks++; + sched->tasks = realloc(sched->tasks, sched->nr_tasks * sizeof(struct task_task *)); + BUG_ON(!sched->tasks); + sched->tasks[task->nr] = task; if (verbose) - printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm); + printf("registered task #%ld, PID %ld (%s)\n", sched->nr_tasks, pid, comm); return task; } -static void print_task_traces(void) +static void print_task_traces(struct perf_sched *sched) { struct task_desc *task; unsigned long i; - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; printf("task %6ld (%20s:%10ld), nr_events: %ld\n", task->nr, task->comm, task->pid, task->nr_events); } } -static void add_cross_task_wakeups(void) +static void add_cross_task_wakeups(struct perf_sched *sched) { struct task_desc *task1, *task2; unsigned long i, j; - for (i = 0; i < nr_tasks; i++) { - task1 = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task1 = sched->tasks[i]; j = i + 1; - if (j == nr_tasks) + if (j == sched->nr_tasks) j = 0; - task2 = tasks[j]; - add_sched_event_wakeup(task1, 0, task2); + task2 = sched->tasks[j]; + add_sched_event_wakeup(sched, task1, 0, task2); } } -static void -process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom) +static void perf_sched__process_event(struct perf_sched 
*sched, + struct sched_atom *atom) { int ret = 0; switch (atom->type) { case SCHED_EVENT_RUN: - burn_nsecs(atom->duration); + burn_nsecs(sched, atom->duration); break; case SCHED_EVENT_SLEEP: if (atom->wait_sem) @@ -428,8 +438,8 @@ static int self_open_counters(void) fd = sys_perf_event_open(&attr, 0, -1, -1, 0); if (fd < 0) - die("Error: sys_perf_event_open() syscall returned" - "with %d (%s)\n", fd, strerror(errno)); + pr_err("Error: sys_perf_event_open() syscall returned " + "with %d (%s)\n", fd, strerror(errno)); return fd; } @@ -444,31 +454,41 @@ static u64 get_cpu_usage_nsec_self(int fd) return runtime; } +struct sched_thread_parms { + struct task_desc *task; + struct perf_sched *sched; +}; + static void *thread_func(void *ctx) { - struct task_desc *this_task = ctx; + struct sched_thread_parms *parms = ctx; + struct task_desc *this_task = parms->task; + struct perf_sched *sched = parms->sched; u64 cpu_usage_0, cpu_usage_1; unsigned long i, ret; char comm2[22]; int fd; + free(parms); + sprintf(comm2, ":%s", this_task->comm); prctl(PR_SET_NAME, comm2); fd = self_open_counters(); - + if (fd < 0) + return NULL; again: ret = sem_post(&this_task->ready_for_work); BUG_ON(ret); - ret = pthread_mutex_lock(&start_work_mutex); + ret = pthread_mutex_lock(&sched->start_work_mutex); BUG_ON(ret); - ret = pthread_mutex_unlock(&start_work_mutex); + ret = pthread_mutex_unlock(&sched->start_work_mutex); BUG_ON(ret); cpu_usage_0 = get_cpu_usage_nsec_self(fd); for (i = 0; i < this_task->nr_events; i++) { this_task->curr_event = i; - process_sched_event(this_task, this_task->atoms[i]); + perf_sched__process_event(sched, this_task->atoms[i]); } cpu_usage_1 = get_cpu_usage_nsec_self(fd); @@ -476,15 +496,15 @@ static void *thread_func(void *ctx) ret = sem_post(&this_task->work_done_sem); BUG_ON(ret); - ret = pthread_mutex_lock(&work_done_wait_mutex); + ret = pthread_mutex_lock(&sched->work_done_wait_mutex); BUG_ON(ret); - ret = pthread_mutex_unlock(&work_done_wait_mutex); + ret = pthread_mutex_unlock(&sched->work_done_wait_mutex); BUG_ON(ret); goto again; } -static void create_tasks(void) +static void create_tasks(struct perf_sched *sched) { struct task_desc *task; pthread_attr_t attr; @@ -496,128 +516,129 @@ static void create_tasks(void) err = pthread_attr_setstacksize(&attr, (size_t) max(16 * 1024, PTHREAD_STACK_MIN)); BUG_ON(err); - err = pthread_mutex_lock(&start_work_mutex); + err = pthread_mutex_lock(&sched->start_work_mutex); BUG_ON(err); - err = pthread_mutex_lock(&work_done_wait_mutex); + err = pthread_mutex_lock(&sched->work_done_wait_mutex); BUG_ON(err); - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + struct sched_thread_parms *parms = malloc(sizeof(*parms)); + BUG_ON(parms == NULL); + parms->task = task = sched->tasks[i]; + parms->sched = sched; sem_init(&task->sleep_sem, 0, 0); sem_init(&task->ready_for_work, 0, 0); sem_init(&task->work_done_sem, 0, 0); task->curr_event = 0; - err = pthread_create(&task->thread, &attr, thread_func, task); + err = pthread_create(&task->thread, &attr, thread_func, parms); BUG_ON(err); } } -static void wait_for_tasks(void) +static void wait_for_tasks(struct perf_sched *sched) { u64 cpu_usage_0, cpu_usage_1; struct task_desc *task; unsigned long i, ret; - start_time = get_nsecs(); - cpu_usage = 0; - pthread_mutex_unlock(&work_done_wait_mutex); + sched->start_time = get_nsecs(); + sched->cpu_usage = 0; + pthread_mutex_unlock(&sched->work_done_wait_mutex); - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i 
= 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; ret = sem_wait(&task->ready_for_work); BUG_ON(ret); sem_init(&task->ready_for_work, 0, 0); } - ret = pthread_mutex_lock(&work_done_wait_mutex); + ret = pthread_mutex_lock(&sched->work_done_wait_mutex); BUG_ON(ret); cpu_usage_0 = get_cpu_usage_nsec_parent(); - pthread_mutex_unlock(&start_work_mutex); + pthread_mutex_unlock(&sched->start_work_mutex); - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; ret = sem_wait(&task->work_done_sem); BUG_ON(ret); sem_init(&task->work_done_sem, 0, 0); - cpu_usage += task->cpu_usage; + sched->cpu_usage += task->cpu_usage; task->cpu_usage = 0; } cpu_usage_1 = get_cpu_usage_nsec_parent(); - if (!runavg_cpu_usage) - runavg_cpu_usage = cpu_usage; - runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10; + if (!sched->runavg_cpu_usage) + sched->runavg_cpu_usage = sched->cpu_usage; + sched->runavg_cpu_usage = (sched->runavg_cpu_usage * 9 + sched->cpu_usage) / 10; - parent_cpu_usage = cpu_usage_1 - cpu_usage_0; - if (!runavg_parent_cpu_usage) - runavg_parent_cpu_usage = parent_cpu_usage; - runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 + - parent_cpu_usage)/10; + sched->parent_cpu_usage = cpu_usage_1 - cpu_usage_0; + if (!sched->runavg_parent_cpu_usage) + sched->runavg_parent_cpu_usage = sched->parent_cpu_usage; + sched->runavg_parent_cpu_usage = (sched->runavg_parent_cpu_usage * 9 + + sched->parent_cpu_usage)/10; - ret = pthread_mutex_lock(&start_work_mutex); + ret = pthread_mutex_lock(&sched->start_work_mutex); BUG_ON(ret); - for (i = 0; i < nr_tasks; i++) { - task = tasks[i]; + for (i = 0; i < sched->nr_tasks; i++) { + task = sched->tasks[i]; sem_init(&task->sleep_sem, 0, 0); task->curr_event = 0; } } -static void run_one_test(void) +static void run_one_test(struct perf_sched *sched) { u64 T0, T1, delta, avg_delta, fluct; T0 = get_nsecs(); - wait_for_tasks(); + wait_for_tasks(sched); T1 = get_nsecs(); delta = T1 - T0; - sum_runtime += delta; - nr_runs++; + sched->sum_runtime += delta; + sched->nr_runs++; - avg_delta = sum_runtime / nr_runs; + avg_delta = sched->sum_runtime / sched->nr_runs; if (delta < avg_delta) fluct = avg_delta - delta; else fluct = delta - avg_delta; - sum_fluct += fluct; - if (!run_avg) - run_avg = delta; - run_avg = (run_avg*9 + delta)/10; + sched->sum_fluct += fluct; + if (!sched->run_avg) + sched->run_avg = delta; + sched->run_avg = (sched->run_avg * 9 + delta) / 10; - printf("#%-3ld: %0.3f, ", - nr_runs, (double)delta/1000000.0); + printf("#%-3ld: %0.3f, ", sched->nr_runs, (double)delta / 1000000.0); - printf("ravg: %0.2f, ", - (double)run_avg/1e6); + printf("ravg: %0.2f, ", (double)sched->run_avg / 1e6); printf("cpu: %0.2f / %0.2f", - (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6); + (double)sched->cpu_usage / 1e6, (double)sched->runavg_cpu_usage / 1e6); #if 0 /* * rusage statistics done by the parent, these are less - * accurate than the sum_exec_runtime based statistics: + * accurate than the sched->sum_exec_runtime based statistics: */ printf(" [%0.2f / %0.2f]", - (double)parent_cpu_usage/1e6, - (double)runavg_parent_cpu_usage/1e6); + (double)sched->parent_cpu_usage/1e6, + (double)sched->runavg_parent_cpu_usage/1e6); #endif printf("\n"); - if (nr_sleep_corrections) - printf(" (%ld sleep corrections)\n", nr_sleep_corrections); - nr_sleep_corrections = 0; + if (sched->nr_sleep_corrections) + printf(" (%ld sleep corrections)\n", sched->nr_sleep_corrections); + sched->nr_sleep_corrections = 0; } 
-static void test_calibrations(void) +static void test_calibrations(struct perf_sched *sched) { u64 T0, T1; T0 = get_nsecs(); - burn_nsecs(1e6); + burn_nsecs(sched, 1e6); T1 = get_nsecs(); printf("the run test took %" PRIu64 " nsecs\n", T1 - T0); @@ -629,236 +650,92 @@ static void test_calibrations(void) printf("the sleep test took %" PRIu64 " nsecs\n", T1 - T0); } -#define FILL_FIELD(ptr, field, event, data) \ - ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data) - -#define FILL_ARRAY(ptr, array, event, data) \ -do { \ - void *__array = raw_field_ptr(event, #array, data); \ - memcpy(ptr.array, __array, sizeof(ptr.array)); \ -} while(0) - -#define FILL_COMMON_FIELDS(ptr, event, data) \ -do { \ - FILL_FIELD(ptr, common_type, event, data); \ - FILL_FIELD(ptr, common_flags, event, data); \ - FILL_FIELD(ptr, common_preempt_count, event, data); \ - FILL_FIELD(ptr, common_pid, event, data); \ - FILL_FIELD(ptr, common_tgid, event, data); \ -} while (0) - - - -struct trace_switch_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char prev_comm[16]; - u32 prev_pid; - u32 prev_prio; - u64 prev_state; - char next_comm[16]; - u32 next_pid; - u32 next_prio; -}; - -struct trace_runtime_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; - u32 pid; - u64 runtime; - u64 vruntime; -}; - -struct trace_wakeup_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; - u32 pid; - - u32 prio; - u32 success; - u32 cpu; -}; - -struct trace_fork_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char parent_comm[16]; - u32 parent_pid; - char child_comm[16]; - u32 child_pid; -}; - -struct trace_migrate_task_event { - u32 size; - - u16 common_type; - u8 common_flags; - u8 common_preempt_count; - u32 common_pid; - u32 common_tgid; - - char comm[16]; - u32 pid; - - u32 prio; - u32 cpu; -}; - -struct trace_sched_handler { - void (*switch_event)(struct trace_switch_event *, - struct machine *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); - - void (*runtime_event)(struct trace_runtime_event *, - struct machine *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); - - void (*wakeup_event)(struct trace_wakeup_event *, - struct machine *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); - - void (*fork_event)(struct trace_fork_event *, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); - - void (*migrate_task_event)(struct trace_migrate_task_event *, - struct machine *machine, - struct event_format *, - int cpu, - u64 timestamp, - struct thread *thread); -}; - - -static void -replay_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct machine *machine __used, - struct event_format *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int +replay_wakeup_event(struct perf_sched *sched, + struct perf_evsel *evsel, struct perf_sample *sample, + struct machine *machine __maybe_unused) { + const char *comm = perf_evsel__strval(evsel, sample, "comm"); + const u32 pid = perf_evsel__intval(evsel, sample, "pid"); struct task_desc *waker, *wakee; if (verbose) { - printf("sched_wakeup event %p\n", event); + printf("sched_wakeup 
event %p\n", evsel); - printf(" ... pid %d woke up %s/%d\n", - wakeup_event->common_pid, - wakeup_event->comm, - wakeup_event->pid); + printf(" ... pid %d woke up %s/%d\n", sample->tid, comm, pid); } - waker = register_pid(wakeup_event->common_pid, ""); - wakee = register_pid(wakeup_event->pid, wakeup_event->comm); + waker = register_pid(sched, sample->tid, ""); + wakee = register_pid(sched, pid, comm); - add_sched_event_wakeup(waker, timestamp, wakee); + add_sched_event_wakeup(sched, waker, sample->time, wakee); + return 0; } -static u64 cpu_last_switched[MAX_CPUS]; - -static void -replay_switch_event(struct trace_switch_event *switch_event, - struct machine *machine __used, - struct event_format *event, - int cpu, - u64 timestamp, - struct thread *thread __used) +static int replay_switch_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine __maybe_unused) { - struct task_desc *prev, __used *next; - u64 timestamp0; + const char *prev_comm = perf_evsel__strval(evsel, sample, "prev_comm"), + *next_comm = perf_evsel__strval(evsel, sample, "next_comm"); + const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), + next_pid = perf_evsel__intval(evsel, sample, "next_pid"); + const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); + struct task_desc *prev, __maybe_unused *next; + u64 timestamp0, timestamp = sample->time; + int cpu = sample->cpu; s64 delta; if (verbose) - printf("sched_switch event %p\n", event); + printf("sched_switch event %p\n", evsel); if (cpu >= MAX_CPUS || cpu < 0) - return; + return 0; - timestamp0 = cpu_last_switched[cpu]; + timestamp0 = sched->cpu_last_switched[cpu]; if (timestamp0) delta = timestamp - timestamp0; else delta = 0; - if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - - if (verbose) { - printf(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", - switch_event->prev_comm, switch_event->prev_pid, - switch_event->next_comm, switch_event->next_pid, - delta); + if (delta < 0) { + pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); + return -1; } - prev = register_pid(switch_event->prev_pid, switch_event->prev_comm); - next = register_pid(switch_event->next_pid, switch_event->next_comm); + pr_debug(" ... switch from %s/%d to %s/%d [ran %" PRIu64 " nsecs]\n", + prev_comm, prev_pid, next_comm, next_pid, delta); - cpu_last_switched[cpu] = timestamp; + prev = register_pid(sched, prev_pid, prev_comm); + next = register_pid(sched, next_pid, next_comm); - add_sched_event_run(prev, timestamp, delta); - add_sched_event_sleep(prev, timestamp, switch_event->prev_state); -} + sched->cpu_last_switched[cpu] = timestamp; + add_sched_event_run(sched, prev, timestamp, delta); + add_sched_event_sleep(sched, prev, timestamp, prev_state); -static void -replay_fork_event(struct trace_fork_event *fork_event, - struct event_format *event, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) + return 0; +} + +static int replay_fork_event(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample) { + const char *parent_comm = perf_evsel__strval(evsel, sample, "parent_comm"), + *child_comm = perf_evsel__strval(evsel, sample, "child_comm"); + const u32 parent_pid = perf_evsel__intval(evsel, sample, "parent_pid"), + child_pid = perf_evsel__intval(evsel, sample, "child_pid"); + if (verbose) { - printf("sched_fork event %p\n", event); - printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid); - printf("... 
child: %s/%d\n", fork_event->child_comm, fork_event->child_pid); + printf("sched_fork event %p\n", evsel); + printf("... parent: %s/%d\n", parent_comm, parent_pid); + printf("... child: %s/%d\n", child_comm, child_pid); } - register_pid(fork_event->parent_pid, fork_event->parent_comm); - register_pid(fork_event->child_pid, fork_event->child_comm); -} -static struct trace_sched_handler replay_ops = { - .wakeup_event = replay_wakeup_event, - .switch_event = replay_switch_event, - .fork_event = replay_fork_event, -}; + register_pid(sched, parent_pid, parent_comm); + register_pid(sched, child_pid, child_comm); + return 0; +} struct sort_dimension { const char *name; @@ -866,8 +743,6 @@ struct sort_dimension { struct list_head list; }; -static LIST_HEAD(cmp_pid); - static int thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r) { @@ -936,43 +811,45 @@ __thread_latency_insert(struct rb_root *root, struct work_atoms *data, rb_insert_color(&data->node, root); } -static void thread_atoms_insert(struct thread *thread) +static int thread_atoms_insert(struct perf_sched *sched, struct thread *thread) { struct work_atoms *atoms = zalloc(sizeof(*atoms)); - if (!atoms) - die("No memory"); + if (!atoms) { + pr_err("No memory at %s\n", __func__); + return -1; + } atoms->thread = thread; INIT_LIST_HEAD(&atoms->work_list); - __thread_latency_insert(&atom_root, atoms, &cmp_pid); + __thread_latency_insert(&sched->atom_root, atoms, &sched->cmp_pid); + return 0; } -static void -latency_fork_event(struct trace_fork_event *fork_event __used, - struct event_format *event __used, - int cpu __used, - u64 timestamp __used, - struct thread *thread __used) +static int latency_fork_event(struct perf_sched *sched __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct perf_sample *sample __maybe_unused) { /* should insert the newcomer */ + return 0; } -__used -static char sched_out_state(struct trace_switch_event *switch_event) +static char sched_out_state(u64 prev_state) { const char *str = TASK_STATE_TO_CHAR_STR; - return str[switch_event->prev_state]; + return str[prev_state]; } -static void +static int add_sched_out_event(struct work_atoms *atoms, char run_state, u64 timestamp) { struct work_atom *atom = zalloc(sizeof(*atom)); - if (!atom) - die("Non memory"); + if (!atom) { + pr_err("Non memory at %s", __func__); + return -1; + } atom->sched_out_time = timestamp; @@ -982,10 +859,12 @@ add_sched_out_event(struct work_atoms *atoms, } list_add_tail(&atom->list, &atoms->work_list); + return 0; } static void -add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used) +add_runtime_event(struct work_atoms *atoms, u64 delta, + u64 timestamp __maybe_unused) { struct work_atom *atom; @@ -1028,106 +907,128 @@ add_sched_in_event(struct work_atoms *atoms, u64 timestamp) atoms->nb_atoms++; } -static void -latency_switch_event(struct trace_switch_event *switch_event, - struct machine *machine, - struct event_format *event __used, - int cpu, - u64 timestamp, - struct thread *thread __used) +static int latency_switch_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { + const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), + next_pid = perf_evsel__intval(evsel, sample, "next_pid"); + const u64 prev_state = perf_evsel__intval(evsel, sample, "prev_state"); struct work_atoms *out_events, *in_events; struct thread *sched_out, *sched_in; - u64 timestamp0; + u64 timestamp0, timestamp = sample->time; + 
int cpu = sample->cpu; s64 delta; BUG_ON(cpu >= MAX_CPUS || cpu < 0); - timestamp0 = cpu_last_switched[cpu]; - cpu_last_switched[cpu] = timestamp; + timestamp0 = sched->cpu_last_switched[cpu]; + sched->cpu_last_switched[cpu] = timestamp; if (timestamp0) delta = timestamp - timestamp0; else delta = 0; - if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - + if (delta < 0) { + pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); + return -1; + } - sched_out = machine__findnew_thread(machine, switch_event->prev_pid); - sched_in = machine__findnew_thread(machine, switch_event->next_pid); + sched_out = machine__findnew_thread(machine, prev_pid); + sched_in = machine__findnew_thread(machine, next_pid); - out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); + out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); if (!out_events) { - thread_atoms_insert(sched_out); - out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid); - if (!out_events) - die("out-event: Internal tree error"); + if (thread_atoms_insert(sched, sched_out)) + return -1; + out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid); + if (!out_events) { + pr_err("out-event: Internal tree error"); + return -1; + } } - add_sched_out_event(out_events, sched_out_state(switch_event), timestamp); + if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp)) + return -1; - in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); + in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); if (!in_events) { - thread_atoms_insert(sched_in); - in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid); - if (!in_events) - die("in-event: Internal tree error"); + if (thread_atoms_insert(sched, sched_in)) + return -1; + in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid); + if (!in_events) { + pr_err("in-event: Internal tree error"); + return -1; + } /* * Take came in we have not heard about yet, * add in an initial atom in runnable state: */ - add_sched_out_event(in_events, 'R', timestamp); + if (add_sched_out_event(in_events, 'R', timestamp)) + return -1; } add_sched_in_event(in_events, timestamp); + + return 0; } -static void -latency_runtime_event(struct trace_runtime_event *runtime_event, - struct machine *machine, - struct event_format *event __used, - int cpu, - u64 timestamp, - struct thread *this_thread __used) +static int latency_runtime_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { - struct thread *thread = machine__findnew_thread(machine, runtime_event->pid); - struct work_atoms *atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); + const u32 pid = perf_evsel__intval(evsel, sample, "pid"); + const u64 runtime = perf_evsel__intval(evsel, sample, "runtime"); + struct thread *thread = machine__findnew_thread(machine, pid); + struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); + u64 timestamp = sample->time; + int cpu = sample->cpu; BUG_ON(cpu >= MAX_CPUS || cpu < 0); if (!atoms) { - thread_atoms_insert(thread); - atoms = thread_atoms_search(&atom_root, thread, &cmp_pid); - if (!atoms) - die("in-event: Internal tree error"); - add_sched_out_event(atoms, 'R', timestamp); + if (thread_atoms_insert(sched, thread)) + return -1; + atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid); + if (!atoms) { + pr_err("in-event: Internal tree error"); + return 
-1; + } + if (add_sched_out_event(atoms, 'R', timestamp)) + return -1; } - add_runtime_event(atoms, runtime_event->runtime, timestamp); + add_runtime_event(atoms, runtime, timestamp); + return 0; } -static void -latency_wakeup_event(struct trace_wakeup_event *wakeup_event, - struct machine *machine, - struct event_format *__event __used, - int cpu __used, - u64 timestamp, - struct thread *thread __used) +static int latency_wakeup_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { + const u32 pid = perf_evsel__intval(evsel, sample, "pid"), + success = perf_evsel__intval(evsel, sample, "success"); struct work_atoms *atoms; struct work_atom *atom; struct thread *wakee; + u64 timestamp = sample->time; /* Note for later, it may be interesting to observe the failing cases */ - if (!wakeup_event->success) - return; + if (!success) + return 0; - wakee = machine__findnew_thread(machine, wakeup_event->pid); - atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); + wakee = machine__findnew_thread(machine, pid); + atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); if (!atoms) { - thread_atoms_insert(wakee); - atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid); - if (!atoms) - die("wakeup-event: Internal tree error"); - add_sched_out_event(atoms, 'S', timestamp); + if (thread_atoms_insert(sched, wakee)) + return -1; + atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid); + if (!atoms) { + pr_err("wakeup-event: Internal tree error"); + return -1; + } + if (add_sched_out_event(atoms, 'S', timestamp)) + return -1; } BUG_ON(list_empty(&atoms->work_list)); @@ -1139,27 +1040,27 @@ latency_wakeup_event(struct trace_wakeup_event *wakeup_event, * one CPU, or are only looking at only one, so don't * make useless noise. */ - if (profile_cpu == -1 && atom->state != THREAD_SLEEPING) - nr_state_machine_bugs++; + if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING) + sched->nr_state_machine_bugs++; - nr_timestamps++; + sched->nr_timestamps++; if (atom->sched_out_time > timestamp) { - nr_unordered_timestamps++; - return; + sched->nr_unordered_timestamps++; + return 0; } atom->state = THREAD_WAIT_CPU; atom->wake_up_time = timestamp; + return 0; } -static void -latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, - struct machine *machine, - struct event_format *__event __used, - int cpu __used, - u64 timestamp, - struct thread *thread __used) +static int latency_migrate_task_event(struct perf_sched *sched, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { + const u32 pid = perf_evsel__intval(evsel, sample, "pid"); + u64 timestamp = sample->time; struct work_atoms *atoms; struct work_atom *atom; struct thread *migrant; @@ -1167,18 +1068,22 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, /* * Only need to worry about migration when profiling one CPU. 
*/ - if (profile_cpu == -1) - return; + if (sched->profile_cpu == -1) + return 0; - migrant = machine__findnew_thread(machine, migrate_task_event->pid); - atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); + migrant = machine__findnew_thread(machine, pid); + atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); if (!atoms) { - thread_atoms_insert(migrant); - register_pid(migrant->pid, migrant->comm); - atoms = thread_atoms_search(&atom_root, migrant, &cmp_pid); - if (!atoms) - die("migration-event: Internal tree error"); - add_sched_out_event(atoms, 'R', timestamp); + if (thread_atoms_insert(sched, migrant)) + return -1; + register_pid(sched, migrant->pid, migrant->comm); + atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid); + if (!atoms) { + pr_err("migration-event: Internal tree error"); + return -1; + } + if (add_sched_out_event(atoms, 'R', timestamp)) + return -1; } BUG_ON(list_empty(&atoms->work_list)); @@ -1186,21 +1091,15 @@ latency_migrate_task_event(struct trace_migrate_task_event *migrate_task_event, atom = list_entry(atoms->work_list.prev, struct work_atom, list); atom->sched_in_time = atom->sched_out_time = atom->wake_up_time = timestamp; - nr_timestamps++; + sched->nr_timestamps++; if (atom->sched_out_time > timestamp) - nr_unordered_timestamps++; -} + sched->nr_unordered_timestamps++; -static struct trace_sched_handler lat_ops = { - .wakeup_event = latency_wakeup_event, - .switch_event = latency_switch_event, - .runtime_event = latency_runtime_event, - .fork_event = latency_fork_event, - .migrate_task_event = latency_migrate_task_event, -}; + return 0; +} -static void output_lat_thread(struct work_atoms *work_list) +static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list) { int i; int ret; @@ -1214,8 +1113,8 @@ static void output_lat_thread(struct work_atoms *work_list) if (!strcmp(work_list->thread->comm, "swapper")) return; - all_runtime += work_list->total_runtime; - all_count += work_list->nb_atoms; + sched->all_runtime += work_list->total_runtime; + sched->all_count += work_list->nb_atoms; ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid); @@ -1241,11 +1140,6 @@ static int pid_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension pid_sort_dimension = { - .name = "pid", - .cmp = pid_cmp, -}; - static int avg_cmp(struct work_atoms *l, struct work_atoms *r) { u64 avgl, avgr; @@ -1267,11 +1161,6 @@ static int avg_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension avg_sort_dimension = { - .name = "avg", - .cmp = avg_cmp, -}; - static int max_cmp(struct work_atoms *l, struct work_atoms *r) { if (l->max_lat < r->max_lat) @@ -1282,11 +1171,6 @@ static int max_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension max_sort_dimension = { - .name = "max", - .cmp = max_cmp, -}; - static int switch_cmp(struct work_atoms *l, struct work_atoms *r) { if (l->nb_atoms < r->nb_atoms) @@ -1297,11 +1181,6 @@ static int switch_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension switch_sort_dimension = { - .name = "switch", - .cmp = switch_cmp, -}; - static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) { if (l->total_runtime < r->total_runtime) @@ -1312,28 +1191,38 @@ static int runtime_cmp(struct work_atoms *l, struct work_atoms *r) return 0; } -static struct sort_dimension runtime_sort_dimension = { - .name = "runtime", - .cmp = 
runtime_cmp, -}; - -static struct sort_dimension *available_sorts[] = { - &pid_sort_dimension, - &avg_sort_dimension, - &max_sort_dimension, - &switch_sort_dimension, - &runtime_sort_dimension, -}; - -#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *)) - -static LIST_HEAD(sort_list); - static int sort_dimension__add(const char *tok, struct list_head *list) { - int i; + size_t i; + static struct sort_dimension avg_sort_dimension = { + .name = "avg", + .cmp = avg_cmp, + }; + static struct sort_dimension max_sort_dimension = { + .name = "max", + .cmp = max_cmp, + }; + static struct sort_dimension pid_sort_dimension = { + .name = "pid", + .cmp = pid_cmp, + }; + static struct sort_dimension runtime_sort_dimension = { + .name = "runtime", + .cmp = runtime_cmp, + }; + static struct sort_dimension switch_sort_dimension = { + .name = "switch", + .cmp = switch_cmp, + }; + struct sort_dimension *available_sorts[] = { + &pid_sort_dimension, + &avg_sort_dimension, + &max_sort_dimension, + &switch_sort_dimension, + &runtime_sort_dimension, + }; - for (i = 0; i < NB_AVAILABLE_SORTS; i++) { + for (i = 0; i < ARRAY_SIZE(available_sorts); i++) { if (!strcmp(available_sorts[i]->name, tok)) { list_add_tail(&available_sorts[i]->list, list); @@ -1344,126 +1233,97 @@ static int sort_dimension__add(const char *tok, struct list_head *list) return -1; } -static void setup_sorting(void); - -static void sort_lat(void) +static void perf_sched__sort_lat(struct perf_sched *sched) { struct rb_node *node; for (;;) { struct work_atoms *data; - node = rb_first(&atom_root); + node = rb_first(&sched->atom_root); if (!node) break; - rb_erase(node, &atom_root); + rb_erase(node, &sched->atom_root); data = rb_entry(node, struct work_atoms, node); - __thread_latency_insert(&sorted_atom_root, data, &sort_list); + __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list); } } -static struct trace_sched_handler *trace_handler; - -static void -process_sched_wakeup_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread) +static int process_sched_wakeup_event(struct perf_tool *tool, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { - void *data = sample->raw_data; - struct trace_wakeup_event wakeup_event; - - FILL_COMMON_FIELDS(wakeup_event, event, data); + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - FILL_ARRAY(wakeup_event, comm, event, data); - FILL_FIELD(wakeup_event, pid, event, data); - FILL_FIELD(wakeup_event, prio, event, data); - FILL_FIELD(wakeup_event, success, event, data); - FILL_FIELD(wakeup_event, cpu, event, data); + if (sched->tp_handler->wakeup_event) + return sched->tp_handler->wakeup_event(sched, evsel, sample, machine); - if (trace_handler->wakeup_event) - trace_handler->wakeup_event(&wakeup_event, machine, event, - sample->cpu, sample->time, thread); + return 0; } -/* - * Track the current task - that way we can know whether there's any - * weird events, such as a task being switched away that is not current. - */ -static int max_cpu; - -static u32 curr_pid[MAX_CPUS] = { [0 ... 
MAX_CPUS-1] = -1 }; - -static struct thread *curr_thread[MAX_CPUS]; - -static char next_shortname1 = 'A'; -static char next_shortname2 = '0'; - -static void -map_switch_event(struct trace_switch_event *switch_event, - struct machine *machine, - struct event_format *event __used, - int this_cpu, - u64 timestamp, - struct thread *thread __used) +static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel, + struct perf_sample *sample, struct machine *machine) { - struct thread *sched_out __used, *sched_in; + const u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), + next_pid = perf_evsel__intval(evsel, sample, "next_pid"); + struct thread *sched_out __maybe_unused, *sched_in; int new_shortname; - u64 timestamp0; + u64 timestamp0, timestamp = sample->time; s64 delta; - int cpu; + int cpu, this_cpu = sample->cpu; BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0); - if (this_cpu > max_cpu) - max_cpu = this_cpu; + if (this_cpu > sched->max_cpu) + sched->max_cpu = this_cpu; - timestamp0 = cpu_last_switched[this_cpu]; - cpu_last_switched[this_cpu] = timestamp; + timestamp0 = sched->cpu_last_switched[this_cpu]; + sched->cpu_last_switched[this_cpu] = timestamp; if (timestamp0) delta = timestamp - timestamp0; else delta = 0; - if (delta < 0) - die("hm, delta: %" PRIu64 " < 0 ?\n", delta); - + if (delta < 0) { + pr_err("hm, delta: %" PRIu64 " < 0 ?\n", delta); + return -1; + } - sched_out = machine__findnew_thread(machine, switch_event->prev_pid); - sched_in = machine__findnew_thread(machine, switch_event->next_pid); + sched_out = machine__findnew_thread(machine, prev_pid); + sched_in = machine__findnew_thread(machine, next_pid); - curr_thread[this_cpu] = sched_in; + sched->curr_thread[this_cpu] = sched_in; printf(" "); new_shortname = 0; if (!sched_in->shortname[0]) { - sched_in->shortname[0] = next_shortname1; - sched_in->shortname[1] = next_shortname2; + sched_in->shortname[0] = sched->next_shortname1; + sched_in->shortname[1] = sched->next_shortname2; - if (next_shortname1 < 'Z') { - next_shortname1++; + if (sched->next_shortname1 < 'Z') { + sched->next_shortname1++; } else { - next_shortname1='A'; - if (next_shortname2 < '9') { - next_shortname2++; + sched->next_shortname1='A'; + if (sched->next_shortname2 < '9') { + sched->next_shortname2++; } else { - next_shortname2='0'; + sched->next_shortname2='0'; } } new_shortname = 1; } - for (cpu = 0; cpu <= max_cpu; cpu++) { + for (cpu = 0; cpu <= sched->max_cpu; cpu++) { if (cpu != this_cpu) printf(" "); else printf("*"); - if (curr_thread[cpu]) { - if (curr_thread[cpu]->pid) - printf("%2s ", curr_thread[cpu]->shortname); + if (sched->curr_thread[cpu]) { + if (sched->curr_thread[cpu]->pid) + printf("%2s ", sched->curr_thread[cpu]->shortname); else printf(". 
"); } else @@ -1477,134 +1337,97 @@ map_switch_event(struct trace_switch_event *switch_event, } else { printf("\n"); } + + return 0; } -static void -process_sched_switch_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread) +static int process_sched_switch_event(struct perf_tool *tool, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { - int this_cpu = sample->cpu; - void *data = sample->raw_data; - struct trace_switch_event switch_event; - - FILL_COMMON_FIELDS(switch_event, event, data); - - FILL_ARRAY(switch_event, prev_comm, event, data); - FILL_FIELD(switch_event, prev_pid, event, data); - FILL_FIELD(switch_event, prev_prio, event, data); - FILL_FIELD(switch_event, prev_state, event, data); - FILL_ARRAY(switch_event, next_comm, event, data); - FILL_FIELD(switch_event, next_pid, event, data); - FILL_FIELD(switch_event, next_prio, event, data); + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); + int this_cpu = sample->cpu, err = 0; + u32 prev_pid = perf_evsel__intval(evsel, sample, "prev_pid"), + next_pid = perf_evsel__intval(evsel, sample, "next_pid"); - if (curr_pid[this_cpu] != (u32)-1) { + if (sched->curr_pid[this_cpu] != (u32)-1) { /* * Are we trying to switch away a PID that is * not current? */ - if (curr_pid[this_cpu] != switch_event.prev_pid) - nr_context_switch_bugs++; + if (sched->curr_pid[this_cpu] != prev_pid) + sched->nr_context_switch_bugs++; } - if (trace_handler->switch_event) - trace_handler->switch_event(&switch_event, machine, event, - this_cpu, sample->time, thread); - curr_pid[this_cpu] = switch_event.next_pid; + if (sched->tp_handler->switch_event) + err = sched->tp_handler->switch_event(sched, evsel, sample, machine); + + sched->curr_pid[this_cpu] = next_pid; + return err; } -static void -process_sched_runtime_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread) +static int process_sched_runtime_event(struct perf_tool *tool, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { - void *data = sample->raw_data; - struct trace_runtime_event runtime_event; + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - FILL_ARRAY(runtime_event, comm, event, data); - FILL_FIELD(runtime_event, pid, event, data); - FILL_FIELD(runtime_event, runtime, event, data); - FILL_FIELD(runtime_event, vruntime, event, data); + if (sched->tp_handler->runtime_event) + return sched->tp_handler->runtime_event(sched, evsel, sample, machine); - if (trace_handler->runtime_event) - trace_handler->runtime_event(&runtime_event, machine, event, - sample->cpu, sample->time, thread); + return 0; } -static void -process_sched_fork_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine __used, - struct thread *thread) +static int process_sched_fork_event(struct perf_tool *tool, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine __maybe_unused) { - void *data = sample->raw_data; - struct trace_fork_event fork_event; - - FILL_COMMON_FIELDS(fork_event, event, data); + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - FILL_ARRAY(fork_event, parent_comm, event, data); - FILL_FIELD(fork_event, parent_pid, event, data); - FILL_ARRAY(fork_event, child_comm, event, data); - 
FILL_FIELD(fork_event, child_pid, event, data); + if (sched->tp_handler->fork_event) + return sched->tp_handler->fork_event(sched, evsel, sample); - if (trace_handler->fork_event) - trace_handler->fork_event(&fork_event, event, - sample->cpu, sample->time, thread); + return 0; } -static void -process_sched_exit_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample __used, - struct machine *machine __used, - struct thread *thread __used) +static int process_sched_exit_event(struct perf_tool *tool __maybe_unused, + struct perf_evsel *evsel, + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { - if (verbose) - printf("sched_exit event %p\n", event); + pr_debug("sched_exit event %p\n", evsel); + return 0; } -static void -process_sched_migrate_task_event(struct perf_tool *tool __used, - struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread) +static int process_sched_migrate_task_event(struct perf_tool *tool, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine) { - void *data = sample->raw_data; - struct trace_migrate_task_event migrate_task_event; - - FILL_COMMON_FIELDS(migrate_task_event, event, data); + struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - FILL_ARRAY(migrate_task_event, comm, event, data); - FILL_FIELD(migrate_task_event, pid, event, data); - FILL_FIELD(migrate_task_event, prio, event, data); - FILL_FIELD(migrate_task_event, cpu, event, data); + if (sched->tp_handler->migrate_task_event) + return sched->tp_handler->migrate_task_event(sched, evsel, sample, machine); - if (trace_handler->migrate_task_event) - trace_handler->migrate_task_event(&migrate_task_event, machine, - event, sample->cpu, - sample->time, thread); + return 0; } -typedef void (*tracepoint_handler)(struct perf_tool *tool, struct event_format *event, - struct perf_sample *sample, - struct machine *machine, - struct thread *thread); +typedef int (*tracepoint_handler)(struct perf_tool *tool, + struct perf_evsel *evsel, + struct perf_sample *sample, + struct machine *machine); -static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, - union perf_event *event __used, +static int perf_sched__process_tracepoint_sample(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { - struct perf_sched *sched = container_of(tool, struct perf_sched, tool); - struct pevent *pevent = sched->session->pevent; struct thread *thread = machine__findnew_thread(machine, sample->pid); + int err = 0; if (thread == NULL) { pr_debug("problem processing %s event, skipping it.\n", @@ -1617,30 +1440,15 @@ static int perf_sched__process_tracepoint_sample(struct perf_tool *tool, if (evsel->handler.func != NULL) { tracepoint_handler f = evsel->handler.func; - - if (evsel->handler.data == NULL) - evsel->handler.data = pevent_find_event(pevent, - evsel->attr.config); - - f(tool, evsel->handler.data, sample, machine, thread); + err = f(tool, evsel, sample, machine); } - return 0; + return err; } -static struct perf_sched perf_sched = { - .tool = { - .sample = perf_sched__process_tracepoint_sample, - .comm = perf_event__process_comm, - .lost = perf_event__process_lost, - .fork = perf_event__process_task, - .ordered_samples = true, - }, -}; - -static void read_events(bool destroy, struct perf_session **psession) +static int 
perf_sched__read_events(struct perf_sched *sched, bool destroy, + struct perf_session **psession) { - int err = -EINVAL; const struct perf_evsel_str_handler handlers[] = { { "sched:sched_switch", process_sched_switch_event, }, { "sched:sched_stat_runtime", process_sched_runtime_event, }, @@ -1652,24 +1460,25 @@ static void read_events(bool destroy, struct perf_session **psession) }; struct perf_session *session; - session = perf_session__new(input_name, O_RDONLY, 0, false, - &perf_sched.tool); - if (session == NULL) - die("No Memory"); - - perf_sched.session = session; + session = perf_session__new(sched->input_name, O_RDONLY, 0, false, &sched->tool); + if (session == NULL) { + pr_debug("No Memory for session\n"); + return -1; + } - err = perf_session__set_tracepoints_handlers(session, handlers); - assert(err == 0); + if (perf_session__set_tracepoints_handlers(session, handlers)) + goto out_delete; if (perf_session__has_traces(session, "record -R")) { - err = perf_session__process_events(session, &perf_sched.tool); - if (err) - die("Failed to process events, error %d", err); + int err = perf_session__process_events(session, &sched->tool); + if (err) { + pr_err("Failed to process events, error %d", err); + goto out_delete; + } - nr_events = session->hists.stats.nr_events[0]; - nr_lost_events = session->hists.stats.total_lost; - nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; + sched->nr_events = session->hists.stats.nr_events[0]; + sched->nr_lost_events = session->hists.stats.total_lost; + sched->nr_lost_chunks = session->hists.stats.nr_events[PERF_RECORD_LOST]; } if (destroy) @@ -1677,208 +1486,166 @@ static void read_events(bool destroy, struct perf_session **psession) if (psession) *psession = session; + + return 0; + +out_delete: + perf_session__delete(session); + return -1; } -static void print_bad_events(void) +static void print_bad_events(struct perf_sched *sched) { - if (nr_unordered_timestamps && nr_timestamps) { + if (sched->nr_unordered_timestamps && sched->nr_timestamps) { printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n", - (double)nr_unordered_timestamps/(double)nr_timestamps*100.0, - nr_unordered_timestamps, nr_timestamps); + (double)sched->nr_unordered_timestamps/(double)sched->nr_timestamps*100.0, + sched->nr_unordered_timestamps, sched->nr_timestamps); } - if (nr_lost_events && nr_events) { + if (sched->nr_lost_events && sched->nr_events) { printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n", - (double)nr_lost_events/(double)nr_events*100.0, - nr_lost_events, nr_events, nr_lost_chunks); + (double)sched->nr_lost_events/(double)sched->nr_events * 100.0, + sched->nr_lost_events, sched->nr_events, sched->nr_lost_chunks); } - if (nr_state_machine_bugs && nr_timestamps) { + if (sched->nr_state_machine_bugs && sched->nr_timestamps) { printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)", - (double)nr_state_machine_bugs/(double)nr_timestamps*100.0, - nr_state_machine_bugs, nr_timestamps); - if (nr_lost_events) + (double)sched->nr_state_machine_bugs/(double)sched->nr_timestamps*100.0, + sched->nr_state_machine_bugs, sched->nr_timestamps); + if (sched->nr_lost_events) printf(" (due to lost events?)"); printf("\n"); } - if (nr_context_switch_bugs && nr_timestamps) { + if (sched->nr_context_switch_bugs && sched->nr_timestamps) { printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)", - (double)nr_context_switch_bugs/(double)nr_timestamps*100.0, - nr_context_switch_bugs, nr_timestamps); - if (nr_lost_events) + 
(double)sched->nr_context_switch_bugs/(double)sched->nr_timestamps*100.0, + sched->nr_context_switch_bugs, sched->nr_timestamps); + if (sched->nr_lost_events) printf(" (due to lost events?)"); printf("\n"); } } -static void __cmd_lat(void) +static int perf_sched__lat(struct perf_sched *sched) { struct rb_node *next; struct perf_session *session; setup_pager(); - read_events(false, &session); - sort_lat(); + if (perf_sched__read_events(sched, false, &session)) + return -1; + perf_sched__sort_lat(sched); printf("\n ---------------------------------------------------------------------------------------------------------------\n"); printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms | Maximum delay at |\n"); printf(" ---------------------------------------------------------------------------------------------------------------\n"); - next = rb_first(&sorted_atom_root); + next = rb_first(&sched->sorted_atom_root); while (next) { struct work_atoms *work_list; work_list = rb_entry(next, struct work_atoms, node); - output_lat_thread(work_list); + output_lat_thread(sched, work_list); next = rb_next(next); } printf(" -----------------------------------------------------------------------------------------\n"); printf(" TOTAL: |%11.3f ms |%9" PRIu64 " |\n", - (double)all_runtime/1e6, all_count); + (double)sched->all_runtime / 1e6, sched->all_count); printf(" ---------------------------------------------------\n"); - print_bad_events(); + print_bad_events(sched); printf("\n"); perf_session__delete(session); + return 0; } -static struct trace_sched_handler map_ops = { - .wakeup_event = NULL, - .switch_event = map_switch_event, - .runtime_event = NULL, - .fork_event = NULL, -}; - -static void __cmd_map(void) +static int perf_sched__map(struct perf_sched *sched) { - max_cpu = sysconf(_SC_NPROCESSORS_CONF); + sched->max_cpu = sysconf(_SC_NPROCESSORS_CONF); setup_pager(); - read_events(true, NULL); - print_bad_events(); + if (perf_sched__read_events(sched, true, NULL)) + return -1; + print_bad_events(sched); + return 0; } -static void __cmd_replay(void) +static int perf_sched__replay(struct perf_sched *sched) { unsigned long i; - calibrate_run_measurement_overhead(); - calibrate_sleep_measurement_overhead(); + calibrate_run_measurement_overhead(sched); + calibrate_sleep_measurement_overhead(sched); - test_calibrations(); + test_calibrations(sched); - read_events(true, NULL); + if (perf_sched__read_events(sched, true, NULL)) + return -1; - printf("nr_run_events: %ld\n", nr_run_events); - printf("nr_sleep_events: %ld\n", nr_sleep_events); - printf("nr_wakeup_events: %ld\n", nr_wakeup_events); + printf("nr_run_events: %ld\n", sched->nr_run_events); + printf("nr_sleep_events: %ld\n", sched->nr_sleep_events); + printf("nr_wakeup_events: %ld\n", sched->nr_wakeup_events); - if (targetless_wakeups) - printf("target-less wakeups: %ld\n", targetless_wakeups); - if (multitarget_wakeups) - printf("multi-target wakeups: %ld\n", multitarget_wakeups); - if (nr_run_events_optimized) + if (sched->targetless_wakeups) + printf("target-less wakeups: %ld\n", sched->targetless_wakeups); + if (sched->multitarget_wakeups) + printf("multi-target wakeups: %ld\n", sched->multitarget_wakeups); + if (sched->nr_run_events_optimized) printf("run atoms optimized: %ld\n", - nr_run_events_optimized); + sched->nr_run_events_optimized); - print_task_traces(); - add_cross_task_wakeups(); + print_task_traces(sched); + add_cross_task_wakeups(sched); - create_tasks(); + create_tasks(sched); 
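
Editor's note: the conversion running through the scheduler handlers above follows one consistent pattern — functions that previously aborted the whole process via die() now print the diagnostic (pr_err) and return a negative value, and every caller, up to perf_sched__lat()/__map()/__replay(), checks and propagates that result. The standalone sketch below is not part of the patch; the names parse_sample() and run_session() and the message text are invented purely to show the shape of that control flow in plain C.

    #include <stdio.h>

    /* A handler that used to call die() on bad input now reports and returns -1. */
    static int parse_sample(long delta)
    {
            if (delta < 0) {
                    fprintf(stderr, "hm, delta: %ld < 0 ?\n", delta);
                    return -1;      /* let the caller decide how to unwind */
            }
            /* ... normal processing ... */
            return 0;
    }

    /* The caller checks each step and propagates the first failure upward
     * instead of exiting the process from deep inside a callback. */
    static int run_session(void)
    {
            long samples[] = { 5, 12, -3, 7 };
            size_t i;

            for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                    if (parse_sample(samples[i]))
                            return -1;
            }
            return 0;
    }

    int main(void)
    {
            return run_session() ? 1 : 0;
    }
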
printf("------------------------------------------------------------\n"); - for (i = 0; i < replay_repeat; i++) - run_one_test(); -} - - -static const char * const sched_usage[] = { - "perf sched [] {record|latency|map|replay|script}", - NULL -}; - -static const struct option sched_options[] = { - OPT_STRING('i', "input", &input_name, "file", - "input file name"), - OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), - OPT_END() -}; - -static const char * const latency_usage[] = { - "perf sched latency []", - NULL -}; - -static const struct option latency_options[] = { - OPT_STRING('s', "sort", &sort_order, "key[,key2...]", - "sort by key(s): runtime, switch, avg, max"), - OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_INTEGER('C', "CPU", &profile_cpu, - "CPU to profile on"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), - OPT_END() -}; - -static const char * const replay_usage[] = { - "perf sched replay []", - NULL -}; + for (i = 0; i < sched->replay_repeat; i++) + run_one_test(sched); -static const struct option replay_options[] = { - OPT_UINTEGER('r', "repeat", &replay_repeat, - "repeat the workload replay N times (-1: infinite)"), - OPT_INCR('v', "verbose", &verbose, - "be more verbose (show symbol address, etc)"), - OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, - "dump raw trace in ASCII"), - OPT_END() -}; + return 0; +} -static void setup_sorting(void) +static void setup_sorting(struct perf_sched *sched, const struct option *options, + const char * const usage_msg[]) { - char *tmp, *tok, *str = strdup(sort_order); + char *tmp, *tok, *str = strdup(sched->sort_order); for (tok = strtok_r(str, ", ", &tmp); tok; tok = strtok_r(NULL, ", ", &tmp)) { - if (sort_dimension__add(tok, &sort_list) < 0) { + if (sort_dimension__add(tok, &sched->sort_list) < 0) { error("Unknown --sort key: `%s'", tok); - usage_with_options(latency_usage, latency_options); + usage_with_options(usage_msg, options); } } free(str); - sort_dimension__add("pid", &cmp_pid); + sort_dimension__add("pid", &sched->cmp_pid); } -static const char *record_args[] = { - "record", - "-a", - "-R", - "-f", - "-m", "1024", - "-c", "1", - "-e", "sched:sched_switch", - "-e", "sched:sched_stat_wait", - "-e", "sched:sched_stat_sleep", - "-e", "sched:sched_stat_iowait", - "-e", "sched:sched_stat_runtime", - "-e", "sched:sched_process_exit", - "-e", "sched:sched_process_fork", - "-e", "sched:sched_wakeup", - "-e", "sched:sched_migrate_task", -}; - static int __cmd_record(int argc, const char **argv) { unsigned int rec_argc, i, j; const char **rec_argv; + const char * const record_args[] = { + "record", + "-a", + "-R", + "-f", + "-m", "1024", + "-c", "1", + "-e", "sched:sched_switch", + "-e", "sched:sched_stat_wait", + "-e", "sched:sched_stat_sleep", + "-e", "sched:sched_stat_iowait", + "-e", "sched:sched_stat_runtime", + "-e", "sched:sched_process_exit", + "-e", "sched:sched_process_fork", + "-e", "sched:sched_wakeup", + "-e", "sched:sched_migrate_task", + }; rec_argc = ARRAY_SIZE(record_args) + argc - 1; rec_argv = calloc(rec_argc + 1, sizeof(char *)); @@ -1897,8 +1664,85 @@ static int __cmd_record(int argc, const char **argv) return cmd_record(i, rec_argv, NULL); } -int cmd_sched(int argc, const char **argv, const char *prefix __used) +int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused) { + const char default_sort_order[] = 
"avg, max, switch, runtime"; + struct perf_sched sched = { + .tool = { + .sample = perf_sched__process_tracepoint_sample, + .comm = perf_event__process_comm, + .lost = perf_event__process_lost, + .fork = perf_event__process_task, + .ordered_samples = true, + }, + .cmp_pid = LIST_HEAD_INIT(sched.cmp_pid), + .sort_list = LIST_HEAD_INIT(sched.sort_list), + .start_work_mutex = PTHREAD_MUTEX_INITIALIZER, + .work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER, + .curr_pid = { [0 ... MAX_CPUS - 1] = -1 }, + .sort_order = default_sort_order, + .replay_repeat = 10, + .profile_cpu = -1, + .next_shortname1 = 'A', + .next_shortname2 = '0', + }; + const struct option latency_options[] = { + OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]", + "sort by key(s): runtime, switch, avg, max"), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_INTEGER('C', "CPU", &sched.profile_cpu, + "CPU to profile on"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_END() + }; + const struct option replay_options[] = { + OPT_UINTEGER('r', "repeat", &sched.replay_repeat, + "repeat the workload replay N times (-1: infinite)"), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_END() + }; + const struct option sched_options[] = { + OPT_STRING('i', "input", &sched.input_name, "file", + "input file name"), + OPT_INCR('v', "verbose", &verbose, + "be more verbose (show symbol address, etc)"), + OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace, + "dump raw trace in ASCII"), + OPT_END() + }; + const char * const latency_usage[] = { + "perf sched latency []", + NULL + }; + const char * const replay_usage[] = { + "perf sched replay []", + NULL + }; + const char * const sched_usage[] = { + "perf sched [] {record|latency|map|replay|script}", + NULL + }; + struct trace_sched_handler lat_ops = { + .wakeup_event = latency_wakeup_event, + .switch_event = latency_switch_event, + .runtime_event = latency_runtime_event, + .fork_event = latency_fork_event, + .migrate_task_event = latency_migrate_task_event, + }; + struct trace_sched_handler map_ops = { + .switch_event = map_switch_event, + }; + struct trace_sched_handler replay_ops = { + .wakeup_event = replay_wakeup_event, + .switch_event = replay_switch_event, + .fork_event = replay_fork_event, + }; + argc = parse_options(argc, argv, sched_options, sched_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (!argc) @@ -1914,26 +1758,26 @@ int cmd_sched(int argc, const char **argv, const char *prefix __used) if (!strncmp(argv[0], "rec", 3)) { return __cmd_record(argc, argv); } else if (!strncmp(argv[0], "lat", 3)) { - trace_handler = &lat_ops; + sched.tp_handler = &lat_ops; if (argc > 1) { argc = parse_options(argc, argv, latency_options, latency_usage, 0); if (argc) usage_with_options(latency_usage, latency_options); } - setup_sorting(); - __cmd_lat(); + setup_sorting(&sched, latency_options, latency_usage); + return perf_sched__lat(&sched); } else if (!strcmp(argv[0], "map")) { - trace_handler = &map_ops; - setup_sorting(); - __cmd_map(); + sched.tp_handler = &map_ops; + setup_sorting(&sched, latency_options, latency_usage); + return perf_sched__map(&sched); } else if (!strncmp(argv[0], "rep", 3)) { - trace_handler = &replay_ops; + sched.tp_handler = &replay_ops; if (argc) { argc = parse_options(argc, argv, replay_options, replay_usage, 0); if (argc) usage_with_options(replay_usage, replay_options); } 
- __cmd_replay(); + return perf_sched__replay(&sched); } else { usage_with_options(sched_usage, sched_options); } diff --git a/trunk/tools/perf/builtin-script.c b/trunk/tools/perf/builtin-script.c index 1e60ab70b2b1..1be843aa1546 100644 --- a/trunk/tools/perf/builtin-script.c +++ b/trunk/tools/perf/builtin-script.c @@ -14,6 +14,7 @@ #include "util/util.h" #include "util/evlist.h" #include "util/evsel.h" +#include "util/sort.h" #include static char const *script_name; @@ -28,11 +29,6 @@ static bool system_wide; static const char *cpu_list; static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS); -struct perf_script { - struct perf_tool tool; - struct perf_session *session; -}; - enum perf_output_field { PERF_OUTPUT_COMM = 1U << 0, PERF_OUTPUT_TID = 1U << 1, @@ -262,14 +258,11 @@ static int perf_session__check_output_opt(struct perf_session *session) return 0; } -static void print_sample_start(struct pevent *pevent, - struct perf_sample *sample, +static void print_sample_start(struct perf_sample *sample, struct thread *thread, struct perf_evsel *evsel) { - int type; struct perf_event_attr *attr = &evsel->attr; - struct event_format *event; const char *evname = NULL; unsigned long secs; unsigned long usecs; @@ -307,20 +300,7 @@ static void print_sample_start(struct pevent *pevent, } if (PRINT_FIELD(EVNAME)) { - if (attr->type == PERF_TYPE_TRACEPOINT) { - /* - * XXX Do we really need this here? - * perf_evlist__set_tracepoint_names should have done - * this already - */ - type = trace_parse_common_type(pevent, - sample->raw_data); - event = pevent_find_event(pevent, type); - if (event) - evname = event->name; - } else - evname = perf_evsel__name(evsel); - + evname = perf_evsel__name(evsel); printf("%s: ", evname ? evname : "[unknown]"); } } @@ -401,7 +381,7 @@ static void print_sample_bts(union perf_event *event, printf(" "); else printf("\n"); - perf_event__print_ip(event, sample, machine, + perf_evsel__print_ip(evsel, event, sample, machine, PRINT_FIELD(SYM), PRINT_FIELD(DSO), PRINT_FIELD(SYMOFFSET)); } @@ -415,19 +395,17 @@ static void print_sample_bts(union perf_event *event, printf("\n"); } -static void process_event(union perf_event *event __unused, - struct pevent *pevent, - struct perf_sample *sample, - struct perf_evsel *evsel, - struct machine *machine, - struct thread *thread) +static void process_event(union perf_event *event, struct perf_sample *sample, + struct perf_evsel *evsel, struct machine *machine, + struct addr_location *al) { struct perf_event_attr *attr = &evsel->attr; + struct thread *thread = al->thread; if (output[attr->type].fields == 0) return; - print_sample_start(pevent, sample, thread, evsel); + print_sample_start(sample, thread, evsel); if (is_bts_event(attr)) { print_sample_bts(event, sample, evsel, machine, thread); @@ -435,9 +413,8 @@ static void process_event(union perf_event *event __unused, } if (PRINT_FIELD(TRACE)) - print_trace_event(pevent, sample->cpu, sample->raw_data, - sample->raw_size); - + event_format__print(evsel->tp_format, sample->cpu, + sample->raw_data, sample->raw_size); if (PRINT_FIELD(ADDR)) print_sample_addr(event, sample, machine, thread, attr); @@ -446,7 +423,7 @@ static void process_event(union perf_event *event __unused, printf(" "); else printf("\n"); - perf_event__print_ip(event, sample, machine, + perf_evsel__print_ip(evsel, event, sample, machine, PRINT_FIELD(SYM), PRINT_FIELD(DSO), PRINT_FIELD(SYMOFFSET)); } @@ -454,9 +431,9 @@ static void process_event(union perf_event *event __unused, printf("\n"); } -static int 
default_start_script(const char *script __unused, - int argc __unused, - const char **argv __unused) +static int default_start_script(const char *script __maybe_unused, + int argc __maybe_unused, + const char **argv __maybe_unused) { return 0; } @@ -466,8 +443,8 @@ static int default_stop_script(void) return 0; } -static int default_generate_script(struct pevent *pevent __unused, - const char *outfile __unused) +static int default_generate_script(struct pevent *pevent __maybe_unused, + const char *outfile __maybe_unused) { return 0; } @@ -498,14 +475,13 @@ static int cleanup_scripting(void) static const char *input_name; -static int process_sample_event(struct perf_tool *tool __used, +static int process_sample_event(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine) { struct addr_location al; - struct perf_script *scr = container_of(tool, struct perf_script, tool); struct thread *thread = machine__findnew_thread(machine, event->ip.tid); if (thread == NULL) { @@ -537,32 +513,29 @@ static int process_sample_event(struct perf_tool *tool __used, if (cpu_list && !test_bit(sample->cpu, cpu_bitmap)) return 0; - scripting_ops->process_event(event, scr->session->pevent, - sample, evsel, machine, thread); + scripting_ops->process_event(event, sample, evsel, machine, &al); evsel->hists.stats.total_period += sample->period; return 0; } -static struct perf_script perf_script = { - .tool = { - .sample = process_sample_event, - .mmap = perf_event__process_mmap, - .comm = perf_event__process_comm, - .exit = perf_event__process_task, - .fork = perf_event__process_task, - .attr = perf_event__process_attr, - .event_type = perf_event__process_event_type, - .tracing_data = perf_event__process_tracing_data, - .build_id = perf_event__process_build_id, - .ordered_samples = true, - .ordering_requires_timestamps = true, - }, +static struct perf_tool perf_script = { + .sample = process_sample_event, + .mmap = perf_event__process_mmap, + .comm = perf_event__process_comm, + .exit = perf_event__process_task, + .fork = perf_event__process_task, + .attr = perf_event__process_attr, + .event_type = perf_event__process_event_type, + .tracing_data = perf_event__process_tracing_data, + .build_id = perf_event__process_build_id, + .ordered_samples = true, + .ordering_requires_timestamps = true, }; extern volatile int session_done; -static void sig_handler(int sig __unused) +static void sig_handler(int sig __maybe_unused) { session_done = 1; } @@ -573,7 +546,7 @@ static int __cmd_script(struct perf_session *session) signal(SIGINT, sig_handler); - ret = perf_session__process_events(session, &perf_script.tool); + ret = perf_session__process_events(session, &perf_script); if (debug_mode) pr_err("Misordered timestamps: %" PRIu64 "\n", nr_unordered); @@ -672,8 +645,8 @@ static void list_available_languages(void) fprintf(stderr, "\n"); } -static int parse_scriptname(const struct option *opt __used, - const char *str, int unset __used) +static int parse_scriptname(const struct option *opt __maybe_unused, + const char *str, int unset __maybe_unused) { char spec[PATH_MAX]; const char *script, *ext; @@ -718,8 +691,8 @@ static int parse_scriptname(const struct option *opt __used, return 0; } -static int parse_output_fields(const struct option *opt __used, - const char *arg, int unset __used) +static int parse_output_fields(const struct option *opt __maybe_unused, + const char *arg, int unset __maybe_unused) { char *tok; int i, imax = 
sizeof(all_output_options) / sizeof(struct output_option); @@ -1010,8 +983,9 @@ static char *get_script_root(struct dirent *script_dirent, const char *suffix) return script_root; } -static int list_available_scripts(const struct option *opt __used, - const char *s __used, int unset __used) +static int list_available_scripts(const struct option *opt __maybe_unused, + const char *s __maybe_unused, + int unset __maybe_unused) { struct dirent *script_next, *lang_next, script_dirent, lang_dirent; char scripts_path[MAXPATHLEN]; @@ -1058,6 +1032,61 @@ static int list_available_scripts(const struct option *opt __used, exit(0); } +/* + * Return -1 if none is found, otherwise the actual scripts number. + * + * Currently the only user of this function is the script browser, which + * will list all statically runnable scripts, select one, execute it and + * show the output in a perf browser. + */ +int find_scripts(char **scripts_array, char **scripts_path_array) +{ + struct dirent *script_next, *lang_next, script_dirent, lang_dirent; + char scripts_path[MAXPATHLEN]; + DIR *scripts_dir, *lang_dir; + char lang_path[MAXPATHLEN]; + char *temp; + int i = 0; + + snprintf(scripts_path, MAXPATHLEN, "%s/scripts", perf_exec_path()); + + scripts_dir = opendir(scripts_path); + if (!scripts_dir) + return -1; + + for_each_lang(scripts_path, scripts_dir, lang_dirent, lang_next) { + snprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, + lang_dirent.d_name); +#ifdef NO_LIBPERL + if (strstr(lang_path, "perl")) + continue; +#endif +#ifdef NO_LIBPYTHON + if (strstr(lang_path, "python")) + continue; +#endif + + lang_dir = opendir(lang_path); + if (!lang_dir) + continue; + + for_each_script(lang_path, lang_dir, script_dirent, script_next) { + /* Skip those real time scripts: xxxtop.p[yl] */ + if (strstr(script_dirent.d_name, "top.")) + continue; + sprintf(scripts_path_array[i], "%s/%s", lang_path, + script_dirent.d_name); + temp = strchr(script_dirent.d_name, '.'); + snprintf(scripts_array[i], + (temp - script_dirent.d_name) + 1, + "%s", script_dirent.d_name); + i++; + } + } + + return i; +} + static char *get_script_path(const char *script_root, const char *suffix) { struct dirent *script_next, *lang_next, script_dirent, lang_dirent; @@ -1170,6 +1199,8 @@ static const struct option options[] = { parse_output_fields), OPT_BOOLEAN('a', "all-cpus", &system_wide, "system-wide collection from all CPUs"), + OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]", + "only consider these symbols"), OPT_STRING('C', "cpu", &cpu_list, "cpu", "list of cpus to profile"), OPT_STRING('c', "comms", &symbol_conf.comm_list_str, "comm[,comm...]", "only display events for these comms"), @@ -1181,21 +1212,26 @@ static const struct option options[] = { OPT_END() }; -static bool have_cmd(int argc, const char **argv) +static int have_cmd(int argc, const char **argv) { char **__argv = malloc(sizeof(const char *) * argc); - if (!__argv) - die("malloc"); + if (!__argv) { + pr_err("malloc failed\n"); + return -1; + } + memcpy(__argv, argv, sizeof(const char *) * argc); argc = parse_options(argc, (const char **)__argv, record_options, NULL, PARSE_OPT_STOP_AT_NON_OPTION); free(__argv); - return argc != 0; + system_wide = (argc == 0); + + return 0; } -int cmd_script(int argc, const char **argv, const char *prefix __used) +int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused) { char *rec_script_path = NULL; char *rep_script_path = NULL; @@ -1259,13 +1295,13 @@ int cmd_script(int argc, const char **argv, 
const char *prefix __used) if (pipe(live_pipe) < 0) { perror("failed to create pipe"); - exit(-1); + return -1; } pid = fork(); if (pid < 0) { perror("failed to fork"); - exit(-1); + return -1; } if (!pid) { @@ -1277,13 +1313,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) if (is_top_script(argv[0])) { system_wide = true; } else if (!system_wide) { - system_wide = !have_cmd(argc - rep_args, - &argv[rep_args]); + if (have_cmd(argc - rep_args, &argv[rep_args]) != 0) { + err = -1; + goto out; + } } __argv = malloc((argc + 6) * sizeof(const char *)); - if (!__argv) - die("malloc"); + if (!__argv) { + pr_err("malloc failed\n"); + err = -ENOMEM; + goto out; + } __argv[j++] = "/bin/sh"; __argv[j++] = rec_script_path; @@ -1305,8 +1346,12 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) close(live_pipe[1]); __argv = malloc((argc + 4) * sizeof(const char *)); - if (!__argv) - die("malloc"); + if (!__argv) { + pr_err("malloc failed\n"); + err = -ENOMEM; + goto out; + } + j = 0; __argv[j++] = "/bin/sh"; __argv[j++] = rep_script_path; @@ -1331,12 +1376,20 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) if (!rec_script_path) system_wide = false; - else if (!system_wide) - system_wide = !have_cmd(argc - 1, &argv[1]); + else if (!system_wide) { + if (have_cmd(argc - 1, &argv[1]) != 0) { + err = -1; + goto out; + } + } __argv = malloc((argc + 2) * sizeof(const char *)); - if (!__argv) - die("malloc"); + if (!__argv) { + pr_err("malloc failed\n"); + err = -ENOMEM; + goto out; + } + __argv[j++] = "/bin/sh"; __argv[j++] = script_path; if (system_wide) @@ -1356,12 +1409,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) setup_pager(); session = perf_session__new(input_name, O_RDONLY, 0, false, - &perf_script.tool); + &perf_script); if (session == NULL) return -ENOMEM; - perf_script.session = session; - if (cpu_list) { if (perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap)) return -1; @@ -1387,18 +1438,18 @@ int cmd_script(int argc, const char **argv, const char *prefix __used) input = open(session->filename, O_RDONLY); /* input_name */ if (input < 0) { perror("failed to open file"); - exit(-1); + return -1; } err = fstat(input, &perf_stat); if (err < 0) { perror("failed to stat file"); - exit(-1); + return -1; } if (!perf_stat.st_size) { fprintf(stderr, "zero-sized file, nothing to do!\n"); - exit(0); + return 0; } scripting_ops = script_spec__lookup(generate_script_lang); diff --git a/trunk/tools/perf/builtin-stat.c b/trunk/tools/perf/builtin-stat.c index 861f0aec77ae..e8cd4d81b06e 100644 --- a/trunk/tools/perf/builtin-stat.c +++ b/trunk/tools/perf/builtin-stat.c @@ -51,13 +51,13 @@ #include "util/evsel.h" #include "util/debug.h" #include "util/color.h" +#include "util/stat.h" #include "util/header.h" #include "util/cpumap.h" #include "util/thread.h" #include "util/thread_map.h" #include -#include #include #define DEFAULT_SEPARATOR " " @@ -199,11 +199,6 @@ static int output_fd; static volatile int done = 0; -struct stats -{ - double n, mean, M2; -}; - struct perf_stat { struct stats res_stats[3]; }; @@ -220,48 +215,14 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) evsel->priv = NULL; } -static void update_stats(struct stats *stats, u64 val) -{ - double delta; - - stats->n++; - delta = val - stats->mean; - stats->mean += delta / stats->n; - stats->M2 += delta*(val - stats->mean); -} - -static double avg_stats(struct stats *stats) +static inline struct cpu_map 
*perf_evsel__cpus(struct perf_evsel *evsel) { - return stats->mean; + return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus; } -/* - * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance - * - * (\Sum n_i^2) - ((\Sum n_i)^2)/n - * s^2 = ------------------------------- - * n - 1 - * - * http://en.wikipedia.org/wiki/Stddev - * - * The std dev of the mean is related to the std dev by: - * - * s - * s_mean = ------- - * sqrt(n) - * - */ -static double stddev_stats(struct stats *stats) +static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel) { - double variance, variance_mean; - - if (!stats->n) - return 0.0; - - variance = stats->M2 / (stats->n - 1); - variance_mean = variance / stats->n; - - return sqrt(variance_mean); + return perf_evsel__cpus(evsel)->nr; } static struct stats runtime_nsecs_stats[MAX_NR_CPUS]; @@ -281,13 +242,9 @@ static int create_perf_stat_counter(struct perf_evsel *evsel, struct perf_evsel *first) { struct perf_event_attr *attr = &evsel->attr; - struct xyarray *group_fd = NULL; bool exclude_guest_missing = false; int ret; - if (group && evsel != first) - group_fd = first->fd; - if (scale) attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING; @@ -299,8 +256,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel, evsel->attr.exclude_guest = evsel->attr.exclude_host = 0; if (perf_target__has_cpu(&target)) { - ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus, - group, group_fd); + ret = perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel)); if (ret) goto check_ret; return 0; @@ -311,8 +267,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel, attr->enable_on_exec = 1; } - ret = perf_evsel__open_per_thread(evsel, evsel_list->threads, - group, group_fd); + ret = perf_evsel__open_per_thread(evsel, evsel_list->threads); if (!ret) return 0; /* fall through */ @@ -382,7 +337,7 @@ static int read_counter_aggr(struct perf_evsel *counter) u64 *count = counter->counts->aggr.values; int i; - if (__perf_evsel__read(counter, evsel_list->cpus->nr, + if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter), evsel_list->threads->nr, scale) < 0) return -1; @@ -411,7 +366,7 @@ static int read_counter(struct perf_evsel *counter) u64 *count; int cpu; - for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) { + for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0) return -1; @@ -423,7 +378,7 @@ static int read_counter(struct perf_evsel *counter) return 0; } -static int run_perf_stat(int argc __used, const char **argv) +static int run_perf_stat(int argc __maybe_unused, const char **argv) { unsigned long long t0, t1; struct perf_evsel *counter, *first; @@ -434,7 +389,7 @@ static int run_perf_stat(int argc __used, const char **argv) if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) { perror("failed to create pipes"); - exit(1); + return -1; } if (forks) { @@ -483,7 +438,10 @@ static int run_perf_stat(int argc __used, const char **argv) close(child_ready_pipe[0]); } - first = list_entry(evsel_list->entries.next, struct perf_evsel, node); + if (group) + perf_evlist__set_leader(evsel_list); + + first = perf_evlist__first(evsel_list); list_for_each_entry(counter, &evsel_list->entries, node) { if (create_perf_stat_counter(counter, first) < 0) { @@ -513,13 +471,14 @@ static int run_perf_stat(int argc __used, const char **argv) } if (child_pid != -1) kill(child_pid, SIGTERM); - die("Not all events could be opened.\n"); + + 
pr_err("Not all events could be opened.\n"); return -1; } counter->supported = true; } - if (perf_evlist__set_filters(evsel_list)) { + if (perf_evlist__apply_filters(evsel_list)) { error("failed to set filter with %d (%s)\n", errno, strerror(errno)); return -1; @@ -546,12 +505,12 @@ static int run_perf_stat(int argc __used, const char **argv) if (no_aggr) { list_for_each_entry(counter, &evsel_list->entries, node) { read_counter(counter); - perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1); + perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1); } } else { list_for_each_entry(counter, &evsel_list->entries, node) { read_counter_aggr(counter); - perf_evsel__close_fd(counter, evsel_list->cpus->nr, + perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), evsel_list->threads->nr); } } @@ -561,10 +520,7 @@ static int run_perf_stat(int argc __used, const char **argv) static void print_noise_pct(double total, double avg) { - double pct = 0.0; - - if (avg) - pct = 100.0*total/avg; + double pct = rel_stddev_stats(total, avg); if (csv_output) fprintf(output, "%s%.2f%%", csv_sep, pct); @@ -592,7 +548,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg) if (no_aggr) sprintf(cpustr, "CPU%*d%s", csv_output ? 0 : -4, - evsel_list->cpus->map[cpu], csv_sep); + perf_evsel__cpus(evsel)->map[cpu], csv_sep); fprintf(output, fmt, cpustr, msecs, csv_sep, perf_evsel__name(evsel)); @@ -636,7 +592,9 @@ static const char *get_ratio_color(enum grc_type type, double ratio) return color; } -static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_stalled_cycles_frontend(int cpu, + struct perf_evsel *evsel + __maybe_unused, double avg) { double total, ratio = 0.0; const char *color; @@ -653,7 +611,9 @@ static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __us fprintf(output, " frontend cycles idle "); } -static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_stalled_cycles_backend(int cpu, + struct perf_evsel *evsel + __maybe_unused, double avg) { double total, ratio = 0.0; const char *color; @@ -670,7 +630,9 @@ static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __use fprintf(output, " backend cycles idle "); } -static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_branch_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -687,7 +649,9 @@ static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double fprintf(output, " of all branches "); } -static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_l1_dcache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -704,7 +668,9 @@ static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, dou fprintf(output, " of all L1-dcache hits "); } -static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_l1_icache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -721,7 +687,9 @@ static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, dou fprintf(output, " of all L1-icache hits "); } -static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, 
double avg) +static void print_dtlb_cache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -738,7 +706,9 @@ static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do fprintf(output, " of all dTLB cache hits "); } -static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_itlb_cache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -755,7 +725,9 @@ static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, do fprintf(output, " of all iTLB cache hits "); } -static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg) +static void print_ll_cache_misses(int cpu, + struct perf_evsel *evsel __maybe_unused, + double avg) { double total, ratio = 0.0; const char *color; @@ -788,7 +760,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg) if (no_aggr) sprintf(cpustr, "CPU%*d%s", csv_output ? 0 : -4, - evsel_list->cpus->map[cpu], csv_sep); + perf_evsel__cpus(evsel)->map[cpu], csv_sep); else cpu = 0; @@ -949,14 +921,14 @@ static void print_counter(struct perf_evsel *counter) u64 ena, run, val; int cpu; - for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) { + for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) { val = counter->counts->cpu[cpu].val; ena = counter->counts->cpu[cpu].ena; run = counter->counts->cpu[cpu].run; if (run == 0 || ena == 0) { fprintf(output, "CPU%*d%s%*s%s%*s", csv_output ? 0 : -4, - evsel_list->cpus->map[cpu], csv_sep, + perf_evsel__cpus(counter)->map[cpu], csv_sep, csv_output ? 0 : 18, counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED, csv_sep, @@ -1061,8 +1033,8 @@ static const char * const stat_usage[] = { NULL }; -static int stat__set_big_num(const struct option *opt __used, - const char *s __used, int unset) +static int stat__set_big_num(const struct option *opt __maybe_unused, + const char *s __maybe_unused, int unset) { big_num_opt = unset ? 
0 : 1; return 0; @@ -1156,7 +1128,7 @@ static int add_default_attributes(void) return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); } -int cmd_stat(int argc, const char **argv, const char *prefix __used) +int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_evsel *pos; int status = -ENOMEM; @@ -1192,7 +1164,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) output = fopen(output_name, mode); if (!output) { perror("failed to create output file"); - exit(-1); + return -1; } clock_gettime(CLOCK_REALTIME, &tm); fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); @@ -1255,7 +1227,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) list_for_each_entry(pos, &evsel_list->entries, node) { if (perf_evsel__alloc_stat_priv(pos) < 0 || - perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0) + perf_evsel__alloc_counts(pos, perf_evsel__nr_cpus(pos)) < 0) goto out_free_fd; } diff --git a/trunk/tools/perf/builtin-test.c b/trunk/tools/perf/builtin-test.c index d909eb74a0eb..484f26cc0c00 100644 --- a/trunk/tools/perf/builtin-test.c +++ b/trunk/tools/perf/builtin-test.c @@ -14,11 +14,13 @@ #include "util/symbol.h" #include "util/thread_map.h" #include "util/pmu.h" +#include "event-parse.h" #include "../../include/linux/hw_breakpoint.h" #include -static int vmlinux_matches_kallsyms_filter(struct map *map __used, struct symbol *sym) +static int vmlinux_matches_kallsyms_filter(struct map *map __maybe_unused, + struct symbol *sym) { bool *visited = symbol__priv(sym); *visited = true; @@ -294,7 +296,7 @@ static int test__open_syscall_event(void) goto out_thread_map_delete; } - if (perf_evsel__open_per_thread(evsel, threads, false, NULL) < 0) { + if (perf_evsel__open_per_thread(evsel, threads) < 0) { pr_debug("failed to open counter: %s, " "tweak /proc/sys/kernel/perf_event_paranoid?\n", strerror(errno)); @@ -369,7 +371,7 @@ static int test__open_syscall_event_on_all_cpus(void) goto out_thread_map_delete; } - if (perf_evsel__open(evsel, cpus, threads, false, NULL) < 0) { + if (perf_evsel__open(evsel, cpus, threads) < 0) { pr_debug("failed to open counter: %s, " "tweak /proc/sys/kernel/perf_event_paranoid?\n", strerror(errno)); @@ -478,7 +480,6 @@ static int test__basic_mmap(void) unsigned int nr_events[nsyscalls], expected_nr_events[nsyscalls], i, j; struct perf_evsel *evsels[nsyscalls], *evsel; - int sample_size = __perf_evsel__sample_size(attr.sample_type); for (i = 0; i < nsyscalls; ++i) { char name[64]; @@ -534,7 +535,7 @@ static int test__basic_mmap(void) perf_evlist__add(evlist, evsels[i]); - if (perf_evsel__open(evsels[i], cpus, threads, false, NULL) < 0) { + if (perf_evsel__open(evsels[i], cpus, threads) < 0) { pr_debug("failed to open counter: %s, " "tweak /proc/sys/kernel/perf_event_paranoid?\n", strerror(errno)); @@ -563,8 +564,7 @@ static int test__basic_mmap(void) goto out_munmap; } - err = perf_event__parse_sample(event, attr.sample_type, sample_size, - false, &sample, false); + err = perf_evlist__parse_sample(evlist, event, &sample); if (err) { pr_err("Can't parse sample, err = %d\n", err); goto out_munmap; @@ -661,12 +661,12 @@ static int test__PERF_RECORD(void) const char *cmd = "sleep"; const char *argv[] = { cmd, "1", NULL, }; char *bname; - u64 sample_type, prev_time = 0; + u64 prev_time = 0; bool found_cmd_mmap = false, found_libc_mmap = false, found_vdso_mmap = false, found_ld_mmap = false; - int err = -1, errs = 0, i, wakeups = 0, sample_size; + int err = -1, errs = 0, i, wakeups 
= 0; u32 cpu; int total_events = 0, nr_events[PERF_RECORD_MAX] = { 0, }; @@ -712,7 +712,7 @@ static int test__PERF_RECORD(void) /* * Config the evsels, setting attr->comm on the first one, etc. */ - evsel = list_entry(evlist->entries.next, struct perf_evsel, node); + evsel = perf_evlist__first(evlist); evsel->attr.sample_type |= PERF_SAMPLE_CPU; evsel->attr.sample_type |= PERF_SAMPLE_TID; evsel->attr.sample_type |= PERF_SAMPLE_TIME; @@ -739,7 +739,7 @@ static int test__PERF_RECORD(void) * Call sys_perf_event_open on all the fds on all the evsels, * grouping them if asked to. */ - err = perf_evlist__open(evlist, opts.group); + err = perf_evlist__open(evlist); if (err < 0) { pr_debug("perf_evlist__open: %s\n", strerror(errno)); goto out_delete_evlist; @@ -756,13 +756,6 @@ static int test__PERF_RECORD(void) goto out_delete_evlist; } - /* - * We'll need these two to parse the PERF_SAMPLE_* fields in each - * event. - */ - sample_type = perf_evlist__sample_type(evlist); - sample_size = __perf_evsel__sample_size(sample_type); - /* * Now that all is properly set up, enable the events, they will * count just on workload.pid, which will start... @@ -788,9 +781,7 @@ static int test__PERF_RECORD(void) if (type < PERF_RECORD_MAX) nr_events[type]++; - err = perf_event__parse_sample(event, sample_type, - sample_size, true, - &sample, false); + err = perf_evlist__parse_sample(evlist, event, &sample); if (err < 0) { if (verbose) perf_event__fprintf(event, stderr); @@ -1007,7 +998,9 @@ static u64 mmap_read_self(void *addr) /* * If the RDPMC instruction faults then signal this back to the test parent task: */ -static void segfault_handler(int sig __used, siginfo_t *info __used, void *uc __used) +static void segfault_handler(int sig __maybe_unused, + siginfo_t *info __maybe_unused, + void *uc __maybe_unused) { exit(-1); } @@ -1034,14 +1027,16 @@ static int __test__rdpmc(void) fd = sys_perf_event_open(&attr, 0, -1, -1, 0); if (fd < 0) { - die("Error: sys_perf_event_open() syscall returned " - "with %d (%s)\n", fd, strerror(errno)); + pr_err("Error: sys_perf_event_open() syscall returned " + "with %d (%s)\n", fd, strerror(errno)); + return -1; } addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0); if (addr == (void *)(-1)) { - die("Error: mmap() syscall returned " - "with (%s)\n", strerror(errno)); + pr_err("Error: mmap() syscall returned with (%s)\n", + strerror(errno)); + goto out_close; } for (n = 0; n < 6; n++) { @@ -1062,9 +1057,9 @@ static int __test__rdpmc(void) } munmap(addr, page_size); - close(fd); - pr_debug(" "); +out_close: + close(fd); if (!delta_sum) return -1; @@ -1103,6 +1098,309 @@ static int test__perf_pmu(void) return perf_pmu__test(); } +static int perf_evsel__roundtrip_cache_name_test(void) +{ + char name[128]; + int type, op, err = 0, ret = 0, i, idx; + struct perf_evsel *evsel; + struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); + + if (evlist == NULL) + return -ENOMEM; + + for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { + for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { + /* skip invalid cache type */ + if (!perf_evsel__is_cache_op_valid(type, op)) + continue; + + for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { + __perf_evsel__hw_cache_type_op_res_name(type, op, i, + name, sizeof(name)); + err = parse_events(evlist, name, 0); + if (err) + ret = err; + } + } + } + + idx = 0; + evsel = perf_evlist__first(evlist); + + for (type = 0; type < PERF_COUNT_HW_CACHE_MAX; type++) { + for (op = 0; op < PERF_COUNT_HW_CACHE_OP_MAX; op++) { + /* skip invalid cache 
type */ + if (!perf_evsel__is_cache_op_valid(type, op)) + continue; + + for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) { + __perf_evsel__hw_cache_type_op_res_name(type, op, i, + name, sizeof(name)); + if (evsel->idx != idx) + continue; + + ++idx; + + if (strcmp(perf_evsel__name(evsel), name)) { + pr_debug("%s != %s\n", perf_evsel__name(evsel), name); + ret = -1; + } + + evsel = perf_evsel__next(evsel); + } + } + } + + perf_evlist__delete(evlist); + return ret; +} + +static int __perf_evsel__name_array_test(const char *names[], int nr_names) +{ + int i, err; + struct perf_evsel *evsel; + struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); + + if (evlist == NULL) + return -ENOMEM; + + for (i = 0; i < nr_names; ++i) { + err = parse_events(evlist, names[i], 0); + if (err) { + pr_debug("failed to parse event '%s', err %d\n", + names[i], err); + goto out_delete_evlist; + } + } + + err = 0; + list_for_each_entry(evsel, &evlist->entries, node) { + if (strcmp(perf_evsel__name(evsel), names[evsel->idx])) { + --err; + pr_debug("%s != %s\n", perf_evsel__name(evsel), names[evsel->idx]); + } + } + +out_delete_evlist: + perf_evlist__delete(evlist); + return err; +} + +#define perf_evsel__name_array_test(names) \ + __perf_evsel__name_array_test(names, ARRAY_SIZE(names)) + +static int perf_evsel__roundtrip_name_test(void) +{ + int err = 0, ret = 0; + + err = perf_evsel__name_array_test(perf_evsel__hw_names); + if (err) + ret = err; + + err = perf_evsel__name_array_test(perf_evsel__sw_names); + if (err) + ret = err; + + err = perf_evsel__roundtrip_cache_name_test(); + if (err) + ret = err; + + return ret; +} + +static int perf_evsel__test_field(struct perf_evsel *evsel, const char *name, + int size, bool should_be_signed) +{ + struct format_field *field = perf_evsel__field(evsel, name); + int is_signed; + int ret = 0; + + if (field == NULL) { + pr_debug("%s: \"%s\" field not found!\n", evsel->name, name); + return -1; + } + + is_signed = !!(field->flags | FIELD_IS_SIGNED); + if (should_be_signed && !is_signed) { + pr_debug("%s: \"%s\" signedness(%d) is wrong, should be %d\n", + evsel->name, name, is_signed, should_be_signed); + ret = -1; + } + + if (field->size != size) { + pr_debug("%s: \"%s\" size (%d) should be %d!\n", + evsel->name, name, field->size, size); + ret = -1; + } + + return ret; +} + +static int perf_evsel__tp_sched_test(void) +{ + struct perf_evsel *evsel = perf_evsel__newtp("sched", "sched_switch", 0); + int ret = 0; + + if (evsel == NULL) { + pr_debug("perf_evsel__new\n"); + return -1; + } + + if (perf_evsel__test_field(evsel, "prev_comm", 16, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "prev_pid", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "prev_prio", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "prev_state", 8, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "next_comm", 16, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "next_pid", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "next_prio", 4, true)) + ret = -1; + + perf_evsel__delete(evsel); + + evsel = perf_evsel__newtp("sched", "sched_wakeup", 0); + + if (perf_evsel__test_field(evsel, "comm", 16, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "pid", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "prio", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "success", 4, true)) + ret = -1; + + if (perf_evsel__test_field(evsel, "target_cpu", 4, true)) + ret = -1; + + return ret; +} + +static int 
test__syscall_open_tp_fields(void) +{ + struct perf_record_opts opts = { + .target = { + .uid = UINT_MAX, + .uses_mmap = true, + }, + .no_delay = true, + .freq = 1, + .mmap_pages = 256, + .raw_samples = true, + }; + const char *filename = "/etc/passwd"; + int flags = O_RDONLY | O_DIRECTORY; + struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); + struct perf_evsel *evsel; + int err = -1, i, nr_events = 0, nr_polls = 0; + + if (evlist == NULL) { + pr_debug("%s: perf_evlist__new\n", __func__); + goto out; + } + + evsel = perf_evsel__newtp("syscalls", "sys_enter_open", 0); + if (evsel == NULL) { + pr_debug("%s: perf_evsel__newtp\n", __func__); + goto out_delete_evlist; + } + + perf_evlist__add(evlist, evsel); + + err = perf_evlist__create_maps(evlist, &opts.target); + if (err < 0) { + pr_debug("%s: perf_evlist__create_maps\n", __func__); + goto out_delete_evlist; + } + + perf_evsel__config(evsel, &opts, evsel); + + evlist->threads->map[0] = getpid(); + + err = perf_evlist__open(evlist); + if (err < 0) { + pr_debug("perf_evlist__open: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + err = perf_evlist__mmap(evlist, UINT_MAX, false); + if (err < 0) { + pr_debug("perf_evlist__mmap: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + perf_evlist__enable(evlist); + + /* + * Generate the event: + */ + open(filename, flags); + + while (1) { + int before = nr_events; + + for (i = 0; i < evlist->nr_mmaps; i++) { + union perf_event *event; + + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + const u32 type = event->header.type; + int tp_flags; + struct perf_sample sample; + + ++nr_events; + + if (type != PERF_RECORD_SAMPLE) + continue; + + err = perf_evsel__parse_sample(evsel, event, &sample); + if (err) { + pr_err("Can't parse sample, err = %d\n", err); + goto out_munmap; + } + + tp_flags = perf_evsel__intval(evsel, &sample, "flags"); + + if (flags != tp_flags) { + pr_debug("%s: Expected flags=%#x, got %#x\n", + __func__, flags, tp_flags); + goto out_munmap; + } + + goto out_ok; + } + } + + if (nr_events == before) + poll(evlist->pollfd, evlist->nr_fds, 10); + + if (++nr_polls > 5) { + pr_debug("%s: no events!\n", __func__); + goto out_munmap; + } + } +out_ok: + err = 0; +out_munmap: + perf_evlist__munmap(evlist); +out_delete_evlist: + perf_evlist__delete(evlist); +out: + return err; +} + static struct test { const char *desc; int (*func)(void); @@ -1145,6 +1443,18 @@ static struct test { .desc = "Test dso data interface", .func = dso__test_data, }, + { + .desc = "roundtrip evsel->name check", + .func = perf_evsel__roundtrip_name_test, + }, + { + .desc = "Check parsing of sched tracepoints fields", + .func = perf_evsel__tp_sched_test, + }, + { + .desc = "Generate and check syscalls:sys_enter_open event fields", + .func = test__syscall_open_tp_fields, + }, { .func = NULL, }, @@ -1210,7 +1520,7 @@ static int perf_test__list(int argc, const char **argv) return 0; } -int cmd_test(int argc, const char **argv, const char *prefix __used) +int cmd_test(int argc, const char **argv, const char *prefix __maybe_unused) { const char * const test_usage[] = { "perf test [] [{list |[|]}]", diff --git a/trunk/tools/perf/builtin-timechart.c b/trunk/tools/perf/builtin-timechart.c index 3b75b2e21ea5..b1a8a3b841cc 100644 --- a/trunk/tools/perf/builtin-timechart.c +++ b/trunk/tools/perf/builtin-timechart.c @@ -168,9 +168,8 @@ static struct per_pid *find_create_pid(int pid) return cursor; cursor = cursor->next; } - cursor = malloc(sizeof(struct per_pid)); + cursor = 
zalloc(sizeof(*cursor)); assert(cursor != NULL); - memset(cursor, 0, sizeof(struct per_pid)); cursor->pid = pid; cursor->next = all_data; all_data = cursor; @@ -195,9 +194,8 @@ static void pid_set_comm(int pid, char *comm) } c = c->next; } - c = malloc(sizeof(struct per_pidcomm)); + c = zalloc(sizeof(*c)); assert(c != NULL); - memset(c, 0, sizeof(struct per_pidcomm)); c->comm = strdup(comm); p->current = c; c->next = p->all; @@ -239,17 +237,15 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) p = find_create_pid(pid); c = p->current; if (!c) { - c = malloc(sizeof(struct per_pidcomm)); + c = zalloc(sizeof(*c)); assert(c != NULL); - memset(c, 0, sizeof(struct per_pidcomm)); p->current = c; c->next = p->all; p->all = c; } - sample = malloc(sizeof(struct cpu_sample)); + sample = zalloc(sizeof(*sample)); assert(sample != NULL); - memset(sample, 0, sizeof(struct cpu_sample)); sample->start_time = start; sample->end_time = end; sample->type = type; @@ -275,28 +271,28 @@ static int cpus_cstate_state[MAX_CPUS]; static u64 cpus_pstate_start_times[MAX_CPUS]; static u64 cpus_pstate_state[MAX_CPUS]; -static int process_comm_event(struct perf_tool *tool __used, +static int process_comm_event(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { pid_set_comm(event->comm.tid, event->comm.comm); return 0; } -static int process_fork_event(struct perf_tool *tool __used, +static int process_fork_event(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); return 0; } -static int process_exit_event(struct perf_tool *tool __used, +static int process_exit_event(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { pid_exit(event->fork.pid, event->fork.time); return 0; @@ -373,11 +369,10 @@ static void c_state_start(int cpu, u64 timestamp, int state) static void c_state_end(int cpu, u64 timestamp) { - struct power_event *pwr; - pwr = malloc(sizeof(struct power_event)); + struct power_event *pwr = zalloc(sizeof(*pwr)); + if (!pwr) return; - memset(pwr, 0, sizeof(struct power_event)); pwr->state = cpus_cstate_state[cpu]; pwr->start_time = cpus_cstate_start_times[cpu]; @@ -392,14 +387,13 @@ static void c_state_end(int cpu, u64 timestamp) static void p_state_change(int cpu, u64 timestamp, u64 new_freq) { struct power_event *pwr; - pwr = malloc(sizeof(struct power_event)); if (new_freq > 8000000) /* detect invalid data */ return; + pwr = zalloc(sizeof(*pwr)); if (!pwr) return; - memset(pwr, 0, sizeof(struct power_event)); pwr->state = cpus_pstate_state[cpu]; pwr->start_time = cpus_pstate_start_times[cpu]; @@ -429,15 +423,13 @@ static void p_state_change(int cpu, u64 timestamp, u64 new_freq) static void sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te) { - struct wake_event *we; struct per_pid *p; struct wakeup_entry *wake = (void *)te; + struct wake_event *we = zalloc(sizeof(*we)); - we = malloc(sizeof(struct wake_event)); if (!we) return; - memset(we, 0, sizeof(struct wake_event)); we->time = timestamp; 
we->waker = pid; @@ -491,11 +483,11 @@ static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) } -static int process_sample_event(struct perf_tool *tool __used, - union perf_event *event __used, +static int process_sample_event(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __used) + struct machine *machine __maybe_unused) { struct trace_entry *te; @@ -579,13 +571,12 @@ static void end_sample_processing(void) struct power_event *pwr; for (cpu = 0; cpu <= numcpus; cpu++) { - pwr = malloc(sizeof(struct power_event)); + /* C state */ +#if 0 + pwr = zalloc(sizeof(*pwr)); if (!pwr) return; - memset(pwr, 0, sizeof(struct power_event)); - /* C state */ -#if 0 pwr->state = cpus_cstate_state[cpu]; pwr->start_time = cpus_cstate_start_times[cpu]; pwr->end_time = last_time; @@ -597,10 +588,9 @@ static void end_sample_processing(void) #endif /* P state */ - pwr = malloc(sizeof(struct power_event)); + pwr = zalloc(sizeof(*pwr)); if (!pwr) return; - memset(pwr, 0, sizeof(struct power_event)); pwr->state = cpus_pstate_state[cpu]; pwr->start_time = cpus_pstate_start_times[cpu]; @@ -830,11 +820,9 @@ static void draw_process_bars(void) static void add_process_filter(const char *string) { - struct process_filter *filt; - int pid; + int pid = strtoull(string, NULL, 10); + struct process_filter *filt = malloc(sizeof(*filt)); - pid = strtoull(string, NULL, 10); - filt = malloc(sizeof(struct process_filter)); if (!filt) return; @@ -1081,7 +1069,8 @@ static int __cmd_record(int argc, const char **argv) } static int -parse_process(const struct option *opt __used, const char *arg, int __used unset) +parse_process(const struct option *opt __maybe_unused, const char *arg, + int __maybe_unused unset) { if (arg) add_process_filter(arg); @@ -1106,7 +1095,8 @@ static const struct option options[] = { }; -int cmd_timechart(int argc, const char **argv, const char *prefix __used) +int cmd_timechart(int argc, const char **argv, + const char *prefix __maybe_unused) { argc = parse_options(argc, argv, options, timechart_usage, PARSE_OPT_STOP_AT_NON_OPTION); diff --git a/trunk/tools/perf/builtin-top.c b/trunk/tools/perf/builtin-top.c index 35e86c6df713..e434a16bb5ac 100644 --- a/trunk/tools/perf/builtin-top.c +++ b/trunk/tools/perf/builtin-top.c @@ -38,6 +38,7 @@ #include "util/cpumap.h" #include "util/xyarray.h" #include "util/sort.h" +#include "util/intlist.h" #include "util/debug.h" @@ -94,7 +95,8 @@ static void perf_top__update_print_entries(struct perf_top *top) top->print_entries -= 9; } -static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *arg) +static void perf_top__sig_winch(int sig __maybe_unused, + siginfo_t *info __maybe_unused, void *arg) { struct perf_top *top = arg; @@ -508,7 +510,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) prompt_integer(&counter, "Enter details event counter"); if (counter >= top->evlist->nr_entries) { - top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); + top->sym_evsel = perf_evlist__first(top->evlist); fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel)); sleep(1); break; @@ -517,7 +519,7 @@ static void perf_top__handle_keypress(struct perf_top *top, int c) if (top->sym_evsel->idx == counter) break; } else - top->sym_evsel = list_entry(top->evlist->entries.next, struct perf_evsel, node); + top->sym_evsel = perf_evlist__first(top->evlist); break; case 
'f': prompt_integer(&top->count_filter, "Enter display event count filter"); @@ -662,7 +664,7 @@ static const char *skip_symbols[] = { NULL }; -static int symbol_filter(struct map *map __used, struct symbol *sym) +static int symbol_filter(struct map *map __maybe_unused, struct symbol *sym) { const char *name = sym->name; int i; @@ -706,8 +708,16 @@ static void perf_event__process_sample(struct perf_tool *tool, int err; if (!machine && perf_guest) { - pr_err("Can't find guest [%d]'s kernel information\n", - event->ip.pid); + static struct intlist *seen; + + if (!seen) + seen = intlist__new(); + + if (!intlist__has_entry(seen, event->ip.pid)) { + pr_err("Can't find guest [%d]'s kernel information\n", + event->ip.pid); + intlist__add(seen, event->ip.pid); + } return; } @@ -774,8 +784,10 @@ static void perf_event__process_sample(struct perf_tool *tool, if ((sort__has_parent || symbol_conf.use_callchain) && sample->callchain) { - err = machine__resolve_callchain(machine, al.thread, - sample->callchain, &parent); + err = machine__resolve_callchain(machine, evsel, + al.thread, sample, + &parent); + if (err) return; } @@ -811,7 +823,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx) int ret; while ((event = perf_evlist__mmap_read(top->evlist, idx)) != NULL) { - ret = perf_session__parse_sample(session, event, &sample); + ret = perf_evlist__parse_sample(top->evlist, event, &sample); if (ret) { pr_err("Can't parse sample, err = %d\n", ret); continue; @@ -875,17 +887,14 @@ static void perf_top__mmap_read(struct perf_top *top) static void perf_top__start_counters(struct perf_top *top) { - struct perf_evsel *counter, *first; + struct perf_evsel *counter; struct perf_evlist *evlist = top->evlist; - first = list_entry(evlist->entries.next, struct perf_evsel, node); + if (top->group) + perf_evlist__set_leader(evlist); list_for_each_entry(counter, &evlist->entries, node) { struct perf_event_attr *attr = &counter->attr; - struct xyarray *group_fd = NULL; - - if (top->group && counter != first) - group_fd = first->fd; attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; @@ -916,8 +925,7 @@ static void perf_top__start_counters(struct perf_top *top) attr->sample_id_all = top->sample_id_all_missing ? 
0 : 1; try_again: if (perf_evsel__open(counter, top->evlist->cpus, - top->evlist->threads, top->group, - group_fd) < 0) { + top->evlist->threads) < 0) { int err = errno; if (err == EPERM || err == EACCES) { @@ -943,8 +951,10 @@ static void perf_top__start_counters(struct perf_top *top) * based cpu-clock-tick sw counter, which * is always available even if no PMU support: */ - if (attr->type == PERF_TYPE_HARDWARE && - attr->config == PERF_COUNT_HW_CPU_CYCLES) { + if ((err == ENOENT || err == ENXIO) && + (attr->type == PERF_TYPE_HARDWARE) && + (attr->config == PERF_COUNT_HW_CPU_CYCLES)) { + if (verbose) ui__warning("Cycles event not supported,\n" "trying to fall back to cpu-clock-ticks\n"); @@ -1032,7 +1042,7 @@ static int __cmd_top(struct perf_top *top) &top->session->host_machine); perf_top__start_counters(top); top->session->evlist = top->evlist; - perf_session__update_sample_type(top->session); + perf_session__set_id_hdr_size(top->session); /* Wait for a minimal set of events before starting the snapshot */ poll(top->evlist->pollfd, top->evlist->nr_fds, 100); @@ -1154,7 +1164,7 @@ static const char * const top_usage[] = { NULL }; -int cmd_top(int argc, const char **argv, const char *prefix __used) +int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused) { struct perf_evsel *pos; int status; @@ -1317,7 +1327,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) pos->attr.sample_period = top.default_interval; } - top.sym_evsel = list_entry(top.evlist->entries.next, struct perf_evsel, node); + top.sym_evsel = perf_evlist__first(top.evlist); symbol_conf.priv_size = sizeof(struct annotation); diff --git a/trunk/tools/perf/builtin-trace.c b/trunk/tools/perf/builtin-trace.c new file mode 100644 index 000000000000..8f113dab8bf1 --- /dev/null +++ b/trunk/tools/perf/builtin-trace.c @@ -0,0 +1,310 @@ +#include "builtin.h" +#include "util/evlist.h" +#include "util/parse-options.h" +#include "util/thread_map.h" +#include "event-parse.h" + +#include +#include + +static struct syscall_fmt { + const char *name; + const char *alias; + bool errmsg; + bool timeout; +} syscall_fmts[] = { + { .name = "arch_prctl", .errmsg = true, .alias = "prctl", }, + { .name = "fstat", .errmsg = true, .alias = "newfstat", }, + { .name = "fstatat", .errmsg = true, .alias = "newfstatat", }, + { .name = "futex", .errmsg = true, }, + { .name = "poll", .errmsg = true, .timeout = true, }, + { .name = "ppoll", .errmsg = true, .timeout = true, }, + { .name = "read", .errmsg = true, }, + { .name = "recvfrom", .errmsg = true, }, + { .name = "select", .errmsg = true, .timeout = true, }, + { .name = "stat", .errmsg = true, .alias = "newstat", }, +}; + +static int syscall_fmt__cmp(const void *name, const void *fmtp) +{ + const struct syscall_fmt *fmt = fmtp; + return strcmp(name, fmt->name); +} + +static struct syscall_fmt *syscall_fmt__find(const char *name) +{ + const int nmemb = ARRAY_SIZE(syscall_fmts); + return bsearch(name, syscall_fmts, nmemb, sizeof(struct syscall_fmt), syscall_fmt__cmp); +} + +struct syscall { + struct event_format *tp_format; + const char *name; + struct syscall_fmt *fmt; +}; + +struct trace { + int audit_machine; + struct { + int max; + struct syscall *table; + } syscalls; + struct perf_record_opts opts; +}; + +static int trace__read_syscall_info(struct trace *trace, int id) +{ + char tp_name[128]; + struct syscall *sc; + + if (id > trace->syscalls.max) { + struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc)); + + if (nsyscalls == NULL) + 
return -1; + + if (trace->syscalls.max != -1) { + memset(nsyscalls + trace->syscalls.max + 1, 0, + (id - trace->syscalls.max) * sizeof(*sc)); + } else { + memset(nsyscalls, 0, (id + 1) * sizeof(*sc)); + } + + trace->syscalls.table = nsyscalls; + trace->syscalls.max = id; + } + + sc = trace->syscalls.table + id; + sc->name = audit_syscall_to_name(id, trace->audit_machine); + if (sc->name == NULL) + return -1; + + sc->fmt = syscall_fmt__find(sc->name); + + snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name); + sc->tp_format = event_format__new("syscalls", tp_name); + + if (sc->tp_format == NULL && sc->fmt && sc->fmt->alias) { + snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->fmt->alias); + sc->tp_format = event_format__new("syscalls", tp_name); + } + + return sc->tp_format != NULL ? 0 : -1; +} + +static size_t syscall__fprintf_args(struct syscall *sc, unsigned long *args, FILE *fp) +{ + int i = 0; + size_t printed = 0; + + if (sc->tp_format != NULL) { + struct format_field *field; + + for (field = sc->tp_format->format.fields->next; field; field = field->next) { + printed += fprintf(fp, "%s%s: %ld", printed ? ", " : "", + field->name, args[i++]); + } + } else { + while (i < 6) { + printed += fprintf(fp, "%sarg%d: %ld", printed ? ", " : "", i, args[i]); + ++i; + } + } + + return printed; +} + +static int trace__run(struct trace *trace) +{ + struct perf_evlist *evlist = perf_evlist__new(NULL, NULL); + struct perf_evsel *evsel, *evsel_enter, *evsel_exit; + int err = -1, i, nr_events = 0, before; + + if (evlist == NULL) { + printf("Not enough memory to run!\n"); + goto out; + } + + evsel_enter = perf_evsel__newtp("raw_syscalls", "sys_enter", 0); + if (evsel_enter == NULL) { + printf("Couldn't read the raw_syscalls:sys_enter tracepoint information!\n"); + goto out_delete_evlist; + } + + perf_evlist__add(evlist, evsel_enter); + + evsel_exit = perf_evsel__newtp("raw_syscalls", "sys_exit", 1); + if (evsel_exit == NULL) { + printf("Couldn't read the raw_syscalls:sys_exit tracepoint information!\n"); + goto out_delete_evlist; + } + + perf_evlist__add(evlist, evsel_exit); + + err = perf_evlist__create_maps(evlist, &trace->opts.target); + if (err < 0) { + printf("Problems parsing the target to trace, check your options!\n"); + goto out_delete_evlist; + } + + perf_evlist__config_attrs(evlist, &trace->opts); + + err = perf_evlist__open(evlist); + if (err < 0) { + printf("Couldn't create the events: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + err = perf_evlist__mmap(evlist, UINT_MAX, false); + if (err < 0) { + printf("Couldn't mmap the events: %s\n", strerror(errno)); + goto out_delete_evlist; + } + + perf_evlist__enable(evlist); +again: + before = nr_events; + + for (i = 0; i < evlist->nr_mmaps; i++) { + union perf_event *event; + + while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) { + const u32 type = event->header.type; + struct syscall *sc; + struct perf_sample sample; + int id; + + ++nr_events; + + switch (type) { + case PERF_RECORD_SAMPLE: + break; + case PERF_RECORD_LOST: + printf("LOST %" PRIu64 " events!\n", event->lost.lost); + continue; + default: + printf("Unexpected %s event, skipping...\n", + perf_event__name(type)); + continue; + } + + err = perf_evlist__parse_sample(evlist, event, &sample); + if (err) { + printf("Can't parse sample, err = %d, skipping...\n", err); + continue; + } + + evsel = perf_evlist__id2evsel(evlist, sample.id); + if (evsel == NULL) { + printf("Unknown tp ID %" PRIu64 ", skipping...\n", sample.id); + continue; + } + + id = 
perf_evsel__intval(evsel, &sample, "id"); + if (id < 0) { + printf("Invalid syscall %d id, skipping...\n", id); + continue; + } + + if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL) && + trace__read_syscall_info(trace, id)) + continue; + + if ((id > trace->syscalls.max || trace->syscalls.table[id].name == NULL)) + continue; + + sc = &trace->syscalls.table[id]; + + if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1) + printf("%d ", sample.tid); + + if (evsel == evsel_enter) { + void *args = perf_evsel__rawptr(evsel, &sample, "args"); + + printf("%s(", sc->name); + syscall__fprintf_args(sc, args, stdout); + } else if (evsel == evsel_exit) { + int ret = perf_evsel__intval(evsel, &sample, "ret"); + + if (ret < 0 && sc->fmt && sc->fmt->errmsg) { + char bf[256]; + const char *emsg = strerror_r(-ret, bf, sizeof(bf)), + *e = audit_errno_to_name(-ret); + + printf(") = -1 %s %s", e, emsg); + } else if (ret == 0 && sc->fmt && sc->fmt->timeout) + printf(") = 0 Timeout"); + else + printf(") = %d", ret); + + putchar('\n'); + } + } + } + + if (nr_events == before) + poll(evlist->pollfd, evlist->nr_fds, -1); + + goto again; + +out_delete_evlist: + perf_evlist__delete(evlist); +out: + return err; +} + +int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused) +{ + const char * const trace_usage[] = { + "perf trace []", + NULL + }; + struct trace trace = { + .audit_machine = audit_detect_machine(), + .syscalls = { + . max = -1, + }, + .opts = { + .target = { + .uid = UINT_MAX, + .uses_mmap = true, + }, + .user_freq = UINT_MAX, + .user_interval = ULLONG_MAX, + .no_delay = true, + .mmap_pages = 1024, + }, + }; + const struct option trace_options[] = { + OPT_STRING('p', "pid", &trace.opts.target.pid, "pid", + "trace events on existing process id"), + OPT_STRING(0, "tid", &trace.opts.target.tid, "tid", + "trace events on existing thread id"), + OPT_BOOLEAN(0, "all-cpus", &trace.opts.target.system_wide, + "system-wide collection from all CPUs"), + OPT_STRING(0, "cpu", &trace.opts.target.cpu_list, "cpu", + "list of cpus to monitor"), + OPT_BOOLEAN(0, "no-inherit", &trace.opts.no_inherit, + "child tasks do not inherit counters"), + OPT_UINTEGER(0, "mmap-pages", &trace.opts.mmap_pages, + "number of mmap data pages"), + OPT_STRING(0, "uid", &trace.opts.target.uid_str, "user", + "user to profile"), + OPT_END() + }; + int err; + + argc = parse_options(argc, argv, trace_options, trace_usage, 0); + if (argc) + usage_with_options(trace_usage, trace_options); + + err = perf_target__parse_uid(&trace.opts.target); + if (err) { + char bf[BUFSIZ]; + perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf)); + printf("%s", bf); + return err; + } + + return trace__run(&trace); +} diff --git a/trunk/tools/perf/builtin.h b/trunk/tools/perf/builtin.h index b382bd551aac..08143bd854c7 100644 --- a/trunk/tools/perf/builtin.h +++ b/trunk/tools/perf/builtin.h @@ -34,6 +34,8 @@ extern int cmd_kmem(int argc, const char **argv, const char *prefix); extern int cmd_lock(int argc, const char **argv, const char *prefix); extern int cmd_kvm(int argc, const char **argv, const char *prefix); extern int cmd_test(int argc, const char **argv, const char *prefix); +extern int cmd_trace(int argc, const char **argv, const char *prefix); extern int cmd_inject(int argc, const char **argv, const char *prefix); +extern int find_scripts(char **scripts_array, char **scripts_path_array); #endif diff --git a/trunk/tools/perf/command-list.txt b/trunk/tools/perf/command-list.txt index 
d695fe40fbff..3e86bbd8c2d5 100644 --- a/trunk/tools/perf/command-list.txt +++ b/trunk/tools/perf/command-list.txt @@ -17,8 +17,9 @@ perf-report mainporcelain common perf-stat mainporcelain common perf-timechart mainporcelain common perf-top mainporcelain common +perf-trace mainporcelain common perf-script mainporcelain common -perf-probe mainporcelain common +perf-probe mainporcelain full perf-kmem mainporcelain common perf-lock mainporcelain common perf-kvm mainporcelain common diff --git a/trunk/tools/perf/config/feature-tests.mak b/trunk/tools/perf/config/feature-tests.mak index 6c18785a6417..4add41bb0c7e 100644 --- a/trunk/tools/perf/config/feature-tests.mak +++ b/trunk/tools/perf/config/feature-tests.mak @@ -154,3 +154,53 @@ int main(void) return 0; } endef + +ifndef NO_LIBUNWIND +define SOURCE_LIBUNWIND +#include +#include + +extern int UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, + unw_word_t ip, + unw_dyn_info_t *di, + unw_proc_info_t *pi, + int need_unwind_info, void *arg); + + +#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) + +int main(void) +{ + unw_addr_space_t addr_space; + addr_space = unw_create_addr_space(NULL, 0); + unw_init_remote(NULL, addr_space, NULL); + dwarf_search_unwind_table(addr_space, 0, NULL, NULL, 0, NULL); + return 0; +} +endef +endif + +ifndef NO_BACKTRACE +define SOURCE_BACKTRACE +#include +#include + +int main(void) +{ + backtrace(NULL, 0); + backtrace_symbols(NULL, 0); + return 0; +} +endef +endif + +ifndef NO_LIBAUDIT +define SOURCE_LIBAUDIT +#include + +int main(void) +{ + return audit_open(); +} +endef +endif \ No newline at end of file diff --git a/trunk/tools/perf/perf-archive.sh b/trunk/tools/perf/perf-archive.sh index 95b6f8b6177a..e91930620269 100644 --- a/trunk/tools/perf/perf-archive.sh +++ b/trunk/tools/perf/perf-archive.sh @@ -24,7 +24,7 @@ NOBUILDID=0000000000000000000000000000000000000000 perf buildid-list -i $PERF_DATA --with-hits | grep -v "^$NOBUILDID " > $BUILDIDS if [ ! -s $BUILDIDS ] ; then echo "perf archive: no build-ids found" - rm -f $BUILDIDS + rm $BUILDIDS || true exit 1 fi @@ -39,8 +39,8 @@ while read build_id ; do echo ${filename#$PERF_BUILDID_LINKDIR} >> $MANIFEST done -tar cfj $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST -rm -f $MANIFEST $BUILDIDS +tar cjf $PERF_DATA.tar.bz2 -C $PERF_BUILDID_DIR -T $MANIFEST +rm $MANIFEST $BUILDIDS || true echo -e "Now please run:\n" echo -e "$ tar xvf $PERF_DATA.tar.bz2 -C ~/.debug\n" echo "wherever you need to run 'perf report' on." 
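
The new builtin-trace.c added above resolves syscall numbers to names through libaudit (audit_detect_machine(), audit_syscall_to_name(), audit_errno_to_name()), which is also what the SOURCE_LIBAUDIT probe in feature-tests.mak checks for before the command is built in. A minimal, standalone sketch of that dependency follows — assuming the audit development headers are installed and linking with -laudit; the concrete syscall number is only illustrative:

#include <libaudit.h>
#include <stdio.h>

int main(void)
{
	int machine = audit_detect_machine();	/* arch of the running kernel */
	/* 2 happens to be "open" on x86_64; purely illustrative */
	const char *name = audit_syscall_to_name(2, machine);

	printf("machine %d, syscall 2 -> %s\n", machine, name ? name : "unknown");
	return 0;
}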
diff --git a/trunk/tools/perf/perf.c b/trunk/tools/perf/perf.c index 2b2e225a4d4c..fc2f770e3027 100644 --- a/trunk/tools/perf/perf.c +++ b/trunk/tools/perf/perf.c @@ -14,6 +14,7 @@ #include "util/run-command.h" #include "util/parse-events.h" #include "util/debugfs.h" +#include const char perf_usage_string[] = "perf [--version] [--help] COMMAND [ARGS]"; @@ -24,6 +25,42 @@ const char perf_more_info_string[] = int use_browser = -1; static int use_pager = -1; +struct cmd_struct { + const char *cmd; + int (*fn)(int, const char **, const char *); + int option; +}; + +static struct cmd_struct commands[] = { + { "buildid-cache", cmd_buildid_cache, 0 }, + { "buildid-list", cmd_buildid_list, 0 }, + { "diff", cmd_diff, 0 }, + { "evlist", cmd_evlist, 0 }, + { "help", cmd_help, 0 }, + { "list", cmd_list, 0 }, + { "record", cmd_record, 0 }, + { "report", cmd_report, 0 }, + { "bench", cmd_bench, 0 }, + { "stat", cmd_stat, 0 }, + { "timechart", cmd_timechart, 0 }, + { "top", cmd_top, 0 }, + { "annotate", cmd_annotate, 0 }, + { "version", cmd_version, 0 }, + { "script", cmd_script, 0 }, + { "sched", cmd_sched, 0 }, +#ifndef NO_LIBELF_SUPPORT + { "probe", cmd_probe, 0 }, +#endif + { "kmem", cmd_kmem, 0 }, + { "lock", cmd_lock, 0 }, + { "kvm", cmd_kvm, 0 }, + { "test", cmd_test, 0 }, +#ifndef NO_LIBAUDIT_SUPPORT + { "trace", cmd_trace, 0 }, +#endif + { "inject", cmd_inject, 0 }, +}; + struct pager_config { const char *cmd; int val; @@ -160,6 +197,14 @@ static int handle_options(const char ***argv, int *argc, int *envchanged) fprintf(stderr, "dir: %s\n", debugfs_mountpoint); if (envchanged) *envchanged = 1; + } else if (!strcmp(cmd, "--list-cmds")) { + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(commands); i++) { + struct cmd_struct *p = commands+i; + printf("%s ", p->cmd); + } + exit(0); } else { fprintf(stderr, "Unknown option: %s\n", cmd); usage(perf_usage_string); @@ -245,12 +290,6 @@ const char perf_version_string[] = PERF_VERSION; */ #define NEED_WORK_TREE (1<<2) -struct cmd_struct { - const char *cmd; - int (*fn)(int, const char **, const char *); - int option; -}; - static int run_builtin(struct cmd_struct *p, int argc, const char **argv) { int status; @@ -296,30 +335,6 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv) static void handle_internal_command(int argc, const char **argv) { const char *cmd = argv[0]; - static struct cmd_struct commands[] = { - { "buildid-cache", cmd_buildid_cache, 0 }, - { "buildid-list", cmd_buildid_list, 0 }, - { "diff", cmd_diff, 0 }, - { "evlist", cmd_evlist, 0 }, - { "help", cmd_help, 0 }, - { "list", cmd_list, 0 }, - { "record", cmd_record, 0 }, - { "report", cmd_report, 0 }, - { "bench", cmd_bench, 0 }, - { "stat", cmd_stat, 0 }, - { "timechart", cmd_timechart, 0 }, - { "top", cmd_top, 0 }, - { "annotate", cmd_annotate, 0 }, - { "version", cmd_version, 0 }, - { "script", cmd_script, 0 }, - { "sched", cmd_sched, 0 }, - { "probe", cmd_probe, 0 }, - { "kmem", cmd_kmem, 0 }, - { "lock", cmd_lock, 0 }, - { "kvm", cmd_kvm, 0 }, - { "test", cmd_test, 0 }, - { "inject", cmd_inject, 0 }, - }; unsigned int i; static const char ext[] = STRIP_EXTENSION; diff --git a/trunk/tools/perf/perf.h b/trunk/tools/perf/perf.h index f960ccb2edc6..87f4ec6d1f36 100644 --- a/trunk/tools/perf/perf.h +++ b/trunk/tools/perf/perf.h @@ -209,9 +209,15 @@ void pthread__unblock_sigwinch(void); #include "util/target.h" +enum perf_call_graph_mode { + CALLCHAIN_NONE, + CALLCHAIN_FP, + CALLCHAIN_DWARF +}; + struct perf_record_opts { struct perf_target target; - bool 
call_graph; + int call_graph; bool group; bool inherit_stat; bool no_delay; @@ -230,6 +236,7 @@ struct perf_record_opts { u64 branch_stack; u64 default_interval; u64 user_interval; + u16 stack_dump_size; }; #endif diff --git a/trunk/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/trunk/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py new file mode 100755 index 000000000000..9e0985794e20 --- /dev/null +++ b/trunk/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py @@ -0,0 +1,94 @@ +# EventClass.py +# +# This is a library defining some events types classes, which could +# be used by other scripts to analyzing the perf samples. +# +# Currently there are just a few classes defined for examples, +# PerfEvent is the base class for all perf event sample, PebsEvent +# is a HW base Intel x86 PEBS event, and user could add more SW/HW +# event classes based on requirements. + +import struct + +# Event types, user could add more here +EVTYPE_GENERIC = 0 +EVTYPE_PEBS = 1 # Basic PEBS event +EVTYPE_PEBS_LL = 2 # PEBS event with load latency info +EVTYPE_IBS = 3 + +# +# Currently we don't have good way to tell the event type, but by +# the size of raw buffer, raw PEBS event with load latency data's +# size is 176 bytes, while the pure PEBS event's size is 144 bytes. +# +def create_event(name, comm, dso, symbol, raw_buf): + if (len(raw_buf) == 144): + event = PebsEvent(name, comm, dso, symbol, raw_buf) + elif (len(raw_buf) == 176): + event = PebsNHM(name, comm, dso, symbol, raw_buf) + else: + event = PerfEvent(name, comm, dso, symbol, raw_buf) + + return event + +class PerfEvent(object): + event_num = 0 + def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC): + self.name = name + self.comm = comm + self.dso = dso + self.symbol = symbol + self.raw_buf = raw_buf + self.ev_type = ev_type + PerfEvent.event_num += 1 + + def show(self): + print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso) + +# +# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer +# contains the context info when that event happened: the EFLAGS and +# linear IP info, as well as all the registers. 
+# +class PebsEvent(PerfEvent): + pebs_num = 0 + def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS): + tmp_buf=raw_buf[0:80] + flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf) + self.flags = flags + self.ip = ip + self.ax = ax + self.bx = bx + self.cx = cx + self.dx = dx + self.si = si + self.di = di + self.bp = bp + self.sp = sp + + PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) + PebsEvent.pebs_num += 1 + del tmp_buf + +# +# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie +# in the four 64 bit words write after the PEBS data: +# Status: records the IA32_PERF_GLOBAL_STATUS register value +# DLA: Data Linear Address (EIP) +# DSE: Data Source Encoding, where the latency happens, hit or miss +# in L1/L2/L3 or IO operations +# LAT: the actual latency in cycles +# +class PebsNHM(PebsEvent): + pebs_nhm_num = 0 + def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL): + tmp_buf=raw_buf[144:176] + status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf) + self.status = status + self.dla = dla + self.dse = dse + self.lat = lat + + PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type) + PebsNHM.pebs_nhm_num += 1 + del tmp_buf diff --git a/trunk/tools/perf/scripts/python/bin/event_analyzing_sample-record b/trunk/tools/perf/scripts/python/bin/event_analyzing_sample-record new file mode 100644 index 000000000000..5ce652dabd02 --- /dev/null +++ b/trunk/tools/perf/scripts/python/bin/event_analyzing_sample-record @@ -0,0 +1,8 @@ +#!/bin/bash + +# +# event_analyzing_sample.py can cover all type of perf samples including +# the tracepoints, so no special record requirements, just record what +# you want to analyze. +# +perf record $@ diff --git a/trunk/tools/perf/scripts/python/bin/event_analyzing_sample-report b/trunk/tools/perf/scripts/python/bin/event_analyzing_sample-report new file mode 100644 index 000000000000..0941fc94e158 --- /dev/null +++ b/trunk/tools/perf/scripts/python/bin/event_analyzing_sample-report @@ -0,0 +1,3 @@ +#!/bin/bash +# description: analyze all perf samples +perf script $@ -s "$PERF_EXEC_PATH"/scripts/python/event_analyzing_sample.py diff --git a/trunk/tools/perf/scripts/python/event_analyzing_sample.py b/trunk/tools/perf/scripts/python/event_analyzing_sample.py new file mode 100644 index 000000000000..163c39fa12d9 --- /dev/null +++ b/trunk/tools/perf/scripts/python/event_analyzing_sample.py @@ -0,0 +1,189 @@ +# event_analyzing_sample.py: general event handler in python +# +# Current perf report is already very powerful with the annotation integrated, +# and this script is not trying to be as powerful as perf report, but +# providing end user/developer a flexible way to analyze the events other +# than trace points. +# +# The 2 database related functions in this script just show how to gather +# the basic information, and users can modify and write their own functions +# according to their specific requirement. +# +# The first function "show_general_events" just does a basic grouping for all +# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is +# for a x86 HW PMU event: PEBS with load latency data. 
+# + +import os +import sys +import math +import struct +import sqlite3 + +sys.path.append(os.environ['PERF_EXEC_PATH'] + \ + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') + +from perf_trace_context import * +from EventClass import * + +# +# If the perf.data has a big number of samples, then the insert operation +# will be very time consuming (about 10+ minutes for 10000 samples) if the +# .db database is on disk. Move the .db file to RAM based FS to speedup +# the handling, which will cut the time down to several seconds. +# +con = sqlite3.connect("/dev/shm/perf.db") +con.isolation_level = None + +def trace_begin(): + print "In trace_begin:\n" + + # + # Will create several tables at the start, pebs_ll is for PEBS data with + # load latency info, while gen_events is for general event. + # + con.execute(""" + create table if not exists gen_events ( + name text, + symbol text, + comm text, + dso text + );""") + con.execute(""" + create table if not exists pebs_ll ( + name text, + symbol text, + comm text, + dso text, + flags integer, + ip integer, + status integer, + dse integer, + dla integer, + lat integer + );""") + +# +# Create and insert event object to a database so that user could +# do more analysis with simple database commands. +# +def process_event(param_dict): + event_attr = param_dict["attr"] + sample = param_dict["sample"] + raw_buf = param_dict["raw_buf"] + comm = param_dict["comm"] + name = param_dict["ev_name"] + + # Symbol and dso info are not always resolved + if (param_dict.has_key("dso")): + dso = param_dict["dso"] + else: + dso = "Unknown_dso" + + if (param_dict.has_key("symbol")): + symbol = param_dict["symbol"] + else: + symbol = "Unknown_symbol" + + # Create the event object and insert it to the right table in database + event = create_event(name, comm, dso, symbol, raw_buf) + insert_db(event) + +def insert_db(event): + if event.ev_type == EVTYPE_GENERIC: + con.execute("insert into gen_events values(?, ?, ?, ?)", + (event.name, event.symbol, event.comm, event.dso)) + elif event.ev_type == EVTYPE_PEBS_LL: + event.ip &= 0x7fffffffffffffff + event.dla &= 0x7fffffffffffffff + con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", + (event.name, event.symbol, event.comm, event.dso, event.flags, + event.ip, event.status, event.dse, event.dla, event.lat)) + +def trace_end(): + print "In trace_end:\n" + # We show the basic info for the 2 type of event classes + show_general_events() + show_pebs_ll() + con.close() + +# +# As the event number may be very big, so we can't use linear way +# to show the histogram in real number, but use a log2 algorithm. 
+# + +def num2sym(num): + # Each number will have at least one '#' + snum = '#' * (int)(math.log(num, 2) + 1) + return snum + +def show_general_events(): + + # Check the total record number in the table + count = con.execute("select count(*) from gen_events") + for t in count: + print "There is %d records in gen_events table" % t[0] + if t[0] == 0: + return + + print "Statistics about the general events grouped by thread/symbol/dso: \n" + + # Group by thread + commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)") + print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42) + for row in commq: + print "%16s %8d %s" % (row[0], row[1], num2sym(row[1])) + + # Group by symbol + print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58) + symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)") + for row in symbolq: + print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) + + # Group by dso + print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74) + dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)") + for row in dsoq: + print "%40s %8d %s" % (row[0], row[1], num2sym(row[1])) + +# +# This function just shows the basic info, and we could do more with the +# data in the tables, like checking the function parameters when some +# big latency events happen. +# +def show_pebs_ll(): + + count = con.execute("select count(*) from pebs_ll") + for t in count: + print "There is %d records in pebs_ll table" % t[0] + if t[0] == 0: + return + + print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n" + + # Group by thread + commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)") + print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42) + for row in commq: + print "%16s %8d %s" % (row[0], row[1], num2sym(row[1])) + + # Group by symbol + print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58) + symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)") + for row in symbolq: + print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) + + # Group by dse + dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)") + print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58) + for row in dseq: + print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) + + # Group by latency + latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat") + print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58) + for row in latq: + print "%32s %8d %s" % (row[0], row[1], num2sym(row[1])) + +def trace_unhandled(event_name, context, event_fields_dict): + print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())]) diff --git a/trunk/tools/perf/ui/browser.c b/trunk/tools/perf/ui/browser.c index 1818a531f1d3..4aeb7d5df939 100644 --- a/trunk/tools/perf/ui/browser.c +++ b/trunk/tools/perf/ui/browser.c @@ -269,7 +269,7 @@ int ui_browser__show(struct ui_browser *browser, const char *title, return err ? 
0 : -1; } -void ui_browser__hide(struct ui_browser *browser __used) +void ui_browser__hide(struct ui_browser *browser __maybe_unused) { pthread_mutex_lock(&ui__lock); ui_helpline__pop(); @@ -518,7 +518,7 @@ static struct ui_browser__colorset { static int ui_browser__color_config(const char *var, const char *value, - void *data __used) + void *data __maybe_unused) { char *fg = NULL, *bg; int i; @@ -602,7 +602,8 @@ void __ui_browser__vline(struct ui_browser *browser, unsigned int column, SLsmg_set_char_set(0); } -void ui_browser__write_graph(struct ui_browser *browser __used, int graph) +void ui_browser__write_graph(struct ui_browser *browser __maybe_unused, + int graph) { SLsmg_set_char_set(1); SLsmg_write_char(graph); diff --git a/trunk/tools/perf/ui/browsers/annotate.c b/trunk/tools/perf/ui/browsers/annotate.c index 67a2703e666a..8f8cd2d73b3b 100644 --- a/trunk/tools/perf/ui/browsers/annotate.c +++ b/trunk/tools/perf/ui/browsers/annotate.c @@ -54,7 +54,8 @@ static inline struct browser_disasm_line *disasm_line__browser(struct disasm_lin return (struct browser_disasm_line *)(dl + 1); } -static bool disasm_line__filter(struct ui_browser *browser __used, void *entry) +static bool disasm_line__filter(struct ui_browser *browser __maybe_unused, + void *entry) { if (annotate_browser__opts.hide_src_code) { struct disasm_line *dl = list_entry(entry, struct disasm_line, node); @@ -928,7 +929,8 @@ static int annotate_config__cmp(const void *name, const void *cfgp) return strcmp(name, cfg->name); } -static int annotate__config(const char *var, const char *value, void *data __used) +static int annotate__config(const char *var, const char *value, + void *data __maybe_unused) { struct annotate__config *cfg; const char *name; diff --git a/trunk/tools/perf/ui/browsers/hists.c b/trunk/tools/perf/ui/browsers/hists.c index 413bd62eedb1..a21f40bebbac 100644 --- a/trunk/tools/perf/ui/browsers/hists.c +++ b/trunk/tools/perf/ui/browsers/hists.c @@ -24,9 +24,12 @@ struct hist_browser { struct hist_entry *he_selection; struct map_symbol *selection; int print_seq; + bool show_dso; bool has_symbols; }; +extern void hist_browser__init_hpp(void); + static int hists__browser_title(struct hists *hists, char *bf, size_t size, const char *ev_name); @@ -376,12 +379,19 @@ static int hist_browser__run(struct hist_browser *browser, const char *ev_name, } static char *callchain_list__sym_name(struct callchain_list *cl, - char *bf, size_t bfsize) + char *bf, size_t bfsize, bool show_dso) { + int printed; + if (cl->ms.sym) - return cl->ms.sym->name; + printed = scnprintf(bf, bfsize, "%s", cl->ms.sym->name); + else + printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip); + + if (show_dso) + scnprintf(bf + printed, bfsize - printed, " %s", + cl->ms.map ? 
cl->ms.map->dso->short_name : "unknown"); - snprintf(bf, bfsize, "%#" PRIx64, cl->ip); return bf; } @@ -417,7 +427,7 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse remaining -= cumul; list_for_each_entry(chain, &child->val, list) { - char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str; + char bf[1024], *alloc_str; const char *str; int color; bool was_first = first; @@ -434,7 +444,8 @@ static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *browse } alloc_str = NULL; - str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); + str = callchain_list__sym_name(chain, bf, sizeof(bf), + browser->show_dso); if (was_first) { double percent = cumul * 100.0 / new_total; @@ -493,7 +504,7 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser, char folded_sign = ' '; list_for_each_entry(chain, &node->val, list) { - char ipstr[BITS_PER_LONG / 4 + 1], *s; + char bf[1024], *s; int color; folded_sign = callchain_list__folded(chain); @@ -510,7 +521,8 @@ static int hist_browser__show_callchain_node(struct hist_browser *browser, *is_current_entry = true; } - s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); + s = callchain_list__sym_name(chain, bf, sizeof(bf), + browser->show_dso); ui_browser__gotorc(&browser->b, row, 0); ui_browser__set_color(&browser->b, color); slsmg_write_nstring(" ", offset); @@ -553,14 +565,47 @@ static int hist_browser__show_callchain(struct hist_browser *browser, return row - first_row; } +#define HPP__COLOR_FN(_name, _field) \ +static int hist_browser__hpp_color_ ## _name(struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + double percent = 100.0 * he->_field / hpp->total_period; \ + *(double *)hpp->ptr = percent; \ + return scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent); \ +} + +HPP__COLOR_FN(overhead, period) +HPP__COLOR_FN(overhead_sys, period_sys) +HPP__COLOR_FN(overhead_us, period_us) +HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) +HPP__COLOR_FN(overhead_guest_us, period_guest_us) + +#undef HPP__COLOR_FN + +void hist_browser__init_hpp(void) +{ + perf_hpp__init(false, false); + + perf_hpp__format[PERF_HPP__OVERHEAD].color = + hist_browser__hpp_color_overhead; + perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = + hist_browser__hpp_color_overhead_sys; + perf_hpp__format[PERF_HPP__OVERHEAD_US].color = + hist_browser__hpp_color_overhead_us; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = + hist_browser__hpp_color_overhead_guest_sys; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = + hist_browser__hpp_color_overhead_guest_us; +} + static int hist_browser__show_entry(struct hist_browser *browser, struct hist_entry *entry, unsigned short row) { char s[256]; double percent; - int printed = 0; - int width = browser->b.width - 6; /* The percentage */ + int i, printed = 0; + int width = browser->b.width; char folded_sign = ' '; bool current_entry = ui_browser__is_current_entry(&browser->b, row); off_t row_offset = entry->row_offset; @@ -576,35 +621,50 @@ static int hist_browser__show_entry(struct hist_browser *browser, } if (row_offset == 0) { - hist_entry__snprintf(entry, s, sizeof(s), browser->hists); - percent = (entry->period * 100.0) / browser->hists->stats.total_period; + struct perf_hpp hpp = { + .buf = s, + .size = sizeof(s), + .total_period = browser->hists->stats.total_period, + }; - ui_browser__set_percent_color(&browser->b, percent, current_entry); ui_browser__gotorc(&browser->b, row, 0); - if (symbol_conf.use_callchain) { - slsmg_printf("%c ", folded_sign); - width 
-= 2; - } - slsmg_printf(" %5.2f%%", percent); + for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { + if (!perf_hpp__format[i].cond) + continue; - /* The scroll bar isn't being used */ - if (!browser->b.navkeypressed) - width += 1; + if (i) { + slsmg_printf(" "); + width -= 2; + } - if (!current_entry || !browser->b.navkeypressed) - ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL); + if (perf_hpp__format[i].color) { + hpp.ptr = &percent; + /* It will set percent for us. See HPP__COLOR_FN above. */ + width -= perf_hpp__format[i].color(&hpp, entry); - if (symbol_conf.show_nr_samples) { - slsmg_printf(" %11u", entry->nr_events); - width -= 12; - } + ui_browser__set_percent_color(&browser->b, percent, current_entry); + + if (i == 0 && symbol_conf.use_callchain) { + slsmg_printf("%c ", folded_sign); + width -= 2; + } + + slsmg_printf("%s", s); - if (symbol_conf.show_total_period) { - slsmg_printf(" %12" PRIu64, entry->period); - width -= 13; + if (!current_entry || !browser->b.navkeypressed) + ui_browser__set_color(&browser->b, HE_COLORSET_NORMAL); + } else { + width -= perf_hpp__format[i].entry(&hpp, entry); + slsmg_printf("%s", s); + } } + /* The scroll bar isn't being used */ + if (!browser->b.navkeypressed) + width += 1; + + hist_entry__sort_snprintf(entry, s, sizeof(s), browser->hists); slsmg_write_nstring(s, width); ++row; ++printed; @@ -830,7 +890,7 @@ static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *bro remaining -= cumul; list_for_each_entry(chain, &child->val, list) { - char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str; + char bf[1024], *alloc_str; const char *str; bool was_first = first; @@ -842,7 +902,8 @@ static int hist_browser__fprintf_callchain_node_rb_tree(struct hist_browser *bro folded_sign = callchain_list__folded(chain); alloc_str = NULL; - str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); + str = callchain_list__sym_name(chain, bf, sizeof(bf), + browser->show_dso); if (was_first) { double percent = cumul * 100.0 / new_total; @@ -880,10 +941,10 @@ static int hist_browser__fprintf_callchain_node(struct hist_browser *browser, int printed = 0; list_for_each_entry(chain, &node->val, list) { - char ipstr[BITS_PER_LONG / 4 + 1], *s; + char bf[1024], *s; folded_sign = callchain_list__folded(chain); - s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); + s = callchain_list__sym_name(chain, bf, sizeof(bf), browser->show_dso); printed += fprintf(fp, "%*s%c %s\n", offset, " ", folded_sign, s); } @@ -920,7 +981,7 @@ static int hist_browser__fprintf_entry(struct hist_browser *browser, if (symbol_conf.use_callchain) folded_sign = hist_entry__folded(he); - hist_entry__snprintf(he, s, sizeof(s), browser->hists); + hist_entry__sort_snprintf(he, s, sizeof(s), browser->hists); percent = (he->period * 100.0) / browser->hists->stats.total_period; if (symbol_conf.use_callchain) @@ -1133,6 +1194,9 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, continue; case 'd': goto zoom_dso; + case 'V': + browser->show_dso = !browser->show_dso; + continue; case 't': goto zoom_thread; case '/': @@ -1164,6 +1228,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, "d Zoom into current DSO\n" "t Zoom into current Thread\n" "P Print histograms to perf.hist.N\n" + "V Verbose (DSO names in callchains, etc)\n" "/ Filter symbol by name"); continue; case K_ENTER: diff --git a/trunk/tools/perf/ui/gtk/browser.c b/trunk/tools/perf/ui/gtk/browser.c index ec12e0b4ded6..7ff99ec1d95e 100644 --- 
a/trunk/tools/perf/ui/gtk/browser.c +++ b/trunk/tools/perf/ui/gtk/browser.c @@ -3,6 +3,7 @@ #include "../evsel.h" #include "../sort.h" #include "../hist.h" +#include "../helpline.h" #include "gtk.h" #include @@ -35,6 +36,57 @@ static void perf_gtk__resize_window(GtkWidget *window) gtk_window_resize(GTK_WINDOW(window), width, height); } +static const char *perf_gtk__get_percent_color(double percent) +{ + if (percent >= MIN_RED) + return "<span fgcolor='red'>"; + if (percent >= MIN_GREEN) + return "<span fgcolor='dark green'>"; + return NULL; +} + +#define HPP__COLOR_FN(_name, _field) \ +static int perf_gtk__hpp_color_ ## _name(struct perf_hpp *hpp, \ + struct hist_entry *he) \ +{ \ + double percent = 100.0 * he->_field / hpp->total_period; \ + const char *markup; \ + int ret = 0; \ + \ + markup = perf_gtk__get_percent_color(percent); \ + if (markup) \ + ret += scnprintf(hpp->buf, hpp->size, "%s", markup); \ + ret += scnprintf(hpp->buf + ret, hpp->size - ret, "%6.2f%%", percent); \ + if (markup) \ + ret += scnprintf(hpp->buf + ret, hpp->size - ret, "</span>"); \ + \ + return ret; \ +} + +HPP__COLOR_FN(overhead, period) +HPP__COLOR_FN(overhead_sys, period_sys) +HPP__COLOR_FN(overhead_us, period_us) +HPP__COLOR_FN(overhead_guest_sys, period_guest_sys) +HPP__COLOR_FN(overhead_guest_us, period_guest_us) + +#undef HPP__COLOR_FN + +void perf_gtk__init_hpp(void) +{ + perf_hpp__init(false, false); + + perf_hpp__format[PERF_HPP__OVERHEAD].color = + perf_gtk__hpp_color_overhead; + perf_hpp__format[PERF_HPP__OVERHEAD_SYS].color = + perf_gtk__hpp_color_overhead_sys; + perf_hpp__format[PERF_HPP__OVERHEAD_US].color = + perf_gtk__hpp_color_overhead_us; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].color = + perf_gtk__hpp_color_overhead_guest_sys; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].color = + perf_gtk__hpp_color_overhead_guest_us; +} + static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) { GType col_types[MAX_COLUMNS]; @@ -42,15 +94,25 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) struct sort_entry *se; GtkListStore *store; struct rb_node *nd; - u64 total_period; GtkWidget *view; - int col_idx; + int i, col_idx; int nr_cols; + char s[512]; + + struct perf_hpp hpp = { + .buf = s, + .size = sizeof(s), + .total_period = hists->stats.total_period, + }; nr_cols = 0; - /* The percentage column */ - col_types[nr_cols++] = G_TYPE_STRING; + for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { + if (!perf_hpp__format[i].cond) + continue; + + col_types[nr_cols++] = G_TYPE_STRING; + } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) @@ -67,11 +129,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) col_idx = 0; - /* The percentage column */ - gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), - -1, "Overhead (%)", - renderer, "text", - col_idx++, NULL); + for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { + if (!perf_hpp__format[i].cond) + continue; + + perf_hpp__format[i].header(&hpp); + + gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view), + -1, s, + renderer, "markup", + col_idx++, NULL); + } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) @@ -87,13 +155,9 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) g_object_unref(GTK_TREE_MODEL(store)); - total_period = hists->stats.total_period; - for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); GtkTreeIter iter; - double percent; - char s[512]; if (h->filtered) continue; @@
-102,11 +166,17 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists) col_idx = 0; - percent = (h->period * 100.0) / total_period; + for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { + if (!perf_hpp__format[i].cond) + continue; - snprintf(s, ARRAY_SIZE(s), "%.2f", percent); + if (perf_hpp__format[i].color) + perf_hpp__format[i].color(&hpp, h); + else + perf_hpp__format[i].entry(&hpp, h); - gtk_list_store_set(store, &iter, col_idx++, s, -1); + gtk_list_store_set(store, &iter, col_idx++, s, -1); + } list_for_each_entry(se, &hist_entry__sort_list, list) { if (se->elide) @@ -166,9 +236,10 @@ static GtkWidget *perf_gtk__setup_statusbar(void) } int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, - const char *help __used, - void (*timer) (void *arg)__used, - void *arg __used, int delay_secs __used) + const char *help, + void (*timer) (void *arg)__maybe_unused, + void *arg __maybe_unused, + int delay_secs __maybe_unused) { struct perf_evsel *pos; GtkWidget *vbox; @@ -233,6 +304,8 @@ int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist, gtk_window_set_position(GTK_WINDOW(window), GTK_WIN_POS_CENTER); + ui_helpline__push(help); + gtk_main(); perf_gtk__deactivate_context(&pgctx); diff --git a/trunk/tools/perf/ui/gtk/gtk.h b/trunk/tools/perf/ui/gtk/gtk.h index a4d0f2b4a2dc..687af0bba187 100644 --- a/trunk/tools/perf/ui/gtk/gtk.h +++ b/trunk/tools/perf/ui/gtk/gtk.h @@ -29,6 +29,9 @@ static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx) struct perf_gtk_context *perf_gtk__activate_context(GtkWidget *window); int perf_gtk__deactivate_context(struct perf_gtk_context **ctx); +void perf_gtk__init_helpline(void); +void perf_gtk__init_hpp(void); + #ifndef HAVE_GTK_INFO_BAR static inline GtkWidget *perf_gtk__setup_info_bar(void) { diff --git a/trunk/tools/perf/ui/gtk/helpline.c b/trunk/tools/perf/ui/gtk/helpline.c new file mode 100644 index 000000000000..5db4432ff12a --- /dev/null +++ b/trunk/tools/perf/ui/gtk/helpline.c @@ -0,0 +1,56 @@ +#include +#include + +#include "gtk.h" +#include "../ui.h" +#include "../helpline.h" +#include "../../util/debug.h" + +static void gtk_helpline_pop(void) +{ + if (!perf_gtk__is_active_context(pgctx)) + return; + + gtk_statusbar_pop(GTK_STATUSBAR(pgctx->statbar), + pgctx->statbar_ctx_id); +} + +static void gtk_helpline_push(const char *msg) +{ + if (!perf_gtk__is_active_context(pgctx)) + return; + + gtk_statusbar_push(GTK_STATUSBAR(pgctx->statbar), + pgctx->statbar_ctx_id, msg); +} + +static struct ui_helpline gtk_helpline_fns = { + .pop = gtk_helpline_pop, + .push = gtk_helpline_push, +}; + +void perf_gtk__init_helpline(void) +{ + helpline_fns = &gtk_helpline_fns; +} + +int perf_gtk__show_helpline(const char *fmt, va_list ap) +{ + int ret; + char *ptr; + static int backlog; + + ret = vscnprintf(ui_helpline__current + backlog, + sizeof(ui_helpline__current) - backlog, fmt, ap); + backlog += ret; + + /* only first line can be displayed */ + ptr = strchr(ui_helpline__current, '\n'); + if (ptr && (ptr - ui_helpline__current) <= backlog) { + *ptr = '\0'; + ui_helpline__puts(ui_helpline__current); + backlog = 0; + } + + return ret; +} diff --git a/trunk/tools/perf/ui/gtk/setup.c b/trunk/tools/perf/ui/gtk/setup.c index 92879ce61e2f..3c4c6ef78283 100644 --- a/trunk/tools/perf/ui/gtk/setup.c +++ b/trunk/tools/perf/ui/gtk/setup.c @@ -7,11 +7,15 @@ extern struct perf_error_ops perf_gtk_eops; int perf_gtk__init(void) { perf_error__register(&perf_gtk_eops); + perf_gtk__init_helpline(); + perf_gtk__init_hpp(); return
gtk_init_check(NULL, NULL) ? 0 : -1; } -void perf_gtk__exit(bool wait_for_ok __used) +void perf_gtk__exit(bool wait_for_ok __maybe_unused) { + if (!perf_gtk__is_active_context(pgctx)) + return; perf_error__unregister(&perf_gtk_eops); gtk_main_quit(); } diff --git a/trunk/tools/perf/ui/gtk/util.c b/trunk/tools/perf/ui/gtk/util.c index 0ead373c0dfb..8aada5b3c04c 100644 --- a/trunk/tools/perf/ui/gtk/util.c +++ b/trunk/tools/perf/ui/gtk/util.c @@ -117,13 +117,8 @@ struct perf_error_ops perf_gtk_eops = { * For now, just add stubs for NO_NEWT=1 build. */ #ifdef NO_NEWT_SUPPORT -int ui_helpline__show_help(const char *format __used, va_list ap __used) -{ - return 0; -} - -void ui_progress__update(u64 curr __used, u64 total __used, - const char *title __used) +void ui_progress__update(u64 curr __maybe_unused, u64 total __maybe_unused, + const char *title __maybe_unused) { } #endif diff --git a/trunk/tools/perf/ui/helpline.c b/trunk/tools/perf/ui/helpline.c index 2f950c2641c8..a49bcf3c190b 100644 --- a/trunk/tools/perf/ui/helpline.c +++ b/trunk/tools/perf/ui/helpline.c @@ -5,23 +5,32 @@ #include "../debug.h" #include "helpline.h" #include "ui.h" -#include "libslang.h" -void ui_helpline__pop(void) +char ui_helpline__current[512]; + +static void nop_helpline__pop(void) { } -char ui_helpline__current[512]; +static void nop_helpline__push(const char *msg __maybe_unused) +{ +} -void ui_helpline__push(const char *msg) +static struct ui_helpline default_helpline_fns = { + .pop = nop_helpline__pop, + .push = nop_helpline__push, +}; + +struct ui_helpline *helpline_fns = &default_helpline_fns; + +void ui_helpline__pop(void) { - const size_t sz = sizeof(ui_helpline__current); + helpline_fns->pop(); +} - SLsmg_gotorc(SLtt_Screen_Rows - 1, 0); - SLsmg_set_color(0); - SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols); - SLsmg_refresh(); - strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; +void ui_helpline__push(const char *msg) +{ + helpline_fns->push(msg); } void ui_helpline__vpush(const char *fmt, va_list ap) @@ -50,30 +59,3 @@ void ui_helpline__puts(const char *msg) ui_helpline__pop(); ui_helpline__push(msg); } - -void ui_helpline__init(void) -{ - ui_helpline__puts(" "); -} - -char ui_helpline__last_msg[1024]; - -int ui_helpline__show_help(const char *format, va_list ap) -{ - int ret; - static int backlog; - - pthread_mutex_lock(&ui__lock); - ret = vscnprintf(ui_helpline__last_msg + backlog, - sizeof(ui_helpline__last_msg) - backlog, format, ap); - backlog += ret; - - if (ui_helpline__last_msg[backlog - 1] == '\n') { - ui_helpline__puts(ui_helpline__last_msg); - SLsmg_refresh(); - backlog = 0; - } - pthread_mutex_unlock(&ui__lock); - - return ret; -} diff --git a/trunk/tools/perf/ui/helpline.h b/trunk/tools/perf/ui/helpline.h index 7bab6b34e35e..2b667ee454c3 100644 --- a/trunk/tools/perf/ui/helpline.h +++ b/trunk/tools/perf/ui/helpline.h @@ -4,13 +4,44 @@ #include #include +#include "../util/cache.h" + +struct ui_helpline { + void (*pop)(void); + void (*push)(const char *msg); +}; + +extern struct ui_helpline *helpline_fns; + void ui_helpline__init(void); + void ui_helpline__pop(void); void ui_helpline__push(const char *msg); void ui_helpline__vpush(const char *fmt, va_list ap); void ui_helpline__fpush(const char *fmt, ...); void ui_helpline__puts(const char *msg); -extern char ui_helpline__current[]; +extern char ui_helpline__current[512]; + +#ifdef NO_NEWT_SUPPORT +static inline int ui_helpline__show_help(const char *format __maybe_unused, + va_list ap __maybe_unused) +{ + return 0; +} +#else +extern 
char ui_helpline__last_msg[]; +int ui_helpline__show_help(const char *format, va_list ap); +#endif /* NO_NEWT_SUPPORT */ + +#ifdef NO_GTK2_SUPPORT +static inline int perf_gtk__show_helpline(const char *format __maybe_unused, + va_list ap __maybe_unused) +{ + return 0; +} +#else +int perf_gtk__show_helpline(const char *format, va_list ap); +#endif /* NO_GTK2_SUPPORT */ #endif /* _PERF_UI_HELPLINE_H_ */ diff --git a/trunk/tools/perf/ui/hist.c b/trunk/tools/perf/ui/hist.c new file mode 100644 index 000000000000..e3f8cd46e7d7 --- /dev/null +++ b/trunk/tools/perf/ui/hist.c @@ -0,0 +1,390 @@ +#include + +#include "../util/hist.h" +#include "../util/util.h" +#include "../util/sort.h" + + +/* hist period print (hpp) functions */ +static int hpp__header_overhead(struct perf_hpp *hpp) +{ + const char *fmt = hpp->ptr ? "Baseline" : "Overhead"; + + return scnprintf(hpp->buf, hpp->size, fmt); +} + +static int hpp__width_overhead(struct perf_hpp *hpp __maybe_unused) +{ + return 8; +} + +static int hpp__color_overhead(struct perf_hpp *hpp, struct hist_entry *he) +{ + double percent = 100.0 * he->period / hpp->total_period; + + if (hpp->ptr) { + struct hists *old_hists = hpp->ptr; + u64 total_period = old_hists->stats.total_period; + u64 base_period = he->pair ? he->pair->period : 0; + + if (total_period) + percent = 100.0 * base_period / total_period; + else + percent = 0.0; + } + + return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%%", percent); +} + +static int hpp__entry_overhead(struct perf_hpp *hpp, struct hist_entry *he) +{ + double percent = 100.0 * he->period / hpp->total_period; + const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%%"; + + if (hpp->ptr) { + struct hists *old_hists = hpp->ptr; + u64 total_period = old_hists->stats.total_period; + u64 base_period = he->pair ? he->pair->period : 0; + + if (total_period) + percent = 100.0 * base_period / total_period; + else + percent = 0.0; + } + + return scnprintf(hpp->buf, hpp->size, fmt, percent); +} + +static int hpp__header_overhead_sys(struct perf_hpp *hpp) +{ + const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; + + return scnprintf(hpp->buf, hpp->size, fmt, "sys"); +} + +static int hpp__width_overhead_sys(struct perf_hpp *hpp __maybe_unused) +{ + return 7; +} + +static int hpp__color_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) +{ + double percent = 100.0 * he->period_sys / hpp->total_period; + return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); +} + +static int hpp__entry_overhead_sys(struct perf_hpp *hpp, struct hist_entry *he) +{ + double percent = 100.0 * he->period_sys / hpp->total_period; + const char *fmt = symbol_conf.field_sep ? "%.2f" : "%6.2f%%"; + + return scnprintf(hpp->buf, hpp->size, fmt, percent); +} + +static int hpp__header_overhead_us(struct perf_hpp *hpp) +{ + const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; + + return scnprintf(hpp->buf, hpp->size, fmt, "user"); +} + +static int hpp__width_overhead_us(struct perf_hpp *hpp __maybe_unused) +{ + return 7; +} + +static int hpp__color_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) +{ + double percent = 100.0 * he->period_us / hpp->total_period; + return percent_color_snprintf(hpp->buf, hpp->size, "%6.2f%%", percent); +} + +static int hpp__entry_overhead_us(struct perf_hpp *hpp, struct hist_entry *he) +{ + double percent = 100.0 * he->period_us / hpp->total_period; + const char *fmt = symbol_conf.field_sep ? 
"%.2f" : "%6.2f%%"; + + return scnprintf(hpp->buf, hpp->size, fmt, percent); +} + +static int hpp__header_overhead_guest_sys(struct perf_hpp *hpp) +{ + return scnprintf(hpp->buf, hpp->size, "guest sys"); +} + +static int hpp__width_overhead_guest_sys(struct perf_hpp *hpp __maybe_unused) +{ + return 9; +} + +static int hpp__color_overhead_guest_sys(struct perf_hpp *hpp, + struct hist_entry *he) +{ + double percent = 100.0 * he->period_guest_sys / hpp->total_period; + return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); +} + +static int hpp__entry_overhead_guest_sys(struct perf_hpp *hpp, + struct hist_entry *he) +{ + double percent = 100.0 * he->period_guest_sys / hpp->total_period; + const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; + + return scnprintf(hpp->buf, hpp->size, fmt, percent); +} + +static int hpp__header_overhead_guest_us(struct perf_hpp *hpp) +{ + return scnprintf(hpp->buf, hpp->size, "guest usr"); +} + +static int hpp__width_overhead_guest_us(struct perf_hpp *hpp __maybe_unused) +{ + return 9; +} + +static int hpp__color_overhead_guest_us(struct perf_hpp *hpp, + struct hist_entry *he) +{ + double percent = 100.0 * he->period_guest_us / hpp->total_period; + return percent_color_snprintf(hpp->buf, hpp->size, " %6.2f%% ", percent); +} + +static int hpp__entry_overhead_guest_us(struct perf_hpp *hpp, + struct hist_entry *he) +{ + double percent = 100.0 * he->period_guest_us / hpp->total_period; + const char *fmt = symbol_conf.field_sep ? "%.2f" : " %6.2f%% "; + + return scnprintf(hpp->buf, hpp->size, fmt, percent); +} + +static int hpp__header_samples(struct perf_hpp *hpp) +{ + const char *fmt = symbol_conf.field_sep ? "%s" : "%11s"; + + return scnprintf(hpp->buf, hpp->size, fmt, "Samples"); +} + +static int hpp__width_samples(struct perf_hpp *hpp __maybe_unused) +{ + return 11; +} + +static int hpp__entry_samples(struct perf_hpp *hpp, struct hist_entry *he) +{ + const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%11" PRIu64; + + return scnprintf(hpp->buf, hpp->size, fmt, he->nr_events); +} + +static int hpp__header_period(struct perf_hpp *hpp) +{ + const char *fmt = symbol_conf.field_sep ? "%s" : "%12s"; + + return scnprintf(hpp->buf, hpp->size, fmt, "Period"); +} + +static int hpp__width_period(struct perf_hpp *hpp __maybe_unused) +{ + return 12; +} + +static int hpp__entry_period(struct perf_hpp *hpp, struct hist_entry *he) +{ + const char *fmt = symbol_conf.field_sep ? "%" PRIu64 : "%12" PRIu64; + + return scnprintf(hpp->buf, hpp->size, fmt, he->period); +} + +static int hpp__header_delta(struct perf_hpp *hpp) +{ + const char *fmt = symbol_conf.field_sep ? "%s" : "%7s"; + + return scnprintf(hpp->buf, hpp->size, fmt, "Delta"); +} + +static int hpp__width_delta(struct perf_hpp *hpp __maybe_unused) +{ + return 7; +} + +static int hpp__entry_delta(struct perf_hpp *hpp, struct hist_entry *he) +{ + struct hists *pair_hists = hpp->ptr; + u64 old_total, new_total; + double old_percent = 0, new_percent = 0; + double diff; + const char *fmt = symbol_conf.field_sep ? 
"%s" : "%7.7s"; + char buf[32] = " "; + + old_total = pair_hists->stats.total_period; + if (old_total > 0 && he->pair) + old_percent = 100.0 * he->pair->period / old_total; + + new_total = hpp->total_period; + if (new_total > 0) + new_percent = 100.0 * he->period / new_total; + + diff = new_percent - old_percent; + if (fabs(diff) >= 0.01) + scnprintf(buf, sizeof(buf), "%+4.2F%%", diff); + + return scnprintf(hpp->buf, hpp->size, fmt, buf); +} + +static int hpp__header_displ(struct perf_hpp *hpp) +{ + return scnprintf(hpp->buf, hpp->size, "Displ."); +} + +static int hpp__width_displ(struct perf_hpp *hpp __maybe_unused) +{ + return 6; +} + +static int hpp__entry_displ(struct perf_hpp *hpp, + struct hist_entry *he __maybe_unused) +{ + const char *fmt = symbol_conf.field_sep ? "%s" : "%6.6s"; + char buf[32] = " "; + + if (hpp->displacement) + scnprintf(buf, sizeof(buf), "%+4ld", hpp->displacement); + + return scnprintf(hpp->buf, hpp->size, fmt, buf); +} + +#define HPP__COLOR_PRINT_FNS(_name) \ + .header = hpp__header_ ## _name, \ + .width = hpp__width_ ## _name, \ + .color = hpp__color_ ## _name, \ + .entry = hpp__entry_ ## _name + +#define HPP__PRINT_FNS(_name) \ + .header = hpp__header_ ## _name, \ + .width = hpp__width_ ## _name, \ + .entry = hpp__entry_ ## _name + +struct perf_hpp_fmt perf_hpp__format[] = { + { .cond = true, HPP__COLOR_PRINT_FNS(overhead) }, + { .cond = false, HPP__COLOR_PRINT_FNS(overhead_sys) }, + { .cond = false, HPP__COLOR_PRINT_FNS(overhead_us) }, + { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_sys) }, + { .cond = false, HPP__COLOR_PRINT_FNS(overhead_guest_us) }, + { .cond = false, HPP__PRINT_FNS(samples) }, + { .cond = false, HPP__PRINT_FNS(period) }, + { .cond = false, HPP__PRINT_FNS(delta) }, + { .cond = false, HPP__PRINT_FNS(displ) } +}; + +#undef HPP__COLOR_PRINT_FNS +#undef HPP__PRINT_FNS + +void perf_hpp__init(bool need_pair, bool show_displacement) +{ + if (symbol_conf.show_cpu_utilization) { + perf_hpp__format[PERF_HPP__OVERHEAD_SYS].cond = true; + perf_hpp__format[PERF_HPP__OVERHEAD_US].cond = true; + + if (perf_guest) { + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_SYS].cond = true; + perf_hpp__format[PERF_HPP__OVERHEAD_GUEST_US].cond = true; + } + } + + if (symbol_conf.show_nr_samples) + perf_hpp__format[PERF_HPP__SAMPLES].cond = true; + + if (symbol_conf.show_total_period) + perf_hpp__format[PERF_HPP__PERIOD].cond = true; + + if (need_pair) { + perf_hpp__format[PERF_HPP__DELTA].cond = true; + + if (show_displacement) + perf_hpp__format[PERF_HPP__DISPL].cond = true; + } +} + +static inline void advance_hpp(struct perf_hpp *hpp, int inc) +{ + hpp->buf += inc; + hpp->size -= inc; +} + +int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, + bool color) +{ + const char *sep = symbol_conf.field_sep; + char *start = hpp->buf; + int i, ret; + + if (symbol_conf.exclude_other && !he->parent) + return 0; + + for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { + if (!perf_hpp__format[i].cond) + continue; + + if (!sep || i > 0) { + ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " "); + advance_hpp(hpp, ret); + } + + if (color && perf_hpp__format[i].color) + ret = perf_hpp__format[i].color(hpp, he); + else + ret = perf_hpp__format[i].entry(hpp, he); + + advance_hpp(hpp, ret); + } + + return hpp->buf - start; +} + +int hist_entry__sort_snprintf(struct hist_entry *he, char *s, size_t size, + struct hists *hists) +{ + const char *sep = symbol_conf.field_sep; + struct sort_entry *se; + int ret = 0; + + list_for_each_entry(se, &hist_entry__sort_list, 
list) { + if (se->elide) + continue; + + ret += scnprintf(s + ret, size - ret, "%s", sep ?: " "); + ret += se->se_snprintf(he, s + ret, size - ret, + hists__col_len(hists, se->se_width_idx)); + } + + return ret; +} + +/* + * See hists__fprintf to match the column widths + */ +unsigned int hists__sort_list_width(struct hists *hists) +{ + struct sort_entry *se; + int i, ret = 0; + + for (i = 0; i < PERF_HPP__MAX_INDEX; i++) { + if (!perf_hpp__format[i].cond) + continue; + if (i) + ret += 2; + + ret += perf_hpp__format[i].width(NULL); + } + + list_for_each_entry(se, &hist_entry__sort_list, list) + if (!se->elide) + ret += 2 + hists__col_len(hists, se->se_width_idx); + + if (verbose) /* Addr + origin */ + ret += 3 + BITS_PER_LONG / 4; + + return ret; +} diff --git a/trunk/tools/perf/ui/setup.c b/trunk/tools/perf/ui/setup.c index 791fb15ce350..bd7d460f844c 100644 --- a/trunk/tools/perf/ui/setup.c +++ b/trunk/tools/perf/ui/setup.c @@ -1,6 +1,10 @@ -#include "../cache.h" -#include "../debug.h" +#include +#include "../util/cache.h" +#include "../util/debug.h" +#include "../util/hist.h" + +pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; void setup_browser(bool fallback_to_pager) { @@ -25,6 +29,8 @@ void setup_browser(bool fallback_to_pager) use_browser = 0; if (fallback_to_pager) setup_pager(); + + perf_hpp__init(false, false); break; } } diff --git a/trunk/tools/perf/ui/stdio/hist.c b/trunk/tools/perf/ui/stdio/hist.c new file mode 100644 index 000000000000..882461a42830 --- /dev/null +++ b/trunk/tools/perf/ui/stdio/hist.c @@ -0,0 +1,498 @@ +#include + +#include "../../util/util.h" +#include "../../util/hist.h" +#include "../../util/sort.h" + + +static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) +{ + int i; + int ret = fprintf(fp, " "); + + for (i = 0; i < left_margin; i++) + ret += fprintf(fp, " "); + + return ret; +} + +static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, + int left_margin) +{ + int i; + size_t ret = callchain__fprintf_left_margin(fp, left_margin); + + for (i = 0; i < depth; i++) + if (depth_mask & (1 << i)) + ret += fprintf(fp, "| "); + else + ret += fprintf(fp, " "); + + ret += fprintf(fp, "\n"); + + return ret; +} + +static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, + int depth, int depth_mask, int period, + u64 total_samples, u64 hits, + int left_margin) +{ + int i; + size_t ret = 0; + + ret += callchain__fprintf_left_margin(fp, left_margin); + for (i = 0; i < depth; i++) { + if (depth_mask & (1 << i)) + ret += fprintf(fp, "|"); + else + ret += fprintf(fp, " "); + if (!period && i == depth - 1) { + double percent; + + percent = hits * 100.0 / total_samples; + ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); + } else + ret += fprintf(fp, "%s", " "); + } + if (chain->ms.sym) + ret += fprintf(fp, "%s\n", chain->ms.sym->name); + else + ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip); + + return ret; +} + +static struct symbol *rem_sq_bracket; +static struct callchain_list rem_hits; + +static void init_rem_hits(void) +{ + rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); + if (!rem_sq_bracket) { + fprintf(stderr, "Not enough memory to display remaining hits\n"); + return; + } + + strcpy(rem_sq_bracket->name, "[...]"); + rem_hits.ms.sym = rem_sq_bracket; +} + +static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, + u64 total_samples, int depth, + int depth_mask, int left_margin) +{ + struct rb_node *node, *next; + struct callchain_node *child; + struct callchain_list 
*chain; + int new_depth_mask = depth_mask; + u64 remaining; + size_t ret = 0; + int i; + uint entries_printed = 0; + + remaining = total_samples; + + node = rb_first(root); + while (node) { + u64 new_total; + u64 cumul; + + child = rb_entry(node, struct callchain_node, rb_node); + cumul = callchain_cumul_hits(child); + remaining -= cumul; + + /* + * The depth mask manages the output of pipes that show + * the depth. We don't want to keep the pipes of the current + * level for the last child of this depth. + * Except if we have remaining filtered hits. They will + * supersede the last child + */ + next = rb_next(node); + if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) + new_depth_mask &= ~(1 << (depth - 1)); + + /* + * But we keep the older depth mask for the line separator + * to keep the level link until we reach the last child + */ + ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, + left_margin); + i = 0; + list_for_each_entry(chain, &child->val, list) { + ret += ipchain__fprintf_graph(fp, chain, depth, + new_depth_mask, i++, + total_samples, + cumul, + left_margin); + } + + if (callchain_param.mode == CHAIN_GRAPH_REL) + new_total = child->children_hit; + else + new_total = total_samples; + + ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total, + depth + 1, + new_depth_mask | (1 << depth), + left_margin); + node = next; + if (++entries_printed == callchain_param.print_limit) + break; + } + + if (callchain_param.mode == CHAIN_GRAPH_REL && + remaining && remaining != total_samples) { + + if (!rem_sq_bracket) + return ret; + + new_depth_mask &= ~(1 << (depth - 1)); + ret += ipchain__fprintf_graph(fp, &rem_hits, depth, + new_depth_mask, 0, total_samples, + remaining, left_margin); + } + + return ret; +} + +static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, + u64 total_samples, int left_margin) +{ + struct callchain_node *cnode; + struct callchain_list *chain; + u32 entries_printed = 0; + bool printed = false; + struct rb_node *node; + int i = 0; + int ret = 0; + + /* + * If have one single callchain root, don't bother printing + * its percentage (100 % in fractal mode and the same percentage + * than the hist in graph mode). This also avoid one level of column. + */ + node = rb_first(root); + if (node && !rb_next(node)) { + cnode = rb_entry(node, struct callchain_node, rb_node); + list_for_each_entry(chain, &cnode->val, list) { + /* + * If we sort by symbol, the first entry is the same than + * the symbol. No need to print it otherwise it appears as + * displayed twice. 
+ */ + if (!i++ && sort__first_dimension == SORT_SYM) + continue; + if (!printed) { + ret += callchain__fprintf_left_margin(fp, left_margin); + ret += fprintf(fp, "|\n"); + ret += callchain__fprintf_left_margin(fp, left_margin); + ret += fprintf(fp, "---"); + left_margin += 3; + printed = true; + } else + ret += callchain__fprintf_left_margin(fp, left_margin); + + if (chain->ms.sym) + ret += fprintf(fp, " %s\n", chain->ms.sym->name); + else + ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); + + if (++entries_printed == callchain_param.print_limit) + break; + } + root = &cnode->rb_root; + } + + ret += __callchain__fprintf_graph(fp, root, total_samples, + 1, 1, left_margin); + ret += fprintf(fp, "\n"); + + return ret; +} + +static size_t __callchain__fprintf_flat(FILE *fp, + struct callchain_node *self, + u64 total_samples) +{ + struct callchain_list *chain; + size_t ret = 0; + + if (!self) + return 0; + + ret += __callchain__fprintf_flat(fp, self->parent, total_samples); + + + list_for_each_entry(chain, &self->val, list) { + if (chain->ip >= PERF_CONTEXT_MAX) + continue; + if (chain->ms.sym) + ret += fprintf(fp, " %s\n", chain->ms.sym->name); + else + ret += fprintf(fp, " %p\n", + (void *)(long)chain->ip); + } + + return ret; +} + +static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self, + u64 total_samples) +{ + size_t ret = 0; + u32 entries_printed = 0; + struct rb_node *rb_node; + struct callchain_node *chain; + + rb_node = rb_first(self); + while (rb_node) { + double percent; + + chain = rb_entry(rb_node, struct callchain_node, rb_node); + percent = chain->hit * 100.0 / total_samples; + + ret = percent_color_fprintf(fp, " %6.2f%%\n", percent); + ret += __callchain__fprintf_flat(fp, chain, total_samples); + ret += fprintf(fp, "\n"); + if (++entries_printed == callchain_param.print_limit) + break; + + rb_node = rb_next(rb_node); + } + + return ret; +} + +static size_t hist_entry_callchain__fprintf(struct hist_entry *he, + u64 total_samples, int left_margin, + FILE *fp) +{ + switch (callchain_param.mode) { + case CHAIN_GRAPH_REL: + return callchain__fprintf_graph(fp, &he->sorted_chain, he->period, + left_margin); + break; + case CHAIN_GRAPH_ABS: + return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples, + left_margin); + break; + case CHAIN_FLAT: + return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples); + break; + case CHAIN_NONE: + break; + default: + pr_err("Bad callchain mode\n"); + } + + return 0; +} + +static size_t hist_entry__callchain_fprintf(struct hist_entry *he, + struct hists *hists, + u64 total_period, FILE *fp) +{ + int left_margin = 0; + + if (sort__first_dimension == SORT_COMM) { + struct sort_entry *se = list_first_entry(&hist_entry__sort_list, + typeof(*se), list); + left_margin = hists__col_len(hists, se->se_width_idx); + left_margin -= thread__comm_len(he->thread); + } + + return hist_entry_callchain__fprintf(he, total_period, left_margin, fp); +} + +static int hist_entry__fprintf(struct hist_entry *he, size_t size, + struct hists *hists, struct hists *pair_hists, + long displacement, u64 total_period, FILE *fp) +{ + char bf[512]; + int ret; + struct perf_hpp hpp = { + .buf = bf, + .size = size, + .total_period = total_period, + .displacement = displacement, + .ptr = pair_hists, + }; + bool color = !symbol_conf.field_sep; + + if (size == 0 || size > sizeof(bf)) + size = hpp.size = sizeof(bf); + + ret = hist_entry__period_snprintf(&hpp, he, color); + hist_entry__sort_snprintf(he, bf + ret, size - ret, hists); + + ret = 
fprintf(fp, "%s\n", bf); + + if (symbol_conf.use_callchain) + ret += hist_entry__callchain_fprintf(he, hists, + total_period, fp); + + return ret; +} + +size_t hists__fprintf(struct hists *hists, struct hists *pair, + bool show_displacement, bool show_header, int max_rows, + int max_cols, FILE *fp) +{ + struct sort_entry *se; + struct rb_node *nd; + size_t ret = 0; + u64 total_period; + unsigned long position = 1; + long displacement = 0; + unsigned int width; + const char *sep = symbol_conf.field_sep; + const char *col_width = symbol_conf.col_width_list_str; + int idx, nr_rows = 0; + char bf[64]; + struct perf_hpp dummy_hpp = { + .buf = bf, + .size = sizeof(bf), + .ptr = pair, + }; + + init_rem_hits(); + + if (!show_header) + goto print_entries; + + fprintf(fp, "# "); + for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { + if (!perf_hpp__format[idx].cond) + continue; + + if (idx) + fprintf(fp, "%s", sep ?: " "); + + perf_hpp__format[idx].header(&dummy_hpp); + fprintf(fp, "%s", bf); + } + + list_for_each_entry(se, &hist_entry__sort_list, list) { + if (se->elide) + continue; + if (sep) { + fprintf(fp, "%c%s", *sep, se->se_header); + continue; + } + width = strlen(se->se_header); + if (symbol_conf.col_width_list_str) { + if (col_width) { + hists__set_col_len(hists, se->se_width_idx, + atoi(col_width)); + col_width = strchr(col_width, ','); + if (col_width) + ++col_width; + } + } + if (!hists__new_col_len(hists, se->se_width_idx, width)) + width = hists__col_len(hists, se->se_width_idx); + fprintf(fp, " %*s", width, se->se_header); + } + + fprintf(fp, "\n"); + if (max_rows && ++nr_rows >= max_rows) + goto out; + + if (sep) + goto print_entries; + + fprintf(fp, "# "); + for (idx = 0; idx < PERF_HPP__MAX_INDEX; idx++) { + unsigned int i; + + if (!perf_hpp__format[idx].cond) + continue; + + if (idx) + fprintf(fp, "%s", sep ?: " "); + + width = perf_hpp__format[idx].width(&dummy_hpp); + for (i = 0; i < width; i++) + fprintf(fp, "."); + } + + list_for_each_entry(se, &hist_entry__sort_list, list) { + unsigned int i; + + if (se->elide) + continue; + + fprintf(fp, " "); + width = hists__col_len(hists, se->se_width_idx); + if (width == 0) + width = strlen(se->se_header); + for (i = 0; i < width; i++) + fprintf(fp, "."); + } + + fprintf(fp, "\n"); + if (max_rows && ++nr_rows >= max_rows) + goto out; + + fprintf(fp, "#\n"); + if (max_rows && ++nr_rows >= max_rows) + goto out; + +print_entries: + total_period = hists->stats.total_period; + + for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { + struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); + + if (h->filtered) + continue; + + if (show_displacement) { + if (h->pair != NULL) + displacement = ((long)h->pair->position - + (long)position); + else + displacement = 0; + ++position; + } + ret += hist_entry__fprintf(h, max_cols, hists, pair, displacement, + total_period, fp); + + if (max_rows && ++nr_rows >= max_rows) + goto out; + + if (h->ms.map == NULL && verbose > 1) { + __map_groups__fprintf_maps(&h->thread->mg, + MAP__FUNCTION, verbose, fp); + fprintf(fp, "%.10s end\n", graph_dotted_line); + } + } +out: + free(rem_sq_bracket); + + return ret; +} + +size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) +{ + int i; + size_t ret = 0; + + for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { + const char *name; + + if (hists->stats.nr_events[i] == 0) + continue; + + name = perf_event__name(i); + if (!strcmp(name, "UNKNOWN")) + continue; + + ret += fprintf(fp, "%16s events: %10d\n", name, + hists->stats.nr_events[i]); + } + + return 
ret; +} diff --git a/trunk/tools/perf/ui/tui/helpline.c b/trunk/tools/perf/ui/tui/helpline.c new file mode 100644 index 000000000000..2884d2f41e33 --- /dev/null +++ b/trunk/tools/perf/ui/tui/helpline.c @@ -0,0 +1,57 @@ +#include +#include +#include +#include + +#include "../../util/debug.h" +#include "../helpline.h" +#include "../ui.h" +#include "../libslang.h" + +static void tui_helpline__pop(void) +{ +} + +static void tui_helpline__push(const char *msg) +{ + const size_t sz = sizeof(ui_helpline__current); + + SLsmg_gotorc(SLtt_Screen_Rows - 1, 0); + SLsmg_set_color(0); + SLsmg_write_nstring((char *)msg, SLtt_Screen_Cols); + SLsmg_refresh(); + strncpy(ui_helpline__current, msg, sz)[sz - 1] = '\0'; +} + +struct ui_helpline tui_helpline_fns = { + .pop = tui_helpline__pop, + .push = tui_helpline__push, +}; + +void ui_helpline__init(void) +{ + helpline_fns = &tui_helpline_fns; + ui_helpline__puts(" "); +} + +char ui_helpline__last_msg[1024]; + +int ui_helpline__show_help(const char *format, va_list ap) +{ + int ret; + static int backlog; + + pthread_mutex_lock(&ui__lock); + ret = vscnprintf(ui_helpline__last_msg + backlog, + sizeof(ui_helpline__last_msg) - backlog, format, ap); + backlog += ret; + + if (ui_helpline__last_msg[backlog - 1] == '\n') { + ui_helpline__puts(ui_helpline__last_msg); + SLsmg_refresh(); + backlog = 0; + } + pthread_mutex_unlock(&ui__lock); + + return ret; +} diff --git a/trunk/tools/perf/ui/tui/setup.c b/trunk/tools/perf/ui/tui/setup.c index e813c1d17346..60debb81537a 100644 --- a/trunk/tools/perf/ui/tui/setup.c +++ b/trunk/tools/perf/ui/tui/setup.c @@ -11,12 +11,12 @@ #include "../libslang.h" #include "../keysyms.h" -pthread_mutex_t ui__lock = PTHREAD_MUTEX_INITIALIZER; - static volatile int ui__need_resize; extern struct perf_error_ops perf_tui_eops; +extern void hist_browser__init_hpp(void); + void ui__refresh_dimensions(bool force) { if (force || ui__need_resize) { @@ -28,7 +28,7 @@ void ui__refresh_dimensions(bool force) } } -static void ui__sigwinch(int sig __used) +static void ui__sigwinch(int sig __maybe_unused) { ui__need_resize = 1; } @@ -88,7 +88,7 @@ int ui__getch(int delay_secs) return SLkp_getkey(); } -static void newt_suspend(void *d __used) +static void newt_suspend(void *d __maybe_unused) { newtSuspend(); raise(SIGTSTP); @@ -126,6 +126,8 @@ int ui__init(void) signal(SIGTERM, ui__signal); perf_error__register(&perf_tui_eops); + + hist_browser__init_hpp(); out: return err; } diff --git a/trunk/tools/perf/util/alias.c b/trunk/tools/perf/util/alias.c index b8144e80bb1e..e6d134773d0a 100644 --- a/trunk/tools/perf/util/alias.c +++ b/trunk/tools/perf/util/alias.c @@ -3,7 +3,8 @@ static const char *alias_key; static char *alias_val; -static int alias_lookup_cb(const char *k, const char *v, void *cb __used) +static int alias_lookup_cb(const char *k, const char *v, + void *cb __maybe_unused) { if (!prefixcmp(k, "alias.") && !strcmp(k+6, alias_key)) { if (!v) diff --git a/trunk/tools/perf/util/annotate.c b/trunk/tools/perf/util/annotate.c index 3a282c0057d2..f0a910371377 100644 --- a/trunk/tools/perf/util/annotate.c +++ b/trunk/tools/perf/util/annotate.c @@ -17,6 +17,7 @@ #include const char *disassembler_style; +const char *objdump_path; static struct ins *ins__find(const char *name); static int disasm_line__parse(char *line, char **namep, char **rawp); @@ -312,8 +313,8 @@ static struct ins_ops dec_ops = { .scnprintf = dec__scnprintf, }; -static int nop__scnprintf(struct ins *ins __used, char *bf, size_t size, - struct ins_operands *ops __used) +static int 
nop__scnprintf(struct ins *ins __maybe_unused, char *bf, size_t size, + struct ins_operands *ops __maybe_unused) { return scnprintf(bf, size, "%-6.6s", "nop"); } @@ -415,7 +416,7 @@ static struct ins *ins__find(const char *name) return bsearch(name, instructions, nmemb, sizeof(struct ins), ins__cmp); } -int symbol__annotate_init(struct map *map __used, struct symbol *sym) +int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym) { struct annotation *notes = symbol__annotation(sym); pthread_mutex_init(¬es->lock, NULL); @@ -820,9 +821,10 @@ int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize) dso, dso->long_name, sym, sym->name); snprintf(command, sizeof(command), - "objdump %s%s --start-address=0x%016" PRIx64 + "%s %s%s --start-address=0x%016" PRIx64 " --stop-address=0x%016" PRIx64 " -d %s %s -C %s|grep -v %s|expand", + objdump_path ? objdump_path : "objdump", disassembler_style ? "-M " : "", disassembler_style ? disassembler_style : "", map__rip_2objdump(map, sym->start), @@ -982,7 +984,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, int context) { struct dso *dso = map->dso; - const char *filename = dso->long_name, *d_filename; + char *filename; + const char *d_filename; struct annotation *notes = symbol__annotation(sym); struct disasm_line *pos, *queue = NULL; u64 start = map__rip_2objdump(map, sym->start); @@ -990,6 +993,10 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, int more = 0; u64 len; + filename = strdup(dso->long_name); + if (!filename) + return -ENOMEM; + if (full_paths) d_filename = filename; else @@ -1040,6 +1047,8 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, } } + free(filename); + return more; } diff --git a/trunk/tools/perf/util/annotate.h b/trunk/tools/perf/util/annotate.h index 78a5692dd718..9b5b21e7b032 100644 --- a/trunk/tools/perf/util/annotate.h +++ b/trunk/tools/perf/util/annotate.h @@ -7,6 +7,7 @@ #include "symbol.h" #include #include +#include struct ins; @@ -125,7 +126,7 @@ int symbol__alloc_hist(struct symbol *sym); void symbol__annotate_zero_histograms(struct symbol *sym); int symbol__annotate(struct symbol *sym, struct map *map, size_t privsize); -int symbol__annotate_init(struct map *map __used, struct symbol *sym); +int symbol__annotate_init(struct map *map __maybe_unused, struct symbol *sym); int symbol__annotate_printf(struct symbol *sym, struct map *map, int evidx, bool full_paths, int min_pcnt, int max_lines, int context); @@ -138,11 +139,12 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, int evidx, int max_lines); #ifdef NO_NEWT_SUPPORT -static inline int symbol__tui_annotate(struct symbol *sym __used, - struct map *map __used, - int evidx __used, - void(*timer)(void *arg) __used, - void *arg __used, int delay_secs __used) +static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, + struct map *map __maybe_unused, + int evidx __maybe_unused, + void(*timer)(void *arg) __maybe_unused, + void *arg __maybe_unused, + int delay_secs __maybe_unused) { return 0; } @@ -152,5 +154,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, #endif extern const char *disassembler_style; +extern const char *objdump_path; #endif /* __PERF_ANNOTATE_H */ diff --git a/trunk/tools/perf/util/build-id.c b/trunk/tools/perf/util/build-id.c index fd9a5944b627..8e3a740ddbd4 100644 --- a/trunk/tools/perf/util/build-id.c +++ b/trunk/tools/perf/util/build-id.c @@ -16,10 +16,10 @@ #include 
"session.h" #include "tool.h" -static int build_id__mark_dso_hit(struct perf_tool *tool __used, +static int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct perf_evsel *evsel __used, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, struct machine *machine) { struct addr_location al; @@ -41,9 +41,10 @@ static int build_id__mark_dso_hit(struct perf_tool *tool __used, return 0; } -static int perf_event__exit_del_thread(struct perf_tool *tool __used, +static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample + __maybe_unused, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, event->fork.tid); diff --git a/trunk/tools/perf/util/cache.h b/trunk/tools/perf/util/cache.h index cff18c617d13..ab1769426541 100644 --- a/trunk/tools/perf/util/cache.h +++ b/trunk/tools/perf/util/cache.h @@ -39,7 +39,7 @@ static inline void setup_browser(bool fallback_to_pager) if (fallback_to_pager) setup_pager(); } -static inline void exit_browser(bool wait_for_ok __used) {} +static inline void exit_browser(bool wait_for_ok __maybe_unused) {} #else void setup_browser(bool fallback_to_pager); void exit_browser(bool wait_for_ok); @@ -49,7 +49,7 @@ static inline int ui__init(void) { return -1; } -static inline void ui__exit(bool wait_for_ok __used) {} +static inline void ui__exit(bool wait_for_ok __maybe_unused) {} #else int ui__init(void); void ui__exit(bool wait_for_ok); @@ -60,7 +60,7 @@ static inline int perf_gtk__init(void) { return -1; } -static inline void perf_gtk__exit(bool wait_for_ok __used) {} +static inline void perf_gtk__exit(bool wait_for_ok __maybe_unused) {} #else int perf_gtk__init(void); void perf_gtk__exit(bool wait_for_ok); diff --git a/trunk/tools/perf/util/callchain.c b/trunk/tools/perf/util/callchain.c index 3a6bff47614f..d3b3f5d82137 100644 --- a/trunk/tools/perf/util/callchain.c +++ b/trunk/tools/perf/util/callchain.c @@ -93,7 +93,7 @@ __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node, */ static void sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root, - u64 min_hit, struct callchain_param *param __used) + u64 min_hit, struct callchain_param *param __maybe_unused) { __sort_chain_flat(rb_root, &root->node, min_hit); } @@ -115,7 +115,7 @@ static void __sort_chain_graph_abs(struct callchain_node *node, static void sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root, - u64 min_hit, struct callchain_param *param __used) + u64 min_hit, struct callchain_param *param __maybe_unused) { __sort_chain_graph_abs(&chain_root->node, min_hit); rb_root->rb_node = chain_root->node.rb_root.rb_node; @@ -140,7 +140,7 @@ static void __sort_chain_graph_rel(struct callchain_node *node, static void sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root, - u64 min_hit __used, struct callchain_param *param) + u64 min_hit __maybe_unused, struct callchain_param *param) { __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0); rb_root->rb_node = chain_root->node.rb_root.rb_node; diff --git a/trunk/tools/perf/util/callchain.h b/trunk/tools/perf/util/callchain.h index 3bdb407f9cd9..eb340571e7d6 100644 --- a/trunk/tools/perf/util/callchain.h +++ b/trunk/tools/perf/util/callchain.h @@ -58,7 +58,7 @@ struct callchain_list { /* * A callchain cursor is a single 
linked list that * let one feed a callchain progressively. - * It keeps persitent allocated entries to minimize + * It keeps persistent allocated entries to minimize * allocations. */ struct callchain_cursor_node { diff --git a/trunk/tools/perf/util/cgroup.c b/trunk/tools/perf/util/cgroup.c index dbe2f16b1a1a..96bbda1ddb83 100644 --- a/trunk/tools/perf/util/cgroup.c +++ b/trunk/tools/perf/util/cgroup.c @@ -138,8 +138,8 @@ void close_cgroup(struct cgroup_sel *cgrp) } } -int parse_cgroups(const struct option *opt __used, const char *str, - int unset __used) +int parse_cgroups(const struct option *opt __maybe_unused, const char *str, + int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; const char *p, *e, *eos = str + strlen(str); diff --git a/trunk/tools/perf/util/config.c b/trunk/tools/perf/util/config.c index 6faa3a18bfbd..3e0fdd369ccb 100644 --- a/trunk/tools/perf/util/config.c +++ b/trunk/tools/perf/util/config.c @@ -342,13 +342,15 @@ const char *perf_config_dirname(const char *name, const char *value) return value; } -static int perf_default_core_config(const char *var __used, const char *value __used) +static int perf_default_core_config(const char *var __maybe_unused, + const char *value __maybe_unused) { /* Add other config variables here. */ return 0; } -int perf_default_config(const char *var, const char *value, void *dummy __used) +int perf_default_config(const char *var, const char *value, + void *dummy __maybe_unused) { if (!prefixcmp(var, "core.")) return perf_default_core_config(var, value); diff --git a/trunk/tools/perf/util/cpumap.c b/trunk/tools/perf/util/cpumap.c index adc72f09914d..2b32ffa9ebdb 100644 --- a/trunk/tools/perf/util/cpumap.c +++ b/trunk/tools/perf/util/cpumap.c @@ -38,24 +38,19 @@ static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus) return cpus; } -static struct cpu_map *cpu_map__read_all_cpu_map(void) +struct cpu_map *cpu_map__read(FILE *file) { struct cpu_map *cpus = NULL; - FILE *onlnf; int nr_cpus = 0; int *tmp_cpus = NULL, *tmp; int max_entries = 0; int n, cpu, prev; char sep; - onlnf = fopen("/sys/devices/system/cpu/online", "r"); - if (!onlnf) - return cpu_map__default_new(); - sep = 0; prev = -1; for (;;) { - n = fscanf(onlnf, "%u%c", &cpu, &sep); + n = fscanf(file, "%u%c", &cpu, &sep); if (n <= 0) break; if (prev >= 0) { @@ -95,6 +90,19 @@ static struct cpu_map *cpu_map__read_all_cpu_map(void) cpus = cpu_map__default_new(); out_free_tmp: free(tmp_cpus); + return cpus; +} + +static struct cpu_map *cpu_map__read_all_cpu_map(void) +{ + struct cpu_map *cpus = NULL; + FILE *onlnf; + + onlnf = fopen("/sys/devices/system/cpu/online", "r"); + if (!onlnf) + return cpu_map__default_new(); + + cpus = cpu_map__read(onlnf); fclose(onlnf); return cpus; } diff --git a/trunk/tools/perf/util/cpumap.h b/trunk/tools/perf/util/cpumap.h index c41518573c6a..2f68a3b8c285 100644 --- a/trunk/tools/perf/util/cpumap.h +++ b/trunk/tools/perf/util/cpumap.h @@ -2,6 +2,7 @@ #define __PERF_CPUMAP_H #include +#include struct cpu_map { int nr; @@ -11,7 +12,17 @@ struct cpu_map { struct cpu_map *cpu_map__new(const char *cpu_list); struct cpu_map *cpu_map__dummy_new(void); void cpu_map__delete(struct cpu_map *map); - +struct cpu_map *cpu_map__read(FILE *file); size_t cpu_map__fprintf(struct cpu_map *map, FILE *fp); +static inline int cpu_map__nr(const struct cpu_map *map) +{ + return map ? map->nr : 1; +} + +static inline bool cpu_map__all(const struct cpu_map *map) +{ + return map ? 
map->map[0] == -1 : true; +} + #endif /* __PERF_CPUMAP_H */ diff --git a/trunk/tools/perf/util/debug.c b/trunk/tools/perf/util/debug.c index 4dfe0bb3c322..66eb3828ceb5 100644 --- a/trunk/tools/perf/util/debug.c +++ b/trunk/tools/perf/util/debug.c @@ -23,8 +23,10 @@ int eprintf(int level, const char *fmt, ...) if (verbose >= level) { va_start(args, fmt); - if (use_browser > 0) + if (use_browser == 1) ret = ui_helpline__show_help(fmt, args); + else if (use_browser == 2) + ret = perf_gtk__show_helpline(fmt, args); else ret = vfprintf(stderr, fmt, args); va_end(args); diff --git a/trunk/tools/perf/util/debug.h b/trunk/tools/perf/util/debug.h index 015c91dbc096..bb2e7d1007ab 100644 --- a/trunk/tools/perf/util/debug.h +++ b/trunk/tools/perf/util/debug.h @@ -4,6 +4,7 @@ #include #include "event.h" +#include "../ui/helpline.h" extern int verbose; extern bool quiet, dump_trace; @@ -15,32 +16,26 @@ struct ui_progress; struct perf_error_ops; #if defined(NO_NEWT_SUPPORT) && defined(NO_GTK2_SUPPORT) -static inline int ui_helpline__show_help(const char *format __used, va_list ap __used) -{ - return 0; -} - -static inline void ui_progress__update(u64 curr __used, u64 total __used, - const char *title __used) {} +static inline void ui_progress__update(u64 curr __maybe_unused, + u64 total __maybe_unused, + const char *title __maybe_unused) {} #define ui__error(format, arg...) ui__warning(format, ##arg) static inline int -perf_error__register(struct perf_error_ops *eops __used) +perf_error__register(struct perf_error_ops *eops __maybe_unused) { return 0; } static inline int -perf_error__unregister(struct perf_error_ops *eops __used) +perf_error__unregister(struct perf_error_ops *eops __maybe_unused) { return 0; } #else /* NO_NEWT_SUPPORT && NO_GTK2_SUPPORT */ -extern char ui_helpline__last_msg[]; -int ui_helpline__show_help(const char *format, va_list ap); #include "../ui/progress.h" int ui__error(const char *format, ...) __attribute__((format(printf, 1, 2))); #include "../ui/util.h" diff --git a/trunk/tools/perf/util/dso-test-data.c b/trunk/tools/perf/util/dso-test-data.c index 541cdc72c7df..c6caedeb1d6b 100644 --- a/trunk/tools/perf/util/dso-test-data.c +++ b/trunk/tools/perf/util/dso-test-data.c @@ -23,7 +23,7 @@ static char *test_file(int size) int fd, i; unsigned char *buf; - fd = mkostemp(templ, O_CREAT|O_WRONLY|O_TRUNC); + fd = mkstemp(templ); buf = malloc(size); if (!buf) { diff --git a/trunk/tools/perf/util/dwarf-aux.c b/trunk/tools/perf/util/dwarf-aux.c index ee51e9b4dc09..3e5f5430a28a 100644 --- a/trunk/tools/perf/util/dwarf-aux.c +++ b/trunk/tools/perf/util/dwarf-aux.c @@ -804,6 +804,8 @@ int die_get_typename(Dwarf_Die *vr_die, char *buf, int len) tmp = "union "; else if (tag == DW_TAG_structure_type) tmp = "struct "; + else if (tag == DW_TAG_enumeration_type) + tmp = "enum "; /* Write a base name */ ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type)); return (ret >= len) ? 
-E2BIG : ret; diff --git a/trunk/tools/perf/util/event.c b/trunk/tools/perf/util/event.c index 2a6f33cd888c..6715b1938725 100644 --- a/trunk/tools/perf/util/event.c +++ b/trunk/tools/perf/util/event.c @@ -112,7 +112,7 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, event->comm.header.type = PERF_RECORD_COMM; size = strlen(event->comm.comm) + 1; - size = ALIGN(size, sizeof(u64)); + size = PERF_ALIGN(size, sizeof(u64)); memset(event->comm.comm + size, 0, machine->id_hdr_size); event->comm.header.size = (sizeof(event->comm) - (sizeof(event->comm.comm) - size) + @@ -120,7 +120,9 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, if (!full) { event->comm.tid = pid; - process(tool, event, &synth_sample, machine); + if (process(tool, event, &synth_sample, machine) != 0) + return -1; + goto out; } @@ -143,7 +145,7 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, sizeof(event->comm.comm)); size = strlen(event->comm.comm) + 1; - size = ALIGN(size, sizeof(u64)); + size = PERF_ALIGN(size, sizeof(u64)); memset(event->comm.comm + size, 0, machine->id_hdr_size); event->comm.header.size = (sizeof(event->comm) - (sizeof(event->comm.comm) - size) + @@ -151,7 +153,10 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool, event->comm.tid = pid; - process(tool, event, &synth_sample, machine); + if (process(tool, event, &synth_sample, machine) != 0) { + tgid = -1; + break; + } } closedir(tasks); @@ -167,6 +172,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, { char filename[PATH_MAX]; FILE *fp; + int rc = 0; snprintf(filename, sizeof(filename), "/proc/%d/maps", pid); @@ -222,7 +228,7 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, size = strlen(execname); execname[size - 1] = '\0'; /* Remove \n */ memcpy(event->mmap.filename, execname, size); - size = ALIGN(size, sizeof(u64)); + size = PERF_ALIGN(size, sizeof(u64)); event->mmap.len -= event->mmap.start; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size)); @@ -231,18 +237,22 @@ static int perf_event__synthesize_mmap_events(struct perf_tool *tool, event->mmap.pid = tgid; event->mmap.tid = pid; - process(tool, event, &synth_sample, machine); + if (process(tool, event, &synth_sample, machine) != 0) { + rc = -1; + break; + } } } fclose(fp); - return 0; + return rc; } int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine) { + int rc = 0; struct rb_node *nd; struct map_groups *kmaps = &machine->kmaps; union perf_event *event = zalloc((sizeof(event->mmap) + @@ -272,7 +282,7 @@ int perf_event__synthesize_modules(struct perf_tool *tool, if (pos->dso->kernel) continue; - size = ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); + size = PERF_ALIGN(pos->dso->long_name_len + 1, sizeof(u64)); event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size)); @@ -284,11 +294,14 @@ int perf_event__synthesize_modules(struct perf_tool *tool, memcpy(event->mmap.filename, pos->dso->long_name, pos->dso->long_name_len + 1); - process(tool, event, &synth_sample, machine); + if (process(tool, event, &synth_sample, machine) != 0) { + rc = -1; + break; + } } free(event); - return 0; + return rc; } static int __event__synthesize_thread(union perf_event *comm_event, @@ -392,12 +405,16 @@ int perf_event__synthesize_threads(struct perf_tool *tool, if (*end) /* only interested in proper numerical dirents */ continue; 
- __event__synthesize_thread(comm_event, mmap_event, pid, 1, - process, tool, machine); + if (__event__synthesize_thread(comm_event, mmap_event, pid, 1, + process, tool, machine) != 0) { + err = -1; + goto out_closedir; + } } - closedir(proc); err = 0; +out_closedir: + closedir(proc); out_free_mmap: free(mmap_event); out_free_comm: @@ -412,7 +429,7 @@ struct process_symbol_args { }; static int find_symbol_cb(void *arg, const char *name, char type, - u64 start, u64 end __used) + u64 start) { struct process_symbol_args *args = arg; @@ -477,7 +494,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, map = machine->vmlinux_maps[MAP__FUNCTION]; size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), "%s%s", mmap_name, symbol_name) + 1; - size = ALIGN(size, sizeof(u64)); + size = PERF_ALIGN(size, sizeof(u64)); event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - (sizeof(event->mmap.filename) - size) + machine->id_hdr_size); @@ -497,9 +514,9 @@ size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp) return fprintf(fp, ": %s:%d\n", event->comm.comm, event->comm.tid); } -int perf_event__process_comm(struct perf_tool *tool __used, +int perf_event__process_comm(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, event->comm.tid); @@ -515,10 +532,10 @@ int perf_event__process_comm(struct perf_tool *tool __used, return 0; } -int perf_event__process_lost(struct perf_tool *tool __used, +int perf_event__process_lost(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, - struct machine *machine __used) + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n", event->lost.id, event->lost.lost); @@ -538,7 +555,8 @@ static void perf_event__set_kernel_mmap_len(union perf_event *event, maps[MAP__FUNCTION]->end = ~0ULL; } -static int perf_event__process_kernel_mmap(struct perf_tool *tool __used, +static int perf_event__process_kernel_mmap(struct perf_tool *tool + __maybe_unused, union perf_event *event, struct machine *machine) { @@ -640,7 +658,7 @@ size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp) int perf_event__process_mmap(struct perf_tool *tool, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct machine *machine) { struct thread *thread; @@ -684,9 +702,9 @@ size_t perf_event__fprintf_task(union perf_event *event, FILE *fp) event->fork.ppid, event->fork.ptid); } -int perf_event__process_task(struct perf_tool *tool __used, +int perf_event__process_task(struct perf_tool *tool __maybe_unused, union perf_event *event, - struct perf_sample *sample __used, + struct perf_sample *sample __maybe_unused, struct machine *machine) { struct thread *thread = machine__findnew_thread(machine, event->fork.tid); @@ -886,8 +904,9 @@ int perf_event__preprocess_sample(const union perf_event *event, al->sym = map__find_symbol(al->map, al->addr, filter); } - if (symbol_conf.sym_list && al->sym && - !strlist__has_entry(symbol_conf.sym_list, al->sym->name)) + if (symbol_conf.sym_list && + (!al->sym || !strlist__has_entry(symbol_conf.sym_list, + al->sym->name))) goto out_filtered; return 0; diff --git a/trunk/tools/perf/util/event.h b/trunk/tools/perf/util/event.h index 
1b197280c621..21b99e741a87 100644 --- a/trunk/tools/perf/util/event.h +++ b/trunk/tools/perf/util/event.h @@ -69,6 +69,16 @@ struct sample_event { u64 array[]; }; +struct regs_dump { + u64 *regs; +}; + +struct stack_dump { + u16 offset; + u64 size; + char *data; +}; + struct perf_sample { u64 ip; u32 pid, tid; @@ -82,6 +92,8 @@ struct perf_sample { void *raw_data; struct ip_callchain *callchain; struct branch_stack *branch_stack; + struct regs_dump user_regs; + struct stack_dump user_stack; }; #define BUILD_ID_SIZE 20 @@ -89,7 +101,7 @@ struct perf_sample { struct build_id_event { struct perf_event_header header; pid_t pid; - u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; + u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; char filename[]; }; @@ -197,9 +209,6 @@ int perf_event__preprocess_sample(const union perf_event *self, const char *perf_event__name(unsigned int id); -int perf_event__parse_sample(const union perf_event *event, u64 type, - int sample_size, bool sample_id_all, - struct perf_sample *sample, bool swapped); int perf_event__synthesize_sample(union perf_event *event, u64 type, const struct perf_sample *sample, bool swapped); diff --git a/trunk/tools/perf/util/evlist.c b/trunk/tools/perf/util/evlist.c index 3edfd3483816..ae89686102f4 100644 --- a/trunk/tools/perf/util/evlist.c +++ b/trunk/tools/perf/util/evlist.c @@ -57,7 +57,7 @@ void perf_evlist__config_attrs(struct perf_evlist *evlist, if (evlist->cpus->map[0] < 0) opts->no_inherit = true; - first = list_entry(evlist->entries.next, struct perf_evsel, node); + first = perf_evlist__first(evlist); list_for_each_entry(evsel, &evlist->entries, node) { perf_evsel__config(evsel, opts, first); @@ -108,6 +108,25 @@ void perf_evlist__splice_list_tail(struct perf_evlist *evlist, evlist->nr_entries += nr_entries; } +void __perf_evlist__set_leader(struct list_head *list) +{ + struct perf_evsel *evsel, *leader; + + leader = list_entry(list->next, struct perf_evsel, node); + leader->leader = NULL; + + list_for_each_entry(evsel, list, node) { + if (evsel != leader) + evsel->leader = leader; + } +} + +void perf_evlist__set_leader(struct perf_evlist *evlist) +{ + if (evlist->nr_entries) + __perf_evlist__set_leader(&evlist->entries); +} + int perf_evlist__add_default(struct perf_evlist *evlist) { struct perf_event_attr attr = { @@ -285,7 +304,7 @@ void perf_evlist__enable(struct perf_evlist *evlist) int cpu, thread; struct perf_evsel *pos; - for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { + for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) { list_for_each_entry(pos, &evlist->entries, node) { for (thread = 0; thread < evlist->threads->nr; thread++) ioctl(FD(pos, cpu, thread), @@ -296,7 +315,7 @@ void perf_evlist__enable(struct perf_evlist *evlist) static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) { - int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; + int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries; evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); return evlist->pollfd != NULL ? 
0 : -ENOMEM; } @@ -357,7 +376,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) int hash; if (evlist->nr_entries == 1) - return list_entry(evlist->entries.next, struct perf_evsel, node); + return perf_evlist__first(evlist); hash = hash_64(id, PERF_EVLIST__HLIST_BITS); head = &evlist->heads[hash]; @@ -367,7 +386,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) return sid->evsel; if (!perf_evlist__sample_id_all(evlist)) - return list_entry(evlist->entries.next, struct perf_evsel, node); + return perf_evlist__first(evlist); return NULL; } @@ -456,8 +475,8 @@ void perf_evlist__munmap(struct perf_evlist *evlist) static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) { - evlist->nr_mmaps = evlist->cpus->nr; - if (evlist->cpus->map[0] == -1) + evlist->nr_mmaps = cpu_map__nr(evlist->cpus); + if (cpu_map__all(evlist->cpus)) evlist->nr_mmaps = evlist->threads->nr; evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); return evlist->mmap != NULL ? 0 : -ENOMEM; @@ -603,11 +622,11 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages, list_for_each_entry(evsel, &evlist->entries, node) { if ((evsel->attr.read_format & PERF_FORMAT_ID) && evsel->sample_id == NULL && - perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0) + perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0) return -ENOMEM; } - if (evlist->cpus->map[0] == -1) + if (cpu_map__all(cpus)) return perf_evlist__mmap_per_thread(evlist, prot, mask); return perf_evlist__mmap_per_cpu(evlist, prot, mask); @@ -647,39 +666,44 @@ void perf_evlist__delete_maps(struct perf_evlist *evlist) evlist->threads = NULL; } -int perf_evlist__set_filters(struct perf_evlist *evlist) +int perf_evlist__apply_filters(struct perf_evlist *evlist) { - const struct thread_map *threads = evlist->threads; - const struct cpu_map *cpus = evlist->cpus; struct perf_evsel *evsel; - char *filter; - int thread; - int cpu; - int err; - int fd; + int err = 0; + const int ncpus = cpu_map__nr(evlist->cpus), + nthreads = evlist->threads->nr; list_for_each_entry(evsel, &evlist->entries, node) { - filter = evsel->filter; - if (!filter) + if (evsel->filter == NULL) continue; - for (cpu = 0; cpu < cpus->nr; cpu++) { - for (thread = 0; thread < threads->nr; thread++) { - fd = FD(evsel, cpu, thread); - err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter); - if (err) - return err; - } - } + + err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter); + if (err) + break; } - return 0; + return err; } -bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist) +int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) { - struct perf_evsel *pos, *first; + struct perf_evsel *evsel; + int err = 0; + const int ncpus = cpu_map__nr(evlist->cpus), + nthreads = evlist->threads->nr; - pos = first = list_entry(evlist->entries.next, struct perf_evsel, node); + list_for_each_entry(evsel, &evlist->entries, node) { + err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter); + if (err) + break; + } + + return err; +} + +bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) +{ + struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; list_for_each_entry_continue(pos, &evlist->entries, node) { if (first->attr.sample_type != pos->attr.sample_type) @@ -689,23 +713,19 @@ bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist) return true; } -u64 perf_evlist__sample_type(const struct perf_evlist *evlist) +u64 
perf_evlist__sample_type(struct perf_evlist *evlist) { - struct perf_evsel *first; - - first = list_entry(evlist->entries.next, struct perf_evsel, node); + struct perf_evsel *first = perf_evlist__first(evlist); return first->attr.sample_type; } -u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist) +u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) { - struct perf_evsel *first; + struct perf_evsel *first = perf_evlist__first(evlist); struct perf_sample *data; u64 sample_type; u16 size = 0; - first = list_entry(evlist->entries.next, struct perf_evsel, node); - if (!first->attr.sample_id_all) goto out; @@ -729,11 +749,9 @@ u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist) return size; } -bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist) +bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist) { - struct perf_evsel *pos, *first; - - pos = first = list_entry(evlist->entries.next, struct perf_evsel, node); + struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; list_for_each_entry_continue(pos, &evlist->entries, node) { if (first->attr.sample_id_all != pos->attr.sample_id_all) @@ -743,11 +761,9 @@ bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist) return true; } -bool perf_evlist__sample_id_all(const struct perf_evlist *evlist) +bool perf_evlist__sample_id_all(struct perf_evlist *evlist) { - struct perf_evsel *first; - - first = list_entry(evlist->entries.next, struct perf_evsel, node); + struct perf_evsel *first = perf_evlist__first(evlist); return first->attr.sample_id_all; } @@ -757,21 +773,13 @@ void perf_evlist__set_selected(struct perf_evlist *evlist, evlist->selected = evsel; } -int perf_evlist__open(struct perf_evlist *evlist, bool group) +int perf_evlist__open(struct perf_evlist *evlist) { - struct perf_evsel *evsel, *first; + struct perf_evsel *evsel; int err, ncpus, nthreads; - first = list_entry(evlist->entries.next, struct perf_evsel, node); - list_for_each_entry(evsel, &evlist->entries, node) { - struct xyarray *group_fd = NULL; - - if (group && evsel != first) - group_fd = first->fd; - - err = perf_evsel__open(evsel, evlist->cpus, evlist->threads, - group, group_fd); + err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); if (err < 0) goto out_err; } @@ -881,3 +889,23 @@ int perf_evlist__start_workload(struct perf_evlist *evlist) return 0; } + +int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, + struct perf_sample *sample) +{ + struct perf_evsel *evsel = perf_evlist__first(evlist); + return perf_evsel__parse_sample(evsel, event, sample); +} + +size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp) +{ + struct perf_evsel *evsel; + size_t printed = 0; + + list_for_each_entry(evsel, &evlist->entries, node) { + printed += fprintf(fp, "%s%s", evsel->idx ? 
", " : "", + perf_evsel__name(evsel)); + } + + return printed + fprintf(fp, "\n");; +} diff --git a/trunk/tools/perf/util/evlist.h b/trunk/tools/perf/util/evlist.h index 40d4d3cdced0..3f1fb66be022 100644 --- a/trunk/tools/perf/util/evlist.h +++ b/trunk/tools/perf/util/evlist.h @@ -5,6 +5,7 @@ #include #include "../perf.h" #include "event.h" +#include "evsel.h" #include "util.h" #include @@ -41,8 +42,6 @@ struct perf_evsel_str_handler { void *handler; }; -struct perf_evsel; - struct perf_evlist *perf_evlist__new(struct cpu_map *cpus, struct thread_map *threads); void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, @@ -73,6 +72,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, #define perf_evlist__set_tracepoints_handlers_array(evlist, array) \ perf_evlist__set_tracepoints_handlers(evlist, array, ARRAY_SIZE(array)) +int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter); + struct perf_evsel * perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); @@ -85,7 +86,7 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); union perf_event *perf_evlist__mmap_read(struct perf_evlist *self, int idx); -int perf_evlist__open(struct perf_evlist *evlist, bool group); +int perf_evlist__open(struct perf_evlist *evlist); void perf_evlist__config_attrs(struct perf_evlist *evlist, struct perf_record_opts *opts); @@ -116,17 +117,34 @@ static inline void perf_evlist__set_maps(struct perf_evlist *evlist, int perf_evlist__create_maps(struct perf_evlist *evlist, struct perf_target *target); void perf_evlist__delete_maps(struct perf_evlist *evlist); -int perf_evlist__set_filters(struct perf_evlist *evlist); +int perf_evlist__apply_filters(struct perf_evlist *evlist); + +void __perf_evlist__set_leader(struct list_head *list); +void perf_evlist__set_leader(struct perf_evlist *evlist); -u64 perf_evlist__sample_type(const struct perf_evlist *evlist); -bool perf_evlist__sample_id_all(const const struct perf_evlist *evlist); -u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist); +u64 perf_evlist__sample_type(struct perf_evlist *evlist); +bool perf_evlist__sample_id_all(struct perf_evlist *evlist); +u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist); -bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist); -bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist); +int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, + struct perf_sample *sample); + +bool perf_evlist__valid_sample_type(struct perf_evlist *evlist); +bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist); void perf_evlist__splice_list_tail(struct perf_evlist *evlist, struct list_head *list, int nr_entries); +static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist) +{ + return list_entry(evlist->entries.next, struct perf_evsel, node); +} + +static inline struct perf_evsel *perf_evlist__last(struct perf_evlist *evlist) +{ + return list_entry(evlist->entries.prev, struct perf_evsel, node); +} + +size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp); #endif /* __PERF_EVLIST_H */ diff --git a/trunk/tools/perf/util/evsel.c b/trunk/tools/perf/util/evsel.c index e81771364867..ffdd94e9c9c3 100644 --- a/trunk/tools/perf/util/evsel.c +++ b/trunk/tools/perf/util/evsel.c @@ -8,7 +8,10 @@ */ #include +#include #include "asm/bug.h" +#include "debugfs.h" +#include "event-parse.h" #include "evsel.h" #include "evlist.h" #include "util.h" @@ -16,11 
+19,12 @@ #include "thread_map.h" #include "target.h" #include "../../../include/linux/hw_breakpoint.h" +#include "../../include/linux/perf_event.h" +#include "perf_regs.h" #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) -#define GROUP_FD(group_fd, cpu) (*(int *)xyarray__entry(group_fd, cpu, 0)) -int __perf_evsel__sample_size(u64 sample_type) +static int __perf_evsel__sample_size(u64 sample_type) { u64 mask = sample_type & PERF_SAMPLE_MASK; int size = 0; @@ -53,6 +57,7 @@ void perf_evsel__init(struct perf_evsel *evsel, evsel->attr = *attr; INIT_LIST_HEAD(&evsel->node); hists__init(&evsel->hists); + evsel->sample_size = __perf_evsel__sample_size(attr->sample_type); } struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) @@ -65,7 +70,80 @@ struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx) return evsel; } -static const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { +struct event_format *event_format__new(const char *sys, const char *name) +{ + int fd, n; + char *filename; + void *bf = NULL, *nbf; + size_t size = 0, alloc_size = 0; + struct event_format *format = NULL; + + if (asprintf(&filename, "%s/%s/%s/format", tracing_events_path, sys, name) < 0) + goto out; + + fd = open(filename, O_RDONLY); + if (fd < 0) + goto out_free_filename; + + do { + if (size == alloc_size) { + alloc_size += BUFSIZ; + nbf = realloc(bf, alloc_size); + if (nbf == NULL) + goto out_free_bf; + bf = nbf; + } + + n = read(fd, bf + size, BUFSIZ); + if (n < 0) + goto out_free_bf; + size += n; + } while (n > 0); + + pevent_parse_format(&format, bf, size, sys); + +out_free_bf: + free(bf); + close(fd); +out_free_filename: + free(filename); +out: + return format; +} + +struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx) +{ + struct perf_evsel *evsel = zalloc(sizeof(*evsel)); + + if (evsel != NULL) { + struct perf_event_attr attr = { + .type = PERF_TYPE_TRACEPOINT, + .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD), + }; + + if (asprintf(&evsel->name, "%s:%s", sys, name) < 0) + goto out_free; + + evsel->tp_format = event_format__new(sys, name); + if (evsel->tp_format == NULL) + goto out_free; + + event_attr_init(&attr); + attr.config = evsel->tp_format->id; + attr.sample_period = 1; + perf_evsel__init(evsel, &attr, idx); + } + + return evsel; + +out_free: + free(evsel->name); + free(evsel); + return NULL; +} + +const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = { "cycles", "instructions", "cache-references", @@ -128,12 +206,12 @@ static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size) return r + perf_evsel__add_modifiers(evsel, bf + r, size - r); } -static const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { +const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = { "cpu-clock", "task-clock", "page-faults", "context-switches", - "CPU-migrations", + "cpu-migrations", "minor-faults", "major-faults", "alignment-faults", @@ -316,7 +394,8 @@ const char *perf_evsel__name(struct perf_evsel *evsel) break; default: - scnprintf(bf, sizeof(bf), "%s", "unknown attr type"); + scnprintf(bf, sizeof(bf), "unknown attr type: %d", + evsel->attr.type); break; } @@ -366,9 +445,18 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts, attr->mmap_data = track; } - if (opts->call_graph) + if (opts->call_graph) { attr->sample_type |= PERF_SAMPLE_CALLCHAIN; + if (opts->call_graph == CALLCHAIN_DWARF) { + attr->sample_type |= PERF_SAMPLE_REGS_USER | + 
PERF_SAMPLE_STACK_USER; + attr->sample_regs_user = PERF_REGS_MASK; + attr->sample_stack_user = opts->stack_dump_size; + attr->exclude_callchain_user = 1; + } + } + if (perf_target__has_cpu(&opts->target)) attr->sample_type |= PERF_SAMPLE_CPU; @@ -420,6 +508,24 @@ int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) return evsel->fd != NULL ? 0 : -ENOMEM; } +int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, + const char *filter) +{ + int cpu, thread; + + for (cpu = 0; cpu < ncpus; cpu++) { + for (thread = 0; thread < nthreads; thread++) { + int fd = FD(evsel, cpu, thread), + err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter); + + if (err) + return err; + } + } + + return 0; +} + int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads) { evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id)); @@ -480,6 +586,9 @@ void perf_evsel__delete(struct perf_evsel *evsel) { perf_evsel__exit(evsel); close_cgroup(evsel->cgrp); + free(evsel->group_name); + if (evsel->tp_format) + pevent_free_format(evsel->tp_format); free(evsel->name); free(evsel); } @@ -555,9 +664,28 @@ int __perf_evsel__read(struct perf_evsel *evsel, return 0; } +static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread) +{ + struct perf_evsel *leader = evsel->leader; + int fd; + + if (!leader) + return -1; + + /* + * Leader must be already processed/open, + * if not it's a bug. + */ + BUG_ON(!leader->fd); + + fd = FD(leader, cpu, thread); + BUG_ON(fd == -1); + + return fd; +} + static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, - struct thread_map *threads, bool group, - struct xyarray *group_fds) + struct thread_map *threads) { int cpu, thread; unsigned long flags = 0; @@ -573,13 +701,15 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, } for (cpu = 0; cpu < cpus->nr; cpu++) { - int group_fd = group_fds ? 
GROUP_FD(group_fds, cpu) : -1; for (thread = 0; thread < threads->nr; thread++) { + int group_fd; if (!evsel->cgrp) pid = threads->map[thread]; + group_fd = get_group_fd(evsel, cpu, thread); + FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr, pid, cpus->map[cpu], @@ -588,9 +718,6 @@ static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, err = -errno; goto out_close; } - - if (group && group_fd == -1) - group_fd = FD(evsel, cpu, thread); } } @@ -634,8 +761,7 @@ static struct { }; int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, - struct thread_map *threads, bool group, - struct xyarray *group_fd) + struct thread_map *threads) { if (cpus == NULL) { /* Work around old compiler warnings about strict aliasing */ @@ -645,30 +771,28 @@ int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, if (threads == NULL) threads = &empty_thread_map.map; - return __perf_evsel__open(evsel, cpus, threads, group, group_fd); + return __perf_evsel__open(evsel, cpus, threads); } int perf_evsel__open_per_cpu(struct perf_evsel *evsel, - struct cpu_map *cpus, bool group, - struct xyarray *group_fd) + struct cpu_map *cpus) { - return __perf_evsel__open(evsel, cpus, &empty_thread_map.map, group, - group_fd); + return __perf_evsel__open(evsel, cpus, &empty_thread_map.map); } int perf_evsel__open_per_thread(struct perf_evsel *evsel, - struct thread_map *threads, bool group, - struct xyarray *group_fd) + struct thread_map *threads) { - return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, - group_fd); + return __perf_evsel__open(evsel, &empty_cpu_map.map, threads); } -static int perf_event__parse_id_sample(const union perf_event *event, u64 type, - struct perf_sample *sample, - bool swapped) +static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel, + const union perf_event *event, + struct perf_sample *sample) { + u64 type = evsel->attr.sample_type; const u64 *array = event->sample.array; + bool swapped = evsel->needs_swap; union u64_swap u; array += ((event->header.size - @@ -728,10 +852,12 @@ static bool sample_overlap(const union perf_event *event, return false; } -int perf_event__parse_sample(const union perf_event *event, u64 type, - int sample_size, bool sample_id_all, - struct perf_sample *data, bool swapped) +int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, + struct perf_sample *data) { + u64 type = evsel->attr.sample_type; + u64 regs_user = evsel->attr.sample_regs_user; + bool swapped = evsel->needs_swap; const u64 *array; /* @@ -746,14 +872,14 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, data->period = 1; if (event->header.type != PERF_RECORD_SAMPLE) { - if (!sample_id_all) + if (!evsel->attr.sample_id_all) return 0; - return perf_event__parse_id_sample(event, type, data, swapped); + return perf_evsel__parse_id_sample(evsel, event, data); } array = event->sample.array; - if (sample_size + sizeof(event->header) > event->header.size) + if (evsel->sample_size + sizeof(event->header) > event->header.size) return -EFAULT; if (type & PERF_SAMPLE_IP) { @@ -868,6 +994,32 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, sz /= sizeof(u64); array += sz; } + + if (type & PERF_SAMPLE_REGS_USER) { + /* First u64 tells us if we have any regs in sample. 
*/ + u64 avail = *array++; + + if (avail) { + data->user_regs.regs = (u64 *)array; + array += hweight_long(regs_user); + } + } + + if (type & PERF_SAMPLE_STACK_USER) { + u64 size = *array++; + + data->user_stack.offset = ((char *)(array - 1) + - (char *) event); + + if (!size) { + data->user_stack.size = 0; + } else { + data->user_stack.data = (char *)array; + array += size / sizeof(*array); + data->user_stack.size = *array; + } + } + return 0; } @@ -895,7 +1047,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u.val32[1] = sample->tid; if (swapped) { /* - * Inverse of what is done in perf_event__parse_sample + * Inverse of what is done in perf_evsel__parse_sample */ u.val32[0] = bswap_32(u.val32[0]); u.val32[1] = bswap_32(u.val32[1]); @@ -930,7 +1082,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, u.val32[0] = sample->cpu; if (swapped) { /* - * Inverse of what is done in perf_event__parse_sample + * Inverse of what is done in perf_evsel__parse_sample */ u.val32[0] = bswap_32(u.val32[0]); u.val64 = bswap_64(u.val64); @@ -946,3 +1098,72 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, return 0; } + +struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) +{ + return pevent_find_field(evsel->tp_format, name); +} + +void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, + const char *name) +{ + struct format_field *field = perf_evsel__field(evsel, name); + int offset; + + if (!field) + return NULL; + + offset = field->offset; + + if (field->flags & FIELD_IS_DYNAMIC) { + offset = *(int *)(sample->raw_data + field->offset); + offset &= 0xffff; + } + + return sample->raw_data + offset; +} + +u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, + const char *name) +{ + struct format_field *field = perf_evsel__field(evsel, name); + void *ptr; + u64 value; + + if (!field) + return 0; + + ptr = sample->raw_data + field->offset; + + switch (field->size) { + case 1: + return *(u8 *)ptr; + case 2: + value = *(u16 *)ptr; + break; + case 4: + value = *(u32 *)ptr; + break; + case 8: + value = *(u64 *)ptr; + break; + default: + return 0; + } + + if (!evsel->needs_swap) + return value; + + switch (field->size) { + case 2: + return bswap_16(value); + case 4: + return bswap_32(value); + case 8: + return bswap_64(value); + default: + return 0; + } + + return 0; +} diff --git a/trunk/tools/perf/util/evsel.h b/trunk/tools/perf/util/evsel.h index 67cc5033d192..3ead0d59c03d 100644 --- a/trunk/tools/perf/util/evsel.h +++ b/trunk/tools/perf/util/evsel.h @@ -53,9 +53,10 @@ struct perf_evsel { u64 *id; struct perf_counts *counts; int idx; - int ids; + u32 ids; struct hists hists; char *name; + struct event_format *tp_format; union { void *priv; off_t id_offset; @@ -65,7 +66,14 @@ struct perf_evsel { void *func; void *data; } handler; + struct cpu_map *cpus; + unsigned int sample_size; bool supported; + bool needs_swap; + /* parse modifier helper */ + int exclude_GH; + struct perf_evsel *leader; + char *group_name; }; struct cpu_map; @@ -74,6 +82,10 @@ struct perf_evlist; struct perf_record_opts; struct perf_evsel *perf_evsel__new(struct perf_event_attr *attr, int idx); +struct perf_evsel *perf_evsel__newtp(const char *sys, const char *name, int idx); + +struct event_format *event_format__new(const char *sys, const char *name); + void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, int idx); void perf_evsel__exit(struct perf_evsel *evsel); @@ -91,8 
+103,10 @@ extern const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX] [PERF_EVSEL__MAX_ALIASES]; extern const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX] [PERF_EVSEL__MAX_ALIASES]; -const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] - [PERF_EVSEL__MAX_ALIASES]; +extern const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX] + [PERF_EVSEL__MAX_ALIASES]; +extern const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX]; +extern const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX]; int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size); const char *perf_evsel__name(struct perf_evsel *evsel); @@ -104,21 +118,46 @@ void perf_evsel__free_fd(struct perf_evsel *evsel); void perf_evsel__free_id(struct perf_evsel *evsel); void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads); +int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads, + const char *filter); + int perf_evsel__open_per_cpu(struct perf_evsel *evsel, - struct cpu_map *cpus, bool group, - struct xyarray *group_fds); + struct cpu_map *cpus); int perf_evsel__open_per_thread(struct perf_evsel *evsel, - struct thread_map *threads, bool group, - struct xyarray *group_fds); + struct thread_map *threads); int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus, - struct thread_map *threads, bool group, - struct xyarray *group_fds); + struct thread_map *threads); void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads); +struct perf_sample; + +void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, + const char *name); +u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample, + const char *name); + +static inline char *perf_evsel__strval(struct perf_evsel *evsel, + struct perf_sample *sample, + const char *name) +{ + return perf_evsel__rawptr(evsel, sample, name); +} + +struct format_field; + +struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name); + #define perf_evsel__match(evsel, t, c) \ (evsel->attr.type == PERF_TYPE_##t && \ evsel->attr.config == PERF_COUNT_##c) +static inline bool perf_evsel__match2(struct perf_evsel *e1, + struct perf_evsel *e2) +{ + return (e1->attr.type == e2->attr.type) && + (e1->attr.config == e2->attr.config); +} + int __perf_evsel__read_on_cpu(struct perf_evsel *evsel, int cpu, int thread, bool scale); @@ -177,13 +216,13 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel, return __perf_evsel__read(evsel, ncpus, nthreads, true); } -int __perf_evsel__sample_size(u64 sample_type); +void hists__init(struct hists *hists); -static inline int perf_evsel__sample_size(struct perf_evsel *evsel) +int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, + struct perf_sample *sample); + +static inline struct perf_evsel *perf_evsel__next(struct perf_evsel *evsel) { - return __perf_evsel__sample_size(evsel->attr.sample_type); + return list_entry(evsel->node.next, struct perf_evsel, node); } - -void hists__init(struct hists *hists); - #endif /* __PERF_EVSEL_H */ diff --git a/trunk/tools/perf/util/generate-cmdlist.sh b/trunk/tools/perf/util/generate-cmdlist.sh index f06f6fd148f8..389590c1ad21 100755 --- a/trunk/tools/perf/util/generate-cmdlist.sh +++ b/trunk/tools/perf/util/generate-cmdlist.sh @@ -21,4 +21,19 @@ do p }' "Documentation/perf-$cmd.txt" done + +echo "#ifndef NO_LIBELF_SUPPORT" +sed -n -e 's/^perf-\([^ ]*\)[ ].* full.*/\1/p' command-list.txt | +sort | 
+while read cmd +do + sed -n ' + /^NAME/,/perf-'"$cmd"'/H + ${ + x + s/.*perf-'"$cmd"' - \(.*\)/ {"'"$cmd"'", "\1"},/ + p + }' "Documentation/perf-$cmd.txt" +done +echo "#endif /* NO_LIBELF_SUPPORT */" echo "};" diff --git a/trunk/tools/perf/util/header.c b/trunk/tools/perf/util/header.c index 3a6d20443330..7daad237dea5 100644 --- a/trunk/tools/perf/util/header.c +++ b/trunk/tools/perf/util/header.c @@ -20,11 +20,14 @@ #include "symbol.h" #include "debug.h" #include "cpumap.h" +#include "pmu.h" +#include "vdso.h" +#include "strbuf.h" static bool no_buildid_cache = false; -static int event_count; -static struct perf_trace_event_type *events; +static int trace_event_count; +static struct perf_trace_event_type *trace_events; static u32 header_argc; static const char **header_argv; @@ -36,24 +39,24 @@ int perf_header__push_event(u64 id, const char *name) if (strlen(name) > MAX_EVENT_NAME) pr_warning("Event %s will be truncated\n", name); - nevents = realloc(events, (event_count + 1) * sizeof(*events)); + nevents = realloc(trace_events, (trace_event_count + 1) * sizeof(*trace_events)); if (nevents == NULL) return -ENOMEM; - events = nevents; + trace_events = nevents; - memset(&events[event_count], 0, sizeof(struct perf_trace_event_type)); - events[event_count].event_id = id; - strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1); - event_count++; + memset(&trace_events[trace_event_count], 0, sizeof(struct perf_trace_event_type)); + trace_events[trace_event_count].event_id = id; + strncpy(trace_events[trace_event_count].name, name, MAX_EVENT_NAME - 1); + trace_event_count++; return 0; } char *perf_header__find_event(u64 id) { int i; - for (i = 0 ; i < event_count; i++) { - if (events[i].event_id == id) - return events[i].name; + for (i = 0 ; i < trace_event_count; i++) { + if (trace_events[i].event_id == id) + return trace_events[i].name; } return NULL; } @@ -128,7 +131,7 @@ static int do_write_string(int fd, const char *str) int ret; olen = strlen(str) + 1; - len = ALIGN(olen, NAME_ALIGN); + len = PERF_ALIGN(olen, NAME_ALIGN); /* write len, incl. \0 */ ret = do_write(fd, &len, sizeof(len)); @@ -174,6 +177,15 @@ perf_header__set_cmdline(int argc, const char **argv) { int i; + /* + * If header_argv has already been set, do not override it. + * This allows a command to set the cmdline, parse args and + * then call another builtin function that implements a + * command -- e.g, cmd_kvm calling cmd_record. 
+ */ + if (header_argv) + return 0; + header_argc = (u32)argc; /* do not include NULL termination */ @@ -197,6 +209,29 @@ perf_header__set_cmdline(int argc, const char **argv) continue; \ else +static int write_buildid(char *name, size_t name_len, u8 *build_id, + pid_t pid, u16 misc, int fd) +{ + int err; + struct build_id_event b; + size_t len; + + len = name_len + 1; + len = PERF_ALIGN(len, NAME_ALIGN); + + memset(&b, 0, sizeof(b)); + memcpy(&b.build_id, build_id, BUILD_ID_SIZE); + b.pid = pid; + b.header.misc = misc; + b.header.size = sizeof(b) + len; + + err = do_write(fd, &b, sizeof(b)); + if (err < 0) + return err; + + return write_padded(fd, name, name_len + 1, len); +} + static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, u16 misc, int fd) { @@ -204,24 +239,23 @@ static int __dsos__write_buildid_table(struct list_head *head, pid_t pid, dsos__for_each_with_build_id(pos, head) { int err; - struct build_id_event b; - size_t len; + char *name; + size_t name_len; if (!pos->hit) continue; - len = pos->long_name_len + 1; - len = ALIGN(len, NAME_ALIGN); - memset(&b, 0, sizeof(b)); - memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id)); - b.pid = pid; - b.header.misc = misc; - b.header.size = sizeof(b) + len; - err = do_write(fd, &b, sizeof(b)); - if (err < 0) - return err; - err = write_padded(fd, pos->long_name, - pos->long_name_len + 1, len); - if (err < 0) + + if (is_vdso_map(pos->short_name)) { + name = (char *) VDSO__MAP_NAME; + name_len = sizeof(VDSO__MAP_NAME) + 1; + } else { + name = pos->long_name; + name_len = pos->long_name_len + 1; + } + + err = write_buildid(name, name_len, pos->build_id, + pid, misc, fd); + if (err) return err; } @@ -267,19 +301,20 @@ static int dsos__write_buildid_table(struct perf_header *header, int fd) } int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, - const char *name, bool is_kallsyms) + const char *name, bool is_kallsyms, bool is_vdso) { const size_t size = PATH_MAX; char *realname, *filename = zalloc(size), *linkname = zalloc(size), *targetname; int len, err = -1; + bool slash = is_kallsyms || is_vdso; if (is_kallsyms) { if (symbol_conf.kptr_restrict) { pr_debug("Not caching a kptr_restrict'ed /proc/kallsyms\n"); return 0; } - realname = (char *)name; + realname = (char *) name; } else realname = realpath(name, NULL); @@ -287,7 +322,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, goto out_free; len = scnprintf(filename, size, "%s%s%s", - debugdir, is_kallsyms ? "/" : "", realname); + debugdir, slash ? "/" : "", + is_vdso ? 
VDSO__MAP_NAME : realname); if (mkdir_p(filename, 0755)) goto out_free; @@ -323,13 +359,14 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size, const char *name, const char *debugdir, - bool is_kallsyms) + bool is_kallsyms, bool is_vdso) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; build_id__sprintf(build_id, build_id_size, sbuild_id); - return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms); + return build_id_cache__add_s(sbuild_id, debugdir, name, + is_kallsyms, is_vdso); } int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) @@ -373,9 +410,11 @@ int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir) static int dso__cache_build_id(struct dso *dso, const char *debugdir) { bool is_kallsyms = dso->kernel && dso->long_name[0] != '/'; + bool is_vdso = is_vdso_map(dso->short_name); return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), - dso->long_name, debugdir, is_kallsyms); + dso->long_name, debugdir, + is_kallsyms, is_vdso); } static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir) @@ -437,7 +476,7 @@ static bool perf_session__read_build_ids(struct perf_session *session, bool with return ret; } -static int write_tracing_data(int fd, struct perf_header *h __used, +static int write_tracing_data(int fd, struct perf_header *h __maybe_unused, struct perf_evlist *evlist) { return read_tracing_data(fd, &evlist->entries); @@ -445,7 +484,7 @@ static int write_tracing_data(int fd, struct perf_header *h __used, static int write_build_id(int fd, struct perf_header *h, - struct perf_evlist *evlist __used) + struct perf_evlist *evlist __maybe_unused) { struct perf_session *session; int err; @@ -466,8 +505,8 @@ static int write_build_id(int fd, struct perf_header *h, return 0; } -static int write_hostname(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_hostname(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -479,8 +518,8 @@ static int write_hostname(int fd, struct perf_header *h __used, return do_write_string(fd, uts.nodename); } -static int write_osrelease(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_osrelease(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -492,8 +531,8 @@ static int write_osrelease(int fd, struct perf_header *h __used, return do_write_string(fd, uts.release); } -static int write_arch(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_arch(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct utsname uts; int ret; @@ -505,14 +544,14 @@ static int write_arch(int fd, struct perf_header *h __used, return do_write_string(fd, uts.machine); } -static int write_version(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_version(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { return do_write_string(fd, perf_version_string); } -static int write_cpudesc(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { #ifndef CPUINFO_PROC #define CPUINFO_PROC NULL @@ -570,8 
+609,8 @@ static int write_cpudesc(int fd, struct perf_header *h __used, return ret; } -static int write_nrcpus(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_nrcpus(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { long nr; u32 nrc, nra; @@ -596,15 +635,14 @@ static int write_nrcpus(int fd, struct perf_header *h __used, return do_write(fd, &nra, sizeof(nra)); } -static int write_event_desc(int fd, struct perf_header *h __used, +static int write_event_desc(int fd, struct perf_header *h __maybe_unused, struct perf_evlist *evlist) { - struct perf_evsel *attr; - u32 nre = 0, nri, sz; + struct perf_evsel *evsel; + u32 nre, nri, sz; int ret; - list_for_each_entry(attr, &evlist->entries, node) - nre++; + nre = evlist->nr_entries; /* * write number of events @@ -616,14 +654,14 @@ static int write_event_desc(int fd, struct perf_header *h __used, /* * size of perf_event_attr struct */ - sz = (u32)sizeof(attr->attr); + sz = (u32)sizeof(evsel->attr); ret = do_write(fd, &sz, sizeof(sz)); if (ret < 0) return ret; - list_for_each_entry(attr, &evlist->entries, node) { + list_for_each_entry(evsel, &evlist->entries, node) { - ret = do_write(fd, &attr->attr, sz); + ret = do_write(fd, &evsel->attr, sz); if (ret < 0) return ret; /* @@ -633,7 +671,7 @@ static int write_event_desc(int fd, struct perf_header *h __used, * copy into an nri to be independent of the * type of ids, */ - nri = attr->ids; + nri = evsel->ids; ret = do_write(fd, &nri, sizeof(nri)); if (ret < 0) return ret; @@ -641,21 +679,21 @@ static int write_event_desc(int fd, struct perf_header *h __used, /* * write event string as passed on cmdline */ - ret = do_write_string(fd, perf_evsel__name(attr)); + ret = do_write_string(fd, perf_evsel__name(evsel)); if (ret < 0) return ret; /* * write unique ids for this event */ - ret = do_write(fd, attr->id, attr->ids * sizeof(u64)); + ret = do_write(fd, evsel->id, evsel->ids * sizeof(u64)); if (ret < 0) return ret; } return 0; } -static int write_cmdline(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_cmdline(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { char buf[MAXPATHLEN]; char proc[32]; @@ -823,8 +861,8 @@ static struct cpu_topo *build_cpu_topology(void) return tp; } -static int write_cpu_topology(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_cpu_topology(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { struct cpu_topo *tp; u32 i; @@ -859,8 +897,8 @@ static int write_cpu_topology(int fd, struct perf_header *h __used, -static int write_total_mem(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_total_mem(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { char *buf = NULL; FILE *fp; @@ -945,8 +983,8 @@ static int write_topo_node(int fd, int node) return ret; } -static int write_numa_topology(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_numa_topology(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { char *buf = NULL; size_t len = 0; @@ -994,17 +1032,57 @@ static int write_numa_topology(int fd, struct perf_header *h __used, return ret; } +/* + * File format: + * + * struct pmu_mappings { + * u32 pmu_num; + * struct pmu_map { + * u32 type; + * char name[]; + * 
}[pmu_num]; + * }; + */ + +static int write_pmu_mappings(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) +{ + struct perf_pmu *pmu = NULL; + off_t offset = lseek(fd, 0, SEEK_CUR); + __u32 pmu_num = 0; + + /* write real pmu_num later */ + do_write(fd, &pmu_num, sizeof(pmu_num)); + + while ((pmu = perf_pmu__scan(pmu))) { + if (!pmu->name) + continue; + pmu_num++; + do_write(fd, &pmu->type, sizeof(pmu->type)); + do_write_string(fd, pmu->name); + } + + if (pwrite(fd, &pmu_num, sizeof(pmu_num), offset) != sizeof(pmu_num)) { + /* discard all */ + lseek(fd, offset, SEEK_SET); + return -1; + } + + return 0; +} + /* * default get_cpuid(): nothing gets recorded * actual implementation must be in arch/$(ARCH)/util/header.c */ -int __attribute__((weak)) get_cpuid(char *buffer __used, size_t sz __used) +int __attribute__ ((weak)) get_cpuid(char *buffer __maybe_unused, + size_t sz __maybe_unused) { return -1; } -static int write_cpuid(int fd, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_cpuid(int fd, struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { char buffer[64]; int ret; @@ -1018,133 +1096,113 @@ static int write_cpuid(int fd, struct perf_header *h __used, return do_write_string(fd, buffer); } -static int write_branch_stack(int fd __used, struct perf_header *h __used, - struct perf_evlist *evlist __used) +static int write_branch_stack(int fd __maybe_unused, + struct perf_header *h __maybe_unused, + struct perf_evlist *evlist __maybe_unused) { return 0; } -static void print_hostname(struct perf_header *ph, int fd, FILE *fp) +static void print_hostname(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - char *str = do_read_string(fd, ph); - fprintf(fp, "# hostname : %s\n", str); - free(str); + fprintf(fp, "# hostname : %s\n", ph->env.hostname); } -static void print_osrelease(struct perf_header *ph, int fd, FILE *fp) +static void print_osrelease(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - char *str = do_read_string(fd, ph); - fprintf(fp, "# os release : %s\n", str); - free(str); + fprintf(fp, "# os release : %s\n", ph->env.os_release); } -static void print_arch(struct perf_header *ph, int fd, FILE *fp) +static void print_arch(struct perf_header *ph, int fd __maybe_unused, FILE *fp) { - char *str = do_read_string(fd, ph); - fprintf(fp, "# arch : %s\n", str); - free(str); + fprintf(fp, "# arch : %s\n", ph->env.arch); } -static void print_cpudesc(struct perf_header *ph, int fd, FILE *fp) +static void print_cpudesc(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - char *str = do_read_string(fd, ph); - fprintf(fp, "# cpudesc : %s\n", str); - free(str); + fprintf(fp, "# cpudesc : %s\n", ph->env.cpu_desc); } -static void print_nrcpus(struct perf_header *ph, int fd, FILE *fp) +static void print_nrcpus(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - ssize_t ret; - u32 nr; - - ret = read(fd, &nr, sizeof(nr)); - if (ret != (ssize_t)sizeof(nr)) - nr = -1; /* interpreted as error */ - - if (ph->needs_swap) - nr = bswap_32(nr); - - fprintf(fp, "# nrcpus online : %u\n", nr); - - ret = read(fd, &nr, sizeof(nr)); - if (ret != (ssize_t)sizeof(nr)) - nr = -1; /* interpreted as error */ - - if (ph->needs_swap) - nr = bswap_32(nr); - - fprintf(fp, "# nrcpus avail : %u\n", nr); + fprintf(fp, "# nrcpus online : %u\n", ph->env.nr_cpus_online); + fprintf(fp, "# nrcpus avail : %u\n", ph->env.nr_cpus_avail); } -static void print_version(struct perf_header 
*ph, int fd, FILE *fp) +static void print_version(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - char *str = do_read_string(fd, ph); - fprintf(fp, "# perf version : %s\n", str); - free(str); + fprintf(fp, "# perf version : %s\n", ph->env.version); } -static void print_cmdline(struct perf_header *ph, int fd, FILE *fp) +static void print_cmdline(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - ssize_t ret; + int nr, i; char *str; - u32 nr, i; - ret = read(fd, &nr, sizeof(nr)); - if (ret != (ssize_t)sizeof(nr)) - return; - - if (ph->needs_swap) - nr = bswap_32(nr); + nr = ph->env.nr_cmdline; + str = ph->env.cmdline; fprintf(fp, "# cmdline : "); for (i = 0; i < nr; i++) { - str = do_read_string(fd, ph); fprintf(fp, "%s ", str); - free(str); + str += strlen(str) + 1; } fputc('\n', fp); } -static void print_cpu_topology(struct perf_header *ph, int fd, FILE *fp) +static void print_cpu_topology(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - ssize_t ret; - u32 nr, i; + int nr, i; char *str; - ret = read(fd, &nr, sizeof(nr)); - if (ret != (ssize_t)sizeof(nr)) - return; - - if (ph->needs_swap) - nr = bswap_32(nr); + nr = ph->env.nr_sibling_cores; + str = ph->env.sibling_cores; for (i = 0; i < nr; i++) { - str = do_read_string(fd, ph); fprintf(fp, "# sibling cores : %s\n", str); - free(str); + str += strlen(str) + 1; } - ret = read(fd, &nr, sizeof(nr)); - if (ret != (ssize_t)sizeof(nr)) - return; - - if (ph->needs_swap) - nr = bswap_32(nr); + nr = ph->env.nr_sibling_threads; + str = ph->env.sibling_threads; for (i = 0; i < nr; i++) { - str = do_read_string(fd, ph); fprintf(fp, "# sibling threads : %s\n", str); - free(str); + str += strlen(str) + 1; } } -static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) +static void free_event_desc(struct perf_evsel *events) +{ + struct perf_evsel *evsel; + + if (!events) + return; + + for (evsel = events; evsel->attr.size; evsel++) { + if (evsel->name) + free(evsel->name); + if (evsel->id) + free(evsel->id); + } + + free(events); +} + +static struct perf_evsel * +read_event_desc(struct perf_header *ph, int fd) { - struct perf_event_attr attr; - uint64_t id; + struct perf_evsel *evsel, *events = NULL; + u64 *id; void *buf = NULL; - char *str; u32 nre, sz, nr, i, j; ssize_t ret; size_t msz; @@ -1164,18 +1222,22 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) if (ph->needs_swap) sz = bswap_32(sz); - memset(&attr, 0, sizeof(attr)); - /* buffer to hold on file attr struct */ buf = malloc(sz); if (!buf) goto error; - msz = sizeof(attr); + /* the last event terminates with evsel->attr.size == 0: */ + events = calloc(nre + 1, sizeof(*events)); + if (!events) + goto error; + + msz = sizeof(evsel->attr); if (sz < msz) msz = sz; - for (i = 0 ; i < nre; i++) { + for (i = 0, evsel = events; i < nre; evsel++, i++) { + evsel->idx = i; /* * must read entire on-file attr struct to @@ -1188,146 +1250,188 @@ static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) if (ph->needs_swap) perf_event__attr_swap(buf); - memcpy(&attr, buf, msz); + memcpy(&evsel->attr, buf, msz); ret = read(fd, &nr, sizeof(nr)); if (ret != (ssize_t)sizeof(nr)) goto error; - if (ph->needs_swap) + if (ph->needs_swap) { nr = bswap_32(nr); + evsel->needs_swap = true; + } - str = do_read_string(fd, ph); - fprintf(fp, "# event : name = %s, ", str); - free(str); + evsel->name = do_read_string(fd, ph); + + if (!nr) + continue; + + id = calloc(nr, sizeof(*id)); + if (!id) + goto error; + evsel->ids = nr; + evsel->id 
= id; + + for (j = 0 ; j < nr; j++) { + ret = read(fd, id, sizeof(*id)); + if (ret != (ssize_t)sizeof(*id)) + goto error; + if (ph->needs_swap) + *id = bswap_64(*id); + id++; + } + } +out: + if (buf) + free(buf); + return events; +error: + if (events) + free_event_desc(events); + events = NULL; + goto out; +} + +static void print_event_desc(struct perf_header *ph, int fd, FILE *fp) +{ + struct perf_evsel *evsel, *events = read_event_desc(ph, fd); + u32 j; + u64 *id; + + if (!events) { + fprintf(fp, "# event desc: not available or unable to read\n"); + return; + } + + for (evsel = events; evsel->attr.size; evsel++) { + fprintf(fp, "# event : name = %s, ", evsel->name); fprintf(fp, "type = %d, config = 0x%"PRIx64 ", config1 = 0x%"PRIx64", config2 = 0x%"PRIx64, - attr.type, - (u64)attr.config, - (u64)attr.config1, - (u64)attr.config2); + evsel->attr.type, + (u64)evsel->attr.config, + (u64)evsel->attr.config1, + (u64)evsel->attr.config2); fprintf(fp, ", excl_usr = %d, excl_kern = %d", - attr.exclude_user, - attr.exclude_kernel); + evsel->attr.exclude_user, + evsel->attr.exclude_kernel); fprintf(fp, ", excl_host = %d, excl_guest = %d", - attr.exclude_host, - attr.exclude_guest); + evsel->attr.exclude_host, + evsel->attr.exclude_guest); - fprintf(fp, ", precise_ip = %d", attr.precise_ip); + fprintf(fp, ", precise_ip = %d", evsel->attr.precise_ip); - if (nr) + if (evsel->ids) { fprintf(fp, ", id = {"); - - for (j = 0 ; j < nr; j++) { - ret = read(fd, &id, sizeof(id)); - if (ret != (ssize_t)sizeof(id)) - goto error; - - if (ph->needs_swap) - id = bswap_64(id); - - if (j) - fputc(',', fp); - - fprintf(fp, " %"PRIu64, id); - } - if (nr && j == nr) + for (j = 0, id = evsel->id; j < evsel->ids; j++, id++) { + if (j) + fputc(',', fp); + fprintf(fp, " %"PRIu64, *id); + } fprintf(fp, " }"); + } + fputc('\n', fp); } - free(buf); - return; -error: - fprintf(fp, "# event desc: not available or unable to read\n"); + + free_event_desc(events); } -static void print_total_mem(struct perf_header *h __used, int fd, FILE *fp) +static void print_total_mem(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - uint64_t mem; - ssize_t ret; - - ret = read(fd, &mem, sizeof(mem)); - if (ret != sizeof(mem)) - goto error; - - if (h->needs_swap) - mem = bswap_64(mem); - - fprintf(fp, "# total memory : %"PRIu64" kB\n", mem); - return; -error: - fprintf(fp, "# total memory : unknown\n"); + fprintf(fp, "# total memory : %Lu kB\n", ph->env.total_mem); } -static void print_numa_topology(struct perf_header *h __used, int fd, FILE *fp) +static void print_numa_topology(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) { - ssize_t ret; u32 nr, c, i; - char *str; + char *str, *tmp; uint64_t mem_total, mem_free; /* nr nodes */ - ret = read(fd, &nr, sizeof(nr)); - if (ret != (ssize_t)sizeof(nr)) - goto error; - - if (h->needs_swap) - nr = bswap_32(nr); + nr = ph->env.nr_numa_nodes; + str = ph->env.numa_nodes; for (i = 0; i < nr; i++) { - /* node number */ - ret = read(fd, &c, sizeof(c)); - if (ret != (ssize_t)sizeof(c)) + c = strtoul(str, &tmp, 0); + if (*tmp != ':') goto error; - if (h->needs_swap) - c = bswap_32(c); - - ret = read(fd, &mem_total, sizeof(u64)); - if (ret != sizeof(u64)) + str = tmp + 1; + mem_total = strtoull(str, &tmp, 0); + if (*tmp != ':') goto error; - ret = read(fd, &mem_free, sizeof(u64)); - if (ret != sizeof(u64)) + str = tmp + 1; + mem_free = strtoull(str, &tmp, 0); + if (*tmp != ':') goto error; - if (h->needs_swap) { - mem_total = bswap_64(mem_total); - mem_free = bswap_64(mem_free); - } - 
fprintf(fp, "# node%u meminfo : total = %"PRIu64" kB," " free = %"PRIu64" kB\n", - c, - mem_total, - mem_free); + c, mem_total, mem_free); - str = do_read_string(fd, h); + str = tmp + 1; fprintf(fp, "# node%u cpu list : %s\n", c, str); - free(str); } return; error: fprintf(fp, "# numa topology : not available\n"); } -static void print_cpuid(struct perf_header *ph, int fd, FILE *fp) +static void print_cpuid(struct perf_header *ph, int fd __maybe_unused, FILE *fp) { - char *str = do_read_string(fd, ph); - fprintf(fp, "# cpuid : %s\n", str); - free(str); + fprintf(fp, "# cpuid : %s\n", ph->env.cpuid); } -static void print_branch_stack(struct perf_header *ph __used, int fd __used, - FILE *fp) +static void print_branch_stack(struct perf_header *ph __maybe_unused, + int fd __maybe_unused, FILE *fp) { fprintf(fp, "# contains samples with branch stack\n"); } +static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused, + FILE *fp) +{ + const char *delimiter = "# pmu mappings: "; + char *str, *tmp; + u32 pmu_num; + u32 type; + + pmu_num = ph->env.nr_pmu_mappings; + if (!pmu_num) { + fprintf(fp, "# pmu mappings: not available\n"); + return; + } + + str = ph->env.pmu_mappings; + + while (pmu_num) { + type = strtoul(str, &tmp, 0); + if (*tmp != ':') + goto error; + + str = tmp + 1; + fprintf(fp, "%s%s = %" PRIu32, delimiter, str, type); + + delimiter = ", "; + str += strlen(str) + 1; + pmu_num--; + } + + fprintf(fp, "\n"); + + if (!pmu_num) + return; +error: + fprintf(fp, "# pmu mappings: unable to read\n"); +} + static int __event_process_build_id(struct build_id_event *bev, char *filename, struct perf_session *session) @@ -1389,7 +1493,7 @@ static int perf_header__read_build_ids_abi_quirk(struct perf_header *header, struct perf_session *session = container_of(header, struct perf_session, header); struct { struct perf_event_header header; - u8 build_id[ALIGN(BUILD_ID_SIZE, sizeof(u64))]; + u8 build_id[PERF_ALIGN(BUILD_ID_SIZE, sizeof(u64))]; char filename[0]; } old_bev; struct build_id_event bev; @@ -1478,28 +1582,375 @@ static int perf_header__read_build_ids(struct perf_header *header, return err; } -static int process_tracing_data(struct perf_file_section *section __unused, - struct perf_header *ph __unused, - int feat __unused, int fd, void *data) +static int process_tracing_data(struct perf_file_section *section __maybe_unused, + struct perf_header *ph __maybe_unused, + int fd, void *data) { trace_report(fd, data, false); return 0; } static int process_build_id(struct perf_file_section *section, - struct perf_header *ph, - int feat __unused, int fd, void *data __used) + struct perf_header *ph, int fd, + void *data __maybe_unused) { if (perf_header__read_build_ids(ph, fd, section->offset, section->size)) pr_debug("Failed to read buildids, continuing...\n"); return 0; } +static int process_hostname(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.hostname = do_read_string(fd, ph); + return ph->env.hostname ? 0 : -ENOMEM; +} + +static int process_osrelease(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.os_release = do_read_string(fd, ph); + return ph->env.os_release ? 0 : -ENOMEM; +} + +static int process_version(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.version = do_read_string(fd, ph); + return ph->env.version ? 
0 : -ENOMEM; +} + +static int process_arch(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.arch = do_read_string(fd, ph); + return ph->env.arch ? 0 : -ENOMEM; +} + +static int process_nrcpus(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + size_t ret; + u32 nr; + + ret = read(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_cpus_online = nr; + + ret = read(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_cpus_avail = nr; + return 0; +} + +static int process_cpudesc(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.cpu_desc = do_read_string(fd, ph); + return ph->env.cpu_desc ? 0 : -ENOMEM; +} + +static int process_cpuid(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + ph->env.cpuid = do_read_string(fd, ph); + return ph->env.cpuid ? 0 : -ENOMEM; +} + +static int process_total_mem(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + uint64_t mem; + size_t ret; + + ret = read(fd, &mem, sizeof(mem)); + if (ret != sizeof(mem)) + return -1; + + if (ph->needs_swap) + mem = bswap_64(mem); + + ph->env.total_mem = mem; + return 0; +} + +static struct perf_evsel * +perf_evlist__find_by_index(struct perf_evlist *evlist, int idx) +{ + struct perf_evsel *evsel; + + list_for_each_entry(evsel, &evlist->entries, node) { + if (evsel->idx == idx) + return evsel; + } + + return NULL; +} + +static void +perf_evlist__set_event_name(struct perf_evlist *evlist, + struct perf_evsel *event) +{ + struct perf_evsel *evsel; + + if (!event->name) + return; + + evsel = perf_evlist__find_by_index(evlist, event->idx); + if (!evsel) + return; + + if (evsel->name) + return; + + evsel->name = strdup(event->name); +} + +static int +process_event_desc(struct perf_file_section *section __maybe_unused, + struct perf_header *header, int fd, + void *data __maybe_unused) +{ + struct perf_session *session; + struct perf_evsel *evsel, *events = read_event_desc(header, fd); + + if (!events) + return 0; + + session = container_of(header, struct perf_session, header); + for (evsel = events; evsel->attr.size; evsel++) + perf_evlist__set_event_name(session->evlist, evsel); + + free_event_desc(events); + + return 0; +} + +static int process_cmdline(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + size_t ret; + char *str; + u32 nr, i; + struct strbuf sb; + + ret = read(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_cmdline = nr; + strbuf_init(&sb, 128); + + for (i = 0; i < nr; i++) { + str = do_read_string(fd, ph); + if (!str) + goto error; + + /* include a NULL character at the end */ + strbuf_add(&sb, str, strlen(str) + 1); + free(str); + } + ph->env.cmdline = strbuf_detach(&sb, NULL); + return 0; + +error: + strbuf_release(&sb); + return -1; +} + +static int process_cpu_topology(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + size_t ret; + u32 nr, i; + char *str; + struct strbuf sb; + + ret = read(fd, &nr, sizeof(nr)); + if (ret != 
sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_sibling_cores = nr; + strbuf_init(&sb, 128); + + for (i = 0; i < nr; i++) { + str = do_read_string(fd, ph); + if (!str) + goto error; + + /* include a NULL character at the end */ + strbuf_add(&sb, str, strlen(str) + 1); + free(str); + } + ph->env.sibling_cores = strbuf_detach(&sb, NULL); + + ret = read(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + return -1; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_sibling_threads = nr; + + for (i = 0; i < nr; i++) { + str = do_read_string(fd, ph); + if (!str) + goto error; + + /* include a NULL character at the end */ + strbuf_add(&sb, str, strlen(str) + 1); + free(str); + } + ph->env.sibling_threads = strbuf_detach(&sb, NULL); + return 0; + +error: + strbuf_release(&sb); + return -1; +} + +static int process_numa_topology(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + size_t ret; + u32 nr, node, i; + char *str; + uint64_t mem_total, mem_free; + struct strbuf sb; + + /* nr nodes */ + ret = read(fd, &nr, sizeof(nr)); + if (ret != sizeof(nr)) + goto error; + + if (ph->needs_swap) + nr = bswap_32(nr); + + ph->env.nr_numa_nodes = nr; + strbuf_init(&sb, 256); + + for (i = 0; i < nr; i++) { + /* node number */ + ret = read(fd, &node, sizeof(node)); + if (ret != sizeof(node)) + goto error; + + ret = read(fd, &mem_total, sizeof(u64)); + if (ret != sizeof(u64)) + goto error; + + ret = read(fd, &mem_free, sizeof(u64)); + if (ret != sizeof(u64)) + goto error; + + if (ph->needs_swap) { + node = bswap_32(node); + mem_total = bswap_64(mem_total); + mem_free = bswap_64(mem_free); + } + + strbuf_addf(&sb, "%u:%"PRIu64":%"PRIu64":", + node, mem_total, mem_free); + + str = do_read_string(fd, ph); + if (!str) + goto error; + + /* include a NULL character at the end */ + strbuf_add(&sb, str, strlen(str) + 1); + free(str); + } + ph->env.numa_nodes = strbuf_detach(&sb, NULL); + return 0; + +error: + strbuf_release(&sb); + return -1; +} + +static int process_pmu_mappings(struct perf_file_section *section __maybe_unused, + struct perf_header *ph, int fd, + void *data __maybe_unused) +{ + size_t ret; + char *name; + u32 pmu_num; + u32 type; + struct strbuf sb; + + ret = read(fd, &pmu_num, sizeof(pmu_num)); + if (ret != sizeof(pmu_num)) + return -1; + + if (ph->needs_swap) + pmu_num = bswap_32(pmu_num); + + if (!pmu_num) { + pr_debug("pmu mappings not available\n"); + return 0; + } + + ph->env.nr_pmu_mappings = pmu_num; + strbuf_init(&sb, 128); + + while (pmu_num) { + if (read(fd, &type, sizeof(type)) != sizeof(type)) + goto error; + if (ph->needs_swap) + type = bswap_32(type); + + name = do_read_string(fd, ph); + if (!name) + goto error; + + strbuf_addf(&sb, "%u:%s", type, name); + /* include a NULL character at the end */ + strbuf_add(&sb, "", 1); + + free(name); + pmu_num--; + } + ph->env.pmu_mappings = strbuf_detach(&sb, NULL); + return 0; + +error: + strbuf_release(&sb); + return -1; +} + struct feature_ops { int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist); void (*print)(struct perf_header *h, int fd, FILE *fp); int (*process)(struct perf_file_section *section, - struct perf_header *h, int feat, int fd, void *data); + struct perf_header *h, int fd, void *data); const char *name; bool full_only; }; @@ -1511,7 +1962,7 @@ struct feature_ops { .process = process_##func } #define FEAT_OPF(n, func) \ [n] = { .name = #n, .write = write_##func, .print = print_##func, \ - .full_only = 
true } + .process = process_##func, .full_only = true } /* feature_ops not implemented: */ #define print_tracing_data NULL @@ -1520,19 +1971,20 @@ struct feature_ops { static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = { FEAT_OPP(HEADER_TRACING_DATA, tracing_data), FEAT_OPP(HEADER_BUILD_ID, build_id), - FEAT_OPA(HEADER_HOSTNAME, hostname), - FEAT_OPA(HEADER_OSRELEASE, osrelease), - FEAT_OPA(HEADER_VERSION, version), - FEAT_OPA(HEADER_ARCH, arch), - FEAT_OPA(HEADER_NRCPUS, nrcpus), - FEAT_OPA(HEADER_CPUDESC, cpudesc), - FEAT_OPA(HEADER_CPUID, cpuid), - FEAT_OPA(HEADER_TOTAL_MEM, total_mem), - FEAT_OPA(HEADER_EVENT_DESC, event_desc), - FEAT_OPA(HEADER_CMDLINE, cmdline), + FEAT_OPP(HEADER_HOSTNAME, hostname), + FEAT_OPP(HEADER_OSRELEASE, osrelease), + FEAT_OPP(HEADER_VERSION, version), + FEAT_OPP(HEADER_ARCH, arch), + FEAT_OPP(HEADER_NRCPUS, nrcpus), + FEAT_OPP(HEADER_CPUDESC, cpudesc), + FEAT_OPP(HEADER_CPUID, cpuid), + FEAT_OPP(HEADER_TOTAL_MEM, total_mem), + FEAT_OPP(HEADER_EVENT_DESC, event_desc), + FEAT_OPP(HEADER_CMDLINE, cmdline), FEAT_OPF(HEADER_CPU_TOPOLOGY, cpu_topology), FEAT_OPF(HEADER_NUMA_TOPOLOGY, numa_topology), FEAT_OPA(HEADER_BRANCH_STACK, branch_stack), + FEAT_OPP(HEADER_PMU_MAPPINGS, pmu_mappings), }; struct header_print_data { @@ -1674,17 +2126,17 @@ int perf_session__write_header(struct perf_session *session, struct perf_file_header f_header; struct perf_file_attr f_attr; struct perf_header *header = &session->header; - struct perf_evsel *attr, *pair = NULL; + struct perf_evsel *evsel, *pair = NULL; int err; lseek(fd, sizeof(f_header), SEEK_SET); if (session->evlist != evlist) - pair = list_entry(session->evlist->entries.next, struct perf_evsel, node); + pair = perf_evlist__first(session->evlist); - list_for_each_entry(attr, &evlist->entries, node) { - attr->id_offset = lseek(fd, 0, SEEK_CUR); - err = do_write(fd, attr->id, attr->ids * sizeof(u64)); + list_for_each_entry(evsel, &evlist->entries, node) { + evsel->id_offset = lseek(fd, 0, SEEK_CUR); + err = do_write(fd, evsel->id, evsel->ids * sizeof(u64)); if (err < 0) { out_err_write: pr_debug("failed to write perf header\n"); @@ -1694,19 +2146,19 @@ int perf_session__write_header(struct perf_session *session, err = do_write(fd, pair->id, pair->ids * sizeof(u64)); if (err < 0) goto out_err_write; - attr->ids += pair->ids; - pair = list_entry(pair->node.next, struct perf_evsel, node); + evsel->ids += pair->ids; + pair = perf_evsel__next(pair); } } header->attr_offset = lseek(fd, 0, SEEK_CUR); - list_for_each_entry(attr, &evlist->entries, node) { + list_for_each_entry(evsel, &evlist->entries, node) { f_attr = (struct perf_file_attr){ - .attr = attr->attr, + .attr = evsel->attr, .ids = { - .offset = attr->id_offset, - .size = attr->ids * sizeof(u64), + .offset = evsel->id_offset, + .size = evsel->ids * sizeof(u64), } }; err = do_write(fd, &f_attr, sizeof(f_attr)); @@ -1717,9 +2169,9 @@ int perf_session__write_header(struct perf_session *session, } header->event_offset = lseek(fd, 0, SEEK_CUR); - header->event_size = event_count * sizeof(struct perf_trace_event_type); - if (events) { - err = do_write(fd, events, header->event_size); + header->event_size = trace_event_count * sizeof(struct perf_trace_event_type); + if (trace_events) { + err = do_write(fd, trace_events, header->event_size); if (err < 0) { pr_debug("failed to write perf header events\n"); return err; @@ -1820,6 +2272,8 @@ int perf_header__process_sections(struct perf_header *header, int fd, static const int attr_file_abi_sizes[] = { [0] = 
PERF_ATTR_SIZE_VER0, [1] = PERF_ATTR_SIZE_VER1, + [2] = PERF_ATTR_SIZE_VER2, + [3] = PERF_ATTR_SIZE_VER3, 0, }; @@ -2010,7 +2464,7 @@ static int perf_file_section__process(struct perf_file_section *section, if (!feat_ops[feat].process) return 0; - return feat_ops[feat].process(section, ph, feat, fd, data); + return feat_ops[feat].process(section, ph, fd, data); } static int perf_file_header__read_pipe(struct perf_pipe_file_header *header, @@ -2099,32 +2553,39 @@ static int read_attr(int fd, struct perf_header *ph, return ret <= 0 ? -1 : 0; } -static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel, - struct pevent *pevent) +static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, + struct pevent *pevent) { - struct event_format *event = pevent_find_event(pevent, - evsel->attr.config); + struct event_format *event; char bf[128]; + /* already prepared */ + if (evsel->tp_format) + return 0; + + event = pevent_find_event(pevent, evsel->attr.config); if (event == NULL) return -1; - snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); - evsel->name = strdup(bf); - if (event->name == NULL) - return -1; + if (!evsel->name) { + snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name); + evsel->name = strdup(bf); + if (evsel->name == NULL) + return -1; + } + evsel->tp_format = event; return 0; } -static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist, - struct pevent *pevent) +static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, + struct pevent *pevent) { struct perf_evsel *pos; list_for_each_entry(pos, &evlist->entries, node) { if (pos->attr.type == PERF_TYPE_TRACEPOINT && - perf_evsel__set_tracepoint_name(pos, pevent)) + perf_evsel__prepare_tracepoint_event(pos, pevent)) return -1; } @@ -2167,6 +2628,8 @@ int perf_session__read_header(struct perf_session *session, int fd) if (evsel == NULL) goto out_delete_evlist; + + evsel->needs_swap = header->needs_swap; /* * Do it before so that if perf_evsel__alloc_id fails, this * entry gets purged too at perf_evlist__delete(). 
@@ -2198,13 +2661,13 @@ int perf_session__read_header(struct perf_session *session, int fd) if (f_header.event_types.size) { lseek(fd, f_header.event_types.offset, SEEK_SET); - events = malloc(f_header.event_types.size); - if (events == NULL) + trace_events = malloc(f_header.event_types.size); + if (trace_events == NULL) return -ENOMEM; - if (perf_header__getbuffer64(header, fd, events, + if (perf_header__getbuffer64(header, fd, trace_events, f_header.event_types.size)) goto out_errno; - event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); + trace_event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type); } perf_header__process_sections(header, fd, &session->pevent, @@ -2212,7 +2675,8 @@ int perf_session__read_header(struct perf_session *session, int fd) lseek(fd, header->data_offset, SEEK_SET); - if (perf_evlist__set_tracepoint_names(session->evlist, session->pevent)) + if (perf_evlist__prepare_tracepoint_events(session->evlist, + session->pevent)) goto out_delete_evlist; header->frozen = 1; @@ -2227,7 +2691,7 @@ int perf_session__read_header(struct perf_session *session, int fd) } int perf_event__synthesize_attr(struct perf_tool *tool, - struct perf_event_attr *attr, u16 ids, u64 *id, + struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process) { union perf_event *ev; @@ -2235,7 +2699,7 @@ int perf_event__synthesize_attr(struct perf_tool *tool, int err; size = sizeof(struct perf_event_attr); - size = ALIGN(size, sizeof(u64)); + size = PERF_ALIGN(size, sizeof(u64)); size += sizeof(struct perf_event_header); size += ids * sizeof(u64); @@ -2248,9 +2712,12 @@ int perf_event__synthesize_attr(struct perf_tool *tool, memcpy(ev->attr.id, id, ids * sizeof(u64)); ev->attr.header.type = PERF_RECORD_HEADER_ATTR; - ev->attr.header.size = size; + ev->attr.header.size = (u16)size; - err = process(tool, ev, NULL, NULL); + if (ev->attr.header.size == size) + err = process(tool, ev, NULL, NULL); + else + err = -E2BIG; free(ev); @@ -2261,12 +2728,12 @@ int perf_event__synthesize_attrs(struct perf_tool *tool, struct perf_session *session, perf_event__handler_t process) { - struct perf_evsel *attr; + struct perf_evsel *evsel; int err = 0; - list_for_each_entry(attr, &session->evlist->entries, node) { - err = perf_event__synthesize_attr(tool, &attr->attr, attr->ids, - attr->id, process); + list_for_each_entry(evsel, &session->evlist->entries, node) { + err = perf_event__synthesize_attr(tool, &evsel->attr, evsel->ids, + evsel->id, process); if (err) { pr_debug("failed to create perf header attribute\n"); return err; @@ -2279,7 +2746,7 @@ int perf_event__synthesize_attrs(struct perf_tool *tool, int perf_event__process_attr(union perf_event *event, struct perf_evlist **pevlist) { - unsigned int i, ids, n_ids; + u32 i, ids, n_ids; struct perf_evsel *evsel; struct perf_evlist *evlist = *pevlist; @@ -2330,7 +2797,7 @@ int perf_event__synthesize_event_type(struct perf_tool *tool, ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE; size = strlen(ev.event_type.event_type.name); - size = ALIGN(size, sizeof(u64)); + size = PERF_ALIGN(size, sizeof(u64)); ev.event_type.header.size = sizeof(ev.event_type) - (sizeof(ev.event_type.event_type.name) - size); @@ -2346,8 +2813,8 @@ int perf_event__synthesize_event_types(struct perf_tool *tool, struct perf_trace_event_type *type; int i, err = 0; - for (i = 0; i < event_count; i++) { - type = &events[i]; + for (i = 0; i < trace_event_count; i++) { + type = &trace_events[i]; err = 
perf_event__synthesize_event_type(tool, type->event_id, type->name, process, @@ -2361,7 +2828,7 @@ int perf_event__synthesize_event_types(struct perf_tool *tool, return err; } -int perf_event__process_event_type(struct perf_tool *tool __unused, +int perf_event__process_event_type(struct perf_tool *tool __maybe_unused, union perf_event *event) { if (perf_header__push_event(event->event_type.event_type.event_id, @@ -2378,7 +2845,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, union perf_event ev; struct tracing_data *tdata; ssize_t size = 0, aligned_size = 0, padding; - int err __used = 0; + int err __maybe_unused = 0; /* * We are going to store the size of the data followed @@ -2399,7 +2866,7 @@ int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; size = tdata->size; - aligned_size = ALIGN(size, sizeof(u64)); + aligned_size = PERF_ALIGN(size, sizeof(u64)); padding = aligned_size - size; ev.tracing_data.header.size = sizeof(ev.tracing_data); ev.tracing_data.size = aligned_size; @@ -2430,7 +2897,7 @@ int perf_event__process_tracing_data(union perf_event *event, size_read = trace_report(session->fd, &session->pevent, session->repipe); - padding = ALIGN(size_read, sizeof(u64)) - size_read; + padding = PERF_ALIGN(size_read, sizeof(u64)) - size_read; if (read(session->fd, buf, padding) < 0) die("reading input file"); @@ -2443,6 +2910,9 @@ int perf_event__process_tracing_data(union perf_event *event, if (size_read + padding != size) die("tracing data size mismatch"); + perf_evlist__prepare_tracepoint_events(session->evlist, + session->pevent); + return size_read + padding; } @@ -2461,7 +2931,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, memset(&ev, 0, sizeof(ev)); len = pos->long_name_len + 1; - len = ALIGN(len, NAME_ALIGN); + len = PERF_ALIGN(len, NAME_ALIGN); memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id)); ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID; ev.build_id.header.misc = misc; @@ -2474,7 +2944,7 @@ int perf_event__synthesize_build_id(struct perf_tool *tool, return err; } -int perf_event__process_build_id(struct perf_tool *tool __used, +int perf_event__process_build_id(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_session *session) { diff --git a/trunk/tools/perf/util/header.h b/trunk/tools/perf/util/header.h index 2d42b3e1826f..99bdd3abce59 100644 --- a/trunk/tools/perf/util/header.h +++ b/trunk/tools/perf/util/header.h @@ -28,6 +28,7 @@ enum { HEADER_CPU_TOPOLOGY, HEADER_NUMA_TOPOLOGY, HEADER_BRANCH_STACK, + HEADER_PMU_MAPPINGS, HEADER_LAST_FEATURE, HEADER_FEAT_BITS = 256, }; @@ -57,6 +58,29 @@ struct perf_header; int perf_file_header__read(struct perf_file_header *header, struct perf_header *ph, int fd); +struct perf_session_env { + char *hostname; + char *os_release; + char *version; + char *arch; + int nr_cpus_online; + int nr_cpus_avail; + char *cpu_desc; + char *cpuid; + unsigned long long total_mem; + + int nr_cmdline; + char *cmdline; + int nr_sibling_cores; + char *sibling_cores; + int nr_sibling_threads; + char *sibling_threads; + int nr_numa_nodes; + char *numa_nodes; + int nr_pmu_mappings; + char *pmu_mappings; +}; + struct perf_header { int frozen; bool needs_swap; @@ -66,6 +90,7 @@ struct perf_header { u64 event_offset; u64 event_size; DECLARE_BITMAP(adds_features, HEADER_FEAT_BITS); + struct perf_session_env env; }; struct perf_evlist; @@ -95,11 +120,11 @@ int 
perf_header__process_sections(struct perf_header *header, int fd, int perf_header__fprintf_info(struct perf_session *s, FILE *fp, bool full); int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, - const char *name, bool is_kallsyms); + const char *name, bool is_kallsyms, bool is_vdso); int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir); int perf_event__synthesize_attr(struct perf_tool *tool, - struct perf_event_attr *attr, u16 ids, u64 *id, + struct perf_event_attr *attr, u32 ids, u64 *id, perf_event__handler_t process); int perf_event__synthesize_attrs(struct perf_tool *tool, struct perf_session *session, diff --git a/trunk/tools/perf/util/help.c b/trunk/tools/perf/util/help.c index 6f2975a00358..8b1f6e891b8a 100644 --- a/trunk/tools/perf/util/help.c +++ b/trunk/tools/perf/util/help.c @@ -3,6 +3,7 @@ #include "exec_cmd.h" #include "levenshtein.h" #include "help.h" +#include void add_cmdname(struct cmdnames *cmds, const char *name, size_t len) { @@ -331,7 +332,8 @@ const char *help_unknown_cmd(const char *cmd) exit(1); } -int cmd_version(int argc __used, const char **argv __used, const char *prefix __used) +int cmd_version(int argc __maybe_unused, const char **argv __maybe_unused, + const char *prefix __maybe_unused) { printf("perf version %s\n", perf_version_string); return 0; diff --git a/trunk/tools/perf/util/hist.c b/trunk/tools/perf/util/hist.c index f247ef2789a4..236bc9d98ff2 100644 --- a/trunk/tools/perf/util/hist.c +++ b/trunk/tools/perf/util/hist.c @@ -45,7 +45,7 @@ bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) return false; } -static void hists__reset_col_len(struct hists *hists) +void hists__reset_col_len(struct hists *hists) { enum hist_column col; @@ -63,7 +63,7 @@ static void hists__set_unres_dso_col_len(struct hists *hists, int dso) hists__set_col_len(hists, dso, unresolved_col_width); } -static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) +void hists__calc_col_len(struct hists *hists, struct hist_entry *h) { const unsigned int unresolved_col_width = BITS_PER_LONG / 4; u16 len; @@ -114,6 +114,22 @@ static void hists__calc_col_len(struct hists *hists, struct hist_entry *h) } } +void hists__output_recalc_col_len(struct hists *hists, int max_rows) +{ + struct rb_node *next = rb_first(&hists->entries); + struct hist_entry *n; + int row = 0; + + hists__reset_col_len(hists); + + while (next && row++ < max_rows) { + n = rb_entry(next, struct hist_entry, rb_node); + if (!n->filtered) + hists__calc_col_len(hists, n); + next = rb_next(&n->rb_node); + } +} + static void hist_entry__add_cpumode_period(struct hist_entry *he, unsigned int cpumode, u64 period) { @@ -378,7 +394,7 @@ void hist_entry__free(struct hist_entry *he) * collapse the histogram */ -static bool hists__collapse_insert_entry(struct hists *hists __used, +static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, struct rb_root *root, struct hist_entry *he) { @@ -394,8 +410,13 @@ static bool hists__collapse_insert_entry(struct hists *hists __used, cmp = hist_entry__collapse(iter, he); if (!cmp) { - iter->period += he->period; - iter->nr_events += he->nr_events; + iter->period += he->period; + iter->period_sys += he->period_sys; + iter->period_us += he->period_us; + iter->period_guest_sys += he->period_guest_sys; + iter->period_guest_us += he->period_guest_us; + iter->nr_events += he->nr_events; + if (symbol_conf.use_callchain) { callchain_cursor_reset(&callchain_cursor); callchain_merge(&callchain_cursor, @@ 
-547,674 +568,6 @@ void hists__output_resort_threaded(struct hists *hists) return __hists__output_resort(hists, true); } -static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin) -{ - int i; - int ret = fprintf(fp, " "); - - for (i = 0; i < left_margin; i++) - ret += fprintf(fp, " "); - - return ret; -} - -static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask, - int left_margin) -{ - int i; - size_t ret = callchain__fprintf_left_margin(fp, left_margin); - - for (i = 0; i < depth; i++) - if (depth_mask & (1 << i)) - ret += fprintf(fp, "| "); - else - ret += fprintf(fp, " "); - - ret += fprintf(fp, "\n"); - - return ret; -} - -static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_list *chain, - int depth, int depth_mask, int period, - u64 total_samples, u64 hits, - int left_margin) -{ - int i; - size_t ret = 0; - - ret += callchain__fprintf_left_margin(fp, left_margin); - for (i = 0; i < depth; i++) { - if (depth_mask & (1 << i)) - ret += fprintf(fp, "|"); - else - ret += fprintf(fp, " "); - if (!period && i == depth - 1) { - double percent; - - percent = hits * 100.0 / total_samples; - ret += percent_color_fprintf(fp, "--%2.2f%%-- ", percent); - } else - ret += fprintf(fp, "%s", " "); - } - if (chain->ms.sym) - ret += fprintf(fp, "%s\n", chain->ms.sym->name); - else - ret += fprintf(fp, "0x%0" PRIx64 "\n", chain->ip); - - return ret; -} - -static struct symbol *rem_sq_bracket; -static struct callchain_list rem_hits; - -static void init_rem_hits(void) -{ - rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6); - if (!rem_sq_bracket) { - fprintf(stderr, "Not enough memory to display remaining hits\n"); - return; - } - - strcpy(rem_sq_bracket->name, "[...]"); - rem_hits.ms.sym = rem_sq_bracket; -} - -static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root, - u64 total_samples, int depth, - int depth_mask, int left_margin) -{ - struct rb_node *node, *next; - struct callchain_node *child; - struct callchain_list *chain; - int new_depth_mask = depth_mask; - u64 remaining; - size_t ret = 0; - int i; - uint entries_printed = 0; - - remaining = total_samples; - - node = rb_first(root); - while (node) { - u64 new_total; - u64 cumul; - - child = rb_entry(node, struct callchain_node, rb_node); - cumul = callchain_cumul_hits(child); - remaining -= cumul; - - /* - * The depth mask manages the output of pipes that show - * the depth. We don't want to keep the pipes of the current - * level for the last child of this depth. - * Except if we have remaining filtered hits. 
They will - * supersede the last child - */ - next = rb_next(node); - if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining)) - new_depth_mask &= ~(1 << (depth - 1)); - - /* - * But we keep the older depth mask for the line separator - * to keep the level link until we reach the last child - */ - ret += ipchain__fprintf_graph_line(fp, depth, depth_mask, - left_margin); - i = 0; - list_for_each_entry(chain, &child->val, list) { - ret += ipchain__fprintf_graph(fp, chain, depth, - new_depth_mask, i++, - total_samples, - cumul, - left_margin); - } - - if (callchain_param.mode == CHAIN_GRAPH_REL) - new_total = child->children_hit; - else - new_total = total_samples; - - ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total, - depth + 1, - new_depth_mask | (1 << depth), - left_margin); - node = next; - if (++entries_printed == callchain_param.print_limit) - break; - } - - if (callchain_param.mode == CHAIN_GRAPH_REL && - remaining && remaining != total_samples) { - - if (!rem_sq_bracket) - return ret; - - new_depth_mask &= ~(1 << (depth - 1)); - ret += ipchain__fprintf_graph(fp, &rem_hits, depth, - new_depth_mask, 0, total_samples, - remaining, left_margin); - } - - return ret; -} - -static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root, - u64 total_samples, int left_margin) -{ - struct callchain_node *cnode; - struct callchain_list *chain; - u32 entries_printed = 0; - bool printed = false; - struct rb_node *node; - int i = 0; - int ret = 0; - - /* - * If have one single callchain root, don't bother printing - * its percentage (100 % in fractal mode and the same percentage - * than the hist in graph mode). This also avoid one level of column. - */ - node = rb_first(root); - if (node && !rb_next(node)) { - cnode = rb_entry(node, struct callchain_node, rb_node); - list_for_each_entry(chain, &cnode->val, list) { - /* - * If we sort by symbol, the first entry is the same than - * the symbol. No need to print it otherwise it appears as - * displayed twice. 
- */ - if (!i++ && sort__first_dimension == SORT_SYM) - continue; - if (!printed) { - ret += callchain__fprintf_left_margin(fp, left_margin); - ret += fprintf(fp, "|\n"); - ret += callchain__fprintf_left_margin(fp, left_margin); - ret += fprintf(fp, "---"); - left_margin += 3; - printed = true; - } else - ret += callchain__fprintf_left_margin(fp, left_margin); - - if (chain->ms.sym) - ret += fprintf(fp, " %s\n", chain->ms.sym->name); - else - ret += fprintf(fp, " %p\n", (void *)(long)chain->ip); - - if (++entries_printed == callchain_param.print_limit) - break; - } - root = &cnode->rb_root; - } - - ret += __callchain__fprintf_graph(fp, root, total_samples, - 1, 1, left_margin); - ret += fprintf(fp, "\n"); - - return ret; -} - -static size_t __callchain__fprintf_flat(FILE *fp, - struct callchain_node *self, - u64 total_samples) -{ - struct callchain_list *chain; - size_t ret = 0; - - if (!self) - return 0; - - ret += __callchain__fprintf_flat(fp, self->parent, total_samples); - - - list_for_each_entry(chain, &self->val, list) { - if (chain->ip >= PERF_CONTEXT_MAX) - continue; - if (chain->ms.sym) - ret += fprintf(fp, " %s\n", chain->ms.sym->name); - else - ret += fprintf(fp, " %p\n", - (void *)(long)chain->ip); - } - - return ret; -} - -static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *self, - u64 total_samples) -{ - size_t ret = 0; - u32 entries_printed = 0; - struct rb_node *rb_node; - struct callchain_node *chain; - - rb_node = rb_first(self); - while (rb_node) { - double percent; - - chain = rb_entry(rb_node, struct callchain_node, rb_node); - percent = chain->hit * 100.0 / total_samples; - - ret = percent_color_fprintf(fp, " %6.2f%%\n", percent); - ret += __callchain__fprintf_flat(fp, chain, total_samples); - ret += fprintf(fp, "\n"); - if (++entries_printed == callchain_param.print_limit) - break; - - rb_node = rb_next(rb_node); - } - - return ret; -} - -static size_t hist_entry_callchain__fprintf(struct hist_entry *he, - u64 total_samples, int left_margin, - FILE *fp) -{ - switch (callchain_param.mode) { - case CHAIN_GRAPH_REL: - return callchain__fprintf_graph(fp, &he->sorted_chain, he->period, - left_margin); - break; - case CHAIN_GRAPH_ABS: - return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples, - left_margin); - break; - case CHAIN_FLAT: - return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples); - break; - case CHAIN_NONE: - break; - default: - pr_err("Bad callchain mode\n"); - } - - return 0; -} - -void hists__output_recalc_col_len(struct hists *hists, int max_rows) -{ - struct rb_node *next = rb_first(&hists->entries); - struct hist_entry *n; - int row = 0; - - hists__reset_col_len(hists); - - while (next && row++ < max_rows) { - n = rb_entry(next, struct hist_entry, rb_node); - if (!n->filtered) - hists__calc_col_len(hists, n); - next = rb_next(&n->rb_node); - } -} - -static int hist_entry__pcnt_snprintf(struct hist_entry *he, char *s, - size_t size, struct hists *pair_hists, - bool show_displacement, long displacement, - bool color, u64 total_period) -{ - u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; - u64 nr_events; - const char *sep = symbol_conf.field_sep; - int ret; - - if (symbol_conf.exclude_other && !he->parent) - return 0; - - if (pair_hists) { - period = he->pair ? he->pair->period : 0; - nr_events = he->pair ? he->pair->nr_events : 0; - total = pair_hists->stats.total_period; - period_sys = he->pair ? he->pair->period_sys : 0; - period_us = he->pair ? 
he->pair->period_us : 0; - period_guest_sys = he->pair ? he->pair->period_guest_sys : 0; - period_guest_us = he->pair ? he->pair->period_guest_us : 0; - } else { - period = he->period; - nr_events = he->nr_events; - total = total_period; - period_sys = he->period_sys; - period_us = he->period_us; - period_guest_sys = he->period_guest_sys; - period_guest_us = he->period_guest_us; - } - - if (total) { - if (color) - ret = percent_color_snprintf(s, size, - sep ? "%.2f" : " %6.2f%%", - (period * 100.0) / total); - else - ret = scnprintf(s, size, sep ? "%.2f" : " %6.2f%%", - (period * 100.0) / total); - if (symbol_conf.show_cpu_utilization) { - ret += percent_color_snprintf(s + ret, size - ret, - sep ? "%.2f" : " %6.2f%%", - (period_sys * 100.0) / total); - ret += percent_color_snprintf(s + ret, size - ret, - sep ? "%.2f" : " %6.2f%%", - (period_us * 100.0) / total); - if (perf_guest) { - ret += percent_color_snprintf(s + ret, - size - ret, - sep ? "%.2f" : " %6.2f%%", - (period_guest_sys * 100.0) / - total); - ret += percent_color_snprintf(s + ret, - size - ret, - sep ? "%.2f" : " %6.2f%%", - (period_guest_us * 100.0) / - total); - } - } - } else - ret = scnprintf(s, size, sep ? "%" PRIu64 : "%12" PRIu64 " ", period); - - if (symbol_conf.show_nr_samples) { - if (sep) - ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); - else - ret += scnprintf(s + ret, size - ret, "%11" PRIu64, nr_events); - } - - if (symbol_conf.show_total_period) { - if (sep) - ret += scnprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); - else - ret += scnprintf(s + ret, size - ret, " %12" PRIu64, period); - } - - if (pair_hists) { - char bf[32]; - double old_percent = 0, new_percent = 0, diff; - - if (total > 0) - old_percent = (period * 100.0) / total; - if (total_period > 0) - new_percent = (he->period * 100.0) / total_period; - - diff = new_percent - old_percent; - - if (fabs(diff) >= 0.01) - scnprintf(bf, sizeof(bf), "%+4.2F%%", diff); - else - scnprintf(bf, sizeof(bf), " "); - - if (sep) - ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); - else - ret += scnprintf(s + ret, size - ret, "%11.11s", bf); - - if (show_displacement) { - if (displacement) - scnprintf(bf, sizeof(bf), "%+4ld", displacement); - else - scnprintf(bf, sizeof(bf), " "); - - if (sep) - ret += scnprintf(s + ret, size - ret, "%c%s", *sep, bf); - else - ret += scnprintf(s + ret, size - ret, "%6.6s", bf); - } - } - - return ret; -} - -int hist_entry__snprintf(struct hist_entry *he, char *s, size_t size, - struct hists *hists) -{ - const char *sep = symbol_conf.field_sep; - struct sort_entry *se; - int ret = 0; - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; - - ret += scnprintf(s + ret, size - ret, "%s", sep ?: " "); - ret += se->se_snprintf(he, s + ret, size - ret, - hists__col_len(hists, se->se_width_idx)); - } - - return ret; -} - -static int hist_entry__fprintf(struct hist_entry *he, size_t size, - struct hists *hists, struct hists *pair_hists, - bool show_displacement, long displacement, - u64 total_period, FILE *fp) -{ - char bf[512]; - int ret; - - if (size == 0 || size > sizeof(bf)) - size = sizeof(bf); - - ret = hist_entry__pcnt_snprintf(he, bf, size, pair_hists, - show_displacement, displacement, - true, total_period); - hist_entry__snprintf(he, bf + ret, size - ret, hists); - return fprintf(fp, "%s\n", bf); -} - -static size_t hist_entry__fprintf_callchain(struct hist_entry *he, - struct hists *hists, - u64 total_period, FILE *fp) -{ - int left_margin = 0; - - if 
(sort__first_dimension == SORT_COMM) { - struct sort_entry *se = list_first_entry(&hist_entry__sort_list, - typeof(*se), list); - left_margin = hists__col_len(hists, se->se_width_idx); - left_margin -= thread__comm_len(he->thread); - } - - return hist_entry_callchain__fprintf(he, total_period, left_margin, fp); -} - -size_t hists__fprintf(struct hists *hists, struct hists *pair, - bool show_displacement, bool show_header, int max_rows, - int max_cols, FILE *fp) -{ - struct sort_entry *se; - struct rb_node *nd; - size_t ret = 0; - u64 total_period; - unsigned long position = 1; - long displacement = 0; - unsigned int width; - const char *sep = symbol_conf.field_sep; - const char *col_width = symbol_conf.col_width_list_str; - int nr_rows = 0; - - init_rem_hits(); - - if (!show_header) - goto print_entries; - - fprintf(fp, "# %s", pair ? "Baseline" : "Overhead"); - - if (symbol_conf.show_cpu_utilization) { - if (sep) { - ret += fprintf(fp, "%csys", *sep); - ret += fprintf(fp, "%cus", *sep); - if (perf_guest) { - ret += fprintf(fp, "%cguest sys", *sep); - ret += fprintf(fp, "%cguest us", *sep); - } - } else { - ret += fprintf(fp, " sys "); - ret += fprintf(fp, " us "); - if (perf_guest) { - ret += fprintf(fp, " guest sys "); - ret += fprintf(fp, " guest us "); - } - } - } - - if (symbol_conf.show_nr_samples) { - if (sep) - fprintf(fp, "%cSamples", *sep); - else - fputs(" Samples ", fp); - } - - if (symbol_conf.show_total_period) { - if (sep) - ret += fprintf(fp, "%cPeriod", *sep); - else - ret += fprintf(fp, " Period "); - } - - if (pair) { - if (sep) - ret += fprintf(fp, "%cDelta", *sep); - else - ret += fprintf(fp, " Delta "); - - if (show_displacement) { - if (sep) - ret += fprintf(fp, "%cDisplacement", *sep); - else - ret += fprintf(fp, " Displ"); - } - } - - list_for_each_entry(se, &hist_entry__sort_list, list) { - if (se->elide) - continue; - if (sep) { - fprintf(fp, "%c%s", *sep, se->se_header); - continue; - } - width = strlen(se->se_header); - if (symbol_conf.col_width_list_str) { - if (col_width) { - hists__set_col_len(hists, se->se_width_idx, - atoi(col_width)); - col_width = strchr(col_width, ','); - if (col_width) - ++col_width; - } - } - if (!hists__new_col_len(hists, se->se_width_idx, width)) - width = hists__col_len(hists, se->se_width_idx); - fprintf(fp, " %*s", width, se->se_header); - } - - fprintf(fp, "\n"); - if (max_rows && ++nr_rows >= max_rows) - goto out; - - if (sep) - goto print_entries; - - fprintf(fp, "# ........"); - if (symbol_conf.show_cpu_utilization) - fprintf(fp, " ....... 
......."); - if (symbol_conf.show_nr_samples) - fprintf(fp, " .........."); - if (symbol_conf.show_total_period) - fprintf(fp, " ............"); - if (pair) { - fprintf(fp, " .........."); - if (show_displacement) - fprintf(fp, " ....."); - } - list_for_each_entry(se, &hist_entry__sort_list, list) { - unsigned int i; - - if (se->elide) - continue; - - fprintf(fp, " "); - width = hists__col_len(hists, se->se_width_idx); - if (width == 0) - width = strlen(se->se_header); - for (i = 0; i < width; i++) - fprintf(fp, "."); - } - - fprintf(fp, "\n"); - if (max_rows && ++nr_rows >= max_rows) - goto out; - - fprintf(fp, "#\n"); - if (max_rows && ++nr_rows >= max_rows) - goto out; - -print_entries: - total_period = hists->stats.total_period; - - for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { - struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); - - if (h->filtered) - continue; - - if (show_displacement) { - if (h->pair != NULL) - displacement = ((long)h->pair->position - - (long)position); - else - displacement = 0; - ++position; - } - ret += hist_entry__fprintf(h, max_cols, hists, pair, show_displacement, - displacement, total_period, fp); - - if (symbol_conf.use_callchain) - ret += hist_entry__fprintf_callchain(h, hists, total_period, fp); - if (max_rows && ++nr_rows >= max_rows) - goto out; - - if (h->ms.map == NULL && verbose > 1) { - __map_groups__fprintf_maps(&h->thread->mg, - MAP__FUNCTION, verbose, fp); - fprintf(fp, "%.10s end\n", graph_dotted_line); - } - } -out: - free(rem_sq_bracket); - - return ret; -} - -/* - * See hists__fprintf to match the column widths - */ -unsigned int hists__sort_list_width(struct hists *hists) -{ - struct sort_entry *se; - int ret = 9; /* total % */ - - if (symbol_conf.show_cpu_utilization) { - ret += 7; /* count_sys % */ - ret += 6; /* count_us % */ - if (perf_guest) { - ret += 13; /* count_guest_sys % */ - ret += 12; /* count_guest_us % */ - } - } - - if (symbol_conf.show_nr_samples) - ret += 11; - - if (symbol_conf.show_total_period) - ret += 13; - - list_for_each_entry(se, &hist_entry__sort_list, list) - if (!se->elide) - ret += 2 + hists__col_len(hists, se->se_width_idx); - - if (verbose) /* Addr + origin */ - ret += 3 + BITS_PER_LONG / 4; - - return ret; -} - static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, enum hist_filter filter) { @@ -1342,25 +695,3 @@ void hists__inc_nr_events(struct hists *hists, u32 type) ++hists->stats.nr_events[0]; ++hists->stats.nr_events[type]; } - -size_t hists__fprintf_nr_events(struct hists *hists, FILE *fp) -{ - int i; - size_t ret = 0; - - for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) { - const char *name; - - if (hists->stats.nr_events[i] == 0) - continue; - - name = perf_event__name(i); - if (!strcmp(name, "UNKNOWN")) - continue; - - ret += fprintf(fp, "%16s events: %10d\n", name, - hists->stats.nr_events[i]); - } - - return ret; -} diff --git a/trunk/tools/perf/util/hist.h b/trunk/tools/perf/util/hist.h index 0b096c27a419..f011ad4756e8 100644 --- a/trunk/tools/perf/util/hist.h +++ b/trunk/tools/perf/util/hist.h @@ -75,8 +75,8 @@ struct hist_entry *__hists__add_entry(struct hists *self, struct symbol *parent, u64 period); int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right); int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right); -int hist_entry__snprintf(struct hist_entry *self, char *bf, size_t size, - struct hists *hists); +int hist_entry__sort_snprintf(struct hist_entry *self, char *bf, size_t size, + struct 
hists *hists); void hist_entry__free(struct hist_entry *); struct hist_entry *__hists__add_branch_entry(struct hists *self, @@ -112,25 +112,66 @@ void hists__filter_by_symbol(struct hists *hists); u16 hists__col_len(struct hists *self, enum hist_column col); void hists__set_col_len(struct hists *self, enum hist_column col, u16 len); bool hists__new_col_len(struct hists *self, enum hist_column col, u16 len); +void hists__reset_col_len(struct hists *hists); +void hists__calc_col_len(struct hists *hists, struct hist_entry *he); + +struct perf_hpp { + char *buf; + size_t size; + u64 total_period; + const char *sep; + long displacement; + void *ptr; +}; + +struct perf_hpp_fmt { + bool cond; + int (*header)(struct perf_hpp *hpp); + int (*width)(struct perf_hpp *hpp); + int (*color)(struct perf_hpp *hpp, struct hist_entry *he); + int (*entry)(struct perf_hpp *hpp, struct hist_entry *he); +}; + +extern struct perf_hpp_fmt perf_hpp__format[]; + +enum { + PERF_HPP__OVERHEAD, + PERF_HPP__OVERHEAD_SYS, + PERF_HPP__OVERHEAD_US, + PERF_HPP__OVERHEAD_GUEST_SYS, + PERF_HPP__OVERHEAD_GUEST_US, + PERF_HPP__SAMPLES, + PERF_HPP__PERIOD, + PERF_HPP__DELTA, + PERF_HPP__DISPL, + + PERF_HPP__MAX_INDEX +}; + +void perf_hpp__init(bool need_pair, bool show_displacement); +int hist_entry__period_snprintf(struct perf_hpp *hpp, struct hist_entry *he, + bool color); struct perf_evlist; #ifdef NO_NEWT_SUPPORT static inline -int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __used, - const char *help __used, - void(*timer)(void *arg) __used, - void *arg __used, - int refresh __used) +int perf_evlist__tui_browse_hists(struct perf_evlist *evlist __maybe_unused, + const char *help __maybe_unused, + void(*timer)(void *arg) __maybe_unused, + void *arg __maybe_unused, + int refresh __maybe_unused) { return 0; } -static inline int hist_entry__tui_annotate(struct hist_entry *self __used, - int evidx __used, - void(*timer)(void *arg) __used, - void *arg __used, - int delay_secs __used) +static inline int hist_entry__tui_annotate(struct hist_entry *self + __maybe_unused, + int evidx __maybe_unused, + void(*timer)(void *arg) + __maybe_unused, + void *arg __maybe_unused, + int delay_secs __maybe_unused) { return 0; } @@ -148,11 +189,11 @@ int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, #ifdef NO_GTK2_SUPPORT static inline -int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __used, - const char *help __used, - void(*timer)(void *arg) __used, - void *arg __used, - int refresh __used) +int perf_evlist__gtk_browse_hists(struct perf_evlist *evlist __maybe_unused, + const char *help __maybe_unused, + void(*timer)(void *arg) __maybe_unused, + void *arg __maybe_unused, + int refresh __maybe_unused) { return 0; } diff --git a/trunk/tools/perf/util/include/linux/bitops.h b/trunk/tools/perf/util/include/linux/bitops.h index 587a230d2075..a55d8cf083c9 100644 --- a/trunk/tools/perf/util/include/linux/bitops.h +++ b/trunk/tools/perf/util/include/linux/bitops.h @@ -5,6 +5,10 @@ #include #include +#ifndef __WORDSIZE +#define __WORDSIZE (__SIZEOF_LONG__ * 8) +#endif + #define BITS_PER_LONG __WORDSIZE #define BITS_PER_BYTE 8 #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) diff --git a/trunk/tools/perf/util/include/linux/compiler.h b/trunk/tools/perf/util/include/linux/compiler.h index 547628e97f3d..96b919dae11c 100644 --- a/trunk/tools/perf/util/include/linux/compiler.h +++ b/trunk/tools/perf/util/include/linux/compiler.h @@ -9,6 +9,13 @@ #define __attribute_const__ #endif 
-#define __used __attribute__((__unused__)) +#ifndef __maybe_unused +#define __maybe_unused __attribute__((unused)) +#endif +#define __packed __attribute__((__packed__)) + +#ifndef __force +#define __force +#endif #endif diff --git a/trunk/tools/perf/util/include/linux/kernel.h b/trunk/tools/perf/util/include/linux/kernel.h index b6842c1d02a8..d8c927c868ee 100644 --- a/trunk/tools/perf/util/include/linux/kernel.h +++ b/trunk/tools/perf/util/include/linux/kernel.h @@ -8,8 +8,8 @@ #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) -#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1) -#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask)) +#define PERF_ALIGN(x, a) __PERF_ALIGN_MASK(x, (typeof(x))(a)-1) +#define __PERF_ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) #ifndef offsetof #define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) @@ -46,9 +46,22 @@ _min1 < _min2 ? _min1 : _min2; }) #endif +#ifndef roundup +#define roundup(x, y) ( \ +{ \ + const typeof(y) __y = y; \ + (((x) + (__y - 1)) / __y) * __y; \ +} \ +) +#endif + #ifndef BUG_ON +#ifdef NDEBUG +#define BUG_ON(cond) do { if (cond) {} } while (0) +#else #define BUG_ON(cond) assert(!(cond)) #endif +#endif /* * Both need more care to handle endianness diff --git a/trunk/tools/perf/util/include/linux/magic.h b/trunk/tools/perf/util/include/linux/magic.h new file mode 100644 index 000000000000..58b64ed4da12 --- /dev/null +++ b/trunk/tools/perf/util/include/linux/magic.h @@ -0,0 +1,12 @@ +#ifndef _PERF_LINUX_MAGIC_H_ +#define _PERF_LINUX_MAGIC_H_ + +#ifndef DEBUGFS_MAGIC +#define DEBUGFS_MAGIC 0x64626720 +#endif + +#ifndef SYSFS_MAGIC +#define SYSFS_MAGIC 0x62656572 +#endif + +#endif diff --git a/trunk/tools/perf/util/include/linux/rbtree.h b/trunk/tools/perf/util/include/linux/rbtree.h index 7a243a143037..2a030c5af3aa 100644 --- a/trunk/tools/perf/util/include/linux/rbtree.h +++ b/trunk/tools/perf/util/include/linux/rbtree.h @@ -1 +1,2 @@ +#include #include "../../../../include/linux/rbtree.h" diff --git a/trunk/tools/perf/util/include/linux/string.h b/trunk/tools/perf/util/include/linux/string.h index 3b2f5900276f..6f19c548ecc0 100644 --- a/trunk/tools/perf/util/include/linux/string.h +++ b/trunk/tools/perf/util/include/linux/string.h @@ -1 +1,3 @@ #include + +void *memdup(const void *src, size_t len); diff --git a/trunk/tools/perf/util/include/linux/types.h b/trunk/tools/perf/util/include/linux/types.h index 12de3b8112f9..eb464786c084 100644 --- a/trunk/tools/perf/util/include/linux/types.h +++ b/trunk/tools/perf/util/include/linux/types.h @@ -3,6 +3,14 @@ #include +#ifndef __bitwise +#define __bitwise +#endif + +#ifndef __le32 +typedef __u32 __bitwise __le32; +#endif + #define DECLARE_BITMAP(name,bits) \ unsigned long name[BITS_TO_LONGS(bits)] diff --git a/trunk/tools/perf/util/intlist.c b/trunk/tools/perf/util/intlist.c new file mode 100644 index 000000000000..9d0740024ba8 --- /dev/null +++ b/trunk/tools/perf/util/intlist.c @@ -0,0 +1,101 @@ +/* + * Based on intlist.c by: + * (c) 2009 Arnaldo Carvalho de Melo + * + * Licensed under the GPLv2. 
+ */ + +#include +#include +#include + +#include "intlist.h" + +static struct rb_node *intlist__node_new(struct rblist *rblist __maybe_unused, + const void *entry) +{ + int i = (int)((long)entry); + struct rb_node *rc = NULL; + struct int_node *node = malloc(sizeof(*node)); + + if (node != NULL) { + node->i = i; + rc = &node->rb_node; + } + + return rc; +} + +static void int_node__delete(struct int_node *ilist) +{ + free(ilist); +} + +static void intlist__node_delete(struct rblist *rblist __maybe_unused, + struct rb_node *rb_node) +{ + struct int_node *node = container_of(rb_node, struct int_node, rb_node); + + int_node__delete(node); +} + +static int intlist__node_cmp(struct rb_node *rb_node, const void *entry) +{ + int i = (int)((long)entry); + struct int_node *node = container_of(rb_node, struct int_node, rb_node); + + return node->i - i; +} + +int intlist__add(struct intlist *ilist, int i) +{ + return rblist__add_node(&ilist->rblist, (void *)((long)i)); +} + +void intlist__remove(struct intlist *ilist, struct int_node *node) +{ + rblist__remove_node(&ilist->rblist, &node->rb_node); +} + +struct int_node *intlist__find(struct intlist *ilist, int i) +{ + struct int_node *node = NULL; + struct rb_node *rb_node = rblist__find(&ilist->rblist, (void *)((long)i)); + + if (rb_node) + node = container_of(rb_node, struct int_node, rb_node); + + return node; +} + +struct intlist *intlist__new(void) +{ + struct intlist *ilist = malloc(sizeof(*ilist)); + + if (ilist != NULL) { + rblist__init(&ilist->rblist); + ilist->rblist.node_cmp = intlist__node_cmp; + ilist->rblist.node_new = intlist__node_new; + ilist->rblist.node_delete = intlist__node_delete; + } + + return ilist; +} + +void intlist__delete(struct intlist *ilist) +{ + if (ilist != NULL) + rblist__delete(&ilist->rblist); +} + +struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx) +{ + struct int_node *node = NULL; + struct rb_node *rb_node; + + rb_node = rblist__entry(&ilist->rblist, idx); + if (rb_node) + node = container_of(rb_node, struct int_node, rb_node); + + return node; +} diff --git a/trunk/tools/perf/util/intlist.h b/trunk/tools/perf/util/intlist.h new file mode 100644 index 000000000000..6d63ab90db50 --- /dev/null +++ b/trunk/tools/perf/util/intlist.h @@ -0,0 +1,75 @@ +#ifndef __PERF_INTLIST_H +#define __PERF_INTLIST_H + +#include +#include + +#include "rblist.h" + +struct int_node { + struct rb_node rb_node; + int i; +}; + +struct intlist { + struct rblist rblist; +}; + +struct intlist *intlist__new(void); +void intlist__delete(struct intlist *ilist); + +void intlist__remove(struct intlist *ilist, struct int_node *in); +int intlist__add(struct intlist *ilist, int i); + +struct int_node *intlist__entry(const struct intlist *ilist, unsigned int idx); +struct int_node *intlist__find(struct intlist *ilist, int i); + +static inline bool intlist__has_entry(struct intlist *ilist, int i) +{ + return intlist__find(ilist, i) != NULL; +} + +static inline bool intlist__empty(const struct intlist *ilist) +{ + return rblist__empty(&ilist->rblist); +} + +static inline unsigned int intlist__nr_entries(const struct intlist *ilist) +{ + return rblist__nr_entries(&ilist->rblist); +} + +/* For intlist iteration */ +static inline struct int_node *intlist__first(struct intlist *ilist) +{ + struct rb_node *rn = rb_first(&ilist->rblist.entries); + return rn ? 
rb_entry(rn, struct int_node, rb_node) : NULL; +} +static inline struct int_node *intlist__next(struct int_node *in) +{ + struct rb_node *rn; + if (!in) + return NULL; + rn = rb_next(&in->rb_node); + return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; +} + +/** + * intlist_for_each - iterate over a intlist + * @pos: the &struct int_node to use as a loop cursor. + * @ilist: the &struct intlist for loop. + */ +#define intlist__for_each(pos, ilist) \ + for (pos = intlist__first(ilist); pos; pos = intlist__next(pos)) + +/** + * intlist_for_each_safe - iterate over a intlist safe against removal of + * int_node + * @pos: the &struct int_node to use as a loop cursor. + * @n: another &struct int_node to use as temporary storage. + * @ilist: the &struct intlist for loop. + */ +#define intlist__for_each_safe(pos, n, ilist) \ + for (pos = intlist__first(ilist), n = intlist__next(pos); pos;\ + pos = n, n = intlist__next(n)) +#endif /* __PERF_INTLIST_H */ diff --git a/trunk/tools/perf/util/map.c b/trunk/tools/perf/util/map.c index cc33486ad9e2..ead5316b3f89 100644 --- a/trunk/tools/perf/util/map.c +++ b/trunk/tools/perf/util/map.c @@ -9,6 +9,7 @@ #include "map.h" #include "thread.h" #include "strlist.h" +#include "vdso.h" const char *map_type__name[MAP__NR_TYPES] = { [MAP__FUNCTION] = "Functions", @@ -23,7 +24,6 @@ static inline int is_anon_memory(const char *filename) static inline int is_no_dso_memory(const char *filename) { return !strcmp(filename, "[stack]") || - !strcmp(filename, "[vdso]") || !strcmp(filename, "[heap]"); } @@ -52,9 +52,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, if (self != NULL) { char newfilename[PATH_MAX]; struct dso *dso; - int anon, no_dso; + int anon, no_dso, vdso; anon = is_anon_memory(filename); + vdso = is_vdso_map(filename); no_dso = is_no_dso_memory(filename); if (anon) { @@ -62,7 +63,12 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, filename = newfilename; } - dso = __dsos__findnew(dsos__list, filename); + if (vdso) { + pgoff = 0; + dso = vdso__dso_findnew(dsos__list); + } else + dso = __dsos__findnew(dsos__list, filename); + if (dso == NULL) goto out_delete; @@ -86,6 +92,25 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, return NULL; } +/* + * Constructor variant for modules (where we know from /proc/modules where + * they are loaded) and for vmlinux, where only after we load all the + * symbols we'll know where it starts and ends. + */ +struct map *map__new2(u64 start, struct dso *dso, enum map_type type) +{ + struct map *map = calloc(1, (sizeof(*map) + + (dso->kernel ? 
sizeof(struct kmap) : 0))); + if (map != NULL) { + /* + * ->end will be filled after we load all the symbols + */ + map__init(map, type, start, 0, 0, dso); + } + + return map; +} + void map__delete(struct map *self) { free(self); @@ -137,6 +162,7 @@ int map__load(struct map *self, symbol_filter_t filter) pr_warning(", continuing without symbols\n"); return -1; } else if (nr == 0) { +#ifndef NO_LIBELF_SUPPORT const size_t len = strlen(name); const size_t real_len = len - sizeof(DSO__DELETED); @@ -149,7 +175,7 @@ int map__load(struct map *self, symbol_filter_t filter) pr_warning("no symbols found in %s, maybe install " "a debug package?\n", name); } - +#endif return -1; } /* @@ -217,15 +243,14 @@ size_t map__fprintf(struct map *self, FILE *fp) size_t map__fprintf_dsoname(struct map *map, FILE *fp) { - const char *dsoname; + const char *dsoname = "[unknown]"; if (map && map->dso && (map->dso->name || map->dso->long_name)) { if (symbol_conf.show_kernel_path && map->dso->long_name) dsoname = map->dso->long_name; else if (map->dso->name) dsoname = map->dso->name; - } else - dsoname = "[unknown]"; + } return fprintf(fp, "%s", dsoname); } @@ -242,14 +267,6 @@ u64 map__rip_2objdump(struct map *map, u64 rip) return addr; } -u64 map__objdump_2ip(struct map *map, u64 addr) -{ - u64 ip = map->dso->adjust_symbols ? - addr : - map->unmap_ip(map, addr); /* RIP -> IP */ - return ip; -} - void map_groups__init(struct map_groups *mg) { int i; diff --git a/trunk/tools/perf/util/map.h b/trunk/tools/perf/util/map.h index 03a1e9b08b21..d2250fc97e25 100644 --- a/trunk/tools/perf/util/map.h +++ b/trunk/tools/perf/util/map.h @@ -96,7 +96,7 @@ static inline u64 map__unmap_ip(struct map *map, u64 ip) return ip + map->start - map->pgoff; } -static inline u64 identity__map_ip(struct map *map __used, u64 ip) +static inline u64 identity__map_ip(struct map *map __maybe_unused, u64 ip) { return ip; } @@ -104,7 +104,6 @@ static inline u64 identity__map_ip(struct map *map __used, u64 ip) /* rip/ip <-> addr suitable for passing to `objdump --start-address=` */ u64 map__rip_2objdump(struct map *map, u64 rip); -u64 map__objdump_2ip(struct map *map, u64 addr); struct symbol; @@ -115,6 +114,7 @@ void map__init(struct map *self, enum map_type type, struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, u64 pgoff, u32 pid, char *filename, enum map_type type); +struct map *map__new2(u64 start, struct dso *dso, enum map_type type); void map__delete(struct map *self); struct map *map__clone(struct map *self); int map__overlap(struct map *l, struct map *r); @@ -157,9 +157,12 @@ int machine__init(struct machine *self, const char *root_dir, pid_t pid); void machine__exit(struct machine *self); void machine__delete(struct machine *self); +struct perf_evsel; +struct perf_sample; int machine__resolve_callchain(struct machine *machine, + struct perf_evsel *evsel, struct thread *thread, - struct ip_callchain *chain, + struct perf_sample *sample, struct symbol **parent); int maps__set_kallsyms_ref_reloc_sym(struct map **maps, const char *symbol_name, u64 addr); diff --git a/trunk/tools/perf/util/parse-events-test.c b/trunk/tools/perf/util/parse-events-test.c index 1b997d2b89ce..28c18d1d52c3 100644 --- a/trunk/tools/perf/util/parse-events-test.c +++ b/trunk/tools/perf/util/parse-events-test.c @@ -13,16 +13,17 @@ do { \ } \ } while (0) +#define PERF_TP_SAMPLE_TYPE (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | \ + PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD) + static int test__checkevent_tracepoint(struct perf_evlist *evlist) { - struct 
perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", - (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) == - evsel->attr.sample_type); + PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); return 0; } @@ -37,8 +38,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", - (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) - == evsel->attr.sample_type); + PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); } @@ -47,8 +47,7 @@ static int test__checkevent_tracepoint_multi(struct perf_evlist *evlist) static int test__checkevent_raw(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); @@ -58,8 +57,7 @@ static int test__checkevent_raw(struct perf_evlist *evlist) static int test__checkevent_numeric(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); @@ -69,8 +67,7 @@ static int test__checkevent_numeric(struct perf_evlist *evlist) static int test__checkevent_symbolic_name(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); @@ -81,8 +78,7 @@ static int test__checkevent_symbolic_name(struct perf_evlist *evlist) static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); @@ -99,8 +95,7 @@ static int test__checkevent_symbolic_name_config(struct perf_evlist *evlist) static int test__checkevent_symbolic_alias(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type); @@ -111,8 +106,7 @@ static int test__checkevent_symbolic_alias(struct perf_evlist *evlist) static int test__checkevent_genhw(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", 
PERF_TYPE_HW_CACHE == evsel->attr.type); @@ -122,8 +116,7 @@ static int test__checkevent_genhw(struct perf_evlist *evlist) static int test__checkevent_breakpoint(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); @@ -137,8 +130,7 @@ static int test__checkevent_breakpoint(struct perf_evlist *evlist) static int test__checkevent_breakpoint_x(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_BREAKPOINT == evsel->attr.type); @@ -151,8 +143,7 @@ static int test__checkevent_breakpoint_x(struct perf_evlist *evlist) static int test__checkevent_breakpoint_r(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", @@ -167,8 +158,7 @@ static int test__checkevent_breakpoint_r(struct perf_evlist *evlist) static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", @@ -183,8 +173,7 @@ static int test__checkevent_breakpoint_w(struct perf_evlist *evlist) static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", @@ -199,8 +188,7 @@ static int test__checkevent_breakpoint_rw(struct perf_evlist *evlist) static int test__checkevent_tracepoint_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); @@ -231,8 +219,7 @@ test__checkevent_tracepoint_multi_modifier(struct perf_evlist *evlist) static int test__checkevent_raw_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); @@ -244,8 +231,7 @@ static int test__checkevent_raw_modifier(struct perf_evlist *evlist) static int test__checkevent_numeric_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); @@ -257,8 +243,7 @@ static int 
test__checkevent_numeric_modifier(struct perf_evlist *evlist) static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); @@ -270,8 +255,7 @@ static int test__checkevent_symbolic_name_modifier(struct perf_evlist *evlist) static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); @@ -281,8 +265,7 @@ static int test__checkevent_exclude_host_modifier(struct perf_evlist *evlist) static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); @@ -292,8 +275,7 @@ static int test__checkevent_exclude_guest_modifier(struct perf_evlist *evlist) static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); @@ -305,8 +287,7 @@ static int test__checkevent_symbolic_alias_modifier(struct perf_evlist *evlist) static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); @@ -318,75 +299,71 @@ static int test__checkevent_genhw_modifier(struct perf_evlist *evlist) static int test__checkevent_breakpoint_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:u")); + !strcmp(perf_evsel__name(evsel), "mem:0:u")); return test__checkevent_breakpoint(evlist); } static int test__checkevent_breakpoint_x_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong name", - 
!strcmp(perf_evsel__name(evsel), "mem:0x0:x:k")); + !strcmp(perf_evsel__name(evsel), "mem:0:x:k")); return test__checkevent_breakpoint_x(evlist); } static int test__checkevent_breakpoint_r_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:r:hp")); + !strcmp(perf_evsel__name(evsel), "mem:0:r:hp")); return test__checkevent_breakpoint_r(evlist); } static int test__checkevent_breakpoint_w_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:w:up")); + !strcmp(perf_evsel__name(evsel), "mem:0:w:up")); return test__checkevent_breakpoint_w(evlist); } static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip); TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "mem:0x0:rw:kp")); + !strcmp(perf_evsel__name(evsel), "mem:0:rw:kp")); return test__checkevent_breakpoint_rw(evlist); } @@ -394,8 +371,7 @@ static int test__checkevent_breakpoint_rw_modifier(struct perf_evlist *evlist) static int test__checkevent_pmu(struct perf_evlist *evlist) { - struct perf_evsel *evsel = list_entry(evlist->entries.next, - struct perf_evsel, node); + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 1 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); @@ -409,12 +385,11 @@ static int test__checkevent_pmu(struct perf_evlist *evlist) static int test__checkevent_list(struct perf_evlist *evlist) { - struct perf_evsel *evsel; + struct perf_evsel *evsel = perf_evlist__first(evlist); TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); /* r1 */ - evsel = list_entry(evlist->entries.next, struct perf_evsel, node); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); TEST_ASSERT_VAL("wrong config1", 0 == evsel->attr.config1); @@ -425,11 +400,10 @@ static int test__checkevent_list(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); /* syscalls:sys_enter_open:k */ - evsel = list_entry(evsel->node.next, struct perf_evsel, node); + evsel = perf_evsel__next(evsel); TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); TEST_ASSERT_VAL("wrong sample_type", - 
(PERF_SAMPLE_RAW | PERF_SAMPLE_TIME | PERF_SAMPLE_CPU) == - evsel->attr.sample_type); + PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); @@ -437,7 +411,7 @@ static int test__checkevent_list(struct perf_evlist *evlist) TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); /* 1:1:hp */ - evsel = list_entry(evsel->node.next, struct perf_evsel, node); + evsel = perf_evsel__next(evsel); TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); @@ -450,22 +424,21 @@ static int test__checkevent_list(struct perf_evlist *evlist) static int test__checkevent_pmu_name(struct perf_evlist *evlist) { - struct perf_evsel *evsel; + struct perf_evsel *evsel = perf_evlist__first(evlist); /* cpu/config=1,name=krava/u */ - evsel = list_entry(evlist->entries.next, struct perf_evsel, node); TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); TEST_ASSERT_VAL("wrong config", 1 == evsel->attr.config); TEST_ASSERT_VAL("wrong name", !strcmp(perf_evsel__name(evsel), "krava")); /* cpu/config=2/u" */ - evsel = list_entry(evsel->node.next, struct perf_evsel, node); + evsel = perf_evsel__next(evsel); TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); TEST_ASSERT_VAL("wrong type", PERF_TYPE_RAW == evsel->attr.type); TEST_ASSERT_VAL("wrong config", 2 == evsel->attr.config); TEST_ASSERT_VAL("wrong name", - !strcmp(perf_evsel__name(evsel), "raw 0x2:u")); + !strcmp(perf_evsel__name(evsel), "cpu/config=2/u")); return 0; } @@ -513,6 +486,280 @@ static int test__checkterms_simple(struct list_head *terms) return 0; } +static int test__group1(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + + /* instructions:k */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + + /* cycles:upp */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + + return 0; +} + +static int test__group2(struct perf_evlist *evlist) +{ + 
struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 3 == evlist->nr_entries); + + /* faults + :ku modifier */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_SOFTWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_SW_PAGE_FAULTS == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + + /* cache-references + :u modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CACHE_REFERENCES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + + /* cycles:k */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + + return 0; +} + +static int test__group3(struct perf_evlist *evlist __maybe_unused) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); + + /* group1 syscalls:sys_enter_open:H */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type); + TEST_ASSERT_VAL("wrong sample_type", + PERF_TP_SAMPLE_TYPE == evsel->attr.sample_type); + TEST_ASSERT_VAL("wrong sample_period", 1 == evsel->attr.sample_period); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong group name", + !strcmp(leader->group_name, "group1")); + + /* group1 cycles:kppp */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + 
TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + + /* group2 cycles + G modifier */ + evsel = leader = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + TEST_ASSERT_VAL("wrong group name", + !strcmp(leader->group_name, "group2")); + + /* group2 1:3 + G modifier */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", 1 == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", 3 == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + + /* instructions:u */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + + return 0; +} + +static int test__group4(struct perf_evlist *evlist __maybe_unused) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 2 == evlist->nr_entries); + + /* cycles:u + p */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + + /* instructions:kp + p */ 
+ evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + + return 0; +} + +static int test__group5(struct perf_evlist *evlist __maybe_unused) +{ + struct perf_evsel *evsel, *leader; + + TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries); + + /* cycles + G */ + evsel = leader = perf_evlist__first(evlist); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + + /* instructions + G */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + + /* cycles:G */ + evsel = leader = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong group name", !evsel->group_name); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + + /* instructions:G */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_INSTRUCTIONS == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest); + 
TEST_ASSERT_VAL("wrong exclude host", evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == leader); + + /* cycles */ + evsel = perf_evsel__next(evsel); + TEST_ASSERT_VAL("wrong type", PERF_TYPE_HARDWARE == evsel->attr.type); + TEST_ASSERT_VAL("wrong config", + PERF_COUNT_HW_CPU_CYCLES == evsel->attr.config); + TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user); + TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel); + TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv); + TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest); + TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host); + TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip); + TEST_ASSERT_VAL("wrong leader", evsel->leader == NULL); + + return 0; +} + struct test__event_st { const char *name; __u32 type; @@ -632,6 +879,26 @@ static struct test__event_st test__events[] = { .name = "mem:0:rw:kp", .check = test__checkevent_breakpoint_rw_modifier, }, + [28] = { + .name = "{instructions:k,cycles:upp}", + .check = test__group1, + }, + [29] = { + .name = "{faults:k,cache-references}:u,cycles:k", + .check = test__group2, + }, + [30] = { + .name = "group1{syscalls:sys_enter_open:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u", + .check = test__group3, + }, + [31] = { + .name = "{cycles:u,instructions:kp}:p", + .check = test__group4, + }, + [32] = { + .name = "{cycles,instructions}:G,{cycles:G,instructions:G},cycles", + .check = test__group5, + }, }; static struct test__event_st test__events_pmu[] = { @@ -658,9 +925,6 @@ static struct test__term test__terms[] = { }, }; -#define TEST__TERMS_CNT (sizeof(test__terms) / \ - sizeof(struct test__term)) - static int test_event(struct test__event_st *e) { struct perf_evlist *evlist; @@ -685,19 +949,19 @@ static int test_event(struct test__event_st *e) static int test_events(struct test__event_st *events, unsigned cnt) { - int ret = 0; + int ret1, ret2 = 0; unsigned i; for (i = 0; i < cnt; i++) { struct test__event_st *e = &events[i]; pr_debug("running test %d '%s'\n", i, e->name); - ret = test_event(e); - if (ret) - break; + ret1 = test_event(e); + if (ret1) + ret2 = ret1; } - return ret; + return ret2; } static int test_term(struct test__term *t) @@ -752,19 +1016,19 @@ static int test_pmu(void) ret = stat(path, &st); if (ret) - pr_debug("ommiting PMU cpu tests\n"); + pr_debug("omitting PMU cpu tests\n"); return !ret; } int parse_events__test(void) { - int ret; + int ret1, ret2 = 0; #define TEST_EVENTS(tests) \ do { \ - ret = test_events(tests, ARRAY_SIZE(tests)); \ - if (ret) \ - return ret; \ + ret1 = test_events(tests, ARRAY_SIZE(tests)); \ + if (!ret2) \ + ret2 = ret1; \ } while (0) TEST_EVENTS(test__events); @@ -772,5 +1036,9 @@ do { \ if (test_pmu()) TEST_EVENTS(test__events_pmu); - return test_terms(test__terms, ARRAY_SIZE(test__terms)); + ret1 = test_terms(test__terms, ARRAY_SIZE(test__terms)); + if (!ret2) + ret2 = ret1; + + return ret2; } diff --git a/trunk/tools/perf/util/parse-events.c b/trunk/tools/perf/util/parse-events.c index 74a5af4d33ec..aed38e4b9dfa 100644 --- a/trunk/tools/perf/util/parse-events.c +++ b/trunk/tools/perf/util/parse-events.c @@ -239,8 +239,11 @@ const char *event_type(int type) return "unknown"; } -static int add_event(struct list_head **_list, int *idx, - struct perf_event_attr *attr, char *name) + + +static int __add_event(struct list_head **_list, int *idx, + struct perf_event_attr *attr, + char 
*name, struct cpu_map *cpus) { struct perf_evsel *evsel; struct list_head *list = *_list; @@ -260,6 +263,7 @@ static int add_event(struct list_head **_list, int *idx, return -ENOMEM; } + evsel->cpus = cpus; if (name) evsel->name = strdup(name); list_add_tail(&evsel->node, list); @@ -267,6 +271,12 @@ static int add_event(struct list_head **_list, int *idx, return 0; } +static int add_event(struct list_head **_list, int *idx, + struct perf_event_attr *attr, char *name) +{ + return __add_event(_list, idx, attr, name, NULL); +} + static int parse_aliases(char *str, const char *names[][PERF_EVSEL__MAX_ALIASES], int size) { int i, j; @@ -308,7 +318,7 @@ int parse_events_add_cache(struct list_head **list, int *idx, for (i = 0; (i < 2) && (op_result[i]); i++) { char *str = op_result[i]; - snprintf(name + n, MAX_NAME_LEN - n, "-%s\n", str); + n += snprintf(name + n, MAX_NAME_LEN - n, "-%s", str); if (cache_op == -1) { cache_op = parse_aliases(str, perf_evsel__hw_cache_op, @@ -346,42 +356,28 @@ int parse_events_add_cache(struct list_head **list, int *idx, return add_event(list, idx, &attr, name); } -static int add_tracepoint(struct list_head **list, int *idx, +static int add_tracepoint(struct list_head **listp, int *idx, char *sys_name, char *evt_name) { - struct perf_event_attr attr; - char name[MAX_NAME_LEN]; - char evt_path[MAXPATHLEN]; - char id_buf[4]; - u64 id; - int fd; - - snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", tracing_events_path, - sys_name, evt_name); - - fd = open(evt_path, O_RDONLY); - if (fd < 0) - return -1; + struct perf_evsel *evsel; + struct list_head *list = *listp; - if (read(fd, id_buf, sizeof(id_buf)) < 0) { - close(fd); - return -1; + if (!list) { + list = malloc(sizeof(*list)); + if (!list) + return -ENOMEM; + INIT_LIST_HEAD(list); } - close(fd); - id = atoll(id_buf); - - memset(&attr, 0, sizeof(attr)); - attr.config = id; - attr.type = PERF_TYPE_TRACEPOINT; - attr.sample_type |= PERF_SAMPLE_RAW; - attr.sample_type |= PERF_SAMPLE_TIME; - attr.sample_type |= PERF_SAMPLE_CPU; - attr.sample_type |= PERF_SAMPLE_PERIOD; - attr.sample_period = 1; + evsel = perf_evsel__newtp(sys_name, evt_name, (*idx)++); + if (!evsel) { + free(list); + return -ENOMEM; + } - snprintf(name, MAX_NAME_LEN, "%s:%s", sys_name, evt_name); - return add_event(list, idx, &attr, name); + list_add_tail(&evsel->node, list); + *listp = list; + return 0; } static int add_tracepoint_multi(struct list_head **list, int *idx, @@ -551,7 +547,7 @@ static int config_attr(struct perf_event_attr *attr, } int parse_events_add_numeric(struct list_head **list, int *idx, - unsigned long type, unsigned long config, + u32 type, u64 config, struct list_head *head_config) { struct perf_event_attr attr; @@ -607,8 +603,23 @@ int parse_events_add_pmu(struct list_head **list, int *idx, if (perf_pmu__config(pmu, &attr, head_config)) return -EINVAL; - return add_event(list, idx, &attr, - pmu_event_name(head_config)); + return __add_event(list, idx, &attr, pmu_event_name(head_config), + pmu->cpus); +} + +int parse_events__modifier_group(struct list_head *list, + char *event_mod) +{ + return parse_events__modifier_event(list, event_mod, true); +} + +void parse_events__set_leader(char *name, struct list_head *list) +{ + struct perf_evsel *leader; + + __perf_evlist__set_leader(list); + leader = list_entry(list->next, struct perf_evsel, node); + leader->group_name = name ? 
strdup(name) : NULL; } void parse_events_update_lists(struct list_head *list_event, @@ -616,21 +627,45 @@ void parse_events_update_lists(struct list_head *list_event, { /* * Called for single event definition. Update the - * 'all event' list, and reinit the 'signle event' + * 'all event' list, and reinit the 'single event' * list, for next event definition. */ list_splice_tail(list_event, list_all); free(list_event); } -int parse_events_modifier(struct list_head *list, char *str) +struct event_modifier { + int eu; + int ek; + int eh; + int eH; + int eG; + int precise; + int exclude_GH; +}; + +static int get_event_modifier(struct event_modifier *mod, char *str, + struct perf_evsel *evsel) { - struct perf_evsel *evsel; - int exclude = 0, exclude_GH = 0; - int eu = 0, ek = 0, eh = 0, eH = 0, eG = 0, precise = 0; + int eu = evsel ? evsel->attr.exclude_user : 0; + int ek = evsel ? evsel->attr.exclude_kernel : 0; + int eh = evsel ? evsel->attr.exclude_hv : 0; + int eH = evsel ? evsel->attr.exclude_host : 0; + int eG = evsel ? evsel->attr.exclude_guest : 0; + int precise = evsel ? evsel->attr.precise_ip : 0; - if (str == NULL) - return 0; + int exclude = eu | ek | eh; + int exclude_GH = evsel ? evsel->exclude_GH : 0; + + /* + * We are here for group and 'GH' was not set as event + * modifier and whatever event/group modifier override + * default 'GH' setup. + */ + if (evsel && !exclude_GH) + eH = eG = 0; + + memset(mod, 0, sizeof(*mod)); while (*str) { if (*str == 'u') { @@ -674,13 +709,51 @@ int parse_events_modifier(struct list_head *list, char *str) if (precise > 3) return -EINVAL; + mod->eu = eu; + mod->ek = ek; + mod->eh = eh; + mod->eH = eH; + mod->eG = eG; + mod->precise = precise; + mod->exclude_GH = exclude_GH; + return 0; +} + +int parse_events__modifier_event(struct list_head *list, char *str, bool add) +{ + struct perf_evsel *evsel; + struct event_modifier mod; + + if (str == NULL) + return 0; + + if (!add && get_event_modifier(&mod, str, NULL)) + return -EINVAL; + list_for_each_entry(evsel, list, node) { - evsel->attr.exclude_user = eu; - evsel->attr.exclude_kernel = ek; - evsel->attr.exclude_hv = eh; - evsel->attr.precise_ip = precise; - evsel->attr.exclude_host = eH; - evsel->attr.exclude_guest = eG; + + if (add && get_event_modifier(&mod, str, evsel)) + return -EINVAL; + + evsel->attr.exclude_user = mod.eu; + evsel->attr.exclude_kernel = mod.ek; + evsel->attr.exclude_hv = mod.eh; + evsel->attr.precise_ip = mod.precise; + evsel->attr.exclude_host = mod.eH; + evsel->attr.exclude_guest = mod.eG; + evsel->exclude_GH = mod.exclude_GH; + } + + return 0; +} + +int parse_events_name(struct list_head *list, char *name) +{ + struct perf_evsel *evsel; + + list_for_each_entry(evsel, list, node) { + if (!evsel->name) + evsel->name = strdup(name); } return 0; @@ -730,7 +803,8 @@ int parse_events_terms(struct list_head *terms, const char *str) return ret; } -int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) +int parse_events(struct perf_evlist *evlist, const char *str, + int unset __maybe_unused) { struct parse_events_data__events data = { .list = LIST_HEAD_INIT(data.list), @@ -756,20 +830,20 @@ int parse_events(struct perf_evlist *evlist, const char *str, int unset __used) } int parse_events_option(const struct option *opt, const char *str, - int unset __used) + int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; return parse_events(evlist, str, unset); } int parse_filter(const struct option *opt, const char *str, - int 
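For reference, the modifier letters handled by get_event_modifier() above map onto perf_event_attr as exercised by the tests earlier in this patch (a summary of what the tests check, not an exhaustive spec):

/*
 * u -> user space only    (exclude_kernel = exclude_hv = 1)
 * k -> kernel only        (exclude_user  = exclude_hv = 1)
 * h -> hypervisor only    (exclude_user  = exclude_kernel = 1)
 * H -> host only          (exclude_guest = 1)
 * G -> guest only         (exclude_host  = 1)
 * p -> bump precise_ip; repeatable, rejected above 3 (see the
 *      'if (precise > 3) return -EINVAL' check above)
 *
 * e.g. "cycles:upp" ends up with exclude_kernel = exclude_hv = 1 and
 * precise_ip = 2, matching test__group1().
 */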
unset __used) + int unset __maybe_unused) { struct perf_evlist *evlist = *(struct perf_evlist **)opt->value; struct perf_evsel *last = NULL; if (evlist->nr_entries > 0) - last = list_entry(evlist->entries.prev, struct perf_evsel, node); + last = perf_evlist__last(evlist); if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { fprintf(stderr, @@ -799,7 +873,8 @@ static const char * const event_type_descriptors[] = { * Print the events from /tracing/events */ -void print_tracepoint_events(const char *subsys_glob, const char *event_glob) +void print_tracepoint_events(const char *subsys_glob, const char *event_glob, + bool name_only) { DIR *sys_dir, *evt_dir; struct dirent *sys_next, *evt_next, sys_dirent, evt_dirent; @@ -829,6 +904,11 @@ void print_tracepoint_events(const char *subsys_glob, const char *event_glob) !strglobmatch(evt_dirent.d_name, event_glob)) continue; + if (name_only) { + printf("%s:%s ", sys_dirent.d_name, evt_dirent.d_name); + continue; + } + snprintf(evt_path, MAXPATHLEN, "%s:%s", sys_dirent.d_name, evt_dirent.d_name); printf(" %-50s [%s]\n", evt_path, @@ -906,7 +986,7 @@ void print_events_type(u8 type) __print_events_type(type, event_symbols_hw, PERF_COUNT_HW_MAX); } -int print_hwcache_events(const char *event_glob) +int print_hwcache_events(const char *event_glob, bool name_only) { unsigned int type, op, i, printed = 0; char name[64]; @@ -923,8 +1003,11 @@ int print_hwcache_events(const char *event_glob) if (event_glob != NULL && !strglobmatch(name, event_glob)) continue; - printf(" %-50s [%s]\n", name, - event_type_descriptors[PERF_TYPE_HW_CACHE]); + if (name_only) + printf("%s ", name); + else + printf(" %-50s [%s]\n", name, + event_type_descriptors[PERF_TYPE_HW_CACHE]); ++printed; } } @@ -934,7 +1017,8 @@ int print_hwcache_events(const char *event_glob) } static void print_symbol_events(const char *event_glob, unsigned type, - struct event_symbol *syms, unsigned max) + struct event_symbol *syms, unsigned max, + bool name_only) { unsigned i, printed = 0; char name[MAX_NAME_LEN]; @@ -946,6 +1030,11 @@ static void print_symbol_events(const char *event_glob, unsigned type, (syms->alias && strglobmatch(syms->alias, event_glob)))) continue; + if (name_only) { + printf("%s ", syms->symbol); + continue; + } + if (strlen(syms->alias)) snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); else @@ -963,39 +1052,42 @@ static void print_symbol_events(const char *event_glob, unsigned type, /* * Print the help text for the event symbols: */ -void print_events(const char *event_glob) +void print_events(const char *event_glob, bool name_only) { - - printf("\n"); - printf("List of pre-defined events (to be used in -e):\n"); + if (!name_only) { + printf("\n"); + printf("List of pre-defined events (to be used in -e):\n"); + } print_symbol_events(event_glob, PERF_TYPE_HARDWARE, - event_symbols_hw, PERF_COUNT_HW_MAX); + event_symbols_hw, PERF_COUNT_HW_MAX, name_only); print_symbol_events(event_glob, PERF_TYPE_SOFTWARE, - event_symbols_sw, PERF_COUNT_SW_MAX); + event_symbols_sw, PERF_COUNT_SW_MAX, name_only); - print_hwcache_events(event_glob); + print_hwcache_events(event_glob, name_only); if (event_glob != NULL) return; - printf("\n"); - printf(" %-50s [%s]\n", - "rNNN", - event_type_descriptors[PERF_TYPE_RAW]); - printf(" %-50s [%s]\n", - "cpu/t1=v1[,t2=v2,t3 ...]/modifier", - event_type_descriptors[PERF_TYPE_RAW]); - printf(" (see 'perf list --help' on how to encode it)\n"); - printf("\n"); - - printf(" %-50s [%s]\n", - "mem:[:access]", + if (!name_only) { + 
printf("\n"); + printf(" %-50s [%s]\n", + "rNNN", + event_type_descriptors[PERF_TYPE_RAW]); + printf(" %-50s [%s]\n", + "cpu/t1=v1[,t2=v2,t3 ...]/modifier", + event_type_descriptors[PERF_TYPE_RAW]); + printf(" (see 'perf list --help' on how to encode it)\n"); + printf("\n"); + + printf(" %-50s [%s]\n", + "mem:[:access]", event_type_descriptors[PERF_TYPE_BREAKPOINT]); - printf("\n"); + printf("\n"); + } - print_tracepoint_events(NULL, NULL); + print_tracepoint_events(NULL, NULL, name_only); } int parse_events__is_hardcoded_term(struct parse_events__term *term) @@ -1005,7 +1097,7 @@ int parse_events__is_hardcoded_term(struct parse_events__term *term) static int new_term(struct parse_events__term **_term, int type_val, int type_term, char *config, - char *str, long num) + char *str, u64 num) { struct parse_events__term *term; @@ -1034,7 +1126,7 @@ static int new_term(struct parse_events__term **_term, int type_val, } int parse_events__term_num(struct parse_events__term **term, - int type_term, char *config, long num) + int type_term, char *config, u64 num) { return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term, config, NULL, num); diff --git a/trunk/tools/perf/util/parse-events.h b/trunk/tools/perf/util/parse-events.h index ee9c218a193c..c356e443448d 100644 --- a/trunk/tools/perf/util/parse-events.h +++ b/trunk/tools/perf/util/parse-events.h @@ -55,7 +55,7 @@ struct parse_events__term { char *config; union { char *str; - long num; + u64 num; } val; int type_val; int type_term; @@ -73,17 +73,19 @@ struct parse_events_data__terms { int parse_events__is_hardcoded_term(struct parse_events__term *term); int parse_events__term_num(struct parse_events__term **_term, - int type_term, char *config, long num); + int type_term, char *config, u64 num); int parse_events__term_str(struct parse_events__term **_term, int type_term, char *config, char *str); int parse_events__term_clone(struct parse_events__term **new, struct parse_events__term *term); void parse_events__free_terms(struct list_head *terms); -int parse_events_modifier(struct list_head *list, char *str); +int parse_events__modifier_event(struct list_head *list, char *str, bool add); +int parse_events__modifier_group(struct list_head *list, char *event_mod); +int parse_events_name(struct list_head *list, char *name); int parse_events_add_tracepoint(struct list_head **list, int *idx, char *sys, char *event); int parse_events_add_numeric(struct list_head **list, int *idx, - unsigned long type, unsigned long config, + u32 type, u64 config, struct list_head *head_config); int parse_events_add_cache(struct list_head **list, int *idx, char *type, char *op_result1, char *op_result2); @@ -91,15 +93,17 @@ int parse_events_add_breakpoint(struct list_head **list, int *idx, void *ptr, char *type); int parse_events_add_pmu(struct list_head **list, int *idx, char *pmu , struct list_head *head_config); +void parse_events__set_leader(char *name, struct list_head *list); void parse_events_update_lists(struct list_head *list_event, struct list_head *list_all); void parse_events_error(void *data, void *scanner, char const *msg); int parse_events__test(void); -void print_events(const char *event_glob); +void print_events(const char *event_glob, bool name_only); void print_events_type(u8 type); -void print_tracepoint_events(const char *subsys_glob, const char *event_glob); -int print_hwcache_events(const char *event_glob); +void print_tracepoint_events(const char *subsys_glob, const char *event_glob, + bool name_only); +int print_hwcache_events(const char 
*event_glob, bool name_only); extern int is_valid_tracepoint(const char *event_string); extern int valid_debugfs_mount(const char *debugfs); diff --git a/trunk/tools/perf/util/parse-events.l b/trunk/tools/perf/util/parse-events.l index 384ca74c6b22..c87efc12579d 100644 --- a/trunk/tools/perf/util/parse-events.l +++ b/trunk/tools/perf/util/parse-events.l @@ -15,10 +15,10 @@ YYSTYPE *parse_events_get_lval(yyscan_t yyscanner); static int __value(YYSTYPE *yylval, char *str, int base, int token) { - long num; + u64 num; errno = 0; - num = strtoul(str, NULL, base); + num = strtoull(str, NULL, base); if (errno) return PE_ERROR; @@ -70,6 +70,12 @@ static int term(yyscan_t scanner, int type) %} %x mem +%s config +%x event + +group [^,{}/]*[{][^}]*[}][^,{}/]* +event_pmu [^,{}/]+[/][^/]*[/][^,{}/]* +event [^,{}/]+ num_dec [0-9]+ num_hex 0x[a-fA-F0-9]+ @@ -84,7 +90,13 @@ modifier_bp [rwx]{1,3} { int start_token; - start_token = (int) parse_events_get_extra(yyscanner); + start_token = parse_events_get_extra(yyscanner); + + if (start_token == PE_START_TERMS) + BEGIN(config); + else if (start_token == PE_START_EVENTS) + BEGIN(event); + if (start_token) { parse_events_set_extra(NULL, yyscanner); return start_token; @@ -92,6 +104,26 @@ modifier_bp [rwx]{1,3} } %} +{ + +{group} { + BEGIN(INITIAL); yyless(0); + } + +{event_pmu} | +{event} { + str(yyscanner, PE_EVENT_NAME); + BEGIN(INITIAL); yyless(0); + return PE_EVENT_NAME; + } + +. | +<> { + BEGIN(INITIAL); yyless(0); + } + +} + cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); } stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND); } stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_STALLED_CYCLES_BACKEND); } @@ -127,18 +159,16 @@ speculative-read|speculative-load | refs|Reference|ops|access | misses|miss { return str(yyscanner, PE_NAME_CACHE_OP_RESULT); } - /* - * These are event config hardcoded term names to be specified - * within xxx/.../ syntax. So far we dont clash with other names, - * so we can put them here directly. In case the we have a conflict - * in future, this needs to go into '//' condition block. 
- */ +{ config { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); } config1 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); } config2 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); } name { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_NAME); } period { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD); } branch_type { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_BRANCH_SAMPLE_TYPE); } +, { return ','; } +"/" { BEGIN(INITIAL); return '/'; } +} mem: { BEGIN(mem); return PE_PREFIX_MEM; } r{num_raw_hex} { return raw(yyscanner); } @@ -147,10 +177,12 @@ r{num_raw_hex} { return raw(yyscanner); } {modifier_event} { return str(yyscanner, PE_MODIFIER_EVENT); } {name} { return str(yyscanner, PE_NAME); } -"/" { return '/'; } +"/" { BEGIN(config); return '/'; } - { return '-'; } -, { return ','; } +, { BEGIN(event); return ','; } : { return ':'; } +"{" { BEGIN(event); return '{'; } +"}" { return '}'; } = { return '='; } \n { } @@ -175,7 +207,7 @@ r{num_raw_hex} { return raw(yyscanner); } %% -int parse_events_wrap(void *scanner __used) +int parse_events_wrap(void *scanner __maybe_unused) { return 1; } diff --git a/trunk/tools/perf/util/parse-events.y b/trunk/tools/perf/util/parse-events.y index 2bc5fbff2b5d..cd88209e3c58 100644 --- a/trunk/tools/perf/util/parse-events.y +++ b/trunk/tools/perf/util/parse-events.y @@ -27,10 +27,11 @@ do { \ %token PE_START_EVENTS PE_START_TERMS %token PE_VALUE PE_VALUE_SYM_HW PE_VALUE_SYM_SW PE_RAW PE_TERM +%token PE_EVENT_NAME %token PE_NAME %token PE_MODIFIER_EVENT PE_MODIFIER_BP %token PE_NAME_CACHE_TYPE PE_NAME_CACHE_OP_RESULT -%token PE_PREFIX_MEM PE_PREFIX_RAW +%token PE_PREFIX_MEM PE_PREFIX_RAW PE_PREFIX_GROUP %token PE_ERROR %type PE_VALUE %type PE_VALUE_SYM_HW @@ -42,6 +43,7 @@ do { \ %type PE_NAME_CACHE_OP_RESULT %type PE_MODIFIER_EVENT %type PE_MODIFIER_BP +%type PE_EVENT_NAME %type value_sym %type event_config %type event_term @@ -53,44 +55,125 @@ do { \ %type event_legacy_numeric %type event_legacy_raw %type event_def +%type event_mod +%type event_name +%type event +%type events +%type group_def +%type group +%type groups %union { char *str; - unsigned long num; + u64 num; struct list_head *head; struct parse_events__term *term; } %% start: -PE_START_EVENTS events +PE_START_EVENTS start_events | -PE_START_TERMS terms +PE_START_TERMS start_terms + +start_events: groups +{ + struct parse_events_data__events *data = _data; + + parse_events_update_lists($1, &data->list); +} + +groups: +groups ',' group +{ + struct list_head *list = $1; + struct list_head *group = $3; + + parse_events_update_lists(group, list); + $$ = list; +} +| +groups ',' event +{ + struct list_head *list = $1; + struct list_head *event = $3; + + parse_events_update_lists(event, list); + $$ = list; +} +| +group +| +event + +group: +group_def ':' PE_MODIFIER_EVENT +{ + struct list_head *list = $1; + + ABORT_ON(parse_events__modifier_group(list, $3)); + $$ = list; +} +| +group_def + +group_def: +PE_NAME '{' events '}' +{ + struct list_head *list = $3; + + parse_events__set_leader($1, list); + $$ = list; +} +| +'{' events '}' +{ + struct list_head *list = $2; + + parse_events__set_leader(NULL, list); + $$ = list; +} events: -events ',' event | event +events ',' event +{ + struct list_head *event = $3; + struct list_head *list = $1; -event: -event_def PE_MODIFIER_EVENT + parse_events_update_lists(event, list); + $$ = list; +} +| +event + +event: event_mod + +event_mod: +event_name PE_MODIFIER_EVENT { - struct parse_events_data__events *data = _data; + 
struct list_head *list = $1; /* * Apply modifier on all events added by single event definition * (there could be more events added for multiple tracepoint * definitions via '*?'. */ - ABORT_ON(parse_events_modifier($1, $2)); - parse_events_update_lists($1, &data->list); + ABORT_ON(parse_events__modifier_event(list, $2, false)); + $$ = list; } | -event_def -{ - struct parse_events_data__events *data = _data; +event_name - parse_events_update_lists($1, &data->list); +event_name: +PE_EVENT_NAME event_def +{ + ABORT_ON(parse_events_name($2, $1)); + free($1); + $$ = $2; } +| +event_def event_def: event_pmu | event_legacy_symbol | @@ -207,7 +290,7 @@ PE_VALUE ':' PE_VALUE struct parse_events_data__events *data = _data; struct list_head *list = NULL; - ABORT_ON(parse_events_add_numeric(&list, &data->idx, $1, $3, NULL)); + ABORT_ON(parse_events_add_numeric(&list, &data->idx, (u32)$1, $3, NULL)); $$ = list; } @@ -222,7 +305,7 @@ PE_RAW $$ = list; } -terms: event_config +start_terms: event_config { struct parse_events_data__terms *data = _data; data->terms = $1; @@ -282,7 +365,7 @@ PE_TERM '=' PE_NAME { struct parse_events__term *term; - ABORT_ON(parse_events__term_str(&term, $1, NULL, $3)); + ABORT_ON(parse_events__term_str(&term, (int)$1, NULL, $3)); $$ = term; } | @@ -290,7 +373,7 @@ PE_TERM '=' PE_VALUE { struct parse_events__term *term; - ABORT_ON(parse_events__term_num(&term, $1, NULL, $3)); + ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, $3)); $$ = term; } | @@ -298,7 +381,7 @@ PE_TERM { struct parse_events__term *term; - ABORT_ON(parse_events__term_num(&term, $1, NULL, 1)); + ABORT_ON(parse_events__term_num(&term, (int)$1, NULL, 1)); $$ = term; } @@ -308,7 +391,7 @@ sep_slash_dc: '/' | ':' | %% -void parse_events_error(void *data __used, void *scanner __used, - char const *msg __used) +void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused, + char const *msg __maybe_unused) { } diff --git a/trunk/tools/perf/util/parse-options.c b/trunk/tools/perf/util/parse-options.c index 99d02aa57dbf..443fc116512b 100644 --- a/trunk/tools/perf/util/parse-options.c +++ b/trunk/tools/perf/util/parse-options.c @@ -1,6 +1,7 @@ #include "util.h" #include "parse-options.h" #include "cache.h" +#include "header.h" #define OPT_SHORT 1 #define OPT_UNSET 2 @@ -413,6 +414,8 @@ int parse_options(int argc, const char **argv, const struct option *options, { struct parse_opt_ctx_t ctx; + perf_header__set_cmdline(argc, argv); + parse_options_start(&ctx, argc, argv, flags); switch (parse_options_step(&ctx, options, usagestr)) { case PARSE_OPT_HELP: @@ -554,7 +557,8 @@ int parse_options_usage(const char * const *usagestr, } -int parse_opt_verbosity_cb(const struct option *opt, const char *arg __used, +int parse_opt_verbosity_cb(const struct option *opt, + const char *arg __maybe_unused, int unset) { int *target = opt->value; diff --git a/trunk/tools/perf/util/perf_regs.h b/trunk/tools/perf/util/perf_regs.h new file mode 100644 index 000000000000..316dbe7f86ed --- /dev/null +++ b/trunk/tools/perf/util/perf_regs.h @@ -0,0 +1,14 @@ +#ifndef __PERF_REGS_H +#define __PERF_REGS_H + +#ifndef NO_PERF_REGS +#include +#else +#define PERF_REGS_MASK 0 + +static inline const char *perf_reg_name(int id __maybe_unused) +{ + return NULL; +} +#endif /* NO_PERF_REGS */ +#endif /* __PERF_REGS_H */ diff --git a/trunk/tools/perf/util/pmu.c b/trunk/tools/perf/util/pmu.c index 67715a42cd6d..8a2229da594f 100644 --- a/trunk/tools/perf/util/pmu.c +++ b/trunk/tools/perf/util/pmu.c @@ -9,6 +9,9 @@ #include "util.h" 
#include "pmu.h" #include "parse-events.h" +#include "cpumap.h" + +#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/" int perf_pmu_parse(struct list_head *list, char *name); extern FILE *perf_pmu_in; @@ -69,7 +72,7 @@ static int pmu_format(char *name, struct list_head *format) return -1; snprintf(path, PATH_MAX, - "%s/bus/event_source/devices/%s/format", sysfs, name); + "%s" EVENT_SOURCE_DEVICE_PATH "%s/format", sysfs, name); if (stat(path, &st) < 0) return 0; /* no error if format does not exist */ @@ -206,7 +209,7 @@ static int pmu_type(char *name, __u32 *type) return -1; snprintf(path, PATH_MAX, - "%s/bus/event_source/devices/%s/type", sysfs, name); + "%s" EVENT_SOURCE_DEVICE_PATH "%s/type", sysfs, name); if (stat(path, &st) < 0) return -1; @@ -222,6 +225,62 @@ static int pmu_type(char *name, __u32 *type) return ret; } +/* Add all pmus in sysfs to pmu list: */ +static void pmu_read_sysfs(void) +{ + char path[PATH_MAX]; + const char *sysfs; + DIR *dir; + struct dirent *dent; + + sysfs = sysfs_find_mountpoint(); + if (!sysfs) + return; + + snprintf(path, PATH_MAX, + "%s" EVENT_SOURCE_DEVICE_PATH, sysfs); + + dir = opendir(path); + if (!dir) + return; + + while ((dent = readdir(dir))) { + if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) + continue; + /* add to static LIST_HEAD(pmus): */ + perf_pmu__find(dent->d_name); + } + + closedir(dir); +} + +static struct cpu_map *pmu_cpumask(char *name) +{ + struct stat st; + char path[PATH_MAX]; + const char *sysfs; + FILE *file; + struct cpu_map *cpus; + + sysfs = sysfs_find_mountpoint(); + if (!sysfs) + return NULL; + + snprintf(path, PATH_MAX, + "%s/bus/event_source/devices/%s/cpumask", sysfs, name); + + if (stat(path, &st) < 0) + return NULL; + + file = fopen(path, "r"); + if (!file) + return NULL; + + cpus = cpu_map__read(file); + fclose(file); + return cpus; +} + static struct perf_pmu *pmu_lookup(char *name) { struct perf_pmu *pmu; @@ -244,6 +303,8 @@ static struct perf_pmu *pmu_lookup(char *name) if (!pmu) return NULL; + pmu->cpus = pmu_cpumask(name); + pmu_aliases(name, &aliases); INIT_LIST_HEAD(&pmu->format); @@ -267,6 +328,21 @@ static struct perf_pmu *pmu_find(char *name) return NULL; } +struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu) +{ + /* + * pmu iterator: If pmu is NULL, we start at the begin, + * otherwise return the next pmu. Returns NULL on end. 
+ */ + if (!pmu) { + pmu_read_sysfs(); + pmu = list_prepare_entry(pmu, &pmus, list); + } + list_for_each_entry_continue(pmu, &pmus, list) + return pmu; + return NULL; +} + struct perf_pmu *perf_pmu__find(char *name) { struct perf_pmu *pmu; diff --git a/trunk/tools/perf/util/pmu.h b/trunk/tools/perf/util/pmu.h index 535f2c5258ab..53c7794fc4be 100644 --- a/trunk/tools/perf/util/pmu.h +++ b/trunk/tools/perf/util/pmu.h @@ -28,6 +28,7 @@ struct perf_pmu__alias { struct perf_pmu { char *name; __u32 type; + struct cpu_map *cpus; struct list_head format; struct list_head aliases; struct list_head list; @@ -46,5 +47,7 @@ int perf_pmu__new_format(struct list_head *list, char *name, int config, unsigned long *bits); void perf_pmu__set_format(unsigned long *bits, long from, long to); +struct perf_pmu *perf_pmu__scan(struct perf_pmu *pmu); + int perf_pmu__test(void); #endif /* __PMU_H */ diff --git a/trunk/tools/perf/util/pmu.y b/trunk/tools/perf/util/pmu.y index 20ea77e93169..ec898047ebb9 100644 --- a/trunk/tools/perf/util/pmu.y +++ b/trunk/tools/perf/util/pmu.y @@ -86,8 +86,8 @@ PP_VALUE %% -void perf_pmu_error(struct list_head *list __used, - char *name __used, - char const *msg __used) +void perf_pmu_error(struct list_head *list __maybe_unused, + char *name __maybe_unused, + char const *msg __maybe_unused) { } diff --git a/trunk/tools/perf/util/probe-event.c b/trunk/tools/perf/util/probe-event.c index 0dda25d82d06..49a256e6e0a2 100644 --- a/trunk/tools/perf/util/probe-event.c +++ b/trunk/tools/perf/util/probe-event.c @@ -41,7 +41,7 @@ #include "symbol.h" #include "thread.h" #include "debugfs.h" -#include "trace-event.h" /* For __unused */ +#include "trace-event.h" /* For __maybe_unused */ #include "probe-event.h" #include "probe-finder.h" #include "session.h" @@ -647,8 +647,8 @@ static int kprobe_convert_to_perf_probe(struct probe_trace_point *tp, } static int try_to_find_probe_trace_events(struct perf_probe_event *pev, - struct probe_trace_event **tevs __unused, - int max_tevs __unused, const char *target) + struct probe_trace_event **tevs __maybe_unused, + int max_tevs __maybe_unused, const char *target) { if (perf_probe_event_need_dwarf(pev)) { pr_warning("Debuginfo-analysis is not supported.\n"); @@ -661,17 +661,18 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, return 0; } -int show_line_range(struct line_range *lr __unused, const char *module __unused) +int show_line_range(struct line_range *lr __maybe_unused, + const char *module __maybe_unused) { pr_warning("Debuginfo-analysis is not supported.\n"); return -ENOSYS; } -int show_available_vars(struct perf_probe_event *pevs __unused, - int npevs __unused, int max_vls __unused, - const char *module __unused, - struct strfilter *filter __unused, - bool externs __unused) +int show_available_vars(struct perf_probe_event *pevs __maybe_unused, + int npevs __maybe_unused, int max_vls __maybe_unused, + const char *module __maybe_unused, + struct strfilter *filter __maybe_unused, + bool externs __maybe_unused) { pr_warning("Debuginfo-analysis is not supported.\n"); return -ENOSYS; @@ -1099,6 +1100,7 @@ static int parse_probe_trace_command(const char *cmd, struct probe_trace_point *tp = &tev->point; char pr; char *p; + char *argv0_str = NULL, *fmt, *fmt1_str, *fmt2_str, *fmt3_str; int ret, i, argc; char **argv; @@ -1115,14 +1117,27 @@ static int parse_probe_trace_command(const char *cmd, } /* Scan event and group name. 
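 *
 * The string being split here is a kprobe_events definition of roughly
 * this shape (the values are only an example):
 *
 *	p:probe/vfs_getname vfs_getname+0
 *
 * so argv[0] carries "<type>:<group>/<event>" and the strtok_r() calls
 * below pick out 'p' (or 'r' for a return probe), the group and the
 * event name in turn, while argv[1] is split into symbol and offset.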
*/ - ret = sscanf(argv[0], "%c:%a[^/ \t]/%a[^ \t]", - &pr, (float *)(void *)&tev->group, - (float *)(void *)&tev->event); - if (ret != 3) { + argv0_str = strdup(argv[0]); + if (argv0_str == NULL) { + ret = -ENOMEM; + goto out; + } + fmt1_str = strtok_r(argv0_str, ":", &fmt); + fmt2_str = strtok_r(NULL, "/", &fmt); + fmt3_str = strtok_r(NULL, " \t", &fmt); + if (fmt1_str == NULL || strlen(fmt1_str) != 1 || fmt2_str == NULL + || fmt3_str == NULL) { semantic_error("Failed to parse event name: %s\n", argv[0]); ret = -EINVAL; goto out; } + pr = fmt1_str[0]; + tev->group = strdup(fmt2_str); + tev->event = strdup(fmt3_str); + if (tev->group == NULL || tev->event == NULL) { + ret = -ENOMEM; + goto out; + } pr_debug("Group:%s Event:%s probe:%c\n", tev->group, tev->event, pr); tp->retprobe = (pr == 'r'); @@ -1134,10 +1149,17 @@ static int parse_probe_trace_command(const char *cmd, p++; } else p = argv[1]; - ret = sscanf(p, "%a[^+]+%lu", (float *)(void *)&tp->symbol, - &tp->offset); - if (ret == 1) + fmt1_str = strtok_r(p, "+", &fmt); + tp->symbol = strdup(fmt1_str); + if (tp->symbol == NULL) { + ret = -ENOMEM; + goto out; + } + fmt2_str = strtok_r(NULL, "", &fmt); + if (fmt2_str == NULL) tp->offset = 0; + else + tp->offset = strtoul(fmt2_str, NULL, 10); tev->nargs = argc - 2; tev->args = zalloc(sizeof(struct probe_trace_arg) * tev->nargs); @@ -1161,6 +1183,7 @@ static int parse_probe_trace_command(const char *cmd, } ret = 0; out: + free(argv0_str); argv_free(argv); return ret; } @@ -2183,7 +2206,7 @@ static struct strfilter *available_func_filter; * If a symbol corresponds to a function with global binding and * matches filter return 0. For all others return 1. */ -static int filter_available_functions(struct map *map __unused, +static int filter_available_functions(struct map *map __maybe_unused, struct symbol *sym) { if (sym->binding == STB_GLOBAL && @@ -2307,10 +2330,17 @@ static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec) function = NULL; } if (!pev->group) { - char *ptr1, *ptr2; + char *ptr1, *ptr2, *exec_copy; pev->group = zalloc(sizeof(char *) * 64); - ptr1 = strdup(basename(exec)); + exec_copy = strdup(exec); + if (!exec_copy) { + ret = -ENOMEM; + pr_warning("Failed to copy exec string.\n"); + goto out; + } + + ptr1 = strdup(basename(exec_copy)); if (ptr1) { ptr2 = strpbrk(ptr1, "-._"); if (ptr2) @@ -2319,6 +2349,7 @@ static int convert_name_to_addr(struct perf_probe_event *pev, const char *exec) ptr1); free(ptr1); } + free(exec_copy); } free(pp->function); pp->function = zalloc(sizeof(char *) * MAX_PROBE_ARGS); diff --git a/trunk/tools/perf/util/probe-finder.c b/trunk/tools/perf/util/probe-finder.c index d448984ed789..1daf5c14e751 100644 --- a/trunk/tools/perf/util/probe-finder.c +++ b/trunk/tools/perf/util/probe-finder.c @@ -207,7 +207,7 @@ static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, #else /* With older elfutils, this just support kernel module... 
*/ static int debuginfo__init_online_kernel_dwarf(struct debuginfo *self, - Dwarf_Addr addr __used) + Dwarf_Addr addr __maybe_unused) { const char *path = kernel_get_module_path("kernel"); @@ -525,8 +525,10 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, return -ENOENT; } /* Verify it is a data structure */ - if (dwarf_tag(&type) != DW_TAG_structure_type) { - pr_warning("%s is not a data structure.\n", varname); + tag = dwarf_tag(&type); + if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { + pr_warning("%s is not a data structure nor an union.\n", + varname); return -EINVAL; } @@ -539,8 +541,9 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, *ref_ptr = ref; } else { /* Verify it is a data structure */ - if (tag != DW_TAG_structure_type) { - pr_warning("%s is not a data structure.\n", varname); + if (tag != DW_TAG_structure_type && tag != DW_TAG_union_type) { + pr_warning("%s is not a data structure nor an union.\n", + varname); return -EINVAL; } if (field->name[0] == '[') { @@ -567,10 +570,15 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname, } /* Get the offset of the field */ - ret = die_get_data_member_location(die_mem, &offs); - if (ret < 0) { - pr_warning("Failed to get the offset of %s.\n", field->name); - return ret; + if (tag == DW_TAG_union_type) { + offs = 0; + } else { + ret = die_get_data_member_location(die_mem, &offs); + if (ret < 0) { + pr_warning("Failed to get the offset of %s.\n", + field->name); + return ret; + } } ref->offset += (long)offs; @@ -1419,7 +1427,7 @@ static int line_range_add_line(const char *src, unsigned int lineno, } static int line_range_walk_cb(const char *fname, int lineno, - Dwarf_Addr addr __used, + Dwarf_Addr addr __maybe_unused, void *data) { struct line_finder *lf = data; diff --git a/trunk/tools/perf/util/python-ext-sources b/trunk/tools/perf/util/python-ext-sources index 2884e67ee625..c40c2d33199e 100644 --- a/trunk/tools/perf/util/python-ext-sources +++ b/trunk/tools/perf/util/python-ext-sources @@ -1,5 +1,5 @@ # -# List of files needed by perf python extention +# List of files needed by perf python extension # # Each source file must be placed on its own line so that it can be # processed by Makefile and util/setup.py accordingly. @@ -10,10 +10,12 @@ util/ctype.c util/evlist.c util/evsel.c util/cpumap.c +util/hweight.c util/thread_map.c util/util.c util/xyarray.c util/cgroup.c util/debugfs.c +util/rblist.c util/strlist.c ../../lib/rbtree.c diff --git a/trunk/tools/perf/util/python.c b/trunk/tools/perf/util/python.c index e03b58a48424..9181bf212fb9 100644 --- a/trunk/tools/perf/util/python.c +++ b/trunk/tools/perf/util/python.c @@ -627,7 +627,7 @@ static PyObject *pyrf_evsel__open(struct pyrf_evsel *pevsel, * This will group just the fds for this single evsel, to group * multiple events, use evlist.open(). 
*/ - if (perf_evsel__open(evsel, cpus, threads, group, NULL) < 0) { + if (perf_evsel__open(evsel, cpus, threads) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } @@ -672,7 +672,7 @@ struct pyrf_evlist { }; static int pyrf_evlist__init(struct pyrf_evlist *pevlist, - PyObject *args, PyObject *kwargs __used) + PyObject *args, PyObject *kwargs __maybe_unused) { PyObject *pcpus = NULL, *pthreads = NULL; struct cpu_map *cpus; @@ -733,7 +733,8 @@ static PyObject *pyrf_evlist__poll(struct pyrf_evlist *pevlist, } static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, - PyObject *args __used, PyObject *kwargs __used) + PyObject *args __maybe_unused, + PyObject *kwargs __maybe_unused) { struct perf_evlist *evlist = &pevlist->evlist; PyObject *list = PyList_New(0); @@ -765,7 +766,8 @@ static PyObject *pyrf_evlist__get_pollfd(struct pyrf_evlist *pevlist, static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, - PyObject *args, PyObject *kwargs __used) + PyObject *args, + PyObject *kwargs __maybe_unused) { struct perf_evlist *evlist = &pevlist->evlist; PyObject *pevsel; @@ -797,17 +799,13 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, event = perf_evlist__mmap_read(evlist, cpu); if (event != NULL) { - struct perf_evsel *first; PyObject *pyevent = pyrf_event__new(event); struct pyrf_event *pevent = (struct pyrf_event *)pyevent; if (pyevent == NULL) return PyErr_NoMemory(); - first = list_entry(evlist->entries.next, struct perf_evsel, node); - err = perf_event__parse_sample(event, first->attr.sample_type, - perf_evsel__sample_size(first), - sample_id_all, &pevent->sample, false); + err = perf_evlist__parse_sample(evlist, event, &pevent->sample); if (err) return PyErr_Format(PyExc_OSError, "perf: can't parse sample, err=%d", err); @@ -828,7 +826,10 @@ static PyObject *pyrf_evlist__open(struct pyrf_evlist *pevlist, if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii", kwlist, &group)) return NULL; - if (perf_evlist__open(evlist, group) < 0) { + if (group) + perf_evlist__set_leader(evlist); + + if (perf_evlist__open(evlist) < 0) { PyErr_SetFromErrno(PyExc_OSError); return NULL; } diff --git a/trunk/tools/perf/util/rblist.c b/trunk/tools/perf/util/rblist.c new file mode 100644 index 000000000000..0171fb611004 --- /dev/null +++ b/trunk/tools/perf/util/rblist.c @@ -0,0 +1,107 @@ +/* + * Based on strlist.c by: + * (c) 2009 Arnaldo Carvalho de Melo + * + * Licensed under the GPLv2. 
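 *
 * rblist factors out the red-black tree boilerplate that containers such
 * as strlist used to open-code: users embed a struct rb_node in their own
 * node type and supply node_cmp/node_new/node_delete callbacks. A purely
 * illustrative user (a set of ints) could be wired up like this:
 *
 *	struct int_node {
 *		struct rb_node rb_node;
 *		int i;
 *	};
 *
 *	static int int_node__cmp(struct rb_node *rb_node, const void *entry)
 *	{
 *		struct int_node *in = container_of(rb_node, struct int_node, rb_node);
 *
 *		return in->i - *(const int *)entry;
 *	}
 *
 *	struct rblist rblist;
 *
 *	rblist__init(&rblist);
 *	rblist.node_cmp    = int_node__cmp;
 *	rblist.node_new    = int_node__new;	(allocates an int_node)
 *	rblist.node_delete = int_node__delete;	(frees one)
 *
 *	rblist__add_node(&rblist, &(int){ 42 });
 *
 * The strlist conversion further down in this patch is the real thing.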
+ */ + +#include +#include +#include + +#include "rblist.h" + +int rblist__add_node(struct rblist *rblist, const void *new_entry) +{ + struct rb_node **p = &rblist->entries.rb_node; + struct rb_node *parent = NULL, *new_node; + + while (*p != NULL) { + int rc; + + parent = *p; + + rc = rblist->node_cmp(parent, new_entry); + if (rc > 0) + p = &(*p)->rb_left; + else if (rc < 0) + p = &(*p)->rb_right; + else + return -EEXIST; + } + + new_node = rblist->node_new(rblist, new_entry); + if (new_node == NULL) + return -ENOMEM; + + rb_link_node(new_node, parent, p); + rb_insert_color(new_node, &rblist->entries); + ++rblist->nr_entries; + + return 0; +} + +void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node) +{ + rb_erase(rb_node, &rblist->entries); + rblist->node_delete(rblist, rb_node); +} + +struct rb_node *rblist__find(struct rblist *rblist, const void *entry) +{ + struct rb_node **p = &rblist->entries.rb_node; + struct rb_node *parent = NULL; + + while (*p != NULL) { + int rc; + + parent = *p; + + rc = rblist->node_cmp(parent, entry); + if (rc > 0) + p = &(*p)->rb_left; + else if (rc < 0) + p = &(*p)->rb_right; + else + return parent; + } + + return NULL; +} + +void rblist__init(struct rblist *rblist) +{ + if (rblist != NULL) { + rblist->entries = RB_ROOT; + rblist->nr_entries = 0; + } + + return; +} + +void rblist__delete(struct rblist *rblist) +{ + if (rblist != NULL) { + struct rb_node *pos, *next = rb_first(&rblist->entries); + + while (next) { + pos = next; + next = rb_next(pos); + rb_erase(pos, &rblist->entries); + rblist->node_delete(rblist, pos); + } + free(rblist); + } +} + +struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx) +{ + struct rb_node *node; + + for (node = rb_first(&rblist->entries); node; node = rb_next(node)) { + if (!idx--) + return node; + } + + return NULL; +} diff --git a/trunk/tools/perf/util/rblist.h b/trunk/tools/perf/util/rblist.h new file mode 100644 index 000000000000..6d0cae5ae83d --- /dev/null +++ b/trunk/tools/perf/util/rblist.h @@ -0,0 +1,47 @@ +#ifndef __PERF_RBLIST_H +#define __PERF_RBLIST_H + +#include +#include + +/* + * create node structs of the form: + * struct my_node { + * struct rb_node rb_node; + * ... my data ... + * }; + * + * create list structs of the form: + * struct mylist { + * struct rblist rblist; + * ... my data ... 
+ * }; + */ + +struct rblist { + struct rb_root entries; + unsigned int nr_entries; + + int (*node_cmp)(struct rb_node *rbn, const void *entry); + struct rb_node *(*node_new)(struct rblist *rlist, const void *new_entry); + void (*node_delete)(struct rblist *rblist, struct rb_node *rb_node); +}; + +void rblist__init(struct rblist *rblist); +void rblist__delete(struct rblist *rblist); +int rblist__add_node(struct rblist *rblist, const void *new_entry); +void rblist__remove_node(struct rblist *rblist, struct rb_node *rb_node); +struct rb_node *rblist__find(struct rblist *rblist, const void *entry); +struct rb_node *rblist__entry(const struct rblist *rblist, unsigned int idx); + +static inline bool rblist__empty(const struct rblist *rblist) +{ + return rblist->nr_entries == 0; +} + +static inline unsigned int rblist__nr_entries(const struct rblist *rblist) +{ + return rblist->nr_entries; +} + +#endif /* __PERF_RBLIST_H */ diff --git a/trunk/tools/perf/util/scripting-engines/trace-event-perl.c b/trunk/tools/perf/util/scripting-engines/trace-event-perl.c index 02dfa19a467f..f80605eb1855 100644 --- a/trunk/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/trunk/tools/perf/util/scripting-engines/trace-event-perl.c @@ -25,16 +25,16 @@ #include #include -#include "../../perf.h" #include "../util.h" +#include +#include + +#include "../../perf.h" #include "../thread.h" #include "../event.h" #include "../trace-event.h" #include "../evsel.h" -#include -#include - void boot_Perf__Trace__Context(pTHX_ CV *cv); void boot_DynaLoader(pTHX_ CV *cv); typedef PerlInterpreter * INTERP; @@ -237,16 +237,16 @@ static void define_event_symbols(struct event_format *event, define_event_symbols(event, ev_name, args->next); } -static inline -struct event_format *find_cache_event(struct pevent *pevent, int type) +static inline struct event_format *find_cache_event(struct perf_evsel *evsel) { static char ev_name[256]; struct event_format *event; + int type = evsel->attr.config; if (events[type]) return events[type]; - events[type] = event = pevent_find_event(pevent, type); + events[type] = event = evsel->tp_format; if (!event) return NULL; @@ -257,23 +257,22 @@ struct event_format *find_cache_event(struct pevent *pevent, int type) return event; } -static void perl_process_tracepoint(union perf_event *perf_event __unused, - struct pevent *pevent, +static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, - struct machine *machine __unused, - struct thread *thread) + struct machine *machine __maybe_unused, + struct addr_location *al) { struct format_field *field; static char handler[256]; unsigned long long val; unsigned long s, ns; struct event_format *event; - int type; int pid; int cpu = sample->cpu; void *data = sample->raw_data; unsigned long long nsecs = sample->time; + struct thread *thread = al->thread; char *comm = thread->comm; dSP; @@ -281,13 +280,11 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, if (evsel->attr.type != PERF_TYPE_TRACEPOINT) return; - type = trace_parse_common_type(pevent, data); - - event = find_cache_event(pevent, type); + event = find_cache_event(evsel); if (!event) - die("ug! no event found for type %d", type); + die("ug! 
no event found for type %" PRIu64, evsel->attr.config); - pid = trace_parse_common_pid(pevent, data); + pid = raw_field_value(event, "common_pid", data); sprintf(handler, "%s::%s", event->system, event->name); @@ -320,7 +317,7 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, offset = field->offset; XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); } else { /* FIELD_IS_NUMERIC */ - val = read_size(pevent, data + field->offset, + val = read_size(event, data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { XPUSHs(sv_2mortal(newSViv(val))); @@ -349,11 +346,11 @@ static void perl_process_tracepoint(union perf_event *perf_event __unused, LEAVE; } -static void perl_process_event_generic(union perf_event *pevent __unused, +static void perl_process_event_generic(union perf_event *event, struct perf_sample *sample, - struct perf_evsel *evsel __unused, - struct machine *machine __unused, - struct thread *thread __unused) + struct perf_evsel *evsel, + struct machine *machine __maybe_unused, + struct addr_location *al __maybe_unused) { dSP; @@ -363,7 +360,7 @@ static void perl_process_event_generic(union perf_event *pevent __unused, ENTER; SAVETMPS; PUSHMARK(SP); - XPUSHs(sv_2mortal(newSVpvn((const char *)pevent, pevent->header.size))); + XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size))); XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); @@ -376,14 +373,13 @@ static void perl_process_event_generic(union perf_event *pevent __unused, } static void perl_process_event(union perf_event *event, - struct pevent *pevent, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine, - struct thread *thread) + struct addr_location *al) { - perl_process_tracepoint(event, pevent, sample, evsel, machine, thread); - perl_process_event_generic(event, sample, evsel, machine, thread); + perl_process_tracepoint(event, sample, evsel, machine, al); + perl_process_event_generic(event, sample, evsel, machine, al); } static void run_start_sub(void) diff --git a/trunk/tools/perf/util/scripting-engines/trace-event-python.c b/trunk/tools/perf/util/scripting-engines/trace-event-python.c index ce4d1b0c3862..730c6630cba5 100644 --- a/trunk/tools/perf/util/scripting-engines/trace-event-python.c +++ b/trunk/tools/perf/util/scripting-engines/trace-event-python.c @@ -27,10 +27,12 @@ #include #include "../../perf.h" +#include "../evsel.h" #include "../util.h" #include "../event.h" #include "../thread.h" #include "../trace-event.h" +#include "../evsel.h" PyMODINIT_FUNC initperf_trace_context(void); @@ -194,16 +196,21 @@ static void define_event_symbols(struct event_format *event, define_event_symbols(event, ev_name, args->next); } -static inline -struct event_format *find_cache_event(struct pevent *pevent, int type) +static inline struct event_format *find_cache_event(struct perf_evsel *evsel) { static char ev_name[256]; struct event_format *event; + int type = evsel->attr.config; + /* + * XXX: Do we really need to cache this since now we have evsel->tp_format + * cached already? 
Need to re-read this "cache" routine that as well calls + * define_event_symbols() :-\ + */ if (events[type]) return events[type]; - events[type] = event = pevent_find_event(pevent, type); + events[type] = event = evsel->tp_format; if (!event) return NULL; @@ -214,12 +221,12 @@ struct event_format *find_cache_event(struct pevent *pevent, int type) return event; } -static void python_process_event(union perf_event *perf_event __unused, - struct pevent *pevent, +static void python_process_tracepoint(union perf_event *perf_event + __maybe_unused, struct perf_sample *sample, - struct perf_evsel *evsel __unused, - struct machine *machine __unused, - struct thread *thread) + struct perf_evsel *evsel, + struct machine *machine __maybe_unused, + struct addr_location *al) { PyObject *handler, *retval, *context, *t, *obj, *dict = NULL; static char handler_name[256]; @@ -228,24 +235,22 @@ static void python_process_event(union perf_event *perf_event __unused, unsigned long s, ns; struct event_format *event; unsigned n = 0; - int type; int pid; int cpu = sample->cpu; void *data = sample->raw_data; unsigned long long nsecs = sample->time; + struct thread *thread = al->thread; char *comm = thread->comm; t = PyTuple_New(MAX_FIELDS); if (!t) Py_FatalError("couldn't create Python tuple"); - type = trace_parse_common_type(pevent, data); - - event = find_cache_event(pevent, type); + event = find_cache_event(evsel); if (!event) - die("ug! no event found for type %d", type); + die("ug! no event found for type %d", (int)evsel->attr.config); - pid = trace_parse_common_pid(pevent, data); + pid = raw_field_value(event, "common_pid", data); sprintf(handler_name, "%s__%s", event->system, event->name); @@ -290,7 +295,7 @@ static void python_process_event(union perf_event *perf_event __unused, offset = field->offset; obj = PyString_FromString((char *)data + offset); } else { /* FIELD_IS_NUMERIC */ - val = read_size(pevent, data + field->offset, + val = read_size(event, data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { if ((long long)val >= LONG_MIN && @@ -335,6 +340,84 @@ static void python_process_event(union perf_event *perf_event __unused, Py_DECREF(t); } +static void python_process_general_event(union perf_event *perf_event + __maybe_unused, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine __maybe_unused, + struct addr_location *al) +{ + PyObject *handler, *retval, *t, *dict; + static char handler_name[64]; + unsigned n = 0; + struct thread *thread = al->thread; + + /* + * Use the MAX_FIELDS to make the function expandable, though + * currently there is only one item for the tuple. 
+ */ + t = PyTuple_New(MAX_FIELDS); + if (!t) + Py_FatalError("couldn't create Python tuple"); + + dict = PyDict_New(); + if (!dict) + Py_FatalError("couldn't create Python dictionary"); + + snprintf(handler_name, sizeof(handler_name), "%s", "process_event"); + + handler = PyDict_GetItemString(main_dict, handler_name); + if (!handler || !PyCallable_Check(handler)) + goto exit; + + PyDict_SetItemString(dict, "ev_name", PyString_FromString(perf_evsel__name(evsel))); + PyDict_SetItemString(dict, "attr", PyString_FromStringAndSize( + (const char *)&evsel->attr, sizeof(evsel->attr))); + PyDict_SetItemString(dict, "sample", PyString_FromStringAndSize( + (const char *)sample, sizeof(*sample))); + PyDict_SetItemString(dict, "raw_buf", PyString_FromStringAndSize( + (const char *)sample->raw_data, sample->raw_size)); + PyDict_SetItemString(dict, "comm", + PyString_FromString(thread->comm)); + if (al->map) { + PyDict_SetItemString(dict, "dso", + PyString_FromString(al->map->dso->name)); + } + if (al->sym) { + PyDict_SetItemString(dict, "symbol", + PyString_FromString(al->sym->name)); + } + + PyTuple_SetItem(t, n++, dict); + if (_PyTuple_Resize(&t, n) == -1) + Py_FatalError("error resizing Python tuple"); + + retval = PyObject_CallObject(handler, t); + if (retval == NULL) + handler_call_die(handler_name); +exit: + Py_DECREF(dict); + Py_DECREF(t); +} + +static void python_process_event(union perf_event *perf_event, + struct perf_sample *sample, + struct perf_evsel *evsel, + struct machine *machine, + struct addr_location *al) +{ + switch (evsel->attr.type) { + case PERF_TYPE_TRACEPOINT: + python_process_tracepoint(perf_event, sample, evsel, + machine, al); + break; + /* Reserve for future process_hw/sw/raw APIs */ + default: + python_process_general_event(perf_event, sample, evsel, + machine, al); + } +} + static int run_start_sub(void) { PyObject *handler, *retval; diff --git a/trunk/tools/perf/util/session.c b/trunk/tools/perf/util/session.c index 8e4f0755d2aa..8cdd23239c90 100644 --- a/trunk/tools/perf/util/session.c +++ b/trunk/tools/perf/util/session.c @@ -15,6 +15,9 @@ #include "util.h" #include "cpumap.h" #include "event-parse.h" +#include "perf_regs.h" +#include "unwind.h" +#include "vdso.h" static int perf_session__open(struct perf_session *self, bool force) { @@ -80,14 +83,12 @@ static int perf_session__open(struct perf_session *self, bool force) return -1; } -void perf_session__update_sample_type(struct perf_session *self) +void perf_session__set_id_hdr_size(struct perf_session *session) { - self->sample_type = perf_evlist__sample_type(self->evlist); - self->sample_size = __perf_evsel__sample_size(self->sample_type); - self->sample_id_all = perf_evlist__sample_id_all(self->evlist); - self->id_hdr_size = perf_evlist__id_hdr_size(self->evlist); - self->host_machine.id_hdr_size = self->id_hdr_size; - machines__set_id_hdr_size(&self->machines, self->id_hdr_size); + u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist); + + session->host_machine.id_hdr_size = id_hdr_size; + machines__set_id_hdr_size(&session->machines, id_hdr_size); } int perf_session__create_kernel_maps(struct perf_session *self) @@ -147,7 +148,7 @@ struct perf_session *perf_session__new(const char *filename, int mode, if (mode == O_RDONLY) { if (perf_session__open(self, force) < 0) goto out_delete; - perf_session__update_sample_type(self); + perf_session__set_id_hdr_size(self); } else if (mode == O_WRONLY) { /* * In O_RDONLY mode this will be performed when reading the @@ -158,7 +159,7 @@ struct perf_session 
*perf_session__new(const char *filename, int mode, } if (tool && tool->ordering_requires_timestamps && - tool->ordered_samples && !self->sample_id_all) { + tool->ordered_samples && !perf_evlist__sample_id_all(self->evlist)) { dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n"); tool->ordered_samples = false; } @@ -211,6 +212,7 @@ void perf_session__delete(struct perf_session *self) machine__exit(&self->host_machine); close(self->fd); free(self); + vdso__exit(); } void machine__remove_thread(struct machine *self, struct thread *th) @@ -290,10 +292,11 @@ struct branch_info *machine__resolve_bstack(struct machine *self, return bi; } -int machine__resolve_callchain(struct machine *self, - struct thread *thread, - struct ip_callchain *chain, - struct symbol **parent) +static int machine__resolve_callchain_sample(struct machine *machine, + struct thread *thread, + struct ip_callchain *chain, + struct symbol **parent) + { u8 cpumode = PERF_RECORD_MISC_USER; unsigned int i; @@ -318,11 +321,14 @@ int machine__resolve_callchain(struct machine *self, if (ip >= PERF_CONTEXT_MAX) { switch (ip) { case PERF_CONTEXT_HV: - cpumode = PERF_RECORD_MISC_HYPERVISOR; break; + cpumode = PERF_RECORD_MISC_HYPERVISOR; + break; case PERF_CONTEXT_KERNEL: - cpumode = PERF_RECORD_MISC_KERNEL; break; + cpumode = PERF_RECORD_MISC_KERNEL; + break; case PERF_CONTEXT_USER: - cpumode = PERF_RECORD_MISC_USER; break; + cpumode = PERF_RECORD_MISC_USER; + break; default: pr_debug("invalid callchain context: " "%"PRId64"\n", (s64) ip); @@ -337,7 +343,7 @@ int machine__resolve_callchain(struct machine *self, } al.filtered = false; - thread__find_addr_location(thread, self, cpumode, + thread__find_addr_location(thread, machine, cpumode, MAP__FUNCTION, ip, &al, NULL); if (al.sym != NULL) { if (sort__has_parent && !*parent && @@ -356,49 +362,92 @@ int machine__resolve_callchain(struct machine *self, return 0; } -static int process_event_synth_tracing_data_stub(union perf_event *event __used, - struct perf_session *session __used) +static int unwind_entry(struct unwind_entry *entry, void *arg) +{ + struct callchain_cursor *cursor = arg; + return callchain_cursor_append(cursor, entry->ip, + entry->map, entry->sym); +} + +int machine__resolve_callchain(struct machine *machine, + struct perf_evsel *evsel, + struct thread *thread, + struct perf_sample *sample, + struct symbol **parent) + +{ + int ret; + + callchain_cursor_reset(&callchain_cursor); + + ret = machine__resolve_callchain_sample(machine, thread, + sample->callchain, parent); + if (ret) + return ret; + + /* Can we do dwarf post unwind? */ + if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) && + (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER))) + return 0; + + /* Bail out if nothing was captured. 
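 *
 * In perf_event_attr terms, the two tests in this function mean the
 * event must have been opened with something like (mask and size below
 * are only an example):
 *
 *	attr.sample_type |= PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER;
 *	attr.sample_regs_user  = PERF_REGS_MASK;
 *	attr.sample_stack_user = 8192;		(bytes of user stack to snapshot)
 *
 * and the sample itself must carry registers plus a non-empty stack dump.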
*/ + if ((!sample->user_regs.regs) || + (!sample->user_stack.size)) + return 0; + + return unwind__get_entries(unwind_entry, &callchain_cursor, machine, + thread, evsel->attr.sample_regs_user, + sample); + +} + +static int process_event_synth_tracing_data_stub(union perf_event *event + __maybe_unused, + struct perf_session *session + __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_synth_attr_stub(union perf_event *event __used, - struct perf_evlist **pevlist __used) +static int process_event_synth_attr_stub(union perf_event *event __maybe_unused, + struct perf_evlist **pevlist + __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_sample_stub(struct perf_tool *tool __used, - union perf_event *event __used, - struct perf_sample *sample __used, - struct perf_evsel *evsel __used, - struct machine *machine __used) +static int process_event_sample_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct machine *machine __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_stub(struct perf_tool *tool __used, - union perf_event *event __used, - struct perf_sample *sample __used, - struct machine *machine __used) +static int process_event_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct machine *machine __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_finished_round_stub(struct perf_tool *tool __used, - union perf_event *event __used, - struct perf_session *perf_session __used) +static int process_finished_round_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused, + struct perf_session *perf_session + __maybe_unused) { dump_printf(": unhandled!\n"); return 0; } -static int process_event_type_stub(struct perf_tool *tool __used, - union perf_event *event __used) +static int process_event_type_stub(struct perf_tool *tool __maybe_unused, + union perf_event *event __maybe_unused) { dump_printf(": unhandled!\n"); return 0; @@ -475,7 +524,7 @@ static void swap_sample_id_all(union perf_event *event, void *data) } static void perf_event__all64_swap(union perf_event *event, - bool sample_id_all __used) + bool sample_id_all __maybe_unused) { struct perf_event_header *hdr = &event->header; mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); @@ -489,7 +538,7 @@ static void perf_event__comm_swap(union perf_event *event, bool sample_id_all) if (sample_id_all) { void *data = &event->comm.comm; - data += ALIGN(strlen(data) + 1, sizeof(u64)); + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); swap_sample_id_all(event, data); } } @@ -506,7 +555,7 @@ static void perf_event__mmap_swap(union perf_event *event, if (sample_id_all) { void *data = &event->mmap.filename; - data += ALIGN(strlen(data) + 1, sizeof(u64)); + data += PERF_ALIGN(strlen(data) + 1, sizeof(u64)); swap_sample_id_all(event, data); } } @@ -586,7 +635,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr) } static void perf_event__hdr_attr_swap(union perf_event *event, - bool sample_id_all __used) + bool sample_id_all __maybe_unused) { size_t size; @@ -598,14 +647,14 @@ static void perf_event__hdr_attr_swap(union perf_event *event, } static void perf_event__event_type_swap(union perf_event *event, - bool sample_id_all __used) + bool sample_id_all 
__maybe_unused) { event->event_type.event_type.event_id = bswap_64(event->event_type.event_type.event_id); } static void perf_event__tracing_data_swap(union perf_event *event, - bool sample_id_all __used) + bool sample_id_all __maybe_unused) { event->tracing_data.size = bswap_32(event->tracing_data.size); } @@ -654,7 +703,7 @@ static int perf_session_deliver_event(struct perf_session *session, struct perf_tool *tool, u64 file_offset); -static void flush_sample_queue(struct perf_session *s, +static int flush_sample_queue(struct perf_session *s, struct perf_tool *tool) { struct ordered_samples *os = &s->ordered_samples; @@ -667,18 +716,21 @@ static void flush_sample_queue(struct perf_session *s, int ret; if (!tool->ordered_samples || !limit) - return; + return 0; list_for_each_entry_safe(iter, tmp, head, list) { if (iter->timestamp > limit) break; - ret = perf_session__parse_sample(s, iter->event, &sample); + ret = perf_evlist__parse_sample(s->evlist, iter->event, &sample); if (ret) pr_err("Can't parse sample, err = %d\n", ret); - else - perf_session_deliver_event(s, iter->event, &sample, tool, - iter->file_offset); + else { + ret = perf_session_deliver_event(s, iter->event, &sample, tool, + iter->file_offset); + if (ret) + return ret; + } os->last_flush = iter->timestamp; list_del(&iter->list); @@ -698,6 +750,8 @@ static void flush_sample_queue(struct perf_session *s, } os->nr_samples = 0; + + return 0; } /* @@ -740,13 +794,14 @@ static void flush_sample_queue(struct perf_session *s, * etc... */ static int process_finished_round(struct perf_tool *tool, - union perf_event *event __used, + union perf_event *event __maybe_unused, struct perf_session *session) { - flush_sample_queue(session, tool); - session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; + int ret = flush_sample_queue(session, tool); + if (!ret) + session->ordered_samples.next_flush = session->ordered_samples.max_timestamp; - return 0; + return ret; } /* The queue is ordered by time */ @@ -861,20 +916,50 @@ static void branch_stack__printf(struct perf_sample *sample) sample->branch_stack->entries[i].to); } +static void regs_dump__printf(u64 mask, u64 *regs) +{ + unsigned rid, i = 0; + + for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) { + u64 val = regs[i++]; + + printf(".... %-5s 0x%" PRIx64 "\n", + perf_reg_name(rid), val); + } +} + +static void regs_user__printf(struct perf_sample *sample, u64 mask) +{ + struct regs_dump *user_regs = &sample->user_regs; + + if (user_regs->regs) { + printf("... user regs: mask 0x%" PRIx64 "\n", mask); + regs_dump__printf(mask, user_regs->regs); + } +} + +static void stack_user__printf(struct stack_dump *dump) +{ + printf("... 
ustack: size %" PRIu64 ", offset 0x%x\n", + dump->size, dump->offset); +} + static void perf_session__print_tstamp(struct perf_session *session, union perf_event *event, struct perf_sample *sample) { + u64 sample_type = perf_evlist__sample_type(session->evlist); + if (event->header.type != PERF_RECORD_SAMPLE && - !session->sample_id_all) { + !perf_evlist__sample_id_all(session->evlist)) { fputs("-1 -1 ", stdout); return; } - if ((session->sample_type & PERF_SAMPLE_CPU)) + if ((sample_type & PERF_SAMPLE_CPU)) printf("%u ", sample->cpu); - if (session->sample_type & PERF_SAMPLE_TIME) + if (sample_type & PERF_SAMPLE_TIME) printf("%" PRIu64 " ", sample->time); } @@ -896,9 +981,11 @@ static void dump_event(struct perf_session *session, union perf_event *event, event->header.size, perf_event__name(event->header.type)); } -static void dump_sample(struct perf_session *session, union perf_event *event, +static void dump_sample(struct perf_evsel *evsel, union perf_event *event, struct perf_sample *sample) { + u64 sample_type; + if (!dump_trace) return; @@ -906,11 +993,19 @@ static void dump_sample(struct perf_session *session, union perf_event *event, event->header.misc, sample->pid, sample->tid, sample->ip, sample->period, sample->addr); - if (session->sample_type & PERF_SAMPLE_CALLCHAIN) + sample_type = evsel->attr.sample_type; + + if (sample_type & PERF_SAMPLE_CALLCHAIN) callchain__printf(sample); - if (session->sample_type & PERF_SAMPLE_BRANCH_STACK) + if (sample_type & PERF_SAMPLE_BRANCH_STACK) branch_stack__printf(sample); + + if (sample_type & PERF_SAMPLE_REGS_USER) + regs_user__printf(sample, evsel->attr.sample_regs_user); + + if (sample_type & PERF_SAMPLE_STACK_USER) + stack_user__printf(&sample->user_stack); } static struct machine * @@ -968,7 +1063,7 @@ static int perf_session_deliver_event(struct perf_session *session, switch (event->header.type) { case PERF_RECORD_SAMPLE: - dump_sample(session, event, sample); + dump_sample(evsel, event, sample); if (evsel == NULL) { ++session->hists.stats.nr_unknown_id; return 0; @@ -1006,7 +1101,7 @@ static int perf_session__preprocess_sample(struct perf_session *session, union perf_event *event, struct perf_sample *sample) { if (event->header.type != PERF_RECORD_SAMPLE || - !(session->sample_type & PERF_SAMPLE_CALLCHAIN)) + !(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_CALLCHAIN)) return 0; if (!ip_callchain__valid(sample->callchain, event)) { @@ -1030,7 +1125,7 @@ static int perf_session__process_user_event(struct perf_session *session, union case PERF_RECORD_HEADER_ATTR: err = tool->attr(event, &session->evlist); if (err == 0) - perf_session__update_sample_type(session); + perf_session__set_id_hdr_size(session); return err; case PERF_RECORD_HEADER_EVENT_TYPE: return tool->event_type(tool, event); @@ -1065,7 +1160,7 @@ static int perf_session__process_event(struct perf_session *session, int ret; if (session->header.needs_swap) - event_swap(event, session->sample_id_all); + event_swap(event, perf_evlist__sample_id_all(session->evlist)); if (event->header.type >= PERF_RECORD_HEADER_MAX) return -EINVAL; @@ -1078,7 +1173,7 @@ static int perf_session__process_event(struct perf_session *session, /* * For all kernel events we get the sample data */ - ret = perf_session__parse_sample(session, event, &sample); + ret = perf_evlist__parse_sample(session->evlist, event, &sample); if (ret) return ret; @@ -1363,7 +1458,7 @@ int __perf_session__process_events(struct perf_session *session, err = 0; /* do the final flush for ordered samples */ 
session->ordered_samples.next_flush = ULLONG_MAX; - flush_sample_queue(session, tool); + err = flush_sample_queue(session, tool); out_err: perf_session__warn_about_errors(session, tool); perf_session_free_sample_buffers(session); @@ -1389,9 +1484,9 @@ int perf_session__process_events(struct perf_session *self, return err; } -bool perf_session__has_traces(struct perf_session *self, const char *msg) +bool perf_session__has_traces(struct perf_session *session, const char *msg) { - if (!(self->sample_type & PERF_SAMPLE_RAW)) { + if (!(perf_evlist__sample_type(session->evlist) & PERF_SAMPLE_RAW)) { pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg); return false; } @@ -1492,9 +1587,9 @@ struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, return NULL; } -void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, - struct machine *machine, int print_sym, - int print_dso, int print_symoffset) +void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, + struct perf_sample *sample, struct machine *machine, + int print_sym, int print_dso, int print_symoffset) { struct addr_location al; struct callchain_cursor_node *node; @@ -1508,8 +1603,9 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, if (symbol_conf.use_callchain && sample->callchain) { - if (machine__resolve_callchain(machine, al.thread, - sample->callchain, NULL) != 0) { + + if (machine__resolve_callchain(machine, evsel, al.thread, + sample, NULL) != 0) { if (verbose) error("Failed to resolve callchain. Skipping\n"); return; diff --git a/trunk/tools/perf/util/session.h b/trunk/tools/perf/util/session.h index 7c435bde6eb0..aab414fbb64b 100644 --- a/trunk/tools/perf/util/session.h +++ b/trunk/tools/perf/util/session.h @@ -36,18 +36,12 @@ struct perf_session { struct pevent *pevent; /* * FIXME: Need to split this up further, we need global - * stats + per event stats. 'perf diff' also needs - * to properly support multiple events in a single - * perf.data file. + * stats + per event stats. 
*/ struct hists hists; - u64 sample_type; - int sample_size; int fd; bool fd_pipe; bool repipe; - bool sample_id_all; - u16 id_hdr_size; int cwdlen; char *cwd; struct ordered_samples ordered_samples; @@ -86,7 +80,7 @@ void perf_event__attr_swap(struct perf_event_attr *attr); int perf_session__create_kernel_maps(struct perf_session *self); -void perf_session__update_sample_type(struct perf_session *self); +void perf_session__set_id_hdr_size(struct perf_session *session); void perf_session__remove_thread(struct perf_session *self, struct thread *th); static inline @@ -130,30 +124,12 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp); -static inline int perf_session__parse_sample(struct perf_session *session, - const union perf_event *event, - struct perf_sample *sample) -{ - return perf_event__parse_sample(event, session->sample_type, - session->sample_size, - session->sample_id_all, sample, - session->header.needs_swap); -} - -static inline int perf_session__synthesize_sample(struct perf_session *session, - union perf_event *event, - const struct perf_sample *sample) -{ - return perf_event__synthesize_sample(event, session->sample_type, - sample, session->header.needs_swap); -} - struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, unsigned int type); -void perf_event__print_ip(union perf_event *event, struct perf_sample *sample, - struct machine *machine, int print_sym, - int print_dso, int print_symoffset); +void perf_evsel__print_ip(struct perf_evsel *evsel, union perf_event *event, + struct perf_sample *sample, struct machine *machine, + int print_sym, int print_dso, int print_symoffset); int perf_session__cpu_bitmap(struct perf_session *session, const char *cpu_list, unsigned long *cpu_bitmap); diff --git a/trunk/tools/perf/util/sort.c b/trunk/tools/perf/util/sort.c index 0f5a0a496bc4..b5b1b9211960 100644 --- a/trunk/tools/perf/util/sort.c +++ b/trunk/tools/perf/util/sort.c @@ -8,12 +8,11 @@ const char default_sort_order[] = "comm,dso,symbol"; const char *sort_order = default_sort_order; int sort__need_collapse = 0; int sort__has_parent = 0; +int sort__has_sym = 0; int sort__branch_mode = -1; /* -1 = means not set */ enum sort_type sort__first_dimension; -char * field_sep; - LIST_HEAD(hist_entry__sort_list); static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) @@ -23,11 +22,11 @@ static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) 
va_start(ap, fmt); n = vsnprintf(bf, size, fmt, ap); - if (field_sep && n > 0) { + if (symbol_conf.field_sep && n > 0) { char *sep = bf; while (1) { - sep = strchr(sep, *field_sep); + sep = strchr(sep, *symbol_conf.field_sep); if (sep == NULL) break; *sep = '.'; @@ -172,7 +171,7 @@ static int hist_entry__dso_snprintf(struct hist_entry *self, char *bf, static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym, u64 ip, char level, char *bf, size_t size, - unsigned int width __used) + unsigned int width __maybe_unused) { size_t ret = 0; @@ -207,7 +206,8 @@ struct sort_entry sort_dso = { }; static int hist_entry__sym_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) + size_t size, + unsigned int width __maybe_unused) { return _hist_entry__sym_snprintf(self->ms.map, self->ms.sym, self->ip, self->level, bf, size, width); @@ -250,7 +250,8 @@ sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right) } static int hist_entry__srcline_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) + size_t size, + unsigned int width __maybe_unused) { FILE *fp; char cmd[PATH_MAX + 2], *path = self->srcline, *nl; @@ -399,7 +400,8 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right) } static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) + size_t size, + unsigned int width __maybe_unused) { struct addr_map_symbol *from = &self->branch_info->from; return _hist_entry__sym_snprintf(from->map, from->sym, from->addr, @@ -408,7 +410,8 @@ static int hist_entry__sym_from_snprintf(struct hist_entry *self, char *bf, } static int hist_entry__sym_to_snprintf(struct hist_entry *self, char *bf, - size_t size, unsigned int width __used) + size_t size, + unsigned int width __maybe_unused) { struct addr_map_symbol *to = &self->branch_info->to; return _hist_entry__sym_snprintf(to->map, to->sym, to->addr, @@ -509,6 +512,10 @@ int sort_dimension__add(const char *tok) return -EINVAL; } sort__has_parent = 1; + } else if (sd->entry == &sort_sym || + sd->entry == &sort_sym_from || + sd->entry == &sort_sym_to) { + sort__has_sym = 1; } if (sd->taken) diff --git a/trunk/tools/perf/util/sort.h b/trunk/tools/perf/util/sort.h index e724b26acd51..12d634792de5 100644 --- a/trunk/tools/perf/util/sort.h +++ b/trunk/tools/perf/util/sort.h @@ -31,8 +31,8 @@ extern const char *parent_pattern; extern const char default_sort_order[]; extern int sort__need_collapse; extern int sort__has_parent; +extern int sort__has_sym; extern int sort__branch_mode; -extern char *field_sep; extern struct sort_entry sort_comm; extern struct sort_entry sort_dso; extern struct sort_entry sort_sym; diff --git a/trunk/tools/perf/util/stat.c b/trunk/tools/perf/util/stat.c new file mode 100644 index 000000000000..23742126f47c --- /dev/null +++ b/trunk/tools/perf/util/stat.c @@ -0,0 +1,57 @@ +#include + +#include "stat.h" + +void update_stats(struct stats *stats, u64 val) +{ + double delta; + + stats->n++; + delta = val - stats->mean; + stats->mean += delta / stats->n; + stats->M2 += delta*(val - stats->mean); +} + +double avg_stats(struct stats *stats) +{ + return stats->mean; +} + +/* + * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance + * + * (\Sum n_i^2) - ((\Sum n_i)^2)/n + * s^2 = ------------------------------- + * n - 1 + * + * http://en.wikipedia.org/wiki/Stddev + * + * The std dev of the mean is related to the std dev by: + * + * s + * s_mean = ------- + * sqrt(n) + * + */ 
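/*
 * Putting the helpers together: values are fed in with update_stats()
 * (Welford's online mean/variance update above) and the result is
 * reported as mean +- relative stddev. A minimal sketch of a caller:
 *
 *	struct stats st = { .n = 0, .mean = 0, .M2 = 0 };
 *
 *	update_stats(&st, 10);
 *	update_stats(&st, 12);
 *	update_stats(&st, 11);
 *
 *	printf("%.2f +- %.2f%%\n", avg_stats(&st),
 *	       rel_stddev_stats(stddev_stats(&st), avg_stats(&st)));
 */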
+double stddev_stats(struct stats *stats) +{ + double variance, variance_mean; + + if (!stats->n) + return 0.0; + + variance = stats->M2 / (stats->n - 1); + variance_mean = variance / stats->n; + + return sqrt(variance_mean); +} + +double rel_stddev_stats(double stddev, double avg) +{ + double pct = 0.0; + + if (avg) + pct = 100.0 * stddev/avg; + + return pct; +} diff --git a/trunk/tools/perf/util/stat.h b/trunk/tools/perf/util/stat.h new file mode 100644 index 000000000000..588367c3c767 --- /dev/null +++ b/trunk/tools/perf/util/stat.h @@ -0,0 +1,16 @@ +#ifndef __PERF_STATS_H +#define __PERF_STATS_H + +#include "types.h" + +struct stats +{ + double n, mean, M2; +}; + +void update_stats(struct stats *stats, u64 val); +double avg_stats(struct stats *stats); +double stddev_stats(struct stats *stats); +double rel_stddev_stats(double stddev, double avg); + +#endif diff --git a/trunk/tools/perf/util/string.c b/trunk/tools/perf/util/string.c index 199bc4d8905d..32170590892d 100644 --- a/trunk/tools/perf/util/string.c +++ b/trunk/tools/perf/util/string.c @@ -1,5 +1,5 @@ #include "util.h" -#include "string.h" +#include "linux/string.h" #define K 1024LL /* @@ -335,3 +335,19 @@ char *rtrim(char *s) return s; } + +/** + * memdup - duplicate region of memory + * @src: memory region to duplicate + * @len: memory region length + */ +void *memdup(const void *src, size_t len) +{ + void *p; + + p = malloc(len); + if (p) + memcpy(p, src, len); + + return p; +} diff --git a/trunk/tools/perf/util/strlist.c b/trunk/tools/perf/util/strlist.c index 6783a2043555..155d8b7078a7 100644 --- a/trunk/tools/perf/util/strlist.c +++ b/trunk/tools/perf/util/strlist.c @@ -10,23 +10,28 @@ #include #include -static struct str_node *str_node__new(const char *s, bool dupstr) +static +struct rb_node *strlist__node_new(struct rblist *rblist, const void *entry) { - struct str_node *self = malloc(sizeof(*self)); + const char *s = entry; + struct rb_node *rc = NULL; + struct strlist *strlist = container_of(rblist, struct strlist, rblist); + struct str_node *snode = malloc(sizeof(*snode)); - if (self != NULL) { - if (dupstr) { + if (snode != NULL) { + if (strlist->dupstr) { s = strdup(s); if (s == NULL) goto out_delete; } - self->s = s; + snode->s = s; + rc = &snode->rb_node; } - return self; + return rc; out_delete: - free(self); + free(snode); return NULL; } @@ -37,36 +42,26 @@ static void str_node__delete(struct str_node *self, bool dupstr) free(self); } -int strlist__add(struct strlist *self, const char *new_entry) +static +void strlist__node_delete(struct rblist *rblist, struct rb_node *rb_node) { - struct rb_node **p = &self->entries.rb_node; - struct rb_node *parent = NULL; - struct str_node *sn; - - while (*p != NULL) { - int rc; - - parent = *p; - sn = rb_entry(parent, struct str_node, rb_node); - rc = strcmp(sn->s, new_entry); - - if (rc > 0) - p = &(*p)->rb_left; - else if (rc < 0) - p = &(*p)->rb_right; - else - return -EEXIST; - } + struct strlist *slist = container_of(rblist, struct strlist, rblist); + struct str_node *snode = container_of(rb_node, struct str_node, rb_node); - sn = str_node__new(new_entry, self->dupstr); - if (sn == NULL) - return -ENOMEM; + str_node__delete(snode, slist->dupstr); +} - rb_link_node(&sn->rb_node, parent, p); - rb_insert_color(&sn->rb_node, &self->entries); - ++self->nr_entries; +static int strlist__node_cmp(struct rb_node *rb_node, const void *entry) +{ + const char *str = entry; + struct str_node *snode = container_of(rb_node, struct str_node, rb_node); + + return strcmp(snode->s, str); 
+} - return 0; +int strlist__add(struct strlist *self, const char *new_entry) +{ + return rblist__add_node(&self->rblist, new_entry); } int strlist__load(struct strlist *self, const char *filename) @@ -96,34 +91,20 @@ int strlist__load(struct strlist *self, const char *filename) return err; } -void strlist__remove(struct strlist *self, struct str_node *sn) +void strlist__remove(struct strlist *slist, struct str_node *snode) { - rb_erase(&sn->rb_node, &self->entries); - str_node__delete(sn, self->dupstr); + rblist__remove_node(&slist->rblist, &snode->rb_node); } -struct str_node *strlist__find(struct strlist *self, const char *entry) +struct str_node *strlist__find(struct strlist *slist, const char *entry) { - struct rb_node **p = &self->entries.rb_node; - struct rb_node *parent = NULL; - - while (*p != NULL) { - struct str_node *sn; - int rc; - - parent = *p; - sn = rb_entry(parent, struct str_node, rb_node); - rc = strcmp(sn->s, entry); - - if (rc > 0) - p = &(*p)->rb_left; - else if (rc < 0) - p = &(*p)->rb_right; - else - return sn; - } + struct str_node *snode = NULL; + struct rb_node *rb_node = rblist__find(&slist->rblist, entry); - return NULL; + if (rb_node) + snode = container_of(rb_node, struct str_node, rb_node); + + return snode; } static int strlist__parse_list_entry(struct strlist *self, const char *s) @@ -156,9 +137,12 @@ struct strlist *strlist__new(bool dupstr, const char *slist) struct strlist *self = malloc(sizeof(*self)); if (self != NULL) { - self->entries = RB_ROOT; + rblist__init(&self->rblist); + self->rblist.node_cmp = strlist__node_cmp; + self->rblist.node_new = strlist__node_new; + self->rblist.node_delete = strlist__node_delete; + self->dupstr = dupstr; - self->nr_entries = 0; if (slist && strlist__parse_list(self, slist) != 0) goto out_error; } @@ -171,30 +155,18 @@ struct strlist *strlist__new(bool dupstr, const char *slist) void strlist__delete(struct strlist *self) { - if (self != NULL) { - struct str_node *pos; - struct rb_node *next = rb_first(&self->entries); - - while (next) { - pos = rb_entry(next, struct str_node, rb_node); - next = rb_next(&pos->rb_node); - strlist__remove(self, pos); - } - self->entries = RB_ROOT; - free(self); - } + if (self != NULL) + rblist__delete(&self->rblist); } -struct str_node *strlist__entry(const struct strlist *self, unsigned int idx) +struct str_node *strlist__entry(const struct strlist *slist, unsigned int idx) { - struct rb_node *nd; + struct str_node *snode = NULL; + struct rb_node *rb_node; - for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) { - struct str_node *pos = rb_entry(nd, struct str_node, rb_node); + rb_node = rblist__entry(&slist->rblist, idx); + if (rb_node) + snode = container_of(rb_node, struct str_node, rb_node); - if (!idx--) - return pos; - } - - return NULL; + return snode; } diff --git a/trunk/tools/perf/util/strlist.h b/trunk/tools/perf/util/strlist.h index 3ba839007d2c..dd9f922ec67c 100644 --- a/trunk/tools/perf/util/strlist.h +++ b/trunk/tools/perf/util/strlist.h @@ -4,14 +4,15 @@ #include #include +#include "rblist.h" + struct str_node { struct rb_node rb_node; const char *s; }; struct strlist { - struct rb_root entries; - unsigned int nr_entries; + struct rblist rblist; bool dupstr; }; @@ -32,18 +33,18 @@ static inline bool strlist__has_entry(struct strlist *self, const char *entry) static inline bool strlist__empty(const struct strlist *self) { - return self->nr_entries == 0; + return rblist__empty(&self->rblist); } static inline unsigned int strlist__nr_entries(const struct strlist 
*self) { - return self->nr_entries; + return rblist__nr_entries(&self->rblist); } /* For strlist iteration */ static inline struct str_node *strlist__first(struct strlist *self) { - struct rb_node *rn = rb_first(&self->entries); + struct rb_node *rn = rb_first(&self->rblist.entries); return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; } static inline struct str_node *strlist__next(struct str_node *sn) diff --git a/trunk/tools/perf/util/symbol-elf.c b/trunk/tools/perf/util/symbol-elf.c new file mode 100644 index 000000000000..db0cc92cf2ea --- /dev/null +++ b/trunk/tools/perf/util/symbol-elf.c @@ -0,0 +1,841 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "symbol.h" +#include "debug.h" + +#ifndef NT_GNU_BUILD_ID +#define NT_GNU_BUILD_ID 3 +#endif + +/** + * elf_symtab__for_each_symbol - iterate thru all the symbols + * + * @syms: struct elf_symtab instance to iterate + * @idx: uint32_t idx + * @sym: GElf_Sym iterator + */ +#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \ + for (idx = 0, gelf_getsym(syms, idx, &sym);\ + idx < nr_syms; \ + idx++, gelf_getsym(syms, idx, &sym)) + +static inline uint8_t elf_sym__type(const GElf_Sym *sym) +{ + return GELF_ST_TYPE(sym->st_info); +} + +static inline int elf_sym__is_function(const GElf_Sym *sym) +{ + return elf_sym__type(sym) == STT_FUNC && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF; +} + +static inline bool elf_sym__is_object(const GElf_Sym *sym) +{ + return elf_sym__type(sym) == STT_OBJECT && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF; +} + +static inline int elf_sym__is_label(const GElf_Sym *sym) +{ + return elf_sym__type(sym) == STT_NOTYPE && + sym->st_name != 0 && + sym->st_shndx != SHN_UNDEF && + sym->st_shndx != SHN_ABS; +} + +static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) +{ + switch (type) { + case MAP__FUNCTION: + return elf_sym__is_function(sym); + case MAP__VARIABLE: + return elf_sym__is_object(sym); + default: + return false; + } +} + +static inline const char *elf_sym__name(const GElf_Sym *sym, + const Elf_Data *symstrs) +{ + return symstrs->d_buf + sym->st_name; +} + +static inline const char *elf_sec__name(const GElf_Shdr *shdr, + const Elf_Data *secstrs) +{ + return secstrs->d_buf + shdr->sh_name; +} + +static inline int elf_sec__is_text(const GElf_Shdr *shdr, + const Elf_Data *secstrs) +{ + return strstr(elf_sec__name(shdr, secstrs), "text") != NULL; +} + +static inline bool elf_sec__is_data(const GElf_Shdr *shdr, + const Elf_Data *secstrs) +{ + return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; +} + +static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, + enum map_type type) +{ + switch (type) { + case MAP__FUNCTION: + return elf_sec__is_text(shdr, secstrs); + case MAP__VARIABLE: + return elf_sec__is_data(shdr, secstrs); + default: + return false; + } +} + +static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) +{ + Elf_Scn *sec = NULL; + GElf_Shdr shdr; + size_t cnt = 1; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + gelf_getshdr(sec, &shdr); + + if ((addr >= shdr.sh_addr) && + (addr < (shdr.sh_addr + shdr.sh_size))) + return cnt; + + ++cnt; + } + + return -1; +} + +static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, + GElf_Shdr *shp, const char *name, + size_t *idx) +{ + Elf_Scn *sec = NULL; + size_t cnt = 1; + + /* Elf is corrupted/truncated, avoid calling elf_strptr. 
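 *
 * (A typical lookup with this helper, e.g. for the symbol table, is
 * simply:
 *
 *	GElf_Shdr shdr;
 *	Elf_Scn *sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL);
 *
 * with sec == NULL meaning the section is absent; the section name used
 * here is just an example.)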
*/ + if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) + return NULL; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + char *str; + + gelf_getshdr(sec, shp); + str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); + if (!strcmp(name, str)) { + if (idx) + *idx = cnt; + break; + } + ++cnt; + } + + return sec; +} + +#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ + for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \ + idx < nr_entries; \ + ++idx, pos = gelf_getrel(reldata, idx, &pos_mem)) + +#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \ + for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \ + idx < nr_entries; \ + ++idx, pos = gelf_getrela(reldata, idx, &pos_mem)) + +/* + * We need to check if we have a .dynsym, so that we can handle the + * .plt, synthesizing its symbols, that aren't on the symtabs (be it + * .dynsym or .symtab). + * And always look at the original dso, not at debuginfo packages, that + * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). + */ +int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, struct map *map, + symbol_filter_t filter) +{ + uint32_t nr_rel_entries, idx; + GElf_Sym sym; + u64 plt_offset; + GElf_Shdr shdr_plt; + struct symbol *f; + GElf_Shdr shdr_rel_plt, shdr_dynsym; + Elf_Data *reldata, *syms, *symstrs; + Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym; + size_t dynsym_idx; + GElf_Ehdr ehdr; + char sympltname[1024]; + Elf *elf; + int nr = 0, symidx, err = 0; + + if (!ss->dynsym) + return 0; + + elf = ss->elf; + ehdr = ss->ehdr; + + scn_dynsym = ss->dynsym; + shdr_dynsym = ss->dynshdr; + dynsym_idx = ss->dynsym_idx; + + if (scn_dynsym == NULL) + goto out_elf_end; + + scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, + ".rela.plt", NULL); + if (scn_plt_rel == NULL) { + scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, + ".rel.plt", NULL); + if (scn_plt_rel == NULL) + goto out_elf_end; + } + + err = -1; + + if (shdr_rel_plt.sh_link != dynsym_idx) + goto out_elf_end; + + if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL) + goto out_elf_end; + + /* + * Fetch the relocation section to find the idxes to the GOT + * and the symbols in the .dynsym they refer to. 
+ */ + reldata = elf_getdata(scn_plt_rel, NULL); + if (reldata == NULL) + goto out_elf_end; + + syms = elf_getdata(scn_dynsym, NULL); + if (syms == NULL) + goto out_elf_end; + + scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); + if (scn_symstrs == NULL) + goto out_elf_end; + + symstrs = elf_getdata(scn_symstrs, NULL); + if (symstrs == NULL) + goto out_elf_end; + + if (symstrs->d_size == 0) + goto out_elf_end; + + nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; + plt_offset = shdr_plt.sh_offset; + + if (shdr_rel_plt.sh_type == SHT_RELA) { + GElf_Rela pos_mem, *pos; + + elf_section__for_each_rela(reldata, pos, pos_mem, idx, + nr_rel_entries) { + symidx = GELF_R_SYM(pos->r_info); + plt_offset += shdr_plt.sh_entsize; + gelf_getsym(syms, symidx, &sym); + snprintf(sympltname, sizeof(sympltname), + "%s@plt", elf_sym__name(&sym, symstrs)); + + f = symbol__new(plt_offset, shdr_plt.sh_entsize, + STB_GLOBAL, sympltname); + if (!f) + goto out_elf_end; + + if (filter && filter(map, f)) + symbol__delete(f); + else { + symbols__insert(&dso->symbols[map->type], f); + ++nr; + } + } + } else if (shdr_rel_plt.sh_type == SHT_REL) { + GElf_Rel pos_mem, *pos; + elf_section__for_each_rel(reldata, pos, pos_mem, idx, + nr_rel_entries) { + symidx = GELF_R_SYM(pos->r_info); + plt_offset += shdr_plt.sh_entsize; + gelf_getsym(syms, symidx, &sym); + snprintf(sympltname, sizeof(sympltname), + "%s@plt", elf_sym__name(&sym, symstrs)); + + f = symbol__new(plt_offset, shdr_plt.sh_entsize, + STB_GLOBAL, sympltname); + if (!f) + goto out_elf_end; + + if (filter && filter(map, f)) + symbol__delete(f); + else { + symbols__insert(&dso->symbols[map->type], f); + ++nr; + } + } + } + + err = 0; +out_elf_end: + if (err == 0) + return nr; + pr_debug("%s: problems reading %s PLT info.\n", + __func__, dso->long_name); + return 0; +} + +/* + * Align offset to 4 bytes as needed for note name and descriptor data. 
+ */ +#define NOTE_ALIGN(n) (((n) + 3) & -4U) + +static int elf_read_build_id(Elf *elf, void *bf, size_t size) +{ + int err = -1; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + Elf_Data *data; + Elf_Scn *sec; + Elf_Kind ek; + void *ptr; + + if (size < BUILD_ID_SIZE) + goto out; + + ek = elf_kind(elf); + if (ek != ELF_K_ELF) + goto out; + + if (gelf_getehdr(elf, &ehdr) == NULL) { + pr_err("%s: cannot get elf header.\n", __func__); + goto out; + } + + /* + * Check following sections for notes: + * '.note.gnu.build-id' + * '.notes' + * '.note' (VDSO specific) + */ + do { + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".note.gnu.build-id", NULL); + if (sec) + break; + + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".notes", NULL); + if (sec) + break; + + sec = elf_section_by_name(elf, &ehdr, &shdr, + ".note", NULL); + if (sec) + break; + + return err; + + } while (0); + + data = elf_getdata(sec, NULL); + if (data == NULL) + goto out; + + ptr = data->d_buf; + while (ptr < (data->d_buf + data->d_size)) { + GElf_Nhdr *nhdr = ptr; + size_t namesz = NOTE_ALIGN(nhdr->n_namesz), + descsz = NOTE_ALIGN(nhdr->n_descsz); + const char *name; + + ptr += sizeof(*nhdr); + name = ptr; + ptr += namesz; + if (nhdr->n_type == NT_GNU_BUILD_ID && + nhdr->n_namesz == sizeof("GNU")) { + if (memcmp(name, "GNU", sizeof("GNU")) == 0) { + size_t sz = min(size, descsz); + memcpy(bf, ptr, sz); + memset(bf + sz, 0, size - sz); + err = descsz; + break; + } + } + ptr += descsz; + } + +out: + return err; +} + +int filename__read_build_id(const char *filename, void *bf, size_t size) +{ + int fd, err = -1; + Elf *elf; + + if (size < BUILD_ID_SIZE) + goto out; + + fd = open(filename, O_RDONLY); + if (fd < 0) + goto out; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) { + pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); + goto out_close; + } + + err = elf_read_build_id(elf, bf, size); + + elf_end(elf); +out_close: + close(fd); +out: + return err; +} + +int sysfs__read_build_id(const char *filename, void *build_id, size_t size) +{ + int fd, err = -1; + + if (size < BUILD_ID_SIZE) + goto out; + + fd = open(filename, O_RDONLY); + if (fd < 0) + goto out; + + while (1) { + char bf[BUFSIZ]; + GElf_Nhdr nhdr; + size_t namesz, descsz; + + if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) + break; + + namesz = NOTE_ALIGN(nhdr.n_namesz); + descsz = NOTE_ALIGN(nhdr.n_descsz); + if (nhdr.n_type == NT_GNU_BUILD_ID && + nhdr.n_namesz == sizeof("GNU")) { + if (read(fd, bf, namesz) != (ssize_t)namesz) + break; + if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { + size_t sz = min(descsz, size); + if (read(fd, build_id, sz) == (ssize_t)sz) { + memset(build_id + sz, 0, size - sz); + err = 0; + break; + } + } else if (read(fd, bf, descsz) != (ssize_t)descsz) + break; + } else { + int n = namesz + descsz; + if (read(fd, bf, n) != n) + break; + } + } + close(fd); +out: + return err; +} + +int filename__read_debuglink(const char *filename, char *debuglink, + size_t size) +{ + int fd, err = -1; + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + Elf_Data *data; + Elf_Scn *sec; + Elf_Kind ek; + + fd = open(filename, O_RDONLY); + if (fd < 0) + goto out; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) { + pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); + goto out_close; + } + + ek = elf_kind(elf); + if (ek != ELF_K_ELF) + goto out_close; + + if (gelf_getehdr(elf, &ehdr) == NULL) { + pr_err("%s: cannot get elf header.\n", __func__); + goto out_close; + } + + sec = elf_section_by_name(elf, 
&ehdr, &shdr, + ".gnu_debuglink", NULL); + if (sec == NULL) + goto out_close; + + data = elf_getdata(sec, NULL); + if (data == NULL) + goto out_close; + + /* the start of this section is a zero-terminated string */ + strncpy(debuglink, data->d_buf, size); + + elf_end(elf); + +out_close: + close(fd); +out: + return err; +} + +static int dso__swap_init(struct dso *dso, unsigned char eidata) +{ + static unsigned int const endian = 1; + + dso->needs_swap = DSO_SWAP__NO; + + switch (eidata) { + case ELFDATA2LSB: + /* We are big endian, DSO is little endian. */ + if (*(unsigned char const *)&endian != 1) + dso->needs_swap = DSO_SWAP__YES; + break; + + case ELFDATA2MSB: + /* We are little endian, DSO is big endian. */ + if (*(unsigned char const *)&endian != 0) + dso->needs_swap = DSO_SWAP__YES; + break; + + default: + pr_err("unrecognized DSO data encoding %d\n", eidata); + return -EINVAL; + } + + return 0; +} + +bool symsrc__possibly_runtime(struct symsrc *ss) +{ + return ss->dynsym || ss->opdsec; +} + +bool symsrc__has_symtab(struct symsrc *ss) +{ + return ss->symtab != NULL; +} + +void symsrc__destroy(struct symsrc *ss) +{ + free(ss->name); + elf_end(ss->elf); + close(ss->fd); +} + +int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, + enum dso_binary_type type) +{ + int err = -1; + GElf_Ehdr ehdr; + Elf *elf; + int fd; + + fd = open(name, O_RDONLY); + if (fd < 0) + return -1; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) { + pr_debug("%s: cannot read %s ELF file.\n", __func__, name); + goto out_close; + } + + if (gelf_getehdr(elf, &ehdr) == NULL) { + pr_debug("%s: cannot get elf header.\n", __func__); + goto out_elf_end; + } + + if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) + goto out_elf_end; + + /* Always reject images with a mismatched build-id: */ + if (dso->has_build_id) { + u8 build_id[BUILD_ID_SIZE]; + + if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) + goto out_elf_end; + + if (!dso__build_id_equal(dso, build_id)) + goto out_elf_end; + } + + ss->symtab = elf_section_by_name(elf, &ehdr, &ss->symshdr, ".symtab", + NULL); + if (ss->symshdr.sh_type != SHT_SYMTAB) + ss->symtab = NULL; + + ss->dynsym_idx = 0; + ss->dynsym = elf_section_by_name(elf, &ehdr, &ss->dynshdr, ".dynsym", + &ss->dynsym_idx); + if (ss->dynshdr.sh_type != SHT_DYNSYM) + ss->dynsym = NULL; + + ss->opdidx = 0; + ss->opdsec = elf_section_by_name(elf, &ehdr, &ss->opdshdr, ".opd", + &ss->opdidx); + if (ss->opdshdr.sh_type != SHT_PROGBITS) + ss->opdsec = NULL; + + if (dso->kernel == DSO_TYPE_USER) { + GElf_Shdr shdr; + ss->adjust_symbols = (ehdr.e_type == ET_EXEC || + elf_section_by_name(elf, &ehdr, &shdr, + ".gnu.prelink_undo", + NULL) != NULL); + } else { + ss->adjust_symbols = 0; + } + + ss->name = strdup(name); + if (!ss->name) + goto out_elf_end; + + ss->elf = elf; + ss->fd = fd; + ss->ehdr = ehdr; + ss->type = type; + + return 0; + +out_elf_end: + elf_end(elf); +out_close: + close(fd); + return err; +} + +int dso__load_sym(struct dso *dso, struct map *map, + struct symsrc *syms_ss, struct symsrc *runtime_ss, + symbol_filter_t filter, int kmodule) +{ + struct kmap *kmap = dso->kernel ? 
map__kmap(map) : NULL; + struct map *curr_map = map; + struct dso *curr_dso = dso; + Elf_Data *symstrs, *secstrs; + uint32_t nr_syms; + int err = -1; + uint32_t idx; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + Elf_Data *syms, *opddata = NULL; + GElf_Sym sym; + Elf_Scn *sec, *sec_strndx; + Elf *elf; + int nr = 0; + + dso->symtab_type = syms_ss->type; + + if (!syms_ss->symtab) { + syms_ss->symtab = syms_ss->dynsym; + syms_ss->symshdr = syms_ss->dynshdr; + } + + elf = syms_ss->elf; + ehdr = syms_ss->ehdr; + sec = syms_ss->symtab; + shdr = syms_ss->symshdr; + + if (runtime_ss->opdsec) + opddata = elf_rawdata(runtime_ss->opdsec, NULL); + + syms = elf_getdata(sec, NULL); + if (syms == NULL) + goto out_elf_end; + + sec = elf_getscn(elf, shdr.sh_link); + if (sec == NULL) + goto out_elf_end; + + symstrs = elf_getdata(sec, NULL); + if (symstrs == NULL) + goto out_elf_end; + + sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); + if (sec_strndx == NULL) + goto out_elf_end; + + secstrs = elf_getdata(sec_strndx, NULL); + if (secstrs == NULL) + goto out_elf_end; + + nr_syms = shdr.sh_size / shdr.sh_entsize; + + memset(&sym, 0, sizeof(sym)); + dso->adjust_symbols = runtime_ss->adjust_symbols; + elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { + struct symbol *f; + const char *elf_name = elf_sym__name(&sym, symstrs); + char *demangled = NULL; + int is_label = elf_sym__is_label(&sym); + const char *section_name; + bool used_opd = false; + + if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && + strcmp(elf_name, kmap->ref_reloc_sym->name) == 0) + kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; + + if (!is_label && !elf_sym__is_a(&sym, map->type)) + continue; + + /* Reject ARM ELF "mapping symbols": these aren't unique and + * don't identify functions, so will confuse the profile + * output: */ + if (ehdr.e_machine == EM_ARM) { + if (!strcmp(elf_name, "$a") || + !strcmp(elf_name, "$d") || + !strcmp(elf_name, "$t")) + continue; + } + + if (runtime_ss->opdsec && sym.st_shndx == runtime_ss->opdidx) { + u32 offset = sym.st_value - syms_ss->opdshdr.sh_addr; + u64 *opd = opddata->d_buf + offset; + sym.st_value = DSO__SWAP(dso, u64, *opd); + sym.st_shndx = elf_addr_to_index(runtime_ss->elf, + sym.st_value); + used_opd = true; + } + + sec = elf_getscn(runtime_ss->elf, sym.st_shndx); + if (!sec) + goto out_elf_end; + + gelf_getshdr(sec, &shdr); + + if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) + continue; + + section_name = elf_sec__name(&shdr, secstrs); + + /* On ARM, symbols for thumb functions have 1 added to + * the symbol address as a flag - remove it */ + if ((ehdr.e_machine == EM_ARM) && + (map->type == MAP__FUNCTION) && + (sym.st_value & 1)) + --sym.st_value; + + if (dso->kernel != DSO_TYPE_USER || kmodule) { + char dso_name[PATH_MAX]; + + if (strcmp(section_name, + (curr_dso->short_name + + dso->short_name_len)) == 0) + goto new_symbol; + + if (strcmp(section_name, ".text") == 0) { + curr_map = map; + curr_dso = dso; + goto new_symbol; + } + + snprintf(dso_name, sizeof(dso_name), + "%s%s", dso->short_name, section_name); + + curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); + if (curr_map == NULL) { + u64 start = sym.st_value; + + if (kmodule) + start += map->start + shdr.sh_offset; + + curr_dso = dso__new(dso_name); + if (curr_dso == NULL) + goto out_elf_end; + curr_dso->kernel = dso->kernel; + curr_dso->long_name = dso->long_name; + curr_dso->long_name_len = dso->long_name_len; + curr_map = map__new2(start, curr_dso, + map->type); + if (curr_map == NULL) { + 
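				/*
				 * map__new2() failed: the dso created just above is not
				 * yet referenced by any map or dso list, so drop it
				 * before bailing out.
				 */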
dso__delete(curr_dso); + goto out_elf_end; + } + curr_map->map_ip = identity__map_ip; + curr_map->unmap_ip = identity__map_ip; + curr_dso->symtab_type = dso->symtab_type; + map_groups__insert(kmap->kmaps, curr_map); + dsos__add(&dso->node, curr_dso); + dso__set_loaded(curr_dso, map->type); + } else + curr_dso = curr_map->dso; + + goto new_symbol; + } + + if ((used_opd && runtime_ss->adjust_symbols) + || (!used_opd && syms_ss->adjust_symbols)) { + pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " + "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, + (u64)sym.st_value, (u64)shdr.sh_addr, + (u64)shdr.sh_offset); + sym.st_value -= shdr.sh_addr - shdr.sh_offset; + } + /* + * We need to figure out if the object was created from C++ sources + * DWARF DW_compile_unit has this, but we don't always have access + * to it... + */ + demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); + if (demangled != NULL) + elf_name = demangled; +new_symbol: + f = symbol__new(sym.st_value, sym.st_size, + GELF_ST_BIND(sym.st_info), elf_name); + free(demangled); + if (!f) + goto out_elf_end; + + if (filter && filter(curr_map, f)) + symbol__delete(f); + else { + symbols__insert(&curr_dso->symbols[curr_map->type], f); + nr++; + } + } + + /* + * For misannotated, zeroed, ASM function sizes. + */ + if (nr > 0) { + symbols__fixup_duplicate(&dso->symbols[map->type]); + symbols__fixup_end(&dso->symbols[map->type]); + if (kmap) { + /* + * We need to fixup this here too because we create new + * maps here, for things like vsyscall sections. + */ + __map_groups__fixup_end(kmap->kmaps, map->type); + } + } + err = nr; +out_elf_end: + return err; +} + +void symbol__elf_init(void) +{ + elf_version(EV_CURRENT); +} diff --git a/trunk/tools/perf/util/symbol-minimal.c b/trunk/tools/perf/util/symbol-minimal.c new file mode 100644 index 000000000000..259f8f2ea9c9 --- /dev/null +++ b/trunk/tools/perf/util/symbol-minimal.c @@ -0,0 +1,307 @@ +#include "symbol.h" + +#include +#include +#include +#include +#include +#include + + +static bool check_need_swap(int file_endian) +{ + const int data = 1; + u8 *check = (u8 *)&data; + int host_endian; + + if (check[0] == 1) + host_endian = ELFDATA2LSB; + else + host_endian = ELFDATA2MSB; + + return host_endian != file_endian; +} + +#define NOTE_ALIGN(sz) (((sz) + 3) & ~3) + +#define NT_GNU_BUILD_ID 3 + +static int read_build_id(void *note_data, size_t note_len, void *bf, + size_t size, bool need_swap) +{ + struct { + u32 n_namesz; + u32 n_descsz; + u32 n_type; + } *nhdr; + void *ptr; + + ptr = note_data; + while (ptr < (note_data + note_len)) { + const char *name; + size_t namesz, descsz; + + nhdr = ptr; + if (need_swap) { + nhdr->n_namesz = bswap_32(nhdr->n_namesz); + nhdr->n_descsz = bswap_32(nhdr->n_descsz); + nhdr->n_type = bswap_32(nhdr->n_type); + } + + namesz = NOTE_ALIGN(nhdr->n_namesz); + descsz = NOTE_ALIGN(nhdr->n_descsz); + + ptr += sizeof(*nhdr); + name = ptr; + ptr += namesz; + if (nhdr->n_type == NT_GNU_BUILD_ID && + nhdr->n_namesz == sizeof("GNU")) { + if (memcmp(name, "GNU", sizeof("GNU")) == 0) { + size_t sz = min(size, descsz); + memcpy(bf, ptr, sz); + memset(bf + sz, 0, size - sz); + return 0; + } + } + ptr += descsz; + } + + return -1; +} + +int filename__read_debuglink(const char *filename __maybe_unused, + char *debuglink __maybe_unused, + size_t size __maybe_unused) +{ + return -1; +} + +/* + * Just try PT_NOTE header otherwise fails + */ +int filename__read_build_id(const char *filename, void *bf, size_t size) +{ + FILE *fp; + int ret = -1; + 
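	/*
	 * No libelf here, so the file is parsed by hand: read the ELF
	 * header, walk the program headers for a PT_NOTE segment and
	 * scan its notes for NT_GNU_BUILD_ID, byte-swapping the header
	 * fields whenever the file's endianness differs from the host's.
	 */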
bool need_swap = false; + u8 e_ident[EI_NIDENT]; + size_t buf_size; + void *buf; + int i; + + fp = fopen(filename, "r"); + if (fp == NULL) + return -1; + + if (fread(e_ident, sizeof(e_ident), 1, fp) != 1) + goto out; + + if (memcmp(e_ident, ELFMAG, SELFMAG) || + e_ident[EI_VERSION] != EV_CURRENT) + goto out; + + need_swap = check_need_swap(e_ident[EI_DATA]); + + /* for simplicity */ + fseek(fp, 0, SEEK_SET); + + if (e_ident[EI_CLASS] == ELFCLASS32) { + Elf32_Ehdr ehdr; + Elf32_Phdr *phdr; + + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + goto out; + + if (need_swap) { + ehdr.e_phoff = bswap_32(ehdr.e_phoff); + ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); + ehdr.e_phnum = bswap_16(ehdr.e_phnum); + } + + buf_size = ehdr.e_phentsize * ehdr.e_phnum; + buf = malloc(buf_size); + if (buf == NULL) + goto out; + + fseek(fp, ehdr.e_phoff, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { + void *tmp; + + if (need_swap) { + phdr->p_type = bswap_32(phdr->p_type); + phdr->p_offset = bswap_32(phdr->p_offset); + phdr->p_filesz = bswap_32(phdr->p_filesz); + } + + if (phdr->p_type != PT_NOTE) + continue; + + buf_size = phdr->p_filesz; + tmp = realloc(buf, buf_size); + if (tmp == NULL) + goto out_free; + + buf = tmp; + fseek(fp, phdr->p_offset, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + ret = read_build_id(buf, buf_size, bf, size, need_swap); + if (ret == 0) + ret = size; + break; + } + } else { + Elf64_Ehdr ehdr; + Elf64_Phdr *phdr; + + if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1) + goto out; + + if (need_swap) { + ehdr.e_phoff = bswap_64(ehdr.e_phoff); + ehdr.e_phentsize = bswap_16(ehdr.e_phentsize); + ehdr.e_phnum = bswap_16(ehdr.e_phnum); + } + + buf_size = ehdr.e_phentsize * ehdr.e_phnum; + buf = malloc(buf_size); + if (buf == NULL) + goto out; + + fseek(fp, ehdr.e_phoff, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) { + void *tmp; + + if (need_swap) { + phdr->p_type = bswap_32(phdr->p_type); + phdr->p_offset = bswap_64(phdr->p_offset); + phdr->p_filesz = bswap_64(phdr->p_filesz); + } + + if (phdr->p_type != PT_NOTE) + continue; + + buf_size = phdr->p_filesz; + tmp = realloc(buf, buf_size); + if (tmp == NULL) + goto out_free; + + buf = tmp; + fseek(fp, phdr->p_offset, SEEK_SET); + if (fread(buf, buf_size, 1, fp) != 1) + goto out_free; + + ret = read_build_id(buf, buf_size, bf, size, need_swap); + if (ret == 0) + ret = size; + break; + } + } +out_free: + free(buf); +out: + fclose(fp); + return ret; +} + +int sysfs__read_build_id(const char *filename, void *build_id, size_t size) +{ + int fd; + int ret = -1; + struct stat stbuf; + size_t buf_size; + void *buf; + + fd = open(filename, O_RDONLY); + if (fd < 0) + return -1; + + if (fstat(fd, &stbuf) < 0) + goto out; + + buf_size = stbuf.st_size; + buf = malloc(buf_size); + if (buf == NULL) + goto out; + + if (read(fd, buf, buf_size) != (ssize_t) buf_size) + goto out_free; + + ret = read_build_id(buf, buf_size, build_id, size, false); +out_free: + free(buf); +out: + close(fd); + return ret; +} + +int symsrc__init(struct symsrc *ss, struct dso *dso __maybe_unused, + const char *name, + enum dso_binary_type type) +{ + int fd = open(name, O_RDONLY); + if (fd < 0) + return -1; + + ss->name = strdup(name); + if (!ss->name) + goto out_close; + + ss->type = type; + + return 0; +out_close: + close(fd); + return -1; +} + +bool symsrc__possibly_runtime(struct symsrc *ss 
__maybe_unused) +{ + /* Assume all sym sources could be a runtime image. */ + return true; +} + +bool symsrc__has_symtab(struct symsrc *ss __maybe_unused) +{ + return false; +} + +void symsrc__destroy(struct symsrc *ss) +{ + free(ss->name); + close(ss->fd); +} + +int dso__synthesize_plt_symbols(struct dso *dso __maybe_unused, + struct symsrc *ss __maybe_unused, + struct map *map __maybe_unused, + symbol_filter_t filter __maybe_unused) +{ + return 0; +} + +int dso__load_sym(struct dso *dso, struct map *map __maybe_unused, + struct symsrc *ss, + struct symsrc *runtime_ss __maybe_unused, + symbol_filter_t filter __maybe_unused, + int kmodule __maybe_unused) +{ + unsigned char *build_id[BUILD_ID_SIZE]; + + if (filename__read_build_id(ss->name, build_id, BUILD_ID_SIZE) > 0) { + dso__set_build_id(dso, build_id); + return 1; + } + return 0; +} + +void symbol__elf_init(void) +{ +} diff --git a/trunk/tools/perf/util/symbol.c b/trunk/tools/perf/util/symbol.c index fdad4eeeb429..e2e8c697cffe 100644 --- a/trunk/tools/perf/util/symbol.c +++ b/trunk/tools/perf/util/symbol.c @@ -15,8 +15,6 @@ #include "symbol.h" #include "strlist.h" -#include -#include #include #include #include @@ -25,15 +23,7 @@ #define KSYM_NAME_LEN 256 #endif -#ifndef NT_GNU_BUILD_ID -#define NT_GNU_BUILD_ID 3 -#endif - static void dso_cache__free(struct rb_root *root); -static bool dso__build_id_equal(const struct dso *dso, u8 *build_id); -static int elf_read_build_id(Elf *elf, void *bf, size_t size); -static void dsos__add(struct list_head *head, struct dso *dso); -static struct map *map__new2(u64 start, struct dso *dso, enum map_type type); static int dso__load_kernel_sym(struct dso *dso, struct map *map, symbol_filter_t filter); static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, @@ -64,7 +54,7 @@ static enum dso_binary_type binary_type_symtab[] = { DSO_BINARY_TYPE__NOT_FOUND, }; -#define DSO_BINARY_TYPE__SYMTAB_CNT sizeof(binary_type_symtab) +#define DSO_BINARY_TYPE__SYMTAB_CNT ARRAY_SIZE(binary_type_symtab) static enum dso_binary_type binary_type_data[] = { DSO_BINARY_TYPE__BUILD_ID_CACHE, @@ -72,7 +62,7 @@ static enum dso_binary_type binary_type_data[] = { DSO_BINARY_TYPE__NOT_FOUND, }; -#define DSO_BINARY_TYPE__DATA_CNT sizeof(binary_type_data) +#define DSO_BINARY_TYPE__DATA_CNT ARRAY_SIZE(binary_type_data) int dso__name_len(const struct dso *dso) { @@ -170,7 +160,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb) return SYMBOL_B; } -static void symbols__fixup_duplicate(struct rb_root *symbols) +void symbols__fixup_duplicate(struct rb_root *symbols) { struct rb_node *nd; struct symbol *curr, *next; @@ -199,7 +189,7 @@ static void symbols__fixup_duplicate(struct rb_root *symbols) } } -static void symbols__fixup_end(struct rb_root *symbols) +void symbols__fixup_end(struct rb_root *symbols) { struct rb_node *nd, *prevnd = rb_first(symbols); struct symbol *curr, *prev; @@ -222,7 +212,7 @@ static void symbols__fixup_end(struct rb_root *symbols) curr->end = roundup(curr->start, 4096); } -static void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) +void __map_groups__fixup_end(struct map_groups *mg, enum map_type type) { struct map *prev, *curr; struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]); @@ -252,8 +242,7 @@ static void map_groups__fixup_end(struct map_groups *mg) __map_groups__fixup_end(mg, i); } -static struct symbol *symbol__new(u64 start, u64 len, u8 binding, - const char *name) +struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char 
*name) { size_t namelen = strlen(name) + 1; struct symbol *sym = calloc(1, (symbol_conf.priv_size + @@ -390,7 +379,7 @@ void dso__set_build_id(struct dso *dso, void *build_id) dso->has_build_id = 1; } -static void symbols__insert(struct rb_root *symbols, struct symbol *sym) +void symbols__insert(struct rb_root *symbols, struct symbol *sym) { struct rb_node **p = &symbols->rb_node; struct rb_node *parent = NULL; @@ -574,7 +563,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp) int kallsyms__parse(const char *filename, void *arg, int (*process_symbol)(void *arg, const char *name, - char type, u64 start, u64 end)) + char type, u64 start)) { char *line = NULL; size_t n; @@ -614,13 +603,8 @@ int kallsyms__parse(const char *filename, void *arg, break; } - /* - * module symbols are not sorted so we add all - * symbols with zero length and rely on - * symbols__fixup_end() to fix it up. - */ err = process_symbol(arg, symbol_name, - symbol_type, start, start); + symbol_type, start); if (err) break; } @@ -647,7 +631,7 @@ static u8 kallsyms2elf_type(char type) } static int map__process_kallsym_symbol(void *arg, const char *name, - char type, u64 start, u64 end) + char type, u64 start) { struct symbol *sym; struct process_kallsyms_args *a = arg; @@ -656,8 +640,12 @@ static int map__process_kallsym_symbol(void *arg, const char *name, if (!symbol_type__is_a(type, a->map->type)) return 0; - sym = symbol__new(start, end - start + 1, - kallsyms2elf_type(type), name); + /* + * module symbols are not sorted so we add all + * symbols, setting length to 0, and rely on + * symbols__fixup_end() to fix it up. + */ + sym = symbol__new(start, 0, kallsyms2elf_type(type), name); if (sym == NULL) return -ENOMEM; /* @@ -904,556 +892,7 @@ static int dso__load_perf_map(struct dso *dso, struct map *map, return -1; } -/** - * elf_symtab__for_each_symbol - iterate thru all the symbols - * - * @syms: struct elf_symtab instance to iterate - * @idx: uint32_t idx - * @sym: GElf_Sym iterator - */ -#define elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) \ - for (idx = 0, gelf_getsym(syms, idx, &sym);\ - idx < nr_syms; \ - idx++, gelf_getsym(syms, idx, &sym)) - -static inline uint8_t elf_sym__type(const GElf_Sym *sym) -{ - return GELF_ST_TYPE(sym->st_info); -} - -static inline int elf_sym__is_function(const GElf_Sym *sym) -{ - return elf_sym__type(sym) == STT_FUNC && - sym->st_name != 0 && - sym->st_shndx != SHN_UNDEF; -} - -static inline bool elf_sym__is_object(const GElf_Sym *sym) -{ - return elf_sym__type(sym) == STT_OBJECT && - sym->st_name != 0 && - sym->st_shndx != SHN_UNDEF; -} - -static inline int elf_sym__is_label(const GElf_Sym *sym) -{ - return elf_sym__type(sym) == STT_NOTYPE && - sym->st_name != 0 && - sym->st_shndx != SHN_UNDEF && - sym->st_shndx != SHN_ABS; -} - -static inline const char *elf_sec__name(const GElf_Shdr *shdr, - const Elf_Data *secstrs) -{ - return secstrs->d_buf + shdr->sh_name; -} - -static inline int elf_sec__is_text(const GElf_Shdr *shdr, - const Elf_Data *secstrs) -{ - return strstr(elf_sec__name(shdr, secstrs), "text") != NULL; -} - -static inline bool elf_sec__is_data(const GElf_Shdr *shdr, - const Elf_Data *secstrs) -{ - return strstr(elf_sec__name(shdr, secstrs), "data") != NULL; -} - -static inline const char *elf_sym__name(const GElf_Sym *sym, - const Elf_Data *symstrs) -{ - return symstrs->d_buf + sym->st_name; -} - -static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, - GElf_Shdr *shp, const char *name, - size_t *idx) -{ - Elf_Scn *sec = NULL; - size_t cnt 
= 1; - - while ((sec = elf_nextscn(elf, sec)) != NULL) { - char *str; - - gelf_getshdr(sec, shp); - str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); - if (!strcmp(name, str)) { - if (idx) - *idx = cnt; - break; - } - ++cnt; - } - - return sec; -} - -#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \ - for (idx = 0, pos = gelf_getrel(reldata, 0, &pos_mem); \ - idx < nr_entries; \ - ++idx, pos = gelf_getrel(reldata, idx, &pos_mem)) - -#define elf_section__for_each_rela(reldata, pos, pos_mem, idx, nr_entries) \ - for (idx = 0, pos = gelf_getrela(reldata, 0, &pos_mem); \ - idx < nr_entries; \ - ++idx, pos = gelf_getrela(reldata, idx, &pos_mem)) - -/* - * We need to check if we have a .dynsym, so that we can handle the - * .plt, synthesizing its symbols, that aren't on the symtabs (be it - * .dynsym or .symtab). - * And always look at the original dso, not at debuginfo packages, that - * have the PLT data stripped out (shdr_rel_plt.sh_type == SHT_NOBITS). - */ -static int -dso__synthesize_plt_symbols(struct dso *dso, char *name, struct map *map, - symbol_filter_t filter) -{ - uint32_t nr_rel_entries, idx; - GElf_Sym sym; - u64 plt_offset; - GElf_Shdr shdr_plt; - struct symbol *f; - GElf_Shdr shdr_rel_plt, shdr_dynsym; - Elf_Data *reldata, *syms, *symstrs; - Elf_Scn *scn_plt_rel, *scn_symstrs, *scn_dynsym; - size_t dynsym_idx; - GElf_Ehdr ehdr; - char sympltname[1024]; - Elf *elf; - int nr = 0, symidx, fd, err = 0; - - fd = open(name, O_RDONLY); - if (fd < 0) - goto out; - - elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); - if (elf == NULL) - goto out_close; - - if (gelf_getehdr(elf, &ehdr) == NULL) - goto out_elf_end; - - scn_dynsym = elf_section_by_name(elf, &ehdr, &shdr_dynsym, - ".dynsym", &dynsym_idx); - if (scn_dynsym == NULL) - goto out_elf_end; - - scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, - ".rela.plt", NULL); - if (scn_plt_rel == NULL) { - scn_plt_rel = elf_section_by_name(elf, &ehdr, &shdr_rel_plt, - ".rel.plt", NULL); - if (scn_plt_rel == NULL) - goto out_elf_end; - } - - err = -1; - - if (shdr_rel_plt.sh_link != dynsym_idx) - goto out_elf_end; - - if (elf_section_by_name(elf, &ehdr, &shdr_plt, ".plt", NULL) == NULL) - goto out_elf_end; - - /* - * Fetch the relocation section to find the idxes to the GOT - * and the symbols in the .dynsym they refer to. 
- */ - reldata = elf_getdata(scn_plt_rel, NULL); - if (reldata == NULL) - goto out_elf_end; - - syms = elf_getdata(scn_dynsym, NULL); - if (syms == NULL) - goto out_elf_end; - - scn_symstrs = elf_getscn(elf, shdr_dynsym.sh_link); - if (scn_symstrs == NULL) - goto out_elf_end; - - symstrs = elf_getdata(scn_symstrs, NULL); - if (symstrs == NULL) - goto out_elf_end; - - nr_rel_entries = shdr_rel_plt.sh_size / shdr_rel_plt.sh_entsize; - plt_offset = shdr_plt.sh_offset; - - if (shdr_rel_plt.sh_type == SHT_RELA) { - GElf_Rela pos_mem, *pos; - - elf_section__for_each_rela(reldata, pos, pos_mem, idx, - nr_rel_entries) { - symidx = GELF_R_SYM(pos->r_info); - plt_offset += shdr_plt.sh_entsize; - gelf_getsym(syms, symidx, &sym); - snprintf(sympltname, sizeof(sympltname), - "%s@plt", elf_sym__name(&sym, symstrs)); - - f = symbol__new(plt_offset, shdr_plt.sh_entsize, - STB_GLOBAL, sympltname); - if (!f) - goto out_elf_end; - - if (filter && filter(map, f)) - symbol__delete(f); - else { - symbols__insert(&dso->symbols[map->type], f); - ++nr; - } - } - } else if (shdr_rel_plt.sh_type == SHT_REL) { - GElf_Rel pos_mem, *pos; - elf_section__for_each_rel(reldata, pos, pos_mem, idx, - nr_rel_entries) { - symidx = GELF_R_SYM(pos->r_info); - plt_offset += shdr_plt.sh_entsize; - gelf_getsym(syms, symidx, &sym); - snprintf(sympltname, sizeof(sympltname), - "%s@plt", elf_sym__name(&sym, symstrs)); - - f = symbol__new(plt_offset, shdr_plt.sh_entsize, - STB_GLOBAL, sympltname); - if (!f) - goto out_elf_end; - - if (filter && filter(map, f)) - symbol__delete(f); - else { - symbols__insert(&dso->symbols[map->type], f); - ++nr; - } - } - } - - err = 0; -out_elf_end: - elf_end(elf); -out_close: - close(fd); - - if (err == 0) - return nr; -out: - pr_debug("%s: problems reading %s PLT info.\n", - __func__, dso->long_name); - return 0; -} - -static bool elf_sym__is_a(GElf_Sym *sym, enum map_type type) -{ - switch (type) { - case MAP__FUNCTION: - return elf_sym__is_function(sym); - case MAP__VARIABLE: - return elf_sym__is_object(sym); - default: - return false; - } -} - -static bool elf_sec__is_a(GElf_Shdr *shdr, Elf_Data *secstrs, - enum map_type type) -{ - switch (type) { - case MAP__FUNCTION: - return elf_sec__is_text(shdr, secstrs); - case MAP__VARIABLE: - return elf_sec__is_data(shdr, secstrs); - default: - return false; - } -} - -static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr) -{ - Elf_Scn *sec = NULL; - GElf_Shdr shdr; - size_t cnt = 1; - - while ((sec = elf_nextscn(elf, sec)) != NULL) { - gelf_getshdr(sec, &shdr); - - if ((addr >= shdr.sh_addr) && - (addr < (shdr.sh_addr + shdr.sh_size))) - return cnt; - - ++cnt; - } - - return -1; -} - -static int dso__swap_init(struct dso *dso, unsigned char eidata) -{ - static unsigned int const endian = 1; - - dso->needs_swap = DSO_SWAP__NO; - - switch (eidata) { - case ELFDATA2LSB: - /* We are big endian, DSO is little endian. */ - if (*(unsigned char const *)&endian != 1) - dso->needs_swap = DSO_SWAP__YES; - break; - - case ELFDATA2MSB: - /* We are little endian, DSO is big endian. */ - if (*(unsigned char const *)&endian != 0) - dso->needs_swap = DSO_SWAP__YES; - break; - - default: - pr_err("unrecognized DSO data encoding %d\n", eidata); - return -EINVAL; - } - - return 0; -} - -static int dso__load_sym(struct dso *dso, struct map *map, const char *name, - int fd, symbol_filter_t filter, int kmodule, - int want_symtab) -{ - struct kmap *kmap = dso->kernel ? 
map__kmap(map) : NULL; - struct map *curr_map = map; - struct dso *curr_dso = dso; - Elf_Data *symstrs, *secstrs; - uint32_t nr_syms; - int err = -1; - uint32_t idx; - GElf_Ehdr ehdr; - GElf_Shdr shdr, opdshdr; - Elf_Data *syms, *opddata = NULL; - GElf_Sym sym; - Elf_Scn *sec, *sec_strndx, *opdsec; - Elf *elf; - int nr = 0; - size_t opdidx = 0; - - elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); - if (elf == NULL) { - pr_debug("%s: cannot read %s ELF file.\n", __func__, name); - goto out_close; - } - - if (gelf_getehdr(elf, &ehdr) == NULL) { - pr_debug("%s: cannot get elf header.\n", __func__); - goto out_elf_end; - } - - if (dso__swap_init(dso, ehdr.e_ident[EI_DATA])) - goto out_elf_end; - - /* Always reject images with a mismatched build-id: */ - if (dso->has_build_id) { - u8 build_id[BUILD_ID_SIZE]; - - if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) - goto out_elf_end; - - if (!dso__build_id_equal(dso, build_id)) - goto out_elf_end; - } - - sec = elf_section_by_name(elf, &ehdr, &shdr, ".symtab", NULL); - if (sec == NULL) { - if (want_symtab) - goto out_elf_end; - - sec = elf_section_by_name(elf, &ehdr, &shdr, ".dynsym", NULL); - if (sec == NULL) - goto out_elf_end; - } - - opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx); - if (opdshdr.sh_type != SHT_PROGBITS) - opdsec = NULL; - if (opdsec) - opddata = elf_rawdata(opdsec, NULL); - - syms = elf_getdata(sec, NULL); - if (syms == NULL) - goto out_elf_end; - - sec = elf_getscn(elf, shdr.sh_link); - if (sec == NULL) - goto out_elf_end; - - symstrs = elf_getdata(sec, NULL); - if (symstrs == NULL) - goto out_elf_end; - - sec_strndx = elf_getscn(elf, ehdr.e_shstrndx); - if (sec_strndx == NULL) - goto out_elf_end; - - secstrs = elf_getdata(sec_strndx, NULL); - if (secstrs == NULL) - goto out_elf_end; - - nr_syms = shdr.sh_size / shdr.sh_entsize; - - memset(&sym, 0, sizeof(sym)); - if (dso->kernel == DSO_TYPE_USER) { - dso->adjust_symbols = (ehdr.e_type == ET_EXEC || - elf_section_by_name(elf, &ehdr, &shdr, - ".gnu.prelink_undo", - NULL) != NULL); - } else { - dso->adjust_symbols = 0; - } - elf_symtab__for_each_symbol(syms, nr_syms, idx, sym) { - struct symbol *f; - const char *elf_name = elf_sym__name(&sym, symstrs); - char *demangled = NULL; - int is_label = elf_sym__is_label(&sym); - const char *section_name; - - if (kmap && kmap->ref_reloc_sym && kmap->ref_reloc_sym->name && - strcmp(elf_name, kmap->ref_reloc_sym->name) == 0) - kmap->ref_reloc_sym->unrelocated_addr = sym.st_value; - - if (!is_label && !elf_sym__is_a(&sym, map->type)) - continue; - - /* Reject ARM ELF "mapping symbols": these aren't unique and - * don't identify functions, so will confuse the profile - * output: */ - if (ehdr.e_machine == EM_ARM) { - if (!strcmp(elf_name, "$a") || - !strcmp(elf_name, "$d") || - !strcmp(elf_name, "$t")) - continue; - } - - if (opdsec && sym.st_shndx == opdidx) { - u32 offset = sym.st_value - opdshdr.sh_addr; - u64 *opd = opddata->d_buf + offset; - sym.st_value = DSO__SWAP(dso, u64, *opd); - sym.st_shndx = elf_addr_to_index(elf, sym.st_value); - } - - sec = elf_getscn(elf, sym.st_shndx); - if (!sec) - goto out_elf_end; - - gelf_getshdr(sec, &shdr); - - if (is_label && !elf_sec__is_a(&shdr, secstrs, map->type)) - continue; - - section_name = elf_sec__name(&shdr, secstrs); - - /* On ARM, symbols for thumb functions have 1 added to - * the symbol address as a flag - remove it */ - if ((ehdr.e_machine == EM_ARM) && - (map->type == MAP__FUNCTION) && - (sym.st_value & 1)) - --sym.st_value; - - if (dso->kernel != 
DSO_TYPE_USER || kmodule) { - char dso_name[PATH_MAX]; - - if (strcmp(section_name, - (curr_dso->short_name + - dso->short_name_len)) == 0) - goto new_symbol; - - if (strcmp(section_name, ".text") == 0) { - curr_map = map; - curr_dso = dso; - goto new_symbol; - } - - snprintf(dso_name, sizeof(dso_name), - "%s%s", dso->short_name, section_name); - - curr_map = map_groups__find_by_name(kmap->kmaps, map->type, dso_name); - if (curr_map == NULL) { - u64 start = sym.st_value; - - if (kmodule) - start += map->start + shdr.sh_offset; - - curr_dso = dso__new(dso_name); - if (curr_dso == NULL) - goto out_elf_end; - curr_dso->kernel = dso->kernel; - curr_dso->long_name = dso->long_name; - curr_dso->long_name_len = dso->long_name_len; - curr_map = map__new2(start, curr_dso, - map->type); - if (curr_map == NULL) { - dso__delete(curr_dso); - goto out_elf_end; - } - curr_map->map_ip = identity__map_ip; - curr_map->unmap_ip = identity__map_ip; - curr_dso->symtab_type = dso->symtab_type; - map_groups__insert(kmap->kmaps, curr_map); - dsos__add(&dso->node, curr_dso); - dso__set_loaded(curr_dso, map->type); - } else - curr_dso = curr_map->dso; - - goto new_symbol; - } - - if (curr_dso->adjust_symbols) { - pr_debug4("%s: adjusting symbol: st_value: %#" PRIx64 " " - "sh_addr: %#" PRIx64 " sh_offset: %#" PRIx64 "\n", __func__, - (u64)sym.st_value, (u64)shdr.sh_addr, - (u64)shdr.sh_offset); - sym.st_value -= shdr.sh_addr - shdr.sh_offset; - } - /* - * We need to figure out if the object was created from C++ sources - * DWARF DW_compile_unit has this, but we don't always have access - * to it... - */ - demangled = bfd_demangle(NULL, elf_name, DMGL_PARAMS | DMGL_ANSI); - if (demangled != NULL) - elf_name = demangled; -new_symbol: - f = symbol__new(sym.st_value, sym.st_size, - GELF_ST_BIND(sym.st_info), elf_name); - free(demangled); - if (!f) - goto out_elf_end; - - if (filter && filter(curr_map, f)) - symbol__delete(f); - else { - symbols__insert(&curr_dso->symbols[curr_map->type], f); - nr++; - } - } - - /* - * For misannotated, zeroed, ASM function sizes. - */ - if (nr > 0) { - symbols__fixup_duplicate(&dso->symbols[map->type]); - symbols__fixup_end(&dso->symbols[map->type]); - if (kmap) { - /* - * We need to fixup this here too because we create new - * maps here, for things like vsyscall sections. - */ - __map_groups__fixup_end(kmap->kmaps, map->type); - } - } - err = nr; -out_elf_end: - elf_end(elf); -out_close: - return err; -} - -static bool dso__build_id_equal(const struct dso *dso, u8 *build_id) +bool dso__build_id_equal(const struct dso *dso, u8 *build_id) { return memcmp(dso->build_id, build_id, sizeof(dso->build_id)) == 0; } @@ -1480,216 +919,11 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits) return have_build_id; } -/* - * Align offset to 4 bytes as needed for note name and descriptor data. 
- */ -#define NOTE_ALIGN(n) (((n) + 3) & -4U) - -static int elf_read_build_id(Elf *elf, void *bf, size_t size) -{ - int err = -1; - GElf_Ehdr ehdr; - GElf_Shdr shdr; - Elf_Data *data; - Elf_Scn *sec; - Elf_Kind ek; - void *ptr; - - if (size < BUILD_ID_SIZE) - goto out; - - ek = elf_kind(elf); - if (ek != ELF_K_ELF) - goto out; - - if (gelf_getehdr(elf, &ehdr) == NULL) { - pr_err("%s: cannot get elf header.\n", __func__); - goto out; - } - - /* - * Check following sections for notes: - * '.note.gnu.build-id' - * '.notes' - * '.note' (VDSO specific) - */ - do { - sec = elf_section_by_name(elf, &ehdr, &shdr, - ".note.gnu.build-id", NULL); - if (sec) - break; - - sec = elf_section_by_name(elf, &ehdr, &shdr, - ".notes", NULL); - if (sec) - break; - - sec = elf_section_by_name(elf, &ehdr, &shdr, - ".note", NULL); - if (sec) - break; - - return err; - - } while (0); - - data = elf_getdata(sec, NULL); - if (data == NULL) - goto out; - - ptr = data->d_buf; - while (ptr < (data->d_buf + data->d_size)) { - GElf_Nhdr *nhdr = ptr; - size_t namesz = NOTE_ALIGN(nhdr->n_namesz), - descsz = NOTE_ALIGN(nhdr->n_descsz); - const char *name; - - ptr += sizeof(*nhdr); - name = ptr; - ptr += namesz; - if (nhdr->n_type == NT_GNU_BUILD_ID && - nhdr->n_namesz == sizeof("GNU")) { - if (memcmp(name, "GNU", sizeof("GNU")) == 0) { - size_t sz = min(size, descsz); - memcpy(bf, ptr, sz); - memset(bf + sz, 0, size - sz); - err = descsz; - break; - } - } - ptr += descsz; - } - -out: - return err; -} - -int filename__read_build_id(const char *filename, void *bf, size_t size) -{ - int fd, err = -1; - Elf *elf; - - if (size < BUILD_ID_SIZE) - goto out; - - fd = open(filename, O_RDONLY); - if (fd < 0) - goto out; - - elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); - if (elf == NULL) { - pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); - goto out_close; - } - - err = elf_read_build_id(elf, bf, size); - - elf_end(elf); -out_close: - close(fd); -out: - return err; -} - -int sysfs__read_build_id(const char *filename, void *build_id, size_t size) -{ - int fd, err = -1; - - if (size < BUILD_ID_SIZE) - goto out; - - fd = open(filename, O_RDONLY); - if (fd < 0) - goto out; - - while (1) { - char bf[BUFSIZ]; - GElf_Nhdr nhdr; - size_t namesz, descsz; - - if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) - break; - - namesz = NOTE_ALIGN(nhdr.n_namesz); - descsz = NOTE_ALIGN(nhdr.n_descsz); - if (nhdr.n_type == NT_GNU_BUILD_ID && - nhdr.n_namesz == sizeof("GNU")) { - if (read(fd, bf, namesz) != (ssize_t)namesz) - break; - if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { - size_t sz = min(descsz, size); - if (read(fd, build_id, sz) == (ssize_t)sz) { - memset(build_id + sz, 0, size - sz); - err = 0; - break; - } - } else if (read(fd, bf, descsz) != (ssize_t)descsz) - break; - } else { - int n = namesz + descsz; - if (read(fd, bf, n) != n) - break; - } - } - close(fd); -out: - return err; -} - -static int filename__read_debuglink(const char *filename, - char *debuglink, size_t size) -{ - int fd, err = -1; - Elf *elf; - GElf_Ehdr ehdr; - GElf_Shdr shdr; - Elf_Data *data; - Elf_Scn *sec; - Elf_Kind ek; - - fd = open(filename, O_RDONLY); - if (fd < 0) - goto out; - - elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); - if (elf == NULL) { - pr_debug2("%s: cannot read %s ELF file.\n", __func__, filename); - goto out_close; - } - - ek = elf_kind(elf); - if (ek != ELF_K_ELF) - goto out_close; - - if (gelf_getehdr(elf, &ehdr) == NULL) { - pr_err("%s: cannot get elf header.\n", __func__); - goto out_close; - } - - sec = 
elf_section_by_name(elf, &ehdr, &shdr, - ".gnu_debuglink", NULL); - if (sec == NULL) - goto out_close; - - data = elf_getdata(sec, NULL); - if (data == NULL) - goto out_close; - - /* the start of this section is a zero-terminated string */ - strncpy(debuglink, data->d_buf, size); - - elf_end(elf); - -out_close: - close(fd); -out: - return err; -} - char dso__symtab_origin(const struct dso *dso) { static const char origin[] = { [DSO_BINARY_TYPE__KALLSYMS] = 'k', + [DSO_BINARY_TYPE__VMLINUX] = 'v', [DSO_BINARY_TYPE__JAVA_JIT] = 'j', [DSO_BINARY_TYPE__DEBUGLINK] = 'l', [DSO_BINARY_TYPE__BUILD_ID_CACHE] = 'B', @@ -1700,6 +934,7 @@ char dso__symtab_origin(const struct dso *dso) [DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE] = 'K', [DSO_BINARY_TYPE__GUEST_KALLSYMS] = 'g', [DSO_BINARY_TYPE__GUEST_KMODULE] = 'G', + [DSO_BINARY_TYPE__GUEST_VMLINUX] = 'V', }; if (dso == NULL || dso->symtab_type == DSO_BINARY_TYPE__NOT_FOUND) @@ -1775,7 +1010,9 @@ int dso__binary_type_file(struct dso *dso, enum dso_binary_type type, default: case DSO_BINARY_TYPE__KALLSYMS: + case DSO_BINARY_TYPE__VMLINUX: case DSO_BINARY_TYPE__GUEST_KALLSYMS: + case DSO_BINARY_TYPE__GUEST_VMLINUX: case DSO_BINARY_TYPE__JAVA_JIT: case DSO_BINARY_TYPE__NOT_FOUND: ret = -1; @@ -1789,11 +1026,12 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) { char *name; int ret = -1; - int fd; u_int i; struct machine *machine; char *root_dir = (char *) ""; - int want_symtab; + int ss_pos = 0; + struct symsrc ss_[2]; + struct symsrc *syms_ss = NULL, *runtime_ss = NULL; dso__set_loaded(dso, map->type); @@ -1835,54 +1073,69 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter) root_dir = machine->root_dir; /* Iterate over candidate debug images. - * On the first pass, only load images if they have a full symtab. - * Failing that, do a second pass where we accept .dynsym also + * Keep track of "interesting" ones (those which have a symtab, dynsym, + * and/or opd section) for processing. */ - want_symtab = 1; -restart: for (i = 0; i < DSO_BINARY_TYPE__SYMTAB_CNT; i++) { + struct symsrc *ss = &ss_[ss_pos]; + bool next_slot = false; - dso->symtab_type = binary_type_symtab[i]; + enum dso_binary_type symtab_type = binary_type_symtab[i]; - if (dso__binary_type_file(dso, dso->symtab_type, + if (dso__binary_type_file(dso, symtab_type, root_dir, name, PATH_MAX)) continue; /* Name is now the name of the next image to try */ - fd = open(name, O_RDONLY); - if (fd < 0) + if (symsrc__init(ss, dso, name, symtab_type) < 0) continue; - ret = dso__load_sym(dso, map, name, fd, filter, 0, - want_symtab); - close(fd); + if (!syms_ss && symsrc__has_symtab(ss)) { + syms_ss = ss; + next_slot = true; + } - /* - * Some people seem to have debuginfo files _WITHOUT_ debug - * info!?!? - */ - if (!ret) - continue; + if (!runtime_ss && symsrc__possibly_runtime(ss)) { + runtime_ss = ss; + next_slot = true; + } - if (ret > 0) { - int nr_plt; + if (next_slot) { + ss_pos++; - nr_plt = dso__synthesize_plt_symbols(dso, name, map, filter); - if (nr_plt > 0) - ret += nr_plt; - break; + if (syms_ss && runtime_ss) + break; } + } - /* - * If we wanted a full symtab but no image had one, - * relax our requirements and repeat the search. 
- */ - if (ret <= 0 && want_symtab) { - want_symtab = 0; - goto restart; + if (!runtime_ss && !syms_ss) + goto out_free; + + if (runtime_ss && !syms_ss) { + syms_ss = runtime_ss; + } + + /* We'll have to hope for the best */ + if (!runtime_ss && syms_ss) + runtime_ss = syms_ss; + + if (syms_ss) + ret = dso__load_sym(dso, map, syms_ss, runtime_ss, filter, 0); + else + ret = -1; + + if (ret > 0) { + int nr_plt; + + nr_plt = dso__synthesize_plt_symbols(dso, runtime_ss, map, filter); + if (nr_plt > 0) + ret += nr_plt; } + for (; ss_pos > 0; ss_pos--) + symsrc__destroy(&ss_[ss_pos - 1]); +out_free: free(name); if (ret < 0 && strstr(dso->name, " (deleted)") != NULL) return 0; @@ -2030,25 +1283,6 @@ static int machine__set_modules_path(struct machine *machine) return map_groups__set_modules_path_dir(&machine->kmaps, modules_path); } -/* - * Constructor variant for modules (where we know from /proc/modules where - * they are loaded) and for vmlinux, where only after we load all the - * symbols we'll know where it starts and ends. - */ -static struct map *map__new2(u64 start, struct dso *dso, enum map_type type) -{ - struct map *map = calloc(1, (sizeof(*map) + - (dso->kernel ? sizeof(struct kmap) : 0))); - if (map != NULL) { - /* - * ->end will be filled after we load all the symbols - */ - map__init(map, type, start, 0, 0, dso); - } - - return map; -} - struct map *machine__new_module(struct machine *machine, u64 start, const char *filename) { @@ -2141,22 +1375,30 @@ static int machine__create_modules(struct machine *machine) int dso__load_vmlinux(struct dso *dso, struct map *map, const char *vmlinux, symbol_filter_t filter) { - int err = -1, fd; + int err = -1; + struct symsrc ss; char symfs_vmlinux[PATH_MAX]; + enum dso_binary_type symtab_type; snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", symbol_conf.symfs, vmlinux); - fd = open(symfs_vmlinux, O_RDONLY); - if (fd < 0) + + if (dso->kernel == DSO_TYPE_GUEST_KERNEL) + symtab_type = DSO_BINARY_TYPE__GUEST_VMLINUX; + else + symtab_type = DSO_BINARY_TYPE__VMLINUX; + + if (symsrc__init(&ss, dso, symfs_vmlinux, symtab_type)) return -1; - dso__set_long_name(dso, (char *)vmlinux); - dso__set_loaded(dso, map->type); - err = dso__load_sym(dso, map, symfs_vmlinux, fd, filter, 0, 0); - close(fd); + err = dso__load_sym(dso, map, &ss, &ss, filter, 0); + symsrc__destroy(&ss); - if (err > 0) + if (err > 0) { + dso__set_long_name(dso, (char *)vmlinux); + dso__set_loaded(dso, map->type); pr_debug("Using %s for symbols\n", symfs_vmlinux); + } return err; } @@ -2173,10 +1415,8 @@ int dso__load_vmlinux_path(struct dso *dso, struct map *map, filename = dso__build_id_filename(dso, NULL, 0); if (filename != NULL) { err = dso__load_vmlinux(dso, map, filename, filter); - if (err > 0) { - dso__set_long_name(dso, filename); + if (err > 0) goto out; - } free(filename); } @@ -2291,9 +1531,8 @@ static int dso__load_kernel_sym(struct dso *dso, struct map *map, free(kallsyms_allocated_filename); if (err > 0) { + dso__set_long_name(dso, strdup("[kernel.kallsyms]")); out_fixup: - if (kallsyms_filename != NULL) - dso__set_long_name(dso, strdup("[kernel.kallsyms]")); map__fixup_start(map); map__fixup_end(map); } @@ -2352,12 +1591,12 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map, return err; } -static void dsos__add(struct list_head *head, struct dso *dso) +void dsos__add(struct list_head *head, struct dso *dso) { list_add_tail(&dso->node, head); } -static struct dso *dsos__find(struct list_head *head, const char *name) +struct dso 
*dsos__find(struct list_head *head, const char *name) { struct dso *pos; @@ -2516,7 +1755,7 @@ struct process_args { }; static int symbol__in_kernel(void *arg, const char *name, - char type __used, u64 start, u64 end __used) + char type __maybe_unused, u64 start) { struct process_args *args = arg; @@ -2752,9 +1991,10 @@ int symbol__init(void) if (symbol_conf.initialized) return 0; - symbol_conf.priv_size = ALIGN(symbol_conf.priv_size, sizeof(u64)); + symbol_conf.priv_size = PERF_ALIGN(symbol_conf.priv_size, sizeof(u64)); + + symbol__elf_init(); - elf_version(EV_CURRENT); if (symbol_conf.sort_by_name) symbol_conf.priv_size += (sizeof(struct symbol_name_rb_node) - sizeof(struct symbol)); @@ -2875,6 +2115,7 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) int i, items = 0; char path[PATH_MAX]; pid_t pid; + char *endp; if (symbol_conf.default_guest_vmlinux_name || symbol_conf.default_guest_modules || @@ -2891,7 +2132,14 @@ int machines__create_guest_kernel_maps(struct rb_root *machines) /* Filter out . and .. */ continue; } - pid = atoi(namelist[i]->d_name); + pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10); + if ((*endp != '\0') || + (endp == namelist[i]->d_name) || + (errno == ERANGE)) { + pr_debug("invalid directory (%s). Skipping.\n", + namelist[i]->d_name); + continue; + } sprintf(path, "%s/%s/proc/kallsyms", symbol_conf.guestmount, namelist[i]->d_name); diff --git a/trunk/tools/perf/util/symbol.h b/trunk/tools/perf/util/symbol.h index 1fe733a1e21f..b441b07172b7 100644 --- a/trunk/tools/perf/util/symbol.h +++ b/trunk/tools/perf/util/symbol.h @@ -10,22 +10,31 @@ #include #include #include +#include + +#ifndef NO_LIBELF_SUPPORT +#include +#include +#include +#endif #ifdef HAVE_CPLUS_DEMANGLE extern char *cplus_demangle(const char *, int); -static inline char *bfd_demangle(void __used *v, const char *c, int i) +static inline char *bfd_demangle(void __maybe_unused *v, const char *c, int i) { return cplus_demangle(c, i); } #else #ifdef NO_DEMANGLE -static inline char *bfd_demangle(void __used *v, const char __used *c, - int __used i) +static inline char *bfd_demangle(void __maybe_unused *v, + const char __maybe_unused *c, + int __maybe_unused i) { return NULL; } #else +#define PACKAGE 'perf' #include #endif #endif @@ -158,6 +167,8 @@ struct addr_location { enum dso_binary_type { DSO_BINARY_TYPE__KALLSYMS = 0, DSO_BINARY_TYPE__GUEST_KALLSYMS, + DSO_BINARY_TYPE__VMLINUX, + DSO_BINARY_TYPE__GUEST_VMLINUX, DSO_BINARY_TYPE__JAVA_JIT, DSO_BINARY_TYPE__DEBUGLINK, DSO_BINARY_TYPE__BUILD_ID_CACHE, @@ -217,6 +228,36 @@ struct dso { char name[0]; }; +struct symsrc { + char *name; + int fd; + enum dso_binary_type type; + +#ifndef NO_LIBELF_SUPPORT + Elf *elf; + GElf_Ehdr ehdr; + + Elf_Scn *opdsec; + size_t opdidx; + GElf_Shdr opdshdr; + + Elf_Scn *symtab; + GElf_Shdr symshdr; + + Elf_Scn *dynsym; + size_t dynsym_idx; + GElf_Shdr dynshdr; + + bool adjust_symbols; +#endif +}; + +void symsrc__destroy(struct symsrc *ss); +int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name, + enum dso_binary_type type); +bool symsrc__has_symtab(struct symsrc *ss); +bool symsrc__possibly_runtime(struct symsrc *ss); + #define DSO__SWAP(dso, type, val) \ ({ \ type ____r = val; \ @@ -254,6 +295,8 @@ static inline void dso__set_loaded(struct dso *dso, enum map_type type) void dso__sort_by_name(struct dso *dso, enum map_type type); +void dsos__add(struct list_head *head, struct dso *dso); +struct dso *dsos__find(struct list_head *head, const char *name); struct dso *__dsos__findnew(struct 
list_head *head, const char *name); int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter); @@ -283,6 +326,7 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp); char dso__symtab_origin(const struct dso *dso); void dso__set_long_name(struct dso *dso, char *name); void dso__set_build_id(struct dso *dso, void *build_id); +bool dso__build_id_equal(const struct dso *dso, u8 *build_id); void dso__read_running_kernel_build_id(struct dso *dso, struct machine *machine); struct map *dso__new_map(const char *name); @@ -297,7 +341,9 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits); int build_id__sprintf(const u8 *build_id, int len, char *bf); int kallsyms__parse(const char *filename, void *arg, int (*process_symbol)(void *arg, const char *name, - char type, u64 start, u64 end)); + char type, u64 start)); +int filename__read_debuglink(const char *filename, char *debuglink, + size_t size); void machine__destroy_kernel_maps(struct machine *machine); int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); @@ -309,6 +355,8 @@ void machines__destroy_guest_kernel_maps(struct rb_root *machines); int symbol__init(void); void symbol__exit(void); +void symbol__elf_init(void); +struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name); size_t symbol__fprintf_symname_offs(const struct symbol *sym, const struct addr_location *al, FILE *fp); size_t symbol__fprintf_symname(const struct symbol *sym, FILE *fp); @@ -326,4 +374,15 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map, struct machine *machine, u64 addr, u8 *data, ssize_t size); int dso__test_data(void); +int dso__load_sym(struct dso *dso, struct map *map, struct symsrc *syms_ss, + struct symsrc *runtime_ss, symbol_filter_t filter, + int kmodule); +int dso__synthesize_plt_symbols(struct dso *dso, struct symsrc *ss, + struct map *map, symbol_filter_t filter); + +void symbols__insert(struct rb_root *symbols, struct symbol *sym); +void symbols__fixup_duplicate(struct rb_root *symbols); +void symbols__fixup_end(struct rb_root *symbols); +void __map_groups__fixup_end(struct map_groups *mg, enum map_type type); + #endif /* __PERF_SYMBOL */ diff --git a/trunk/tools/perf/util/target.c b/trunk/tools/perf/util/target.c index 3f59c496e64c..065528b7563e 100644 --- a/trunk/tools/perf/util/target.c +++ b/trunk/tools/perf/util/target.c @@ -110,15 +110,15 @@ int perf_target__strerror(struct perf_target *target, int errnum, int idx; const char *msg; - BUG_ON(buflen > 0); + BUG_ON(buflen == 0); if (errnum >= 0) { const char *err = strerror_r(errnum, buf, buflen); if (err != buf) { size_t len = strlen(err); - char *c = mempcpy(buf, err, min(buflen - 1, len)); - *c = '\0'; + memcpy(buf, err, min(buflen - 1, len)); + *(buf + min(buflen - 1, len)) = '\0'; } return 0; diff --git a/trunk/tools/perf/util/thread.h b/trunk/tools/perf/util/thread.h index 70c2c13ff679..f66610b7bacf 100644 --- a/trunk/tools/perf/util/thread.h +++ b/trunk/tools/perf/util/thread.h @@ -16,6 +16,8 @@ struct thread { bool comm_set; char *comm; int comm_len; + + void *priv; }; struct machine; diff --git a/trunk/tools/perf/util/top.c b/trunk/tools/perf/util/top.c index 7eeebcee291c..884dde9b9bc1 100644 --- a/trunk/tools/perf/util/top.c +++ b/trunk/tools/perf/util/top.c @@ -58,8 +58,7 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) } if (top->evlist->nr_entries == 1) { - struct perf_evsel *first; - first = list_entry(top->evlist->entries.next, struct perf_evsel, node); 
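The open-coded list_entry() above is replaced by the perf_evlist__first() accessor. Presumably it is only a thin wrapper around the same list lookup; a minimal sketch under that assumption (the struct types and the entries/node members are taken from the removed line, not defined here):

	static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
	{
		/* first evsel hanging off the evlist's entries list */
		return list_entry(evlist->entries.next, struct perf_evsel, node);
	}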
+		struct perf_evsel *first = perf_evlist__first(top->evlist);
 		ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
 				(uint64_t)first->attr.sample_period,
 				top->freq ? "Hz" : "");
diff --git a/trunk/tools/perf/util/top.h b/trunk/tools/perf/util/top.h
index 33347ca89ee4..86ff1b15059b 100644
--- a/trunk/tools/perf/util/top.h
+++ b/trunk/tools/perf/util/top.h
@@ -5,6 +5,7 @@
 #include "types.h"
 #include
 #include
+#include
 
 struct perf_evlist;
 struct perf_evsel;
diff --git a/trunk/tools/perf/util/trace-event-parse.c b/trunk/tools/perf/util/trace-event-parse.c
index 0715c843c2e7..3aabcd687cd5 100644
--- a/trunk/tools/perf/util/trace-event-parse.c
+++ b/trunk/tools/perf/util/trace-event-parse.c
@@ -162,25 +162,16 @@ int trace_parse_common_pid(struct pevent *pevent, void *data)
 	return pevent_data_pid(pevent, &record);
 }
 
-unsigned long long read_size(struct pevent *pevent, void *ptr, int size)
+unsigned long long read_size(struct event_format *event, void *ptr, int size)
 {
-	return pevent_read_number(pevent, ptr, size);
+	return pevent_read_number(event->pevent, ptr, size);
 }
 
-void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
+void event_format__print(struct event_format *event,
+			 int cpu, void *data, int size)
 {
-	struct event_format *event;
 	struct pevent_record record;
 	struct trace_seq s;
-	int type;
-
-	type = trace_parse_common_type(pevent, data);
-
-	event = pevent_find_event(pevent, type);
-	if (!event) {
-		warning("ug! no event found for type %d", type);
-		return;
-	}
 
 	memset(&record, 0, sizeof(record));
 	record.cpu = cpu;
@@ -192,6 +183,19 @@ void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
 	trace_seq_do_printf(&s);
 }
 
+void print_trace_event(struct pevent *pevent, int cpu, void *data, int size)
+{
+	int type = trace_parse_common_type(pevent, data);
+	struct event_format *event = pevent_find_event(pevent, type);
+
+	if (!event) {
+		warning("ug! 
no event found for type %d", type); + return; + } + + event_format__print(event, cpu, data, size); +} + void print_event(struct pevent *pevent, int cpu, void *data, int size, unsigned long long nsecs, char *comm) { @@ -217,7 +221,7 @@ void print_event(struct pevent *pevent, int cpu, void *data, int size, } void parse_proc_kallsyms(struct pevent *pevent, - char *file, unsigned int size __unused) + char *file, unsigned int size __maybe_unused) { unsigned long long addr; char *func; @@ -225,31 +229,29 @@ void parse_proc_kallsyms(struct pevent *pevent, char *next = NULL; char *addr_str; char *mod; - char ch; + char *fmt; line = strtok_r(file, "\n", &next); while (line) { mod = NULL; - sscanf(line, "%as %c %as\t[%as", - (float *)(void *)&addr_str, /* workaround gcc warning */ - &ch, (float *)(void *)&func, (float *)(void *)&mod); + addr_str = strtok_r(line, " ", &fmt); addr = strtoull(addr_str, NULL, 16); - free(addr_str); - - /* truncate the extra ']' */ + /* skip character */ + strtok_r(NULL, " ", &fmt); + func = strtok_r(NULL, "\t", &fmt); + mod = strtok_r(NULL, "]", &fmt); + /* truncate the extra '[' */ if (mod) - mod[strlen(mod) - 1] = 0; + mod = mod + 1; pevent_register_function(pevent, func, addr, mod); - free(func); - free(mod); line = strtok_r(NULL, "\n", &next); } } void parse_ftrace_printk(struct pevent *pevent, - char *file, unsigned int size __unused) + char *file, unsigned int size __maybe_unused) { unsigned long long addr; char *printk; @@ -289,7 +291,7 @@ struct event_format *trace_find_next_event(struct pevent *pevent, { static int idx; - if (!pevent->events) + if (!pevent || !pevent->events) return NULL; if (!event) { diff --git a/trunk/tools/perf/util/trace-event-scripting.c b/trunk/tools/perf/util/trace-event-scripting.c index 474aa7a7df43..8715a1006d00 100644 --- a/trunk/tools/perf/util/trace-event-scripting.c +++ b/trunk/tools/perf/util/trace-event-scripting.c @@ -35,12 +35,11 @@ static int stop_script_unsupported(void) return 0; } -static void process_event_unsupported(union perf_event *event __unused, - struct pevent *pevent __unused, - struct perf_sample *sample __unused, - struct perf_evsel *evsel __unused, - struct machine *machine __unused, - struct thread *thread __unused) +static void process_event_unsupported(union perf_event *event __maybe_unused, + struct perf_sample *sample __maybe_unused, + struct perf_evsel *evsel __maybe_unused, + struct machine *machine __maybe_unused, + struct addr_location *al __maybe_unused) { } @@ -53,17 +52,19 @@ static void print_python_unsupported_msg(void) "\n etc.\n"); } -static int python_start_script_unsupported(const char *script __unused, - int argc __unused, - const char **argv __unused) +static int python_start_script_unsupported(const char *script __maybe_unused, + int argc __maybe_unused, + const char **argv __maybe_unused) { print_python_unsupported_msg(); return -1; } -static int python_generate_script_unsupported(struct pevent *pevent __unused, - const char *outfile __unused) +static int python_generate_script_unsupported(struct pevent *pevent + __maybe_unused, + const char *outfile + __maybe_unused) { print_python_unsupported_msg(); @@ -115,17 +116,18 @@ static void print_perl_unsupported_msg(void) "\n etc.\n"); } -static int perl_start_script_unsupported(const char *script __unused, - int argc __unused, - const char **argv __unused) +static int perl_start_script_unsupported(const char *script __maybe_unused, + int argc __maybe_unused, + const char **argv __maybe_unused) { print_perl_unsupported_msg(); return -1; } 
-static int perl_generate_script_unsupported(struct pevent *pevent __unused, - const char *outfile __unused) +static int perl_generate_script_unsupported(struct pevent *pevent + __maybe_unused, + const char *outfile __maybe_unused) { print_perl_unsupported_msg(); diff --git a/trunk/tools/perf/util/trace-event.h b/trunk/tools/perf/util/trace-event.h index 8fef1d6687b7..a55fd37ffea1 100644 --- a/trunk/tools/perf/util/trace-event.h +++ b/trunk/tools/perf/util/trace-event.h @@ -9,7 +9,6 @@ struct machine; struct perf_sample; union perf_event; struct perf_tool; -struct thread; extern int header_page_size_size; extern int header_page_ts_size; @@ -32,6 +31,8 @@ int bigendian(void); struct pevent *read_trace_init(int file_bigendian, int host_bigendian); void print_trace_event(struct pevent *pevent, int cpu, void *data, int size); +void event_format__print(struct event_format *event, + int cpu, void *data, int size); void print_event(struct pevent *pevent, int cpu, void *data, int size, unsigned long long nsecs, char *comm); @@ -56,7 +57,7 @@ int trace_parse_common_pid(struct pevent *pevent, void *data); struct event_format *trace_find_next_event(struct pevent *pevent, struct event_format *event); -unsigned long long read_size(struct pevent *pevent, void *ptr, int size); +unsigned long long read_size(struct event_format *event, void *ptr, int size); unsigned long long eval_flag(const char *flag); struct pevent_record *trace_read_data(struct pevent *pevent, int cpu); @@ -74,16 +75,19 @@ struct tracing_data *tracing_data_get(struct list_head *pattrs, void tracing_data_put(struct tracing_data *tdata); +struct addr_location; + +struct perf_session; + struct scripting_ops { const char *name; int (*start_script) (const char *script, int argc, const char **argv); int (*stop_script) (void); void (*process_event) (union perf_event *event, - struct pevent *pevent, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine, - struct thread *thread); + struct addr_location *al); int (*generate_script) (struct pevent *pevent, const char *outfile); }; diff --git a/trunk/tools/perf/util/unwind.c b/trunk/tools/perf/util/unwind.c new file mode 100644 index 000000000000..958723ba3d2e --- /dev/null +++ b/trunk/tools/perf/util/unwind.c @@ -0,0 +1,571 @@ +/* + * Post mortem Dwarf CFI based unwinding on top of regs and stack dumps. 
+ * + * Lots of this code have been borrowed or heavily inspired from parts of + * the libunwind 0.99 code which are (amongst other contributors I may have + * forgotten): + * + * Copyright (C) 2002-2007 Hewlett-Packard Co + * Contributed by David Mosberger-Tang + * + * And the bugs have been added by: + * + * Copyright (C) 2010, Frederic Weisbecker + * Copyright (C) 2012, Jiri Olsa + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "thread.h" +#include "session.h" +#include "perf_regs.h" +#include "unwind.h" +#include "util.h" + +extern int +UNW_OBJ(dwarf_search_unwind_table) (unw_addr_space_t as, + unw_word_t ip, + unw_dyn_info_t *di, + unw_proc_info_t *pi, + int need_unwind_info, void *arg); + +#define dwarf_search_unwind_table UNW_OBJ(dwarf_search_unwind_table) + +#define DW_EH_PE_FORMAT_MASK 0x0f /* format of the encoded value */ +#define DW_EH_PE_APPL_MASK 0x70 /* how the value is to be applied */ + +/* Pointer-encoding formats: */ +#define DW_EH_PE_omit 0xff +#define DW_EH_PE_ptr 0x00 /* pointer-sized unsigned value */ +#define DW_EH_PE_udata4 0x03 /* unsigned 32-bit value */ +#define DW_EH_PE_udata8 0x04 /* unsigned 64-bit value */ +#define DW_EH_PE_sdata4 0x0b /* signed 32-bit value */ +#define DW_EH_PE_sdata8 0x0c /* signed 64-bit value */ + +/* Pointer-encoding application: */ +#define DW_EH_PE_absptr 0x00 /* absolute value */ +#define DW_EH_PE_pcrel 0x10 /* rel. to addr. of encoded value */ + +/* + * The following are not documented by LSB v1.3, yet they are used by + * GCC, presumably they aren't documented by LSB since they aren't + * used on Linux: + */ +#define DW_EH_PE_funcrel 0x40 /* start-of-procedure-relative */ +#define DW_EH_PE_aligned 0x50 /* aligned pointer */ + +/* Flags intentionaly not handled, since they're not needed: + * #define DW_EH_PE_indirect 0x80 + * #define DW_EH_PE_uleb128 0x01 + * #define DW_EH_PE_udata2 0x02 + * #define DW_EH_PE_sleb128 0x09 + * #define DW_EH_PE_sdata2 0x0a + * #define DW_EH_PE_textrel 0x20 + * #define DW_EH_PE_datarel 0x30 + */ + +struct unwind_info { + struct perf_sample *sample; + struct machine *machine; + struct thread *thread; + u64 sample_uregs; +}; + +#define dw_read(ptr, type, end) ({ \ + type *__p = (type *) ptr; \ + type __v; \ + if ((__p + 1) > (type *) end) \ + return -EINVAL; \ + __v = *__p++; \ + ptr = (typeof(ptr)) __p; \ + __v; \ + }) + +static int __dw_read_encoded_value(u8 **p, u8 *end, u64 *val, + u8 encoding) +{ + u8 *cur = *p; + *val = 0; + + switch (encoding) { + case DW_EH_PE_omit: + *val = 0; + goto out; + case DW_EH_PE_ptr: + *val = dw_read(cur, unsigned long, end); + goto out; + default: + break; + } + + switch (encoding & DW_EH_PE_APPL_MASK) { + case DW_EH_PE_absptr: + break; + case DW_EH_PE_pcrel: + *val = (unsigned long) cur; + break; + default: + return -EINVAL; + } + + if ((encoding & 0x07) == 0x00) + encoding |= DW_EH_PE_udata4; + + switch (encoding & DW_EH_PE_FORMAT_MASK) { + case DW_EH_PE_sdata4: + *val += dw_read(cur, s32, end); + break; + case DW_EH_PE_udata4: + *val += dw_read(cur, u32, end); + break; + case DW_EH_PE_sdata8: + *val += dw_read(cur, s64, end); + break; + case DW_EH_PE_udata8: + *val += dw_read(cur, u64, end); + break; + default: + return -EINVAL; + } + + out: + *p = cur; + return 0; +} + +#define dw_read_encoded_value(ptr, end, enc) ({ \ + u64 __v; \ + if (__dw_read_encoded_value(&ptr, end, &__v, enc)) { \ + return -EINVAL; \ + } \ + __v; \ + }) + +static Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep, + GElf_Shdr 
*shp, const char *name) +{ + Elf_Scn *sec = NULL; + + while ((sec = elf_nextscn(elf, sec)) != NULL) { + char *str; + + gelf_getshdr(sec, shp); + str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name); + if (!strcmp(name, str)) + break; + } + + return sec; +} + +static u64 elf_section_offset(int fd, const char *name) +{ + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + u64 offset = 0; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + return 0; + + do { + if (gelf_getehdr(elf, &ehdr) == NULL) + break; + + if (!elf_section_by_name(elf, &ehdr, &shdr, name)) + break; + + offset = shdr.sh_offset; + } while (0); + + elf_end(elf); + return offset; +} + +struct table_entry { + u32 start_ip_offset; + u32 fde_offset; +}; + +struct eh_frame_hdr { + unsigned char version; + unsigned char eh_frame_ptr_enc; + unsigned char fde_count_enc; + unsigned char table_enc; + + /* + * The rest of the header is variable-length and consists of the + * following members: + * + * encoded_t eh_frame_ptr; + * encoded_t fde_count; + */ + + /* A single encoded pointer should not be more than 8 bytes. */ + u64 enc[2]; + + /* + * struct { + * encoded_t start_ip; + * encoded_t fde_addr; + * } binary_search_table[fde_count]; + */ + char data[0]; +} __packed; + +static int unwind_spec_ehframe(struct dso *dso, struct machine *machine, + u64 offset, u64 *table_data, u64 *segbase, + u64 *fde_count) +{ + struct eh_frame_hdr hdr; + u8 *enc = (u8 *) &hdr.enc; + u8 *end = (u8 *) &hdr.data; + ssize_t r; + + r = dso__data_read_offset(dso, machine, offset, + (u8 *) &hdr, sizeof(hdr)); + if (r != sizeof(hdr)) + return -EINVAL; + + /* We dont need eh_frame_ptr, just skip it. */ + dw_read_encoded_value(enc, end, hdr.eh_frame_ptr_enc); + + *fde_count = dw_read_encoded_value(enc, end, hdr.fde_count_enc); + *segbase = offset; + *table_data = (enc - (u8 *) &hdr) + offset; + return 0; +} + +static int read_unwind_spec(struct dso *dso, struct machine *machine, + u64 *table_data, u64 *segbase, u64 *fde_count) +{ + int ret = -EINVAL, fd; + u64 offset; + + fd = dso__data_fd(dso, machine); + if (fd < 0) + return -EINVAL; + + offset = elf_section_offset(fd, ".eh_frame_hdr"); + close(fd); + + if (offset) + ret = unwind_spec_ehframe(dso, machine, offset, + table_data, segbase, + fde_count); + + /* TODO .debug_frame check if eh_frame_hdr fails */ + return ret; +} + +static struct map *find_map(unw_word_t ip, struct unwind_info *ui) +{ + struct addr_location al; + + thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER, + MAP__FUNCTION, ip, &al); + return al.map; +} + +static int +find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, + int need_unwind_info, void *arg) +{ + struct unwind_info *ui = arg; + struct map *map; + unw_dyn_info_t di; + u64 table_data, segbase, fde_count; + + map = find_map(ip, ui); + if (!map || !map->dso) + return -EINVAL; + + pr_debug("unwind: find_proc_info dso %s\n", map->dso->name); + + if (read_unwind_spec(map->dso, ui->machine, + &table_data, &segbase, &fde_count)) + return -EINVAL; + + memset(&di, 0, sizeof(di)); + di.format = UNW_INFO_FORMAT_REMOTE_TABLE; + di.start_ip = map->start; + di.end_ip = map->end; + di.u.rti.segbase = map->start + segbase; + di.u.rti.table_data = map->start + table_data; + di.u.rti.table_len = fde_count * sizeof(struct table_entry) + / sizeof(unw_word_t); + return dwarf_search_unwind_table(as, ip, &di, pi, + need_unwind_info, arg); +} + +static int access_fpreg(unw_addr_space_t __maybe_unused as, + unw_regnum_t __maybe_unused num, + 
unw_fpreg_t __maybe_unused *val, + int __maybe_unused __write, + void __maybe_unused *arg) +{ + pr_err("unwind: access_fpreg unsupported\n"); + return -UNW_EINVAL; +} + +static int get_dyn_info_list_addr(unw_addr_space_t __maybe_unused as, + unw_word_t __maybe_unused *dil_addr, + void __maybe_unused *arg) +{ + return -UNW_ENOINFO; +} + +static int resume(unw_addr_space_t __maybe_unused as, + unw_cursor_t __maybe_unused *cu, + void __maybe_unused *arg) +{ + pr_err("unwind: resume unsupported\n"); + return -UNW_EINVAL; +} + +static int +get_proc_name(unw_addr_space_t __maybe_unused as, + unw_word_t __maybe_unused addr, + char __maybe_unused *bufp, size_t __maybe_unused buf_len, + unw_word_t __maybe_unused *offp, void __maybe_unused *arg) +{ + pr_err("unwind: get_proc_name unsupported\n"); + return -UNW_EINVAL; +} + +static int access_dso_mem(struct unwind_info *ui, unw_word_t addr, + unw_word_t *data) +{ + struct addr_location al; + ssize_t size; + + thread__find_addr_map(ui->thread, ui->machine, PERF_RECORD_MISC_USER, + MAP__FUNCTION, addr, &al); + if (!al.map) { + pr_debug("unwind: no map for %lx\n", (unsigned long)addr); + return -1; + } + + if (!al.map->dso) + return -1; + + size = dso__data_read_addr(al.map->dso, al.map, ui->machine, + addr, (u8 *) data, sizeof(*data)); + + return !(size == sizeof(*data)); +} + +static int reg_value(unw_word_t *valp, struct regs_dump *regs, int id, + u64 sample_regs) +{ + int i, idx = 0; + + if (!(sample_regs & (1 << id))) + return -EINVAL; + + for (i = 0; i < id; i++) { + if (sample_regs & (1 << i)) + idx++; + } + + *valp = regs->regs[idx]; + return 0; +} + +static int access_mem(unw_addr_space_t __maybe_unused as, + unw_word_t addr, unw_word_t *valp, + int __write, void *arg) +{ + struct unwind_info *ui = arg; + struct stack_dump *stack = &ui->sample->user_stack; + unw_word_t start, end; + int offset; + int ret; + + /* Don't support write, probably not needed. */ + if (__write || !stack || !ui->sample->user_regs.regs) { + *valp = 0; + return 0; + } + + ret = reg_value(&start, &ui->sample->user_regs, PERF_REG_SP, + ui->sample_uregs); + if (ret) + return ret; + + end = start + stack->size; + + /* Check overflow. */ + if (addr + sizeof(unw_word_t) < addr) + return -EINVAL; + + if (addr < start || addr + sizeof(unw_word_t) >= end) { + ret = access_dso_mem(ui, addr, valp); + if (ret) { + pr_debug("unwind: access_mem %p not inside range %p-%p\n", + (void *)addr, (void *)start, (void *)end); + *valp = 0; + return ret; + } + return 0; + } + + offset = addr - start; + *valp = *(unw_word_t *)&stack->data[offset]; + pr_debug("unwind: access_mem addr %p, val %lx, offset %d\n", + (void *)addr, (unsigned long)*valp, offset); + return 0; +} + +static int access_reg(unw_addr_space_t __maybe_unused as, + unw_regnum_t regnum, unw_word_t *valp, + int __write, void *arg) +{ + struct unwind_info *ui = arg; + int id, ret; + + /* Don't support write, I suspect we don't need it. 
*/ + if (__write) { + pr_err("unwind: access_reg w %d\n", regnum); + return 0; + } + + if (!ui->sample->user_regs.regs) { + *valp = 0; + return 0; + } + + id = unwind__arch_reg_id(regnum); + if (id < 0) + return -EINVAL; + + ret = reg_value(valp, &ui->sample->user_regs, id, ui->sample_uregs); + if (ret) { + pr_err("unwind: can't read reg %d\n", regnum); + return ret; + } + + pr_debug("unwind: reg %d, val %lx\n", regnum, (unsigned long)*valp); + return 0; +} + +static void put_unwind_info(unw_addr_space_t __maybe_unused as, + unw_proc_info_t *pi __maybe_unused, + void *arg __maybe_unused) +{ + pr_debug("unwind: put_unwind_info called\n"); +} + +static int entry(u64 ip, struct thread *thread, struct machine *machine, + unwind_entry_cb_t cb, void *arg) +{ + struct unwind_entry e; + struct addr_location al; + + thread__find_addr_location(thread, machine, + PERF_RECORD_MISC_USER, + MAP__FUNCTION, ip, &al, NULL); + + e.ip = ip; + e.map = al.map; + e.sym = al.sym; + + pr_debug("unwind: %s:ip = 0x%" PRIx64 " (0x%" PRIx64 ")\n", + al.sym ? al.sym->name : "''", + ip, + al.map ? al.map->map_ip(al.map, ip) : (u64) 0); + + return cb(&e, arg); +} + +static void display_error(int err) +{ + switch (err) { + case UNW_EINVAL: + pr_err("unwind: Only supports local.\n"); + break; + case UNW_EUNSPEC: + pr_err("unwind: Unspecified error.\n"); + break; + case UNW_EBADREG: + pr_err("unwind: Register unavailable.\n"); + break; + default: + break; + } +} + +static unw_accessors_t accessors = { + .find_proc_info = find_proc_info, + .put_unwind_info = put_unwind_info, + .get_dyn_info_list_addr = get_dyn_info_list_addr, + .access_mem = access_mem, + .access_reg = access_reg, + .access_fpreg = access_fpreg, + .resume = resume, + .get_proc_name = get_proc_name, +}; + +static int get_entries(struct unwind_info *ui, unwind_entry_cb_t cb, + void *arg) +{ + unw_addr_space_t addr_space; + unw_cursor_t c; + int ret; + + addr_space = unw_create_addr_space(&accessors, 0); + if (!addr_space) { + pr_err("unwind: Can't create unwind address space.\n"); + return -ENOMEM; + } + + ret = unw_init_remote(&c, addr_space, ui); + if (ret) + display_error(ret); + + while (!ret && (unw_step(&c) > 0)) { + unw_word_t ip; + + unw_get_reg(&c, UNW_REG_IP, &ip); + ret = entry(ip, ui->thread, ui->machine, cb, arg); + } + + unw_destroy_addr_space(addr_space); + return ret; +} + +int unwind__get_entries(unwind_entry_cb_t cb, void *arg, + struct machine *machine, struct thread *thread, + u64 sample_uregs, struct perf_sample *data) +{ + unw_word_t ip; + struct unwind_info ui = { + .sample = data, + .sample_uregs = sample_uregs, + .thread = thread, + .machine = machine, + }; + int ret; + + if (!data->user_regs.regs) + return -EINVAL; + + ret = reg_value(&ip, &data->user_regs, PERF_REG_IP, sample_uregs); + if (ret) + return ret; + + ret = entry(ip, thread, machine, cb, arg); + if (ret) + return -ENOMEM; + + return get_entries(&ui, cb, arg); +} diff --git a/trunk/tools/perf/util/unwind.h b/trunk/tools/perf/util/unwind.h new file mode 100644 index 000000000000..a78c8b303bb5 --- /dev/null +++ b/trunk/tools/perf/util/unwind.h @@ -0,0 +1,35 @@ +#ifndef __UNWIND_H +#define __UNWIND_H + +#include "types.h" +#include "event.h" +#include "symbol.h" + +struct unwind_entry { + struct map *map; + struct symbol *sym; + u64 ip; +}; + +typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg); + +#ifndef NO_LIBUNWIND_SUPPORT +int unwind__get_entries(unwind_entry_cb_t cb, void *arg, + struct machine *machine, + struct thread *thread, + u64 sample_uregs, + 
struct perf_sample *data); +int unwind__arch_reg_id(int regnum); +#else +static inline int +unwind__get_entries(unwind_entry_cb_t cb __maybe_unused, + void *arg __maybe_unused, + struct machine *machine __maybe_unused, + struct thread *thread __maybe_unused, + u64 sample_uregs __maybe_unused, + struct perf_sample *data __maybe_unused) +{ + return 0; +} +#endif /* NO_LIBUNWIND_SUPPORT */ +#endif /* __UNWIND_H */ diff --git a/trunk/tools/perf/util/util.c b/trunk/tools/perf/util/util.c index d03599fbe78b..2055cf38041c 100644 --- a/trunk/tools/perf/util/util.c +++ b/trunk/tools/perf/util/util.c @@ -1,6 +1,11 @@ #include "../perf.h" #include "util.h" #include +#ifndef NO_BACKTRACE +#include +#endif +#include +#include /* * XXX We need to find a better place for these things... @@ -158,3 +163,23 @@ size_t hex_width(u64 v) return n; } + +/* Obtain a backtrace and print it to stdout. */ +#ifndef NO_BACKTRACE +void dump_stack(void) +{ + void *array[16]; + size_t size = backtrace(array, ARRAY_SIZE(array)); + char **strings = backtrace_symbols(array, size); + size_t i; + + printf("Obtained %zd stack frames.\n", size); + + for (i = 0; i < size; i++) + printf("%s\n", strings[i]); + + free(strings); +} +#else +void dump_stack(void) {} +#endif diff --git a/trunk/tools/perf/util/util.h b/trunk/tools/perf/util/util.h index b13c7331eaf8..70fa70b535b2 100644 --- a/trunk/tools/perf/util/util.h +++ b/trunk/tools/perf/util/util.h @@ -69,13 +69,8 @@ #include #include #include -#include -#include -#include -#include -#include #include -#include "../../../include/linux/magic.h" +#include #include "types.h" #include @@ -266,4 +261,6 @@ size_t hex_width(u64 v); char *rtrim(char *s); +void dump_stack(void); + #endif diff --git a/trunk/tools/perf/util/vdso.c b/trunk/tools/perf/util/vdso.c new file mode 100644 index 000000000000..e60951fcdb12 --- /dev/null +++ b/trunk/tools/perf/util/vdso.c @@ -0,0 +1,111 @@ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "vdso.h" +#include "util.h" +#include "symbol.h" +#include "linux/string.h" + +static bool vdso_found; +static char vdso_file[] = "/tmp/perf-vdso.so-XXXXXX"; + +static int find_vdso_map(void **start, void **end) +{ + FILE *maps; + char line[128]; + int found = 0; + + maps = fopen("/proc/self/maps", "r"); + if (!maps) { + pr_err("vdso: cannot open maps\n"); + return -1; + } + + while (!found && fgets(line, sizeof(line), maps)) { + int m = -1; + + /* We care only about private r-x mappings. 
*/
+		if (2 != sscanf(line, "%p-%p r-xp %*x %*x:%*x %*u %n",
+				start, end, &m))
+			continue;
+		if (m < 0)
+			continue;
+
+		if (!strncmp(&line[m], VDSO__MAP_NAME,
+			     sizeof(VDSO__MAP_NAME) - 1))
+			found = 1;
+	}
+
+	fclose(maps);
+	return !found;
+}
+
+static char *get_file(void)
+{
+	char *vdso = NULL;
+	char *buf = NULL;
+	void *start, *end;
+	size_t size;
+	int fd;
+
+	if (vdso_found)
+		return vdso_file;
+
+	if (find_vdso_map(&start, &end))
+		return NULL;
+
+	size = end - start;
+
+	buf = memdup(start, size);
+	if (!buf)
+		return NULL;
+
+	fd = mkstemp(vdso_file);
+	if (fd < 0)
+		goto out;
+
+	if (size == (size_t) write(fd, buf, size))
+		vdso = vdso_file;
+
+	close(fd);
+
+ out:
+	free(buf);
+
+	vdso_found = (vdso != NULL);
+	return vdso;
+}
+
+void vdso__exit(void)
+{
+	if (vdso_found)
+		unlink(vdso_file);
+}
+
+struct dso *vdso__dso_findnew(struct list_head *head)
+{
+	struct dso *dso = dsos__find(head, VDSO__MAP_NAME);
+
+	if (!dso) {
+		char *file;
+
+		file = get_file();
+		if (!file)
+			return NULL;
+
+		dso = dso__new(VDSO__MAP_NAME);
+		if (dso != NULL) {
+			dsos__add(head, dso);
+			dso__set_long_name(dso, file);
+		}
+	}
+
+	return dso;
+}
diff --git a/trunk/tools/perf/util/vdso.h b/trunk/tools/perf/util/vdso.h
new file mode 100644
index 000000000000..0f76e7caf6f8
--- /dev/null
+++ b/trunk/tools/perf/util/vdso.h
@@ -0,0 +1,18 @@
+#ifndef __PERF_VDSO__
+#define __PERF_VDSO__
+
+#include
+#include
+#include
+
+#define VDSO__MAP_NAME "[vdso]"
+
+static inline bool is_vdso_map(const char *filename)
+{
+	return !strcmp(filename, VDSO__MAP_NAME);
+}
+
+struct dso *vdso__dso_findnew(struct list_head *head);
+void vdso__exit(void);
+
+#endif /* __PERF_VDSO__ */
diff --git a/trunk/tools/perf/util/wrapper.c b/trunk/tools/perf/util/wrapper.c
index 73e900edb5a2..19f15b650703 100644
--- a/trunk/tools/perf/util/wrapper.c
+++ b/trunk/tools/perf/util/wrapper.c
@@ -7,7 +7,8 @@
  * There's no pack memory to release - but stay close to the Git
  * version so wrap this away:
  */
-static inline void release_pack_memory(size_t size __used, int flag __used)
+static inline void release_pack_memory(size_t size __maybe_unused,
+				       int flag __maybe_unused)
 {
 }
 
diff --git a/trunk/tools/scripts/Makefile.include b/trunk/tools/scripts/Makefile.include
index bde8521d56bb..96ce80a3743b 100644
--- a/trunk/tools/scripts/Makefile.include
+++ b/trunk/tools/scripts/Makefile.include
@@ -1,6 +1,8 @@
 ifeq ("$(origin O)", "command line")
-  OUTPUT := $(O)/
-  COMMAND_O := O=$(O)
+  dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),)
+  ABSOLUTE_O := $(shell cd $(O) ; pwd)
+  OUTPUT := $(ABSOLUTE_O)/
+  COMMAND_O := O=$(ABSOLUTE_O)
 endif
 
 ifneq ($(OUTPUT),)
diff --git a/trunk/tools/testing/ktest/examples/include/defaults.conf b/trunk/tools/testing/ktest/examples/include/defaults.conf
index 323a552ce642..63a1a83f4f0b 100644
--- a/trunk/tools/testing/ktest/examples/include/defaults.conf
+++ b/trunk/tools/testing/ktest/examples/include/defaults.conf
@@ -33,7 +33,7 @@ DEFAULTS
 THIS_DIR := ${PWD}
 
-# to orginize your configs, having each machine save their configs
+# to organize your configs, having each machine save their configs
 # into a separate directly is useful.
 CONFIG_DIR := ${THIS_DIR}/configs/${MACHINE}
 
diff --git a/trunk/tools/testing/ktest/examples/include/tests.conf b/trunk/tools/testing/ktest/examples/include/tests.conf
index 4fdb811bd810..60cedb1a1154 100644
--- a/trunk/tools/testing/ktest/examples/include/tests.conf
+++ b/trunk/tools/testing/ktest/examples/include/tests.conf
@@ -47,7 +47,7 @@ BUILD_NOCLEAN = 1
 # Build, install, boot and test with a randconfg 10 times.
 # It is important that you have set MIN_CONFIG in the config
 # that includes this file otherwise it is likely that the
-# randconfig will not have the neccessary configs needed to
+# randconfig will not have the necessary configs needed to
 # boot your box.  This version of the test requires a min
 # config that has enough to make sure the target has network
 # working.
diff --git a/trunk/tools/testing/ktest/ktest.pl b/trunk/tools/testing/ktest/ktest.pl
index 52b7959cd513..c05bcd293d8c 100755
--- a/trunk/tools/testing/ktest/ktest.pl
+++ b/trunk/tools/testing/ktest/ktest.pl
@@ -840,7 +840,9 @@ sub __read_config {
 	    if ($rest =~ /\sIF\s+(.*)/) {
 		# May be a ELSE IF section.
-		if (!process_if($name, $1)) {
+		if (process_if($name, $1)) {
+		    $if_set = 1;
+		} else {
 		    $skip = 1;
 		}
 		$rest = "";
diff --git a/trunk/tools/testing/selftests/vm/run_vmtests b/trunk/tools/testing/selftests/vm/run_vmtests
index 8b40bd5e5cc2..4c53cae6c273 100644
--- a/trunk/tools/testing/selftests/vm/run_vmtests
+++ b/trunk/tools/testing/selftests/vm/run_vmtests
@@ -36,7 +36,7 @@ mkdir $mnt
 mount -t hugetlbfs none $mnt
 
 echo "--------------------"
-echo "runing hugepage-mmap"
+echo "running hugepage-mmap"
 echo "--------------------"
 ./hugepage-mmap
 if [ $? -ne 0 ]; then
@@ -50,7 +50,7 @@ shmall=`cat /proc/sys/kernel/shmall`
 echo 268435456 > /proc/sys/kernel/shmmax
 echo 4194304 > /proc/sys/kernel/shmall
 echo "--------------------"
-echo "runing hugepage-shm"
+echo "running hugepage-shm"
 echo "--------------------"
 ./hugepage-shm
 if [ $? -ne 0 ]; then
@@ -62,7 +62,7 @@ echo $shmmax > /proc/sys/kernel/shmmax
 echo $shmall > /proc/sys/kernel/shmall
 echo "--------------------"
-echo "runing map_hugetlb"
+echo "running map_hugetlb"
 echo "--------------------"
 ./map_hugetlb
 if [ $? -ne 0 ]; then
diff --git a/trunk/virt/kvm/kvm_main.c b/trunk/virt/kvm/kvm_main.c
index 246852397e30..d617f69131d7 100644
--- a/trunk/virt/kvm/kvm_main.c
+++ b/trunk/virt/kvm/kvm_main.c
@@ -1976,9 +1976,10 @@ static long kvm_vcpu_compat_ioctl(struct file *filp,
 		if (copy_from_user(&csigset, sigmask_arg->sigset,
 				   sizeof csigset))
 			goto out;
-	}
-	sigset_from_compat(&sigset, &csigset);
-	r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+		sigset_from_compat(&sigset, &csigset);
+		r = kvm_vcpu_ioctl_set_sigmask(vcpu, &sigset);
+	} else
+		r = kvm_vcpu_ioctl_set_sigmask(vcpu, NULL);
 		break;
 	}
 	default: